Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/quadcopter.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage


class Quadcopter(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Quadcopter",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create a Quadcopter robot by referencing its USD asset onto the stage."""
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Quadcopter/quadcopter.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            position=translation,
            orientation=orientation,
            articulation_controller=None,
        )

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/ingenuity.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import carb
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage


class Ingenuity(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "ingenuity",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
        scale: Optional[np.ndarray] = None,
    ) -> None:
        """Create an Ingenuity helicopter by referencing its USD asset onto the stage."""
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find Isaac Sim assets folder")
            self._usd_path = (
                assets_root_path + "/Isaac/Robots/Ingenuity/ingenuity.usd"
            )

        add_reference_to_stage(self._usd_path, prim_path)

        # NOTE: the incoming `scale` argument is overridden by this fixed value.
        scale = torch.tensor([0.01, 0.01, 0.01])

        super().__init__(prim_path=prim_path, name=name, translation=translation, orientation=orientation, scale=scale)

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/go1widow.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema


class Go1Widow(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Go1Widow",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create a Go1 quadruped with a Widow arm by referencing its USD asset onto the stage."""
        self._usd_path = usd_path
        self._name = name

        # No default asset fallback here; a usd_path must be provided.
        # if self._usd_path is None:
        #     assets_root_path = get_assets_root_path()
        #     if assets_root_path is None:
        #         carb.log_error("Could not find nucleus server with /Isaac folder")
        #     self._usd_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_instanceable.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )

        self._dof_names = [
            "FL_hip_joint",
            "RL_hip_joint",
            "FR_hip_joint",
            "RR_hip_joint",
            "FL_thigh_joint",
            "RL_thigh_joint",
            "FR_thigh_joint",
            "RR_thigh_joint",
            "FL_calf_joint",
            "RL_calf_joint",
            "FR_calf_joint",
            "RR_calf_joint",
            "widow_waist",
            "widow_shoulder",
            "widow_elbow",
            "forearm_roll",
            "widow_wrist_angle",
            "widow_wrist_rotate",
            "widow_forearm_roll",
            "gripper",
            "widow_left_finger",
            "widow_right_finger",
        ]

    @property
    def dof_names(self):
        return self._dof_names

    def set_go1widow_properties(self, stage, prim):
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(False)
                rb.GetRetainAccelerationsAttr().Set(False)
                rb.GetLinearDampingAttr().Set(0.0)
                rb.GetMaxLinearVelocityAttr().Set(1000.0)
                rb.GetAngularDampingAttr().Set(0.0)
                rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)

    def prepare_contacts(self, stage, prim):
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                # Hip links are excluded from contact reporting.
                if "_hip" not in str(link_prim.GetPrimPath()):
                    rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                    rb.CreateSleepThresholdAttr().Set(0)
                    cr_api = PhysxSchema.PhysxContactReportAPI.Apply(link_prim)
                    cr_api.CreateThresholdAttr().Set(0)

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/anymal.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import carb
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema


class Anymal(Robot):
    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "Anymal",
        usd_path: Optional[str] = None,
        translation: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
    ) -> None:
        """Create an ANYmal quadruped by referencing its USD asset onto the stage."""
        self._usd_path = usd_path
        self._name = name

        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                carb.log_error("Could not find nucleus server with /Isaac folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_base.usd"

        add_reference_to_stage(self._usd_path, prim_path)

        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=translation,
            orientation=orientation,
            articulation_controller=None,
        )

        self._dof_names = [
            "LF_HAA",
            "LH_HAA",
            "RF_HAA",
            "RH_HAA",
            "LF_HFE",
            "LH_HFE",
            "RF_HFE",
            "RH_HFE",
            "LF_KFE",
            "LH_KFE",
            "RF_KFE",
            "RH_KFE",
        ]

    @property
    def dof_names(self):
        return self._dof_names

    def set_anymal_properties(self, stage, prim):
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(False)
                rb.GetRetainAccelerationsAttr().Set(False)
                rb.GetLinearDampingAttr().Set(0.0)
                rb.GetMaxLinearVelocityAttr().Set(1000.0)
                rb.GetAngularDampingAttr().Set(0.0)
                rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)

    def prepare_contacts(self, stage, prim):
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                # Hip links are excluded from contact reporting.
                if "_HIP" not in str(link_prim.GetPrimPath()):
                    rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                    rb.CreateSleepThresholdAttr().Set(0)
                    cr_api = PhysxSchema.PhysxContactReportAPI.Apply(link_prim)
                    cr_api.CreateThresholdAttr().Set(0)

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/cabinet_view.py

from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class CabinetView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "CabinetView",
    ) -> None:
        """Articulation view over all cabinet prims, with a rigid-prim view of the top drawers."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._drawers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/cabinet/drawer_top", name="drawers_view", reset_xform_properties=False
        )

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/shadow_hand_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class ShadowHandView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "ShadowHandView",
    ) -> None:
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._fingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/shadow_hand/robot0.*distal",
            name="finger_view",
            reset_xform_properties=False,
        )

    @property
    def actuated_dof_indices(self):
        return self._actuated_dof_indices

    def initialize(self, physics_sim_view):
        super().initialize(physics_sim_view)

        self.actuated_joint_names = [
            "robot0_WRJ1",
            "robot0_WRJ0",
            "robot0_FFJ3",
            "robot0_FFJ2",
            "robot0_FFJ1",
            "robot0_MFJ3",
            "robot0_MFJ2",
            "robot0_MFJ1",
            "robot0_RFJ3",
            "robot0_RFJ2",
            "robot0_RFJ1",
            "robot0_LFJ4",
            "robot0_LFJ3",
            "robot0_LFJ2",
            "robot0_LFJ1",
            "robot0_THJ4",
            "robot0_THJ3",
            "robot0_THJ2",
            "robot0_THJ1",
            "robot0_THJ0",
        ]
        self._actuated_dof_indices = list()
        for joint_name in self.actuated_joint_names:
            self._actuated_dof_indices.append(self.get_dof_index(joint_name))
        self._actuated_dof_indices.sort()

        # Configure fixed-tendon damping and limit stiffness.
        limit_stiffness = torch.tensor([30.0] * self.num_fixed_tendons, device=self._device)
        damping = torch.tensor([0.1] * self.num_fixed_tendons, device=self._device)
        self.set_fixed_tendon_properties(dampings=damping, limit_stiffnesses=limit_stiffness)

        fingertips = ["robot0_ffdistal", "robot0_mfdistal", "robot0_rfdistal", "robot0_lfdistal", "robot0_thdistal"]
        self._sensor_indices = torch.tensor([self._body_indices[j] for j in fingertips], device=self._device, dtype=torch.long)

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/franka_view.py

from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class FrankaView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "FrankaView",
    ) -> None:
        """Articulation view over all Franka prims, with rigid-prim views of the hand and fingers."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._hands = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_link7", name="hands_view", reset_xform_properties=False
        )
        self._lfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_leftfinger", name="lfingers_view", reset_xform_properties=False
        )
        self._rfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
            name="rfingers_view",
            reset_xform_properties=False,
        )

    def initialize(self, physics_sim_view):
        super().initialize(physics_sim_view)
        self._gripper_indices = [self.get_dof_index("panda_finger_joint1"), self.get_dof_index("panda_finger_joint2")]

    @property
    def gripper_indices(self):
        return self._gripper_indices

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/aliengo_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class AliengoView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "AliengoView",
        track_contact_forces=False,
        prepare_contact_sensors=False,
        stage=None,
    ) -> None:
        """Articulation view over all Aliengo prims, with rigid-prim views of the knees, base, and feet."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._knees = RigidPrimView(
            prim_paths_expr="/World/envs/.*/aliengo/.*_thigh$",
            name="knees_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )
        self._base = RigidPrimView(
            prim_paths_expr="/World/envs/.*/aliengo/trunk",
            name="base_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )
        self._feet = RigidPrimView(
            prim_paths_expr="/World/envs/.*/aliengo/.*_foot$",
            name="feet_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )

    def get_knee_transforms(self):
        return self._knees.get_world_poses()

    def is_knee_below_threshold(self, threshold, ground_heights=None):
        knee_pos, _ = self._knees.get_world_poses()
        knee_heights = knee_pos.view((-1, 4, 3))[:, :, 2]
        if ground_heights is not None:
            knee_heights -= ground_heights
        return (
            (knee_heights[:, 0] < threshold)
            | (knee_heights[:, 1] < threshold)
            | (knee_heights[:, 2] < threshold)
            | (knee_heights[:, 3] < threshold)
        )

    def is_base_below_threshold(self, threshold, ground_heights):
        base_pos, _ = self.get_world_poses()
        base_heights = base_pos[:, 2]
        base_heights -= ground_heights
        return base_heights[:] < threshold

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/go1widow_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class Go1WidowView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "Go1WidowView",
        track_contact_forces=False,
        prepare_contact_sensors=False,
        stage=None,
    ) -> None:
        """Articulation view over all Go1Widow prims, with rigid-prim views of the knees and base."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._knees = RigidPrimView(
            prim_paths_expr="/World/envs/.*/go1widow/.*_thigh$",
            name="knees_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )
        self._base = RigidPrimView(
            prim_paths_expr="/World/envs/.*/go1widow/trunk",
            name="base_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )

    def get_knee_transforms(self):
        return self._knees.get_world_poses()

    def is_knee_below_threshold(self, threshold, ground_heights=None):
        knee_pos, _ = self._knees.get_world_poses()
        knee_heights = knee_pos.view((-1, 4, 3))[:, :, 2]
        if ground_heights is not None:
            knee_heights -= ground_heights
        return (
            (knee_heights[:, 0] < threshold)
            | (knee_heights[:, 1] < threshold)
            | (knee_heights[:, 2] < threshold)
            | (knee_heights[:, 3] < threshold)
        )

    def is_base_below_threshold(self, threshold, ground_heights):
        base_pos, _ = self.get_world_poses()
        base_heights = base_pos[:, 2]
        base_heights -= ground_heights
        return base_heights[:] < threshold

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/factory_franka_view.py

from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class FactoryFrankaView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "FactoryFrankaView",
    ) -> None:
        """Initialize articulation view."""
        super().__init__(
            prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False
        )

        self._hands = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_hand",
            name="hands_view",
            reset_xform_properties=False,
        )
        self._lfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_leftfinger",
            name="lfingers_view",
            reset_xform_properties=False,
            track_contact_forces=True,
        )
        self._rfingers = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
            name="rfingers_view",
            reset_xform_properties=False,
            track_contact_forces=True,
        )
        self._fingertip_centered = RigidPrimView(
            prim_paths_expr="/World/envs/.*/franka/panda_fingertip_centered",
            name="fingertips_view",
            reset_xform_properties=False,
        )

    def initialize(self, physics_sim_view):
        """Initialize physics simulation view."""
        super().initialize(physics_sim_view)

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/anymal_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class AnymalView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "AnymalView",
        track_contact_forces=False,
        prepare_contact_sensors=False,
    ) -> None:
        """Articulation view over all ANYmal prims, with rigid-prim views of the knees and base."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self._knees = RigidPrimView(
            prim_paths_expr="/World/envs/.*/anymal/.*_THIGH",
            name="knees_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )
        self._base = RigidPrimView(
            prim_paths_expr="/World/envs/.*/anymal/base",
            name="base_view",
            reset_xform_properties=False,
            track_contact_forces=track_contact_forces,
            prepare_contact_sensors=prepare_contact_sensors,
        )

    def get_knee_transforms(self):
        return self._knees.get_world_poses()

    def is_knee_below_threshold(self, threshold, ground_heights=None):
        knee_pos, _ = self._knees.get_world_poses()
        knee_heights = knee_pos.view((-1, 4, 3))[:, :, 2]
        if ground_heights is not None:
            knee_heights -= ground_heights
        return (
            (knee_heights[:, 0] < threshold)
            | (knee_heights[:, 1] < threshold)
            | (knee_heights[:, 2] < threshold)
            | (knee_heights[:, 3] < threshold)
        )

    def is_base_below_threshold(self, threshold, ground_heights):
        base_pos, _ = self.get_world_poses()
        base_heights = base_pos[:, 2]
        base_heights -= ground_heights
        return base_heights[:] < threshold

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/quadcopter_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class QuadcopterView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "QuadcopterView") -> None:
        """Articulation view over all quadcopter prims, with a rigid-prim view of the four rotors."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self.rotors = RigidPrimView(
            prim_paths_expr="/World/envs/.*/Quadcopter/rotor[0-3]", name="rotors_view", reset_xform_properties=False
        )

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/allegro_hand_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class AllegroHandView(ArticulationView):
    def __init__(
        self,
        prim_paths_expr: str,
        name: Optional[str] = "AllegroHandView",
    ) -> None:
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
        self._actuated_dof_indices = list()

    @property
    def actuated_dof_indices(self):
        return self._actuated_dof_indices

    def initialize(self, physics_sim_view):
        super().initialize(physics_sim_view)
        # All DOFs of the Allegro hand are actuated.
        self._actuated_dof_indices = [i for i in range(self.num_dof)]

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/crazyflie_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class CrazyflieView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "CrazyflieView") -> None:
        """Articulation view over all Crazyflie prims, with rigid-prim views of the four propellers."""
        super().__init__(
            prim_paths_expr=prim_paths_expr,
            name=name,
        )

        self.physics_rotors = [
            RigidPrimView(prim_paths_expr=f"/World/envs/.*/Crazyflie/m{i}_prop", name=f"m{i}_prop_view")
            for i in range(1, 5)
        ]

Virlus/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/views/ingenuity_view.py

# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView


class IngenuityView(ArticulationView):
    def __init__(self, prim_paths_expr: str, name: Optional[str] = "IngenuityView") -> None:
        """Articulation view over all Ingenuity prims, with rigid-prim views of the physics and visual rotors."""
        super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)

        self.physics_rotors = [
            RigidPrimView(
                prim_paths_expr=f"/World/envs/.*/Ingenuity/rotor_physics_{i}",
                name=f"physics_rotor_{i}_view",
                reset_xform_properties=False,
            )
            for i in range(2)
        ]
        self.visual_rotors = [
            RigidPrimView(
                prim_paths_expr=f"/World/envs/.*/Ingenuity/rotor_visual_{i}",
                name=f"visual_rotor_{i}_view",
                reset_xform_properties=False,
            )
            for i in range(2)
        ]

Virlus/OmniIsaacGymEnvs/docs/domain_randomization.md

Domain Randomization
====================

Overview
--------

We sometimes need our reinforcement learning agents to be robust to
different physics than they are trained with, such as when attempting a
sim2real policy transfer. Using domain randomization (DR), we repeatedly
randomize the simulation dynamics during training in order to learn a
good policy under a wide range of physical parameters.

OmniverseIsaacGymEnvs supports "on the fly" domain randomization, allowing
dynamics to be changed without requiring reloading of assets. This allows
us to efficiently apply domain randomizations without common overheads like
re-parsing asset files.

The OmniverseIsaacGymEnvs DR framework utilizes the `omni.replicator.isaac`
extension in its backend to perform "on the fly" randomization. Users can
add domain randomization either by directly using methods provided in
`omni.replicator.isaac` in python, or by specifying DR settings in the
task configuration `yaml` file. The following sections will focus on setting
up DR using the `yaml` file interface. For more detailed documentation
regarding the methods provided in the `omni.replicator.isaac` extension, please
visit [here](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.replicator.isaac/docs/index.html).

Domain Randomization Options
-------------------------------

We will first explain what can be randomized in the scene and the sampling
distributions. There are five main parameter groups that support randomization.
They are:

- `observations`: Add noise directly to the agent observations
- `actions`: Add noise directly to the agent actions
- `simulation`: Add noise to physical parameters defined for the entire
  scene, such as `gravity`
- `rigid_prim_views`: Add noise to properties belonging to rigid prims,
  such as `material_properties`.
- `articulation_views`: Add noise to properties belonging to articulations,
  such as `stiffness` of joints.

For each parameter you wish to randomize, you can specify three ways that
determine when the randomization is applied:

- `on_reset`: Adds correlated noise to a parameter of an environment when
  that environment gets reset. This correlated noise will remain
  with an environment until that environment gets reset again, which
  will then set a new correlated noise. To trigger `on_reset`,
  the indices for the environments that need to be reset must be passed in
  to `omni.replicator.isaac.physics_view.step_randomization(reset_inds)`.
- `on_interval`: Adds uncorrelated noise to a parameter at a frequency specified
  by `frequency_interval`. If a parameter also has `on_reset` randomization,
  the `on_interval` noise is combined with the noise applied at `on_reset`.
- `on_startup`: Applies randomization once prior to the start of the simulation. Only available
  to rigid prim scale, mass, density and articulation scale parameters.

For `on_reset`, `on_interval`, and `on_startup`, you can specify the following settings:

- `distribution`: The distribution to generate a sample `x` from. The available distributions
  are listed below. Note that parameters `a` and `b` are defined by the
  `distribution_parameters` setting.
    - `uniform`: `x ~ unif(a, b)`
    - `loguniform`: `x ~ exp(unif(log(a), log(b)))`
    - `gaussian`: `x ~ normal(a, b)`
- `distribution_parameters`: The parameters to the distribution.
    - For observations and actions, this setting is specified as a tuple `[a, b]` of
      real values.
    - For simulation and view parameters, this setting is specified as a nested tuple
      in the form of `[[a_1, a_2, ..., a_n], [b_1, b_2, ..., b_n]]`, where `n` is
      the dimension of the parameter (*i.e.* `n` is 3 for position). It can also be
      specified as a tuple in the form of `[a, b]`, which will be broadcast to the
      correct dimensions.
    - For `uniform` and `loguniform` distributions, `a` and `b` are the lower and
      upper bounds.
    - For `gaussian`, `a` is the distribution mean and `b` is the variance.
- `operation`: Defines how the generated sample `x` will be applied to the original
  simulation parameter. The options are `additive`, `scaling`, and `direct`.
    - `additive`: add the sample to the original value.
    - `scaling`: multiply the original value by the sample.
    - `direct`: directly set the sample as the parameter value.
- `frequency_interval`: Specifies the number of steps to apply randomization.
    - Only used with `on_interval`.
    - Steps of each environment are incremented with each
      `omni.replicator.isaac.physics_view.step_randomization(reset_inds)` call and
      reset if the environment index is in `reset_inds`.
- `num_buckets`: Only used for `material_properties` randomization.
    - PhysX only allows 64000 unique physics materials in the scene at once. If more than
      64000 materials are needed, increase `num_buckets` to allow materials to be shared
      between prims.

YAML Interface
--------------

Now that we know what options are available for domain randomization,
let's put it all together in the YAML config. In your `omniverseisaacgymenvs/cfg/task`
yaml file, you can specify your domain randomization parameters under the
`domain_randomization` key. First, we turn on domain randomization by setting
`randomize` to `True`:

```yaml
domain_randomization:
  randomize: True
  randomization_params:
    ...
```

This can also be set as a command line argument at launch time with `task.domain_randomization.randomize=True`.
Next, we will define our parameters under the `randomization_params`
key. Here you can see how we used the previous settings to define some
randomization parameters for a ShadowHand cube manipulation task:

```yaml
randomization_params:
  observations:
    on_reset:
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, .0001]
    on_interval:
      frequency_interval: 1
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, .002]
  actions:
    on_reset:
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0, 0.015]
    on_interval:
      frequency_interval: 1
      operation: "additive"
      distribution: "gaussian"
      distribution_parameters: [0., 0.05]
  simulation:
    gravity:
      on_reset:
        operation: "additive"
        distribution: "gaussian"
        distribution_parameters: [[0.0, 0.0, 0.0], [0.0, 0.0, 0.4]]
  rigid_prim_views:
    object_view:
      material_properties:
        on_reset:
          num_buckets: 250
          operation: "scaling"
          distribution: "uniform"
          distribution_parameters: [[0.7, 1, 1], [1.3, 1, 1]]
  articulation_views:
    shadow_hand_view:
      stiffness:
        on_reset:
          operation: "scaling"
          distribution: "uniform"
          distribution_parameters: [0.75, 1.5]
```

Note how we structured `rigid_prim_views` and `articulation_views`. When creating
a `RigidPrimView` or `ArticulationView` in the task python file, you have the option to
pass in `name` as an argument. **To use domain randomization, the name of the `RigidPrimView` or
`ArticulationView` must match the name provided in the randomization `yaml` file.** In the
example above, `object_view` is the name of a `RigidPrimView` and `shadow_hand_view` is the name
of the `ArticulationView`, as sketched below.
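
As an illustration, here is a minimal sketch of creating views whose names line up with the yaml entries above; the prim path expressions are assumptions for this example, not paths prescribed by the task:

```python
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView

# The `name` arguments must match the keys used under
# `rigid_prim_views` and `articulation_views` in the randomization yaml file.
object_view = RigidPrimView(
    prim_paths_expr="/World/envs/.*/object",  # assumed prim path expression
    name="object_view",
)
shadow_hand_view = ArticulationView(
    prim_paths_expr="/World/envs/.*/shadow_hand",  # assumed prim path expression
    name="shadow_hand_view",
)
```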

The exact parameters that can be randomized are listed below:

**simulation**:
- gravity (dim=3): The gravity vector of the entire scene.

**rigid\_prim\_views**:
- position (dim=3): The position of the rigid prim. In meters.
- orientation (dim=3): The orientation of the rigid prim, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the rigid prim. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the rigid prim. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the rigid prim.
- force (dim=3): Apply a force to the rigid prim. In N.
- mass (dim=1): Mass of the rigid prim. In kg. **CPU pipeline only during runtime**.
- inertia (dim=3): The diagonal values of the inertia matrix. **CPU pipeline only**
- material_properties (dim=3): Static friction, Dynamic friction, and Restitution.
- contact_offset (dim=1): A small distance from the surface of the collision geometry at
which contacts start being generated.
- rest_offset (dim=1): A small distance from the surface of the collision geometry at
which the effective contact with the shape takes place.
- scale (dim=1): The scale of the rigid prim. `on_startup` only.
- density (dim=1): Density of the rigid prim. `on_startup` only.

**articulation\_views**:
- position (dim=3): The position of the articulation root. In meters.
- orientation (dim=3): The orientation of the articulation root, specified with euler angles. In radians.
- linear_velocity (dim=3): The linear velocity of the articulation root. In m/s. **CPU pipeline only**
- angular_velocity (dim=3): The angular velocity of the articulation root. In rad/s. **CPU pipeline only**
- velocity (dim=6): The linear + angular velocity of the articulation root.
- stiffness (dim=num_dof): The stiffness of the joints.
- damping (dim=num_dof): The damping of the joints.
- joint_friction (dim=num_dof): The friction coefficient of the joints.
- joint_positions (dim=num_dof): The joint positions. In radians or meters.
- joint_velocities (dim=num_dof): The joint velocities. In rad/s or m/s.
- lower_dof_limits (dim=num_dof): The lower limit of the joints. In radians or meters.
- upper_dof_limits (dim=num_dof): The upper limit of the joints. In radians or meters.
- max_efforts (dim=num_dof): The maximum force or torque that the joints can exert. In N or Nm.
- joint_armatures (dim=num_dof): A value added to the diagonal of the joint-space inertia matrix.
  Physically, it corresponds to the rotating part of a motor.
- joint_max_velocities (dim=num_dof): The maximum velocity allowed on the joints. In rad/s or m/s.
- joint_efforts (dim=num_dof): Applies a force or a torque on the joints. In N or Nm.
- body_masses (dim=num_bodies): The mass of each body in the articulation. In kg. **CPU pipeline only**
- body_inertias (dim=num_bodies×3): The diagonal values of the inertia matrix of each body. **CPU pipeline only**
- material_properties (dim=num_bodies×3): The static friction, dynamic friction, and restitution of each body
  in the articulation, specified in the following order:
  [body_1_static_friction, body_1_dynamic_friction, body_1_restitution,
  body_2_static_friction, body_2_dynamic_friction, body_2_restitution,
  ... ]
- tendon_stiffnesses (dim=num_tendons): The stiffness of the fixed tendons in the articulation.
- tendon_dampings (dim=num_tendons): The damping of the fixed tendons in the articulation.
- tendon_limit_stiffnesses (dim=num_tendons): The limit stiffness of the fixed tendons in the articulation.
- tendon_lower_limits (dim=num_tendons): The lower limits of the fixed tendons in the articulation.
- tendon_upper_limits (dim=num_tendons): The upper limits of the fixed tendons in the articulation.
- tendon_rest_lengths (dim=num_tendons): The rest lengths of the fixed tendons in the articulation.
- tendon_offsets (dim=num_tendons): The offsets of the fixed tendons in the articulation.
- scale (dim=1): The scale of the articulation. `on_startup` only.

Applying Domain Randomization
------------------------------

To parse the domain randomization configurations in the task `yaml` file and set up the DR pipeline,
it is necessary to call `self._randomizer.set_up_domain_randomization(self)`, where `self._randomizer`
is the `Randomizer` object created in RLTask's `__init__`.

It is worth noting that the names of the views provided under `rigid_prim_views` or `articulation_views`
in the task `yaml` file must match the names passed into `RigidPrimView` or `ArticulationView` objects
in the python task file. In addition, all `RigidPrimView` and `ArticulationView` objects that will have domain
randomization applied must be added to the scene in the task's `set_up_scene()` via `scene.add()`.

To trigger `on_startup` randomizations, call `self._randomizer.apply_on_startup_domain_randomization(self)`
in `set_up_scene()` after all views are added to the scene. Note that `on_startup` randomizations
are only available for rigid prim scale, mass, density and articulation scale parameters, since these parameters
cannot be randomized after the simulation begins on the GPU pipeline. Therefore, randomizations must be applied
to these parameters in `set_up_scene()` prior to the start of the simulation, as sketched below.
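
Putting these calls together, a minimal sketch of a task's `set_up_scene()` might look like the following; the view objects and their names are assumptions carried over from the example above, and the exact placement of the randomizer calls is one reasonable choice rather than a prescribed one:

```python
def set_up_scene(self, scene) -> None:
    super().set_up_scene(scene)
    # ... create the RigidPrimView / ArticulationView objects here ...

    # Every view that should be randomized must be registered with the scene.
    scene.add(self._objects)  # e.g. a RigidPrimView named "object_view"
    scene.add(self._hands)    # e.g. an ArticulationView named "shadow_hand_view"

    # Parse the yaml DR config and set up the pipeline.
    self._randomizer.set_up_domain_randomization(self)

    # `on_startup` randomizations must be applied before the simulation starts.
    self._randomizer.apply_on_startup_domain_randomization(self)
```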

To trigger `on_reset` and `on_interval` randomizations, it is required to step the internal
counter of the DR pipeline in `pre_physics_step()`:

```python
if self._randomizer.randomize:
    omni.replicator.isaac.physics_view.step_randomization(reset_inds)
```

`reset_inds` is a list of indices of the environments that need to be reset. For those environments, it will
trigger the randomizations defined with `on_reset`. All other environments will follow the randomizations
defined with `on_interval`.
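
How `reset_inds` is computed is up to the task; one common pattern (an assumption here, not mandated by the DR framework) is to derive it from the task's reset buffer:

```python
# Environments whose episodes terminated this step.
reset_inds = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)

if self._randomizer.randomize:
    omni.replicator.isaac.physics_view.step_randomization(reset_inds)
```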

Randomization Scheduling
----------------------------

We provide methods to modify distribution parameters defined in the `yaml` file during training, which
allows custom DR scheduling. There are three methods from the `Randomizer` class
that are relevant to DR scheduling:

- `get_initial_dr_distribution_parameters`: returns a numpy array of the initial parameters (as defined in
  the `yaml` file) of a specified distribution
- `get_dr_distribution_parameters`: returns a numpy array of the current parameters of a specified distribution
- `set_dr_distribution_parameters`: sets new parameters for a specified distribution

Using the DR configuration example defined above, we can get the current parameters of, and set new parameters
for, the gravity randomization and the shadow hand joint stiffness randomization as follows:

```python
current_gravity_dr_params = self._randomizer.get_dr_distribution_parameters(
    "simulation",
    "gravity",
    "on_reset",
)

self._randomizer.set_dr_distribution_parameters(
    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.5]],
    "simulation",
    "gravity",
    "on_reset",
)

current_joint_stiffness_dr_params = self._randomizer.get_dr_distribution_parameters(
    "articulation_views",
    "shadow_hand_view",
    "stiffness",
    "on_reset",
)

self._randomizer.set_dr_distribution_parameters(
    [0.7, 1.55],
    "articulation_views",
    "shadow_hand_view",
    "stiffness",
    "on_reset",
)
```

The following is an example of using these methods to perform linear scheduling of the gaussian noise
that is added to observations and actions in the above shadow hand example. The method
linearly adds more noise to observations and actions every epoch, up until `schedule_epoch`.
It can be added to the Task python class and called in `pre_physics_step()`.

```python
def apply_observations_actions_noise_linear_scheduling(self, schedule_epoch=100):
    current_epoch = self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"] // self._cfg["train"]["params"]["config"]["horizon_length"]
    if current_epoch <= schedule_epoch:
        if (self._env.sim_frame_count // self._cfg["task"]["env"]["controlFrequencyInv"]) % self._cfg["train"]["params"]["config"]["horizon_length"] == 0:
            for distribution_path in [
                ("observations", "on_reset"),
                ("observations", "on_interval"),
                ("actions", "on_reset"),
                ("actions", "on_interval"),
            ]:
                scheduled_params = self._randomizer.get_initial_dr_distribution_parameters(*distribution_path)
                # Scale the noise magnitude (the second distribution parameter) linearly with the epoch.
                scheduled_params[1] = (1 / schedule_epoch) * current_epoch * scheduled_params[1]
                self._randomizer.set_dr_distribution_parameters(scheduled_params, *distribution_path)
```

Virlus/OmniIsaacGymEnvs/docs/instanceable_assets.md

## A Note on Instanceable USD Assets

The following section presents a method that modifies existing USD assets
which allows Isaac Sim to load significantly more environments. This is currently
an experimental method and has thus not been completely integrated into the
framework. As a result, this section is reserved for power users who wish to
maximize the performance of the Isaac Sim RL framework.

### Motivation

One common issue in Isaac Sim that occurs when we try to increase
the number of environments `numEnvs` is running out of RAM. This occurs because
the Isaac Sim RL framework uses `omni.isaac.cloner` to duplicate environments.
As a result, there are `numEnvs` identical copies of the visual and
collision meshes in the scene, which consumes a lot of memory. However, only one
copy of the meshes is needed on stage, since prims in all other environments could
merely reference that one copy, thus reducing the amount of memory used for loading
environments. To enable this functionality, USD assets need to be modified to be
`instanceable`.

### Creating Instanceable Assets

Assets can now be directly imported as Instanceable assets through the URDF and MJCF importers provided in Isaac Sim. By selecting this option, imported assets will be split into two separate USD files that follow the hierarchy definition described in the Limitations section below. Any mesh data will be written to a USD stage to be referenced by the main USD stage, which contains the main robot definition.

To use the Instanceable option in the importers, first check the `Create Instanceable Asset` option. Then, specify a file path to indicate the location for saving the mesh data in the `Instanceable USD Path` textbox. This will default to `./instanceable_meshes.usd`, which will generate a file `instanceable_meshes.usd` that is saved to the current directory.

Once the asset is imported with these options enabled, you will see the robot definition in the stage - we will refer to this stage as the master stage. If we expand the robot hierarchy in the Stage, we will notice that the parent prims that have mesh descendants have been marked as Instanceable and they reference a prim in our `Instanceable USD Path` USD file. We are also no longer able to modify attributes of descendant meshes.

To add the instanced asset into a new stage, we will simply need to add the master USD file.

### Converting Existing Assets
We provide the utility function `convert_asset_instanceable`, which creates an instanceable
version of a given USD asset in `/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py`.
To run this function, launch Isaac Sim and open the script editor via `Window -> Script Editor`.
Enter the following script and press `Run (Ctrl + Enter)`:
```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import convert_asset_instanceable
convert_asset_instanceable(
asset_usd_path=ASSET_USD_PATH,
source_prim_path=SOURCE_PRIM_PATH,
save_as_path=SAVE_AS_PATH
)
```
Note that `ASSET_USD_PATH` is the file path to the USD asset (*e.g.* robot_asset.usd).
`SOURCE_PRIM_PATH` is the USD path of the root prim of the asset on stage. `SAVE_AS_PATH`
is the file path of the generated instanceable version of the asset
(*e.g.* robot_asset_instanceable.usd).
Assuming that `SAVE_AS_PATH` is `OUTPUT_NAME.usd`, the above script will generate two files:
`OUTPUT_NAME.usd` and `OUTPUT_NAME_meshes.usd`. `OUTPUT_NAME.usd` is the instanceable version
of the asset that can be imported to stage and used by `omni.isaac.cloner` to create numerous
duplicates without consuming much memory. `OUTPUT_NAME_meshes.usd` contains all the visual
and collision meshes that `OUTPUT_NAME.usd` references.
It is worth noting that any [USD Relationships](https://graphics.pixar.com/usd/dev/api/class_usd_relationship.html)
on the referenced meshes are removed in `OUTPUT_NAME.usd`. This is because those USD Relationships
originally have targets set to prims in `OUTPUT_NAME_meshes.usd` and hence cannot be accessed
from `OUTPUT_NAME.usd`. Common examples of USD Relationships that could exist on the meshes are
visual materials, physics materials, and filtered collision pairs. Therefore, it is recommended
to set these USD Relationships on the meshes' parent Xforms instead of the meshes themselves.
If the instanceable USD file is moved or renamed, we also provide a utility method that updates all references in the stage matching a source reference path to a new USD file path, so that the main USD file keeps referencing the correct location.
```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import update_reference
update_reference(
source_prim_path=SOURCE_PRIM_PATH,
source_reference_path=SOURCE_REFERENCE_PATH,
target_reference_path=TARGET_REFERENCE_PATH
)
```
### Limitations
USD requires a specific structure in the asset tree definition in order for the instanceable flag to take effect. To mark any mesh or primitive geometry prim in the asset as instanceable, the mesh prim requires a parent Xform prim to be present, which will be used to add a reference to a master USD file containing the definition of the mesh prim.
For example, the following definition:
```
World
|_ Robot
|_ Collisions
|_ Sphere
|_ Box
```
would have to be modified to:
```
World
|_ Robot
|_ Collisions
|_ Sphere_Xform
| |_ Sphere
|_ Box_Xform
|_ Box
```
Any references that exist on the original `Sphere` and `Box` prims would have to be moved to `Sphere_Xform` and `Box_Xform` prims.
To help with the process of creating new parent prims, we provide a utility method `create_parent_xforms()` in `omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py` to automatically insert a new Xform prim as a parent of every mesh prim in the stage. This method can be run on an existing non-instanced USD file for an asset from the script editor:
```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import create_parent_xforms
create_parent_xforms(
asset_usd_path=ASSET_USD_PATH,
source_prim_path=SOURCE_PRIM_PATH,
save_as_path=SAVE_AS_PATH
)
```
This method can also be run as part of the `convert_asset_instanceable()` method by passing in the argument `create_xforms=True`, as shown in the sketch below.
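For instance, a combined invocation might look like the following (a sketch only; `ASSET_USD_PATH`, `SOURCE_PRIM_PATH`, and `SAVE_AS_PATH` are the same placeholders as above):

```python
from omniisaacgymenvs.utils.usd_utils.create_instanceable_assets import convert_asset_instanceable

# Insert parent Xforms for every mesh prim, then convert in a single call
convert_asset_instanceable(
    asset_usd_path=ASSET_USD_PATH,
    source_prim_path=SOURCE_PRIM_PATH,
    save_as_path=SAVE_AS_PATH,
    create_xforms=True
)
```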
It is also worth noting that once an instanced asset is added to the stage, we can no longer modify USD attributes on the instanceable prims. For example, to modify attributes of collision meshes that are set as instanceable, we have to first modify the attributes on the corresponding prims in the master prim which our instanced asset references from. Then, we can allow the instanced asset to pick up the updated values from the master prim.
Reproducibility and Determinism
===============================
Seeds
-----
To achieve deterministic behavior on multiple training runs, a seed
value can be set in the training config file for each task. This will potentially
allow for individual runs of the same task to be deterministic when
executed on the same machine and system setup. Alternatively, a seed can
also be set via command line argument `seed=<seed>` to override any
settings in config files. If no seed is specified in either config files
or command line arguments, we default to generating a random seed. In
this case, individual runs of the same task should not be expected to be
deterministic. For convenience, we also support setting `seed=-1` to
generate a random seed, which will override any seed values set in
config files. By default, we have explicitly set all seed values in
config files to be 42.
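For illustration, both behaviors can be selected from the command line (the launch script and task name follow the usage shown in the RL examples documentation):

```bash
# Fixed seed for repeatable runs
python train.py task=Cartpole seed=42

# Force a randomly generated seed, overriding any value in config files
python train.py task=Cartpole seed=-1
```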
PyTorch Deterministic Training
------------------------------
We also include a `torch_deterministic` argument for use when running RL
training. Enabling this flag (by passing `torch_deterministic=True`) will
apply additional settings to PyTorch that can force the usage of deterministic
algorithms in PyTorch, but may also negatively impact runtime performance.
For more details regarding PyTorch reproducibility, refer to
<https://pytorch.org/docs/stable/notes/randomness.html>. If both
`torch_deterministic=True` and `seed=-1` are set, the seed value will be
fixed to 42.
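For reference, this flag corresponds to the standard PyTorch reproducibility settings; a minimal sketch of what such a setup typically involves is shown below (an approximation only - the exact calls made by the framework may differ):

```python
import os
import random

import numpy as np
import torch


def enable_determinism(seed: int = 42) -> None:
    # Seed every RNG source used during training
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Required by some CUDA ops when deterministic algorithms are enforced
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)
```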
Runtime Simulation Changes / Domain Randomization
-------------------------------------------------
Note that using a fixed seed value will only **potentially** allow for deterministic
behavior. Due to GPU work scheduling, it is possible that runtime changes to
simulation parameters can alter the order in which operations take place, as
environment updates can happen while the GPU is doing other work. Because of the nature
of floating point numeric storage, any alteration of execution ordering can
cause small changes in the least significant bits of output data, leading
to divergent execution over the simulation of thousands of environments and
simulation frames.
As an example of this, runtime domain randomization of object scales
is known to cause both determinism and simulation issues when running on the GPU
due to the way those parameters are passed from CPU to GPU in lower level APIs. Therefore,
this is only supported at setup time before starting simulation, which is specified by
the `on_startup` condition for Domain Randomization.
At this time, we do not believe that other domain randomizations offered by this
framework cause issues with deterministic execution when running GPU simulation,
but directly manipulating other simulation parameters outside of the omni.isaac.core View
APIs may induce similar issues.
Also due to floating point precision, states across different environments in the simulation
may be non-deterministic when the same set of actions are applied to the same initial
states. This occurs as environments are placed further apart from the world origin at (0, 0, 0).
As actors get placed at different origins in the world, floating point errors may build up
and result in slight variance in results even when starting from the same initial states. One
possible workaround for this issue is to place all actors/environments at the world origin
at (0, 0, 0) and filter out collisions between the environments. Note that this may induce
a performance degradation of around 15-50%, depending on the complexity of actors and
environment.
Another known cause of non-determinism is resetting actors into contact states.
If actors within a scene are reset to a state where contacts are registered
between actors, the simulation may not be able to produce deterministic results.
This is because contacts are not recorded and will be re-computed from scratch for
each reset scenario where actors come into contact, which cannot guarantee
deterministic behavior across different computations.
## Reinforcement Learning with Vision in the Loop
Some reinforcement learning tasks can benefit from having image data in the pipeline by collecting sensor data from cameras to use as observations. However, high fidelity rendering can be expensive when scaled up towards thousands of environments during training.
Although Isaac Sim does not currently have the capability to scale towards thousands of environments, we are continually working on improvements to reach the goal. As a starting point, we are providing a simple example showcasing a proof-of-concept for reinforcement learning with vision in the loop.
### CartpoleCamera [cartpole_camera.py](../omniisaacgymenvs/tasks/cartpole_camera.py)
As an example showcasing the possibility of reinforcement learning with vision in the loop, we provide a variation of the Cartpole task, which uses RGB image data as observations. This example
can be launched with command line argument `task=CartpoleCamera`.
Config files used for this task are:
- **Task config**: [CartpoleCamera.yaml](../omniisaacgymenvs/cfg/task/CartpoleCamera.yaml)
- **rl_games training config**: [CartpoleCameraPPO.yaml](../omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml)
### Working with Cameras
We have provided a dedicated app file, `apps/omni.isaac.sim.python.gym.camera.kit`, designed specifically for vision-based RL tasks. This app file provides the settings necessary to render multiple cameras each frame, along with additional settings that increase performance when rendering cameras across multiple environments.
In addition, the following settings can be added to the app file to increase performance at the cost of accuracy. With these flags set to `false`, data collected from the cameras may have a 1 to 2 frame delay.
```
app.renderer.waitIdle=false
app.hydraEngine.waitIdle=false
```
We can also render in white-mode by adding the following line:
```
rtx.debugMaterialType=0
```
### Config Settings
In order for rendering to occur during training, tasks using camera rendering must have the `enable_cameras` flag set to `True` in the task config file. By default, the `omni.isaac.sim.python.gym.camera.kit` app file will be used automatically when `enable_cameras` is set to `True`. This flag is located in the task config file, under the `sim` section.
In addition, the `rendering_dt` parameter can be used to specify the desired rendering frequency. Similar to `dt` for the physics simulation frequency, `rendering_dt` specifies the amount of time in `s` between each rendering step. The `rendering_dt` should be larger than or equal to the physics `dt`, and a multiple of it. Note that specifying the `controlFrequencyInv` flag will reduce the control frequency relative to the physics simulation frequency.
For example, assume control frequency is 30hz, physics simulation frequency is 120 hz, and rendering frequency is 10hz. In the task config file, we can set `dt: 1/120`, `controlFrequencyInv: 4`, such that control is applied every 4 physics steps, and `rendering_dt: 1/10`. In this case, render data will only be updated once every 12 physics steps. Note that both `dt` and `rendering_dt` parameters are under the `sim` section of the config file, while `controlFrequencyInv` is under the `env` section.
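A sketch of the corresponding task config entries for this example is shown below (key placement follows the description above; the decimal values are illustrative approximations of the fractions):

```yaml
sim:
  dt: 0.008333          # 1/120 s: physics simulated at 120 Hz
  rendering_dt: 0.1     # 1/10 s: render data updated every 12 physics steps
env:
  controlFrequencyInv: 4  # control applied every 4 physics steps (30 Hz)
```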
### Environment Setup
To set up a task for vision-based RL, we will first need to add a camera to each environment in the scene and wrap it in a Replicator `render_product` to use the vectorized rendering API available in Replicator.
This can be done with the following code in `set_up_scene`:
```python
self.render_products = []
env_pos = self._env_pos.cpu()
for i in range(self._num_envs):
camera = self.rep.create.camera(
position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55))
render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height))
self.render_products.append(render_product)
```
Next, we need to initialize Replicator and the PytorchListener, which will be used to collect rendered data.
```python
# start replicator to capture image data
self.rep.orchestrator._orchestrator._is_started = True
# initialize pytorch writer for vectorized collection
self.pytorch_listener = self.PytorchListener()
self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
self.pytorch_writer.attach(self.render_products)
```
Then, we can simply collect rendered data from each environment using a single API call:
```python
# retrieve RGB data from all render products
images = self.pytorch_listener.get_rgb_data()
```
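The returned image batch can then be normalized and written into the observation buffer. A minimal sketch, assuming the `obs_buf` naming used by `RLTask`:

```python
# convert uint8 RGB data to float observations in [0, 1]
if images is not None:
    self.obs_buf = images.clone().float() / 255.0
```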
## Reinforcement Learning Examples
We introduce the following reinforcement learning examples that are implemented using
Isaac Sim's RL framework.
Pre-trained checkpoints can be found on the Nucleus server. To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html).
*Note: All commands should be executed from `omniisaacgymenvs/omniisaacgymenvs`.*
- [Reinforcement Learning Examples](#reinforcement-learning-examples)
- [Cartpole cartpole.py](#cartpole-cartpolepy)
- [Ant ant.py](#ant-antpy)
- [Humanoid humanoid.py](#humanoid-humanoidpy)
- [Shadow Hand Object Manipulation shadow_hand.py](#shadow-hand-object-manipulation-shadow_handpy)
- [OpenAI Variant](#openai-variant)
- [LSTM Training Variant](#lstm-training-variant)
- [Allegro Hand Object Manipulation allegro_hand.py](#allegro-hand-object-manipulation-allegro_handpy)
- [ANYmal anymal.py](#anymal-anymalpy)
- [Anymal Rough Terrain anymal_terrain.py](#anymal-rough-terrain-anymal_terrainpy)
- [NASA Ingenuity Helicopter ingenuity.py](#nasa-ingenuity-helicopter-ingenuitypy)
- [Quadcopter quadcopter.py](#quadcopter-quadcopterpy)
- [Crazyflie crazyflie.py](#crazyflie-crazyfliepy)
- [Ball Balance ball_balance.py](#ball-balance-ball_balancepy)
- [Franka Cabinet franka_cabinet.py](#franka-cabinet-franka_cabinetpy)
- [Franka Deformable franka_deformable.py](#franka-deformablepy)
- [Factory: Fast Contact for Robotic Assembly](#factory-fast-contact-for-robotic-assembly)
### Cartpole [cartpole.py](../omniisaacgymenvs/tasks/cartpole.py)
Cartpole is a simple example that demonstrates getting and setting DOF states using
`ArticulationView` from `omni.isaac.core`. The goal of this task is to move a cart horizontally
such that the pole, which is connected to the cart via a revolute joint, stays upright.
Joint positions and joint velocities are retrieved using `get_joint_positions` and
`get_joint_velocities` respectively, which are required in computing observations. Actions are
applied onto the cartpoles via `set_joint_efforts`. Cartpoles are reset by using `set_joint_positions`
and `set_joint_velocities`.
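A condensed sketch of how these calls fit together is shown below (the view name `self._cartpoles` and the buffers `forces`, `init_pos`, `init_vel`, and `env_ids` are illustrative):

```python
# observations: read DOF states for all environments as batched tensors
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)

# actions: apply efforts computed from the policy's action buffer
self._cartpoles.set_joint_efforts(forces)

# resets: restore sampled DOF states for the environments being reset
self._cartpoles.set_joint_positions(init_pos, indices=env_ids)
self._cartpoles.set_joint_velocities(init_vel, indices=env_ids)
```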
Training can be launched with command line argument `task=Cartpole`.
Training using the Warp backend can be launched with `task=Cartpole warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Cartpole test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/cartpole.pth`
Config files used for this task are:
- **Task config**: [Cartpole.yaml](../omniisaacgymenvs/cfg/task/Cartpole.yaml)
- **rl_games training config**: [CartpolePPO.yaml](../omniisaacgymenvs/cfg/train/CartpolePPO.yaml)
#### CartpoleCamera [cartpole_camera.py](../omniisaacgymenvs/tasks/cartpole_camera.py)
A variation of the Cartpole task showcases the usage of RGB image data as observations. This example
can be launched with command line argument `task=CartpoleCamera`. Note that to use camera data as
observations, `enable_cameras` must be set to `True` in the task config file. In addition, the example must be run with the `omni.isaac.sim.python.gym.camera.kit` app file provided under `apps`, which applies necessary settings to enable camera training. By default, this app file will be used automatically when `enable_cameras` is set to `True`.
Config files used for this task are:
- **Task config**: [CartpoleCamera.yaml](../omniisaacgymenvs/cfg/task/CartpoleCamera.yaml)
- **rl_games training config**: [CartpoleCameraPPO.yaml](../omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml)
For more details on training with camera data, please visit [here](training_with_camera.md).
<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/>
### Ant [ant.py](../omniisaacgymenvs/tasks/ant.py)
Ant is an example of a simple locomotion task. The goal of this task is to train
quadruped robots (ants) to run forward as fast as possible. This example inherits
from [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py),
which is a shared class between this example and the humanoid example; this simplifies
implementations for both environments since they compute rewards, observations,
and resets in a similar manner. This framework allows us to easily switch between
robots used in the task.
The Ant task includes more examples of utilizing `ArticulationView` from `omni.isaac.core`, which
provides various functions to get and set both DOF states and articulation root states
in a tensorized fashion across all of the actors in the environment. `get_world_poses`,
`get_linear_velocities`, and `get_angular_velocities` can be used to determine whether the
ants have been moving in the desired direction and whether they have fallen or flipped over.
Actions are applied onto the ants via `set_joint_efforts`, which moves the ants by setting
torques to the DOFs.
Note that the previously used force sensors and `get_force_sensor_forces` API are now deprecated.
Force sensors can now be retrieved directly using `get_measured_joint_forces` from `ArticulationView`.
Training with PPO can be launched with command line argument `task=Ant`.
Training with SAC with command line arguments `task=AntSAC train=AntSAC`.
Training using the Warp backend can be launched with `task=Ant warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Ant test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth`
Config files used for this task are:
- **PPO task config**: [Ant.yaml](../omniisaacgymenvs/cfg/task/Ant.yaml)
- **rl_games PPO training config**: [AntPPO.yaml](../omniisaacgymenvs/cfg/train/AntPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/>
### Humanoid [humanoid.py](../omniisaacgymenvs/tasks/humanoid.py)
Humanoid is another environment that uses
[LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py). It is conceptually
very similar to the Ant example, where the goal for the humanoid is to run forward
as fast as possible.
Training can be launched with command line argument `task=Humanoid`.
Training with SAC with command line arguments `task=HumanoidSAC train=HumanoidSAC`.
Training using the Warp backend can be launched with `task=Humanoid warp=True`.
Running inference with pre-trained model can be launched with command line argument `task=Humanoid test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/humanoid.pth`
Config files used for this task are:
- **PPO task config**: [Humanoid.yaml](../omniisaacgymenvs/cfg/task/Humanoid.yaml)
- **rl_games PPO training config**: [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>
### Shadow Hand Object Manipulation [shadow_hand.py](../omniisaacgymenvs/tasks/shadow_hand.py)
The Shadow Hand task is an example of a challenging dexterity manipulation task with complex contact
dynamics. It resembles OpenAI's [Learning Dexterity](https://openai.com/blog/learning-dexterity/)
project and [Robotics Shadow Hand](https://github.com/openai/gym/tree/v0.21.0/gym/envs/robotics)
training environments. The goal of this task is to orient the object in the robot hand to match
a random target orientation, which is visually displayed by a goal object in the scene.
This example inherits from [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py),
which is a shared class between this example and the Allegro Hand example. The idea of
this shared [InHandManipulationTask](../omniisaacgymenvs/tasks/shared/in_hand_manipulation.py) class
is similar to that of the [LocomotionTask](../omniisaacgymenvs/tasks/shared/locomotion.py);
since the Shadow Hand example and the Allegro Hand example only differ by the robot hand used
in the task, using this shared class simplifies implementation across the two.
In this example, motion of the hand is controlled using position targets with `set_joint_position_targets`.
The object and the goal object are reset using `set_world_poses`; their states are retrieved via
`get_world_poses` for computing observations. It is worth noting that the Shadow Hand model in
this example also demonstrates the use of tendons, which are imported using the `omni.isaac.mjcf` extension.
Training can be launched with command line argument `task=ShadowHand`.
Training with Domain Randomization can be launched with command line argument `task.domain_randomization.randomize=True`.
For best training results with DR, use `num_envs=16384`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand.pth`
Config files used for this task are:
- **Task config**: [ShadowHand.yaml](../omniisaacgymenvs/cfg/task/ShadowHand.yaml)
- **rl_games training config**: [ShadowHandPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml)
#### OpenAI Variant
In addition to the basic version of this task, there is an additional variant matching OpenAI's
[Learning Dexterity](https://openai.com/blog/learning-dexterity/) project. This variant uses the **openai**
observations in the policy network, but asymmetric observations of the **full_state** in the value network.
This can be launched with command line argument `task=ShadowHandOpenAI_FF`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHandOpenAI_FF test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand_openai_ff.pth`
Config files used for this are:
- **Task config**: [ShadowHandOpenAI_FF.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml)
- **rl_games training config**: [ShadowHandOpenAI_FFPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml).
#### LSTM Training Variant
This variant uses LSTM policy and value networks instead of feed-forward networks, as well as an asymmetric
LSTM critic designed for the OpenAI variant of the task. This can be launched with command line argument
`task=ShadowHandOpenAI_LSTM`.
Running inference with pre-trained model can be launched with command line argument `task=ShadowHandOpenAI_LSTM test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/shadow_hand_openai_lstm.pth`
Config files used for this are:
- **Task config**: [ShadowHandOpenAI_LSTM.yaml](../omniisaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml)
- **rl_games training config**: [ShadowHandOpenAI_LSTMPPO.yaml](../omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml).
<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/>
### Allegro Hand Object Manipulation [allegro_hand.py](../omniisaacgymenvs/tasks/allegro_hand.py)
This example performs the same object orientation task as the Shadow Hand example,
but using the Allegro hand instead of the Shadow hand.
Training can be launched with command line argument `task=AllegroHand`.
Running inference with pre-trained model can be launched with command line argument `task=AllegroHand test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/allegro_hand.pth`
Config files used for this task are:
- **Task config**: [AllegroHand.yaml](../omniisaacgymenvs/cfg/task/Allegro.yaml)
- **rl_games training config**: [AllegroHandPPO.yaml](../omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>
### ANYmal [anymal.py](../omniisaacgymenvs/tasks/anymal.py)
This example trains a model of the ANYmal quadruped robot from ANYbotics
to follow randomly chosen x, y, and yaw target velocities.
Training can be launched with command line argument `task=Anymal`.
Running inference with pre-trained model can be launched with command line argument `task=Anymal test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal.pth`
Config files used for this task are:
- **Task config**: [Anymal.yaml](../omniisaacgymenvs/cfg/task/Anymal.yaml)
- **rl_games training config**: [AnymalPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>
### Anymal Rough Terrain [anymal_terrain.py](../omniisaacgymenvs/tasks/anymal_terrain.py)
A more complex version of the above Anymal environment that supports
traversing various forms of rough terrain.
Training can be launched with command line argument `task=AnymalTerrain`.
Running inference with pre-trained model can be launched with command line argument `task=AnymalTerrain test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth`
- **Task config**: [AnymalTerrain.yaml](../omniisaacgymenvs/cfg/task/AnymalTerrain.yaml)
- **rl_games training config**: [AnymalTerrainPPO.yaml](../omniisaacgymenvs/cfg/train/AnymalTerrainPPO.yaml)
**Note** during test time use the last weights generated, rather than the usual best weights.
Due to curriculum training, the reward goes down as the task gets more challenging, so the best weights
do not typically correspond to the best outcome.
**Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:
```
@misc{rudin2021learning,
title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
year={2021},
      journal = {arXiv preprint arXiv:2109.11978}
}
```
**Note** The OmniIsaacGymEnvs implementation slightly differs from the implementation used in the paper above, which also
uses a different RL library and PPO implementation. The original implementation is made available [here](https://github.com/leggedrobotics/legged_gym). Results reported in the Isaac Gym technical paper are based on that repository, not this one.
<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="300" height="150"/>
### NASA Ingenuity Helicopter [ingenuity.py](../omniisaacgymenvs/tasks/ingenuity.py)
This example trains a simplified model of NASA's Ingenuity helicopter to navigate to a moving target.
It showcases the use of velocity tensors and applying force vectors to rigid bodies.
Note that we are applying force directly to the chassis, rather than simulating aerodynamics.
This example also demonstrates using different values for gravitational forces.
Ingenuity Helicopter visual 3D Model courtesy of NASA: https://mars.nasa.gov/resources/25043/mars-ingenuity-helicopter-3d-model/.
Training can be launched with command line argument `task=Ingenuity`.
Running inference with pre-trained model can be launched with command line argument `task=Ingenuity test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ingenuity.pth`
Config files used for this task are:
- **Task config**: [Ingenuity.yaml](../omniisaacgymenvs/cfg/task/Ingenuity.yaml)
- **rl_games training config**: [IngenuityPPO.yaml](../omniisaacgymenvs/cfg/train/IngenuityPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/>
### Quadcopter [quadcopter.py](../omniisaacgymenvs/tasks/quadcopter.py)
This example trains a very simple quadcopter model to reach and hover near a fixed position.
Lift is achieved by applying thrust forces to the "rotor" bodies, which are modeled as flat cylinders.
In addition to thrust, the pitch and roll of each rotor is controlled using DOF position targets.
Training can be launched with command line argument `task=Quadcopter`.
Running inference with pre-trained model can be launched with command line argument `task=Quadcopter test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/quadcopter.pth`
Config files used for this task are:
- **Task config**: [Quadcopter.yaml](../omniisaacgymenvs/cfg/task/Quadcopter.yaml)
- **rl_games training config**: [QuadcopterPPO.yaml](../omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>
### Crazyflie [crazyflie.py](../omniisaacgymenvs/tasks/crazyflie.py)
This example trains the Crazyflie drone model to hover near a fixed position. It is achieved by applying thrust forces to the four rotors.
Training can be launched with command line argument `task=Crazyflie`.
Running inference with pre-trained model can be launched with command line argument `task=Crazyflie test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/crazyflie.pth`
Config files used for this task are:
- **Task config**: [Crazyflie.yaml](../omniisaacgymenvs/cfg/task/Crazyflie.yaml)
- **rl_games training config**: [CrazyfliePPO.yaml](../omniisaacgymenvs/cfg/train/CrazyfliePPO.yaml)
<img src="https://user-images.githubusercontent.com/6352136/185715165-b430a0c7-948b-4dce-b3bb-7832be714c37.gif" width="300" height="150"/>
### Ball Balance [ball_balance.py](../omniisaacgymenvs/tasks/ball_balance.py)
This example trains balancing tables to balance a ball on the table top.
This is a great example to showcase the use of force and torque sensors, as well as DOF states for the table and root states for the ball.
In this example, the three-legged table has a force sensor attached to each leg.
We use the force sensor APIs to collect force and torque data on the legs, which guide position target outputs produced by the policy.
Training can be launched with command line argument `task=BallBalance`.
Running inference with pre-trained model can be launched with command line argument `task=BallBalance test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ball_balance.pth`
Config files used for this task are:
- **Task config**: [BallBalance.yaml](../omniisaacgymenvs/cfg/task/BallBalance.yaml)
- **rl_games training config**: [BallBalancePPO.yaml](../omniisaacgymenvs/cfg/train/BallBalancePPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>
### Franka Cabinet [franka_cabinet.py](../omniisaacgymenvs/tasks/franka_cabinet.py)
This Franka example demonstrates interaction between Franka arm and cabinet, as well as setting states of objects inside the drawer.
It also showcases control of the Franka arm using position targets.
In this example, we use DOF state tensors to retrieve the state of the Franka arm, as well as the state of the drawer on the cabinet.
Actions are applied as position targets to the Franka arm DOFs.
Training can be launched with command line argument `task=FrankaCabinet`.
Running inference with pre-trained model can be launched with command line argument `task=FrankaCabinet test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/franka_cabinet.pth`
Config files used for this task are:
- **Task config**: [FrankaCabinet.yaml](../omniisaacgymenvs/cfg/task/FrankaCabinet.yaml)
- **rl_games training config**: [FrankaCabinetPPO.yaml](../omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml)
<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/>
### Franka Deformable [franka_deformable.py](../omniisaacgymenvs/tasks/franka_deformable.py)
This Franka example demonstrates interaction between Franka arm and a deformable tube. It demonstrates the manipulation of deformable objects, using nodal positions and velocities of the simulation mesh as observations.
Training can be launched with command line argument `task=FrankaDeformable`.
Running inference with pre-trained model can be launched with command line argument `task=FrankaDeformable test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/franka_deformable.pth`
Config files used for this task are:
- **Task config**: [FrankaDeformable.yaml](../omniisaacgymenvs/cfg/task/FrankaDeformable.yaml)
- **rl_games training config**: [FrankaDeformablePPO.yaml](../omniisaacgymenvs/cfg/train/FrankaDeformablePPO.yaml)
### Factory: Fast Contact for Robotic Assembly
We provide a set of Factory example tasks: [**FactoryTaskNutBoltPick**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py), [**FactoryTaskNutBoltPlace**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py), and [**FactoryTaskNutBoltScrew**](../omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py).
`FactoryTaskNutBoltPick` can be executed with `python train.py task=FactoryTaskNutBoltPick`. This task trains a policy for the Pick task, a simplified version of the corresponding task in the Factory paper. The policy may take ~1 hour to achieve high success rates on a modern GPU.
- The general configuration file for the above task is [FactoryTaskNutBoltPick.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltPickPPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltPick test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_pick.pth`
`FactoryTaskNutBoltPlace` can be executed with `python train.py task=FactoryTaskNutBoltPlace`. This task trains a policy for the Place task.
- The general configuration file for the above task is [FactoryTaskNutBoltPlace.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltPlacePPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltPlace test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_place.pth`
`FactoryTaskNutBoltScrew` can be executed with `python train.py task=FactoryTaskNutBoltScrew`. This task trains a policy for the Screw task.
- The general configuration file for the above task is [FactoryTaskNutBoltScrew.yaml](../omniisaacgymenvs/cfg/task/FactoryTaskNutBoltScrew.yaml).
- The training configuration file for the above task is [FactoryTaskNutBoltScrewPPO.yaml](../omniisaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml).
Running inference with pre-trained model can be launched with command line argument `task=FactoryTaskNutBoltScrew test=True checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/factory_task_nut_bolt_screw.pth`
If you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
Also note that our original formulations of SDF collisions and contact reduction were developed by [Macklin, et al.](https://dl.acm.org/doi/abs/10.1145/3384538) and [Moravanszky and Terdiman](https://scholar.google.com/scholar?q=Game+Programming+Gems+4%2C+chapter+Fast+Contact+Reduction+for+Dynamics+Simulation), respectively.
<img src="https://user-images.githubusercontent.com/6352136/205978286-fa2ae714-a3cb-4acd-9f5f-a467338a8bb3.gif"/>
Release Notes
=============
2023.1.0b - November 02, 2023
-----------------------------
Changes
-------
- Update docker scripts to Isaac Sim docker image 2023.1.0-hotfix.1
- Use omniisaacgymenvs module root for app file parsing
- Update FrankaDeformable physics dt for better training stability
Fixes
-----
- Fix CartpoleCamera num_observations value
- Fix missing import in startup randomization for mass and density
2023.1.0a - October 20, 2023
----------------------------
Fixes
-----
- Fix extension loading error in camera app file
2023.1.0 - October 18, 2023
---------------------------
Additions
---------
- Add support for Warp backend task implementation
- Add Warp-based RL examples: Cartpole, Ant, Humanoid
- Add new Factory environments for place and screw: FactoryTaskNutBoltPlace and FactoryTaskNutBoltScrew
- Add new camera-based Cartpole example: CartpoleCamera
- Add new deformable environment showing Franka picking up a deformable tube: FrankaDeformable
- Add support for running OIGE as an extension in Isaac Sim
- Add options to filter collisions between environments and specify global collision filter paths to `RLTask.set_up_scene()`
- Add multinode training support
- Add dockerfile with OIGE
- Add option to select kit app file from command line argument `kit_app`
- Add `rendering_dt` parameter to the task config file for setting rendering dt. Defaults to the same value as the physics dt.
Changes
-------
- `use_flatcache` flag has been renamed to `use_fabric`
- Update hydra-core version to 1.3.2, omegaconf version to 2.3.0
- Update rlgames to version 1.6.1.
- The `get_force_sensor_forces` API for articulations is now deprecated and replaced with `get_measured_joint_forces`
- Remove unnecessary cloning of buffers in VecEnv classes
- Only enable omni.replicator.isaac when domain randomization or cameras are enabled
- The multi-threaded launch script `rlgames_train_mt.py` has been re-designed to support the extension workflow. This script can no longer be used to launch a training run from python. Please use `rlgames_train.py` instead.
- Restructures for environments to support the new extension-based workflow
- Add async workflow to factory pick environment to support extension-based workflow
- Update docker scripts with cache directories
Fixes
-----
- Fix errors related to setting velocities to kinematic markers in Ingenuity and Quadcopter environments
- Fix contact-related issues with quadruped assets
- Fix errors in physics APIs when returning empty tensors
- Fix orientation correctness issues when using some assets with omni.isaac.core. Additional orientations applied to accommodate for the error are no longer required (i.e. ShadowHand)
- Updated the deprecated config name `seq_len` used with RNN networks to `seq_length`
2022.2.1 - March 16, 2023
-------------------------
Additions
---------
- Add FactoryTaskNutBoltPick example
- Add Ant and Humanoid SAC training examples
- Add multi-GPU support for training
- Add utility scripts for launching Isaac Sim docker with OIGE
- Add support for livestream through the Omniverse Streaming Client
Changes
-------
- Change rigid body fixed_base option to make_kinematic, avoiding creation of unnecessary articulations
- Update ShadowHand, Ingenuity, Quadcopter and Crazyflie marker objects to use kinematics
- Update ShadowHand GPU buffer parameters
- Disable PyTorch nvFuser for better performance
- Enable viewport and replicator extensions dynamically to maintain order of extension startup
- Separate app files for headless environments with rendering (requires Isaac Sim update)
- Update rl-games to v1.6.0
Fixes
-----
- Fix material property randomization at run-time, including friction and restitution (requires Isaac Sim update)
- Fix a bug in contact reporting API where incorrect values were being reported (requires Isaac Sim update)
- Enable render flag in Isaac Sim when enable_cameras is set to True
- Add root pose and velocity reset to BallBalance environment
2.0.0 - December 15, 2022
-------------------------
Additions
---------
- Update to Viewport 2.0
- Allow for runtime mass randomization on GPU pipeline
- Add runtime mass randomization to ShadowHand environments
- Introduce `disable_contact_processing` simulation parameter for faster contact processing
- Use physics replication for cloning by default for faster load time
Changes
-------
- Update AnymalTerrain environment to use contact forces
- Update Quadcopter example to apply local forces
- Update training parameters for ShadowHandOpenAI_FF environment
- Rename rlgames_play.py to rlgames_demo.py
Fixes
-----
- Remove fix_base option from articulation configs
- Fix in_hand_manipulation random joint position sampling on reset
- Fix mass and density randomization in MT training script
- Fix actions/observations noise randomization in MT training script
- Fix random seed when domain randomization is enabled
- Check whether simulation is running before executing pre_physics_step logic
1.1.0 - August 22, 2022
-----------------------
Additions
---------
- Additional examples: Anymal, AnymalTerrain, BallBalance, Crazyflie, FrankaCabinet, Ingenuity, Quadcopter
- Add OpenAI variantions for Feed-Forward and LSTM networks for ShadowHand
- Add domain randomization framework using `omni.replicator.isaac`
- Add AnymalTerrain interactable demo
- Automatically disable `omni.kit.window.viewport` and `omni.physx.flatcache` extensions in headless mode to improve start-up load time
- Introduce `reset_xform_properties` flag for initializing Views of cloned environments to reduce load time
- Add WandB support
- Update RL-Games version to 1.5.2
Fixes
-----
- Correctly sets simulation device for GPU simulation
- Fix omni.client import order
- Fix episode length reset condition for ShadowHand and AllegroHand
1.0.0 - June 03, 2022
----------------------
- Initial release for RL examples with Isaac Sim
- Examples provided: AllegroHand, Ant, Cartpole, Humanoid, ShadowHand
# Changelog
## [0.0.0] - 2023-07-13
### Added
- UI for launching RL tasks
## Transferring Policies from Isaac Gym Preview Releases
This section delineates some of the differences between the standalone
[Isaac Gym Preview Releases](https://developer.nvidia.com/isaac-gym) and
Isaac Sim reinforcement learning extensions, in hopes of facilitating the
process of transferring policies trained in the standalone preview releases
to Isaac Sim.
### Isaac Sim RL Extensions
Unlike the monolithic standalone Isaac Gym Preview Releases, Omniverse is
a highly modular system, with functionality split between various [Extensions](https://docs.omniverse.nvidia.com/extensions/latest/index.html).
The APIs used by typical robotics RL systems are split between a handful of
extensions in Isaac Sim. These include `omni.isaac.core`, which provides
tensorized access to physics simulation state as well as a task management
framework, the `omni.isaac.cloner` extension for creating many copies of
your environments, and the `omni.isaac.gym` extension for interfacing with
external RL training libraries.
For naming clarity, we'll refer collectively to the extensions used for RL
within Isaac Sim as the **Isaac Sim RL extensions**, in contrast with the
older **Isaac Gym Preview Releases**.
### Quaternion Convention
The Isaac Sim RL extensions use various classes and methods in `omni.isaac.core`,
which adopts `wxyz` as the quaternion convention. However, the quaternion
convention used in Isaac Gym Preview Releases is `xyzw`. Therefore, if a policy
trained in one of the Isaac Gym Preview Releases takes in quaternions as part
of its observations, remember to switch all quaternions to use the `xyzw` convention
in the observation buffer `self.obs_buf`. Similarly, please ensure all quaternions
are in `wxyz` before passing them in any of the utility functions in `omni.isaac.core`.
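A minimal sketch of the conversion, assuming a quaternion tensor of shape `(num_envs, 4)`:

```python
import torch

def wxyz_to_xyzw(q: torch.Tensor) -> torch.Tensor:
    # (w, x, y, z) -> (x, y, z, w), e.g. before writing to self.obs_buf
    return q[:, [1, 2, 3, 0]]

def xyzw_to_wxyz(q: torch.Tensor) -> torch.Tensor:
    # (x, y, z, w) -> (w, x, y, z), e.g. before calling omni.isaac.core utilities
    return q[:, [3, 0, 1, 2]]
```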
### Assets
Isaac Sim provides [URDF](https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html)
and [MJCF](https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_mjcf.html) importers for translating URDF and MJCF assets into USD format.
Any robot or object assets must be in .usd, .usda, or .usdc format for Isaac Sim and Omniverse.
For more details on working with USD, please see https://docs.omniverse.nvidia.com/isaacsim/latest/reference_glossary.html#usd.
Importer tools are also available for other common geometry file formats, such as .obj, .fbx, and more.
Please see [Asset Importer](https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-importer.html) for more details.
### Joint Order
Isaac Sim's `ArticulationView` in `omni.isaac.core` assumes a breadth-first
ordering for the joints in a given kinematic tree. Specifically, for the following
kinematic tree, the method `ArticulationView.get_joint_positions` returns a
tensor of shape `(number of articulations in the view, number of joints in the articulation)`.
Along the second dimension of this tensor, the values represent the articulation's joint positions
in the following order: `[Joint 1, Joint 2, Joint 4, Joint 3, Joint 5]`. On the other hand,
the Isaac Gym Preview Releases assume a depth-first ordering for the joints in the kinematic
tree; In the example below, the joint orders would be the following: `[Joint 1, Joint 2, Joint 3, Joint 4, Joint 5]`.
<img src="./media/KinematicTree.png" height="300"/>
With this in mind, it is important to change the joint order to depth-first in
the observation buffer before feeding it into an existing policy trained in one of the
Isaac Gym Preview Releases. Similarly, you would also need to change the joint order
in the output (the action buffer) of the Isaac Gym Preview Release trained policy
to breadth-first before applying joint actions to articulations via methods in `ArticulationView`.
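As a sketch, the reordering can be expressed as an index permutation over the joint dimension (the permutation below matches the five-joint example above, and happens to be its own inverse since it only swaps Joint 3 and Joint 4):

```python
import torch

# breadth-first (Isaac Sim) <-> depth-first (Isaac Gym Preview Release)
permutation = torch.tensor([0, 1, 3, 2, 4])

# observations: reorder joint states read from ArticulationView to depth-first
obs_joint_pos = joint_pos_bfs[:, permutation]

# actions: reorder policy outputs back to breadth-first before applying them
actions_bfs = actions_dfs[:, permutation]
```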
### Physics Parameters
One factor that could dictate the success of policy transfer from Isaac Gym Preview
Releases to Isaac Sim is to ensure the physics parameters used in both simulations are
identical or very similar. In general, the `sim` parameters specified in the
task configuration `yaml` file overwrite the corresponding parameters in the USD asset.
However, there are additional parameters in the USD asset that are not included
in the task configuration `yaml` file. These additional parameters may sometimes
impact the performance of Isaac Gym Preview Release trained policies and hence need
modifications in the USD asset itself to match the values set in Isaac Gym Preview Releases.
For instance, the following parameters in the `RigidBodyAPI` could be modified in the
USD asset to yield better policy transfer performance:
| RigidBodyAPI Parameter | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:--------------------------:|
| Linear Damping | 0.00 | 0.00 |
| Angular Damping | 0.05 | 0.00 |
| Max Linear Velocity | inf | 1000 |
| Max Angular Velocity | 5729.58008 (deg/s) | 64 (rad/s) |
| Max Contact Impulse | inf | 1e32 |
<img src="./media/RigidBodyAPI.png" width="500"/>
Parameters in the `JointAPI` as well as the `DriveAPI` could be altered as well. Note
that the Isaac Sim UI assumes the unit of angle to be degrees. It is particularly
worth noting that the `Damping` and `Stiffness` parameters in the `DriveAPI` have the unit
of `1/deg` in the Isaac Sim UI but `1/rad` in Isaac Gym Preview Releases.
| Joint Parameter | Default Value in Isaac Sim | Default Value in Isaac Gym Preview Releases |
|:----------------------:|:--------------------------:|:--------------------------:|
| Maximum Joint Velocity | 1000000.0 (deg) | 100.0 (rad) |
<img src="./media/JointAPI.png" width="500"/>
### Differences in APIs
APIs for accessing physics states in Isaac Sim require the creation of an ArticulationView or RigidPrimView
object. Multiple view objects can be initialized for different articulations or bodies in the scene by defining
a regex expression that matches the paths of the desired objects. This approach eliminates the need of retrieving
body handles to slice states for specific bodies in the scene.
We have also removed `acquire` and `refresh` APIs in Isaac Sim. Physics states can be directly applied or retrieved
by using `set`/`get` APIs defined for the views.
New APIs provided in Isaac Sim no longer require explicit wrapping and un-wrapping of underlying buffers.
APIs can now work with tensors directly for reading and writing data. Most APIs in Isaac Sim also provide
the option to specify an `indices` parameter, which can be used when reading or writing data for a subset
of environments. Note that when setting states with the `indices` parameter, the shape of the states buffer
should match with the dimension of the `indices` list.
Note some naming differences between APIs in Isaac Gym Preview Release and Isaac Sim. Most `dof` related APIs have been
named to `joint` in Isaac Sim. `root_states` is now separated into different APIs for `world_poses` and `velocities`.
Similary, `dof_states` are retrieved individually in Isaac Sim as `joint_positions` and `joint_velocities`.
APIs in Isaac Sim also no longer follow the explicit `_tensors` or `_tensor_indexed` suffixes in naming.
Indexed versions of APIs now happen implicitly through the optional `indices` parameter.
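The following sketch illustrates the view-based pattern described above (the prim path expression, view name, and state buffers are illustrative):

```python
import torch
from omni.isaac.core.articulations import ArticulationView

# one view object matches all cloned robots via a path expression
robots = ArticulationView(prim_paths_expr="/World/envs/.*/Robot", name="robot_view")

# read states for all environments directly as tensors
world_pos, world_rot = robots.get_world_poses()
joint_pos = robots.get_joint_positions()

# write states for a subset of environments; the first dimension of the
# states buffer must match the length of the indices list
indices = torch.tensor([0, 2, 5], dtype=torch.long)
robots.set_joint_positions(new_joint_pos, indices=indices)
```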
### Task Configuration Files
There are a few modifications that need to be made to an existing Isaac Gym Preview Release
task `yaml` file in order for it to be compatible with the Isaac Sim RL extensions.
#### Frequencies of Physics Simulation and RL Policy
The way in which physics simulation frequency and RL policy frequency are specified is different
between Isaac Gym Preview Releases and Isaac Sim, dictated by the following three
parameters: `dt`, `substeps`, and `controlFrequencyInv`.
- `dt`: The simulation time difference between each simulation step.
- `substeps`: The number of physics steps within one simulation step. *i.e.* if `dt: 1/60`
and `substeps: 4`, physics is simulated at 240 hz.
- `controlFrequencyInv`: The control decimation of the RL policy, which is the number of
simulation steps between RL actions. *i.e.* if `dt: 1/60` and `controlFrequencyInv: 2`,
RL policy is running at 30 hz.
In Isaac Gym Preview Releases, all three of the above parameters are used to specify
the frequencies of physics simulation and RL policy. However, Isaac Sim only uses `controlFrequencyInv` and `dt` as `substeps` is always fixed at `1`. Note that despite
only using two parameters, Isaac Sim can still achieve the same substeps definition
as Isaac Gym. For example, if in an Isaac Gym Preview Release policy, we set `substeps: 2`,
`dt: 1/60` and `controlFrequencyInv: 1`, we can achieve the equivalent in Isaac Sim
by setting `controlFrequencyInv: 2` and `dt: 1/120`.
In the Isaac Sim RL extensions, `dt` is specified in the task configuration `yaml` file
under `sim`, whereas `controlFrequencyInv` is a parameter under `env`.
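Continuing the example above, the Isaac Sim equivalent of `dt: 1/60`, `substeps: 2`, `controlFrequencyInv: 1` would be sketched as:

```yaml
env:
  controlFrequencyInv: 2
sim:
  dt: 0.008333  # 1/120: physics still effectively simulated at 120 Hz
```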
#### Physx Parameters
Parameters under `physx` in the task configuration `yaml` file remain mostly unchanged.
In Isaac Gym Preview Releases, `use_gpu` is frequently set to
`${contains:"cuda",${....sim_device}}`. For Isaac Sim, please ensure this is changed
to `${eq:${....sim_device},"gpu"}`.
In Isaac Gym Preview Releases, GPU buffer sizes are specified using the following two parameters:
`default_buffer_size_multiplier` and `max_gpu_contact_pairs`. With the Isaac Sim RL extensions,
these two parameters are no longer used; instead, the various GPU buffer sizes can be
set explicitly.
For instance, in the [Humanoid task configuration file](../omniisaacgymenvs/cfg/task/Humanoid.yaml),
GPU buffer sizes are specified as follows:
```yaml
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
```
Please refer to the [Troubleshooting](./troubleshoot.md#simulation) documentation should
you encounter errors related to GPU buffer sizes.
#### Articulation Parameters
The articulation parameters of each actor can now be individually specified in the Isaac Sim
task configuration `yaml` file. The following is an example template for setting these parameters:
```yaml
ARTICULATION_NAME:
# -1 to use default values
override_usd_defaults: False
fixed_base: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0
```
These articulation parameters can be parsed using the `parse_actor_config` method in the
[SimConfig](../omniisaacgymenvs/utils/config_utils/sim_config.py) class, which can then be applied
to a prim in simulation via the `apply_articulation_settings` method. A concrete example of this
is the following code snippet from the [HumanoidTask](../omniisaacgymenvs/tasks/humanoid.py#L75):
```python
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid"))
```
#### Additional Simulation Parameters
- `use_fabric`: Setting this parameter to `True` enables [PhysX Fabric](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#flatcache), which offers a significant increase in simulation speed. However, this parameter must
be set to `False` if soft-body simulation is required, because `PhysX Fabric` currently only supports rigid-body simulation.
- `enable_scene_query_support`: Setting this parameter to `True` allows the user to interact with prims in the scene. Keeping this setting to `False` during
training improves simulation speed. Note that this parameter is always set to `True` if in test/inference mode to enable user interaction with trained models.
### Training Configuration Files
The Omniverse Isaac Gym RL Environments are trained using a third-party highly-optimized RL library,
[rl_games](https://github.com/Denys88/rl_games), which is also used to train the Isaac Gym Preview Release examples
in [IsaacGymEnvs](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs). Therefore, the rl_games training
configuration `yaml` files in Isaac Sim are compatible with those from IsaacGymEnvs. However, please
add the following lines under `config` in the training configuration `yaml` files (*i.e.*
line 41-42 in [HumanoidPPO.yaml](../omniisaacgymenvs/cfg/train/HumanoidPPO.yaml#L41)) to ensure
RL training runs on the intended device.
```yaml
device: ${....rl_device}
device_name: ${....rl_device}
```
## RL Framework
### Overview
Our RL examples are built on top of Isaac Sim's RL framework provided in `omni.isaac.gym`. Tasks are implemented following `omni.isaac.core`'s Task structure. PPO training is performed using the [rl_games](https://github.com/Denys88/rl_games) library, but we provide the flexibility to use other RL libraries for training.
For a list of examples provided, refer to the
[RL List of Examples](rl_examples.md)
### Class Definition
The RL ecosystem can be viewed as three main pieces: the Task, the RL policy, and the Environment wrapper that provides an interface for communication between the task and the RL policy.
#### Task
The Task class is where main task logic is implemented, such as computing observations and rewards. This is where we can collect states of actors in the scene and apply controls or actions to our actors.
For convenience, we provide a base Task class, `RLTask`, which inherits from the `BaseTask` class in `omni.isaac.core`. This class is responsible for dealing with common configuration parsing, buffer initialization, and environment creation. Note that some config parameters and buffers in this class are specific to the rl_games library, and it is not necessary to inherit new tasks from `RLTask`.
A few key methods in `RLTask` include:
* `__init__(self, name: str, env: VecEnvBase, offset: np.ndarray = None)` - Parses config values common to all tasks and initializes action/observation spaces if not defined in the child class. Defines a GridCloner by default and creates a base USD scope for holding all environment prims. Can be called from child class.
* `set_up_scene(self, scene: Scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True)` - Adds ground plane and creates clones of environment 0 based on values specified in config. Can be called from child class `set_up_scene()`.
* `pre_physics_step(self, actions: torch.Tensor)` - Takes in actions buffer from RL policy. Can be overridden by child class to process actions.
* `post_physics_step(self)` - Controls flow of RL data processing by triggering APIs to compute observations, retrieve states, compute rewards, resets, and extras. Will return observation, reward, reset, and extras buffers.
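To make this flow concrete, the following is a simplified sketch of the order in which `post_physics_step` triggers the task APIs (an illustration of the flow, not the exact implementation in `RLTask`):
```python
def post_physics_step(self):
    self.progress_buf[:] += 1
    self.get_observations()   # fills self.obs_buf
    self.get_states()         # fills the states buffer (asymmetric actor-critic)
    self.calculate_metrics()  # fills self.rew_buf
    self.is_done()            # fills self.reset_buf
    self.get_extras()         # fills self.extras
    return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
```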
#### Environment Wrappers
As part of the RL framework in Isaac Sim, we have introduced environment wrapper classes in `omni.isaac.gym` for RL policies to communicate with simulation in Isaac Sim. This class provides a vectorized interface for common RL APIs used by `gym.Env` and can be easily extended towards RL libraries that require additional APIs. We show an example of this extension process in this repository, where we extend `VecEnvBase` as provided in `omni.isaac.gym` to include additional APIs required by the rl_games library.
Commonly used APIs provided by the base wrapper class `VecEnvBase` include:
* `render(self, mode: str = "human")` - renders the current frame
* `close(self)` - closes the simulator
* `seed(self, seed: int = -1)` - sets a seed. Use `-1` for a random seed.
* `step(self, actions: Union[np.ndarray, torch.Tensor])` - triggers task `pre_physics_step` with actions, steps simulation and renderer, computes observations, rewards, dones, and returns state buffers
* `reset(self)` - triggers task `reset()`, steps simulation, and re-computes observations
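Taken together, these APIs can drive a simple random-action loop. Below is a minimal sketch, assuming `my_task` is an already-constructed task instance; note that the exact `step()` return signature can vary across gym/gymnasium versions:
```python
import torch
from omni.isaac.gym.vec_env import VecEnvBase

env = VecEnvBase(headless=True)
env.set_task(my_task, backend="torch")  # my_task: a placeholder task instance

obs = env.reset()
for _ in range(100):
    # sample a random action from the task-defined action space
    actions = torch.from_numpy(env.action_space.sample())
    obs, rewards, dones, info = env.step(actions)
env.close()
```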
##### Multi-Threaded Environment Wrapper for Extension Workflows
`VecEnvBase` is a simple interface that’s designed to provide commonly used `gym.Env` APIs required by RL libraries. Users can create an instance of this class, attach their task to the interface, and provide the wrapper instance to the RL policy. Since the RL algorithm maintains the main loop of execution, interaction with the UI and environments in the scene can be limited and may interfere with the training loop.
We also provide another environment wrapper class called `VecEnvMT`, which is designed to isolate the RL policy in a new thread, separate from the main simulation and rendering thread. This class provides the same set of interfaces as `VecEnvBase`, but also provides threaded queues for sending and receiving actions and states between the RL policy and the task. In order to use this wrapper interface, users have to implement a `TrainerMT` class, which should implement a `run()` method that initiates the RL loop on a new thread. We show an example of this in OmniIsaacGymEnvs under `omniisaacgymenvs/utils/rlgames/rlgames_train_mt.py`. The setup for using `VecEnvMT` is more involved compared to the single-threaded `VecEnvBase` interface, but will allow users to have more control over starting and stopping the training loop through interaction with the UI.
Note that `VecEnvMT` has a timeout variable, which defaults to 90 seconds. If either the RL thread waiting for physics state exceeds the timeout amount or the simulation thread waiting for RL actions exceeds the timeout amount, the threaded queues will throw an exception and terminate training. For larger scenes that require longer simulation or training time, try increasing the timeout variable in `VecEnvMT` to prevent unnecessary timeouts. This can be done by passing in a `timeout` argument when calling `VecEnvMT.initialize()`.
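As a sketch, the timeout can be raised when initializing the wrapper; the queue arguments below are placeholders, as the exact initialization depends on the trainer setup:
```python
env = VecEnvRLGamesMT(headless=True)
# action_queue and data_queue are hypothetical placeholders for the trainer's queues
env.initialize(action_queue, data_queue, timeout=300)  # timeout in seconds
```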
This wrapper is currently only supported with the [extension workflow](extension_workflow.md).
### Creating New Examples
For simplicity, we will focus on using the single-threaded `VecEnvBase` interface in this tutorial.
To run any example, first make sure an instance of `VecEnvBase`, or of a class derived from `VecEnvBase`, is initialized.
This will be required as an argument to our new Task. For example:
```python
env = VecEnvBase(headless=False)
```
The headless parameter indicates whether a viewer should be created for visualizing results.
Then, create our task class, extending it from `RLTask`:
```python
class MyNewTask(RLTask):
def __init__(
self,
name: str, # name of the Task
sim_config: SimConfig, # SimConfig instance for parsing cfg
env: VecEnvBase, # env instance of VecEnvBase or inherited class
offset=None # transform offset in World
) -> None:
# parse configurations, set task-specific members
...
self._num_observations = 4
self._num_actions = 1
# call parent class’s __init__
RLTask.__init__(self, name, env)
```
The `__init__` method should take 4 arguments:
* `name`: a string for the name of the task (required by BaseTask)
* `sim_config`: an instance of `SimConfig` used for config parsing, can be `None`. This object is created in `omniisaacgymenvs/utils/task_utils.py`.
* `env`: an instance of `VecEnvBase` or an inherited class of `VecEnvBase`
* `offset`: any offset required to place the `Task` in `World` (required by `BaseTask`)
In the `__init__` method of `MyNewTask`, we can populate any task-specific parameters, such as the dimensions of observations and actions, and retrieve data from config dictionaries. Be sure to call `RLTask`’s `__init__` at the end of the method to perform additional data initialization.
Next, we can implement the methods required by the RL framework. These methods follow APIs defined in `omni.isaac.core` `BaseTask` class. Below is an example of a simple implementation for each method.
```python
def set_up_scene(self, scene: Scene) -> None:
# implement environment setup here
add_prim_to_stage(my_robot) # add a robot actor to the stage
super().set_up_scene(scene) # pass scene to parent class - this method in RLTask also uses GridCloner to clone the robot and adds a ground plane if desired
self._my_robots = ArticulationView(...) # create a view of robots
scene.add(self._my_robots) # add view to scene for initialization
def post_reset(self):
# implement any logic required for simulation on-start here
pass
def pre_physics_step(self, actions: torch.Tensor) -> None:
# implement logic to be performed before physics steps
self.perform_reset()
self.apply_action(actions)
def get_observations(self) -> dict:
# implement logic to retrieve observation states
self.obs_buf = self.compute_observations()
def calculate_metrics(self) -> None:
# implement logic to compute rewards
self.rew_buf = self.compute_rewards()
def is_done(self) -> None:
# implement logic to update dones/reset buffer
self.reset_buf = self.compute_resets()
```
To launch the new example from one of our training scripts, add `MyNewTask` to `omniisaacgymenvs/utils/task_util.py`. In `initialize_task()`, add an import to the `MyNewTask` class and add an instance to the `task_map` dictionary to register it into the command line parsing.
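As a rough sketch (surrounding code abbreviated, and the module path for `MyNewTask` is an assumption), the registration amounts to one import and one dictionary entry:
```python
def initialize_task(config, env, init_sim=True):
    from omniisaacgymenvs.tasks.my_new_task import MyNewTask  # assumed module path

    task_map = {
        # ... existing task entries ...
        "MyNewTask": MyNewTask,
    }
```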
To use the Hydra config parsing system, also add a task and train config files into `omniisaacgymenvs/cfg`. The config files should be named `cfg/task/MyNewTask.yaml` and `cfg/train/MyNewTaskPPO.yaml`.
Finally, we can launch `MyNewTask` with:
```bash
PYTHON_PATH random_policy.py task=MyNewTask
```
### Using a New RL Library
In this repository, we provide an example of extending Isaac Sim's environment wrapper classes to work with the rl_games library, which can be found at `omniisaacgymenvs/envs/vec_env_rlgames.py` and `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`.
The first script, `omniisaacgymenvs/envs/vec_env_rlgames.py`, extends from `VecEnvBase`.
```python
from omni.isaac.gym.vec_env import VecEnvBase
class VecEnvRLGames(VecEnvBase):
```
One of the features in rl_games is the support for asymmetrical actor-critic policies, which requires a `states` buffer in addition to the `observations` buffer. Thus, we have overridden a few of the methods in `VecEnvBase` to incorporate this requirement.
```python
def set_task(
self, task, backend="numpy", sim_params=None, init_sim=True
) -> None:
super().set_task(task, backend, sim_params, init_sim) # class VecEnvBase's set_task to register task to the environment instance
# special variables required by rl_games
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
# we clamp the actions so that values are within a defined range
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
# pass actions buffer to task for processing
self._task.pre_physics_step(actions)
# allow users to specify the control frequency through config
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
# compute new buffers
self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()
self._states = self._task.get_states() # special buffer required by rl_games
# return buffers in format required by rl_games
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
```
Similarly, we also have a multi-threaded version of the rl_games environment wrapper implementation, `omniisaacgymenvs/envs/vec_env_rlgames_mt.py`. This class extends from `VecEnvMT` and `VecEnvRLGames`:
```python
from omni.isaac.gym.vec_env import VecEnvMT
from .vec_env_rlgames import VecEnvRLGames
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
```
In this class, we also have a special method `_parse_data(self, data)`, which is required to be implemented to parse dictionary values passed through queues. Since multiple buffers of data are required by the RL policy, we concatenate all of the buffers in a single dictionary, and send that to the queue to be received by the RL thread.
```python
def _parse_data(self, data):
self._obs = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._rew = data["rew"].to(self._task.rl_device).clone()
self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._resets = data["reset"].to(self._task.rl_device).clone()
self._extras = data["extras"].copy()
```
### API Limitations
#### omni.isaac.core Setter APIs
Setter APIs in omni.isaac.core for ArticulationView, RigidPrimView, and RigidContactView should only be called once per simulation step for
each view instance per API. This means that for use cases where multiple calls to the same setter API from the same view instance are required,
users will need to cache the states to be set for intermediate calls, and make only one call to the setter API prior to stepping physics with
the complete buffer containing all cached states.
If multiple calls to the same setter API from the same view object are made within the simulation step,
subsequent calls will override the states that have been set by prior calls to the same API,
voiding the previous calls to the API. The API can be called again once a simulation step is made.
For example, the below code will override states.
```python
my_view.set_world_poses(positions=[[0, 0, 1]], orientations=[[1, 0, 0, 0]], indices=[0])
# this call will void the previous call
my_view.set_world_poses(positions=[[0, 1, 1]], orientations=[[1, 0, 0, 0]], indices=[1])
my_world.step()
```
Instead, the below code should be used.
```python
my_view.set_world_poses(positions=[[0, 0, 1], [0, 1, 1]], orientations=[[1, 0, 0, 0], [1, 0, 0, 0]], indices=[0, 1])
my_world.step()
```
#### omni.isaac.core Getter APIs
Getter APIs for cloth simulation may return stale states when used with the GPU pipeline. This is because the physics simulation requires a simulation step
to occur in order to refresh the GPU buffers with new states. Therefore, when a getter API is called after a setter API before a
simulation step, the states returned from the getter API may not reflect the values that were set using the setter API.
For example:
```python
my_view.set_world_positions(positions=[[0, 0, 1]], indices=[0])
# Values may be stale when called before step
positions = my_view.get_world_positions() # positions may not match [[0, 0, 1]]
my_world.step()
# Values will be updated when called after step
positions = my_view.get_world_positions() # positions will reflect the new states
```
#### Performing Resets
When resetting the states of actors, impulses generated by previous target or effort controls
will continue to be carried over from the previous states in simulation.
Therefore, depending on the time step, the masses of the objects, and the magnitude of the impulses,
the difference between the desired reset state and the observed first state after reset can be large.
To eliminate this issue, users should also reset any position/velocity targets or effort controllers
to the reset state or zero state when resetting actor states. For setting joint positions and velocities
using the omni.isaac.core ArticulationView APIs, position targets and velocity targets will
automatically be set to the same states as joint positions and velocities.
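For instance, a minimal reset sketch using the `ArticulationView` APIs (buffer and variable names are placeholders) could look like:
```python
# reset joint states; position/velocity targets are set to match automatically
self._robots.set_joint_positions(default_dof_pos, indices=env_ids)
self._robots.set_joint_velocities(default_dof_vel, indices=env_ids)
# clear any previously applied efforts so that no stale impulses carry over
self._robots.set_joint_efforts(torch.zeros_like(default_dof_pos), indices=env_ids)
```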
#### Massless Links
It may be helpful in some scenarios to introduce dummy bodies into articulations for
retrieving transformations at certain locations of the articulation. Although it is possible
to introduce rigid bodies without Mass or Collision APIs and attach them to the articulation
with fixed joints, this can sometimes cause physics instabilities in simulation. To prevent
instabilities from occurring, it is recommended to add a dummy geometry to the rigid body
and include both Mass and Collision APIs. The mass of the geometry can be set to a very
small value, such as 0.0001, to avoid modifying physical behaviors of the articulation.
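A minimal sketch of this setup, assuming an existing `stage` handle and a hypothetical dummy body path, is:
```python
from pxr import UsdGeom, UsdPhysics

# dummy geometry parented under the dummy body (hypothetical path)
geom = UsdGeom.Cube.Define(stage, "/Robot/dummy_link/collision_cube")
prim = geom.GetPrim()
# tiny mass to avoid altering the articulation's physical behavior
UsdPhysics.MassAPI.Apply(prim).CreateMassAttr(0.0001)
# collider present but disabled, preserving the articulation's contact behavior
UsdPhysics.CollisionAPI.Apply(prim).CreateCollisionEnabledAttr(False)
```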
Similarly, as in the sketch above, we can also disable collision on the geometry's Collision API to preserve
the contact behavior of the articulation. | 15,846 | Markdown | 58.575188 | 862 | 0.754007 |
Virlus/OmniIsaacGymEnvs/docs/README.md | # Usage
To enable this extension, go to the Extension Manager menu and enable the omniisaacgymenvs extension. | 105 | Markdown | 34.333322 | 96 | 0.828571 |
Virlus/OmniIsaacGymEnvs/docs/index.rst | RL Examples [omniisaacgymenvs]
######################################################
| 86 | reStructuredText | 27.999991 | 54 | 0.302326 |
2820207922/isaac_ws/main.py | #launch Isaac Sim before any other imports
#default first two lines in any standalone application
from omni.isaac.kit import SimulationApp
# This sample enables a livestream server to connect to when running headless
KIT_CONFIG = {
"width": 1280,
"height": 720,
"window_width": 1920,
"window_height": 1080,
"headless": True,
"renderer": "RayTracedLighting",
"display_options": 3286, # Set display options to show default grid
}
kit = SimulationApp(KIT_CONFIG)
from omni.isaac.core.articulations import Articulation
from omni.isaac.sensor import IMUSensor
from omni.importer.urdf import _urdf
from omni.isaac.dynamic_control import _dynamic_control
from pxr import Gf, PhysxSchema, Sdf, UsdLux, UsdPhysics, Tf
import omni.kit.commands
import numpy as np
import math
def quaternion_to_euler(q):
"""
Convert a quaternion into euler angles (yaw, roll, pitch)
Quaternion format: [w, x, y, z]
Euler angles order: yaw (Z), roll (X), pitch (Y)
"""
# Extract the values from quaternion
w, x, y, z = q
# Pre-calculate common terms
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x**2 + y**2)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = np.clip(t2, a_min=-1.0, a_max=1.0)
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y**2 + z**2)
yaw_z = math.atan2(t3, t4)
return yaw_z, roll_x, pitch_y # Order: yaw, roll, pitch
# Acquire the URDF extension interface
urdf_interface = _urdf.acquire_urdf_interface()
# Set the settings in the import config
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = False
import_config.fix_base = False
import_config.import_inertia_tensor = True
import_config.distance_scale = 1.0
import_config.density = 0.0
import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_VELOCITY
import_config.default_drive_strength = 0.0
import_config.default_position_drive_damping = 0.0
import_config.convex_decomp = False
import_config.self_collision = False
import_config.create_physics_scene = True
import_config.make_default_prim = False
# Get path to extension data:
URDF_PATH = "balance_infantry/model.urdf"
DEST_PATH = "balance_infantry/model/model.usd"
# Import URDF, stage_path contains the path the path to the usd prim in the stage.
status, stage_path = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=URDF_PATH,
import_config=import_config,
get_articulation_root=True,
)
# Get stage handle
stage = omni.usd.get_context().get_stage()
# Enable physics
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
# Set gravity
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(9.81)
# Set solver settings
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Set limit
LOWER_LIMIT_ANGLE = 0
UPPER_LIMIT_ANGLE = 70
left_front_joint_prim = UsdPhysics.RevoluteJoint.Get(stage, "/balance_infantry/base_link/joint1")
left_front_joint_prim.GetLowerLimitAttr().Set(LOWER_LIMIT_ANGLE)
left_front_joint_prim.GetUpperLimitAttr().Set(UPPER_LIMIT_ANGLE)
left_back_joint_prim = UsdPhysics.RevoluteJoint.Get(stage, "/balance_infantry/base_link/joint2")
left_back_joint_prim.GetLowerLimitAttr().Set(-UPPER_LIMIT_ANGLE)
left_back_joint_prim.GetUpperLimitAttr().Set(-LOWER_LIMIT_ANGLE)
right_front_joint_prim = UsdPhysics.RevoluteJoint.Get(stage, "/balance_infantry/base_link/joint7")
right_front_joint_prim.GetLowerLimitAttr().Set(LOWER_LIMIT_ANGLE)
right_front_joint_prim.GetUpperLimitAttr().Set(UPPER_LIMIT_ANGLE)
right_back_joint_prim = UsdPhysics.RevoluteJoint.Get(stage, "/balance_infantry/base_link/joint6")
right_back_joint_prim.GetLowerLimitAttr().Set(-UPPER_LIMIT_ANGLE)
right_back_joint_prim.GetUpperLimitAttr().Set(-LOWER_LIMIT_ANGLE)
# Set constraint
left_wheel_link = stage.GetPrimAtPath("/balance_infantry/left_wheel_link")
left_hole_link = stage.GetPrimAtPath("/balance_infantry/left_hole_link")
left_constraint = UsdPhysics.RevoluteJoint.Define(stage, "/balance_infantry/base_link/left_constraint")
left_constraint.CreateBody0Rel().SetTargets([left_wheel_link.GetPath()])
left_constraint.CreateBody1Rel().SetTargets([left_hole_link.GetPath()])
left_constraint.CreateAxisAttr().Set("X")
left_constraint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
left_constraint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
left_constraint.CreateExcludeFromArticulationAttr().Set(True)
right_wheel_link = stage.GetPrimAtPath("/balance_infantry/right_wheel_link")
right_hole_link = stage.GetPrimAtPath("/balance_infantry/right_hole_link")
right_constraint = UsdPhysics.RevoluteJoint.Define(stage, "/balance_infantry/base_link/right_constraint")
right_constraint.CreateBody0Rel().SetTargets([right_wheel_link.GetPath()])
right_constraint.CreateBody1Rel().SetTargets([right_hole_link.GetPath()])
right_constraint.CreateAxisAttr().Set("X")
right_constraint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
right_constraint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
right_constraint.CreateExcludeFromArticulationAttr().Set(True)
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=150.0,
position=Gf.Vec3f(0, 0, -0.3),
color=Gf.Vec3f(0.3),
)
# Add lighting
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
# Start simulation
omni.timeline.get_timeline_interface().play()
# perform one simulation step so physics is loaded and dynamic control works.
kit.update()
art = Articulation(prim_path=stage_path)
art.initialize()
if not art.handles_initialized:
print(f"{stage_path} is not an articulation")
else:
print(f"Got articulation {stage_path} with handle {art.articulation_handle}")
dc = _dynamic_control.acquire_dynamic_control_interface()
dc.wake_up_articulation(art.articulation_handle)
dof_properties = _dynamic_control.DofProperties()
dof_properties.damping = 0.0
dof_properties.stiffness = 0.0
dof_properties.max_effort = 100.0
dof_properties.max_velocity = 10.0
left_constraint = dc.find_articulation_dof(art.articulation_handle, "left_constraint")
right_constraint = dc.find_articulation_dof(art.articulation_handle, "right_constraint")
dc.set_dof_properties(left_constraint, dof_properties)
dc.set_dof_properties(right_constraint, dof_properties)
dof_properties.damping = 100.0
dof_properties.stiffness = 0.0
dof_properties.max_effort = 100.0
dof_properties.max_velocity = 10.0
left_wheel_joint = dc.find_articulation_dof(art.articulation_handle, "joint4")
right_wheel_joint = dc.find_articulation_dof(art.articulation_handle, "joint9")
dc.set_dof_properties(left_wheel_joint, dof_properties)
dc.set_dof_properties(right_wheel_joint, dof_properties)
left_front_joint = dc.find_articulation_dof(art.articulation_handle, "joint1")
left_back_joint = dc.find_articulation_dof(art.articulation_handle, "joint2")
right_front_joint = dc.find_articulation_dof(art.articulation_handle, "joint7")
right_back_joint = dc.find_articulation_dof(art.articulation_handle, "joint6")
dc.set_dof_properties(left_front_joint, dof_properties)
dc.set_dof_properties(left_back_joint, dof_properties)
dc.set_dof_properties(right_front_joint, dof_properties)
dc.set_dof_properties(right_back_joint, dof_properties)
# Set IMU sensor
# imu_sensor = IMUSensor(
# prim_path="/balance_infantry/base_link/imu_sensor",
# name="imu",
# frequency=100,
# translation=np.array([0.0, -0.2, 0.1]),
# )
# imu_sensor.initialize()
if not stage:
print("Stage could not be used.")
else:
for prim in stage.Traverse():
prim_path = prim.GetPath()
prim_type = prim.GetTypeName()
print(f"prim_path: {prim_path}, prim_type: {prim_type}")
k = 0
torque = 10
# perform simulation
while kit.is_running():
# Run in realtime mode, we don't specify the step size
k = k + 1
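    # flip the leg-joint torque direction every 500 steps to rock the chassis back and forth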
if k // 500 % 2 == 1:
dc.set_dof_effort(left_front_joint, torque)
dc.set_dof_effort(left_back_joint, -torque)
dc.set_dof_effort(right_front_joint, torque)
dc.set_dof_effort(right_back_joint, -torque)
else:
dc.set_dof_effort(left_front_joint, -torque)
dc.set_dof_effort(left_back_joint, torque)
dc.set_dof_effort(right_front_joint, -torque)
dc.set_dof_effort(right_back_joint, torque)
# imu_data = imu_sensor.get_current_frame()
# quaternion = imu_data['orientation']
# # quaternion = [1.0, 0.0, 0.0, 0.0]
# rotation = quaternion_to_euler(quaternion)
# euler_angles_deg = np.degrees(rotation)
# print(f"quaternion: {quaternion}, angle: {euler_angles_deg}")
kit.update()
# Shutdown and exit
omni.timeline.get_timeline_interface().stop()
kit.close()
| 9,170 | Python | 37.860169 | 105 | 0.735333 |
2820207922/isaac_ws/balance_train.py | from omni.isaac.gym.vec_env import VecEnvBase
env = VecEnvBase(headless=True, enable_livestream=True)
# env = VecEnvBase(headless=True)
from balance_task import BalanceTask
task = BalanceTask(name="Balance")
env.set_task(task, backend="torch")
from stable_baselines3 import PPO
import torch
# create agent from stable baselines
# model = PPO(
# "MlpPolicy",
# env,
# n_steps=1000,
# batch_size=1000,
# n_epochs=10,
# learning_rate=0.005,
# gamma=0.99,
# device="cuda:0",
# ent_coef=0.0,
# vf_coef=0.5,
# max_grad_norm=1.0,
# verbose=1,
# tensorboard_log="./balance_tensorboard"
# )
# torch.save(model.state_dict(),"model.pth")
model = PPO.load(
"models/ppo/ppo_balance0",
env=env,
device="cuda:0"
)
model.learn(total_timesteps=300000)
model.save("models/ppo/ppo_balance1")
env.close() | 856 | Python | 20.974358 | 55 | 0.661215 |
2820207922/isaac_ws/balance_play.py | # create isaac environment
from omni.isaac.gym.vec_env import VecEnvBase
env = VecEnvBase(headless=True, enable_livestream=True)
# create task and register task
from balance_task import BalanceTask
task = BalanceTask(name="Balance")
env.set_task(task, backend="torch")
# import stable baselines
from stable_baselines3 import PPO
# Run inference on the trained policy
model = PPO.load("models/ppo/ppo_balance0")
env._world.reset()
obs, _ = env.reset()
while env._simulation_app.is_running():
action, _states = model.predict(obs)
obs, rewards, terminated, truncated, info = env.step(action[0])
# print(f"obs: {obs}")
# print(f"action: {action}")
# print(f"obs: {obs}, rewards: {rewards}, terminated: {terminated}, truncated: {truncated}, info: {info}")
env.close() | 786 | Python | 31.791665 | 110 | 0.717557 |
2820207922/isaac_ws/balance_task.py | from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import create_prim
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from pxr import Gf, PhysxSchema, Sdf, UsdLux, UsdPhysics, Tf, UsdShade
from omni.importer.urdf import _urdf
import omni.kit.commands
from gymnasium import spaces
import numpy as np
import torch
import math
class BalanceTask(BaseTask):
def __init__(
self,
name,
offset=None
) -> None:
# print("running: __init__")
# task-specific parameters
self._reward_cnt = 0
self._orders = [0, 1, 2, 3]
self._left_wheel_target = 0.0
self._right_wheel_target = 0.0
self._angle_limit = 90.0 * math.pi / 180
self._vel_wheel_limit = 50.0
self._effort_leg_limit = 10.0
self._effort_wheel_limit = 20.0
# values used for defining RL buffers
self._num_observations = 15
self._num_actions = 6
self._device = "cpu"
self.num_envs = 1
# a few class buffers to store RL-related states
self.obs = torch.zeros((self.num_envs, self._num_observations))
self.obs_last = torch.zeros((self.num_envs, self._num_observations))
self.resets = torch.zeros((self.num_envs, 1))
# set the action and observation space for RL
self.action_space = spaces.Box(
np.ones(self._num_actions, dtype=np.float32) * -1.0, np.ones(self._num_actions, dtype=np.float32) * 1.0
)
self.observation_space = spaces.Box(
np.ones(self._num_observations, dtype=np.float32) * -np.Inf,
np.ones(self._num_observations, dtype=np.float32) * np.Inf,
)
# trigger __init__ of parent class
BaseTask.__init__(self, name=name, offset=offset)
def set_up_scene(self, scene) -> None:
# print("running: set_up_scene")
        # retrieve the file path for the robot USD file
        # usd_path = "balance_infantry/model/balance_infantry_no_constraint.usd"
        # import the balance robot URDF into our stage
# Set the settings in the import config
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = False
import_config.fix_base = False
import_config.import_inertia_tensor = True
import_config.distance_scale = 1.0
import_config.density = 0.0
import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_VELOCITY
import_config.default_drive_strength = 0.0
import_config.default_position_drive_damping = 0.0
import_config.convex_decomp = False
import_config.self_collision = False
import_config.create_physics_scene = True
import_config.make_default_prim = False
urdf_path = "balance_infantry/model.urdf"
status, robot_path = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=urdf_path,
import_config=import_config,
# get_articulation_root=True,
)
add_reference_to_stage(robot_path, "/World")
# Get stage handle
self.stage = omni.usd.get_context().get_stage()
if not self.stage:
print("Stage could not be used.")
else:
for prim in self.stage.Traverse():
prim_path = prim.GetPath()
prim_type = prim.GetTypeName()
print(f"prim_path: {prim_path}, prim_type: {prim_type}")
# Set material
self.wheel_material = DeformableMaterial(
prim_path="/World/balance_infantry/base_link/wheel_material",
name="wheel_material",
dynamic_friction=0.5,
youngs_modulus=6e6,
poissons_ratio=0.47,
elasticity_damping=0.00784,
damping_scale=0.1,
)
# print("wheel_material: ", self.wheel_material)
wheel_material_prim = self.stage.GetPrimAtPath("/World/balance_infantry/base_link/wheel_material")
# print("wheel_material_prim: ", wheel_material_prim)
wheel_material_shade = UsdShade.Material(wheel_material_prim)
# print("wheel_material_shade: ", wheel_material_shade)
left_wheel_link = self.stage.GetPrimAtPath("/World/balance_infantry/left_wheel_link")
right_wheel_link = self.stage.GetPrimAtPath("/World/balance_infantry/right_wheel_link")
UsdShade.MaterialBindingAPI(left_wheel_link).Bind(wheel_material_shade, UsdShade.Tokens.strongerThanDescendants)
UsdShade.MaterialBindingAPI(right_wheel_link).Bind(wheel_material_shade, UsdShade.Tokens.strongerThanDescendants)
        # create an ArticulationView wrapper for our robot - this can be extended towards accessing multiple robots
self._robots = ArticulationView(prim_paths_expr="/World/balance_infantry/base_link*", name="robot_view")
scene.add(self._robots)
# scene.add_default_ground_plane()
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=self.stage,
planePath="/groundPlane",
axis="Z",
size=150.0,
position=Gf.Vec3f(0, 0, -0.2),
color=Gf.Vec3f(0.2),
)
# set default camera viewport position and target
self.set_initial_camera_params()
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
set_camera_view(eye=camera_position, target=camera_target, camera_prim_path="/OmniverseKit_Persp")
def post_reset(self):
# print("running: post_reset")
self.robot_init()
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset(indices)
def robot_init(self):
self._height_lower_limit = self.calc_height(torch.tensor([0.0]), torch.tensor([0.0]))
self._height_upper_limit = self.calc_height(torch.tensor([1.2217]), torch.tensor([1.2217]))
self._height_target = 0.2
# Get joint index
self._base_link_idx = self._robots.get_body_index("base_link")
self._joint1_idx = self._robots.get_dof_index("joint1")
self._joint2_idx = self._robots.get_dof_index("joint2")
self._joint6_idx = self._robots.get_dof_index("joint6")
self._joint7_idx = self._robots.get_dof_index("joint7")
self._joint4_idx = self._robots.get_dof_index("joint4")
self._joint9_idx = self._robots.get_dof_index("joint9")
# print("base_link_idx: ", self._base_link_idx)
# print("joint1_idx: ", self._joint1_idx)
# print("joint2_idx: ", self._joint2_idx)
# print("joint4_idx: ", self._joint4_idx)
# print("joint6_idx: ", self._joint6_idx)
# print("joint7_idx: ", self._joint7_idx)
# print("joint9_idx: ", self._joint9_idx)
# Set limit
LOWER_LIMIT_ANGLE = 0
UPPER_LIMIT_ANGLE = 70
left_front_joint_prim = UsdPhysics.RevoluteJoint.Get(self.stage, "/World/balance_infantry/base_link/joint1")
left_front_joint_prim.GetLowerLimitAttr().Set(LOWER_LIMIT_ANGLE)
left_front_joint_prim.GetUpperLimitAttr().Set(UPPER_LIMIT_ANGLE)
left_back_joint_prim = UsdPhysics.RevoluteJoint.Get(self.stage, "/World/balance_infantry/base_link/joint2")
left_back_joint_prim.GetLowerLimitAttr().Set(-UPPER_LIMIT_ANGLE)
left_back_joint_prim.GetUpperLimitAttr().Set(-LOWER_LIMIT_ANGLE)
right_front_joint_prim = UsdPhysics.RevoluteJoint.Get(self.stage, "/World/balance_infantry/base_link/joint7")
right_front_joint_prim.GetLowerLimitAttr().Set(LOWER_LIMIT_ANGLE)
right_front_joint_prim.GetUpperLimitAttr().Set(UPPER_LIMIT_ANGLE)
right_back_joint_prim = UsdPhysics.RevoluteJoint.Get(self.stage, "/World/balance_infantry/base_link/joint6")
right_back_joint_prim.GetLowerLimitAttr().Set(-UPPER_LIMIT_ANGLE)
right_back_joint_prim.GetUpperLimitAttr().Set(-LOWER_LIMIT_ANGLE)
# Set constraint
left_wheel_link = self.stage.GetPrimAtPath("/World/balance_infantry/left_wheel_link")
left_hole_link = self.stage.GetPrimAtPath("/World/balance_infantry/left_hole_link")
left_constraint = UsdPhysics.RevoluteJoint.Define(self.stage, "/World/balance_infantry/base_link/left_constraint")
left_constraint.CreateBody0Rel().SetTargets([left_wheel_link.GetPath()])
left_constraint.CreateBody1Rel().SetTargets([left_hole_link.GetPath()])
left_constraint.CreateAxisAttr().Set("X")
left_constraint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
left_constraint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
left_constraint.CreateExcludeFromArticulationAttr().Set(True)
right_wheel_link = self.stage.GetPrimAtPath("/World/balance_infantry/right_wheel_link")
right_hole_link = self.stage.GetPrimAtPath("/World/balance_infantry/right_hole_link")
right_constraint = UsdPhysics.RevoluteJoint.Define(self.stage, "/World/balance_infantry/base_link/right_constraint")
right_constraint.CreateBody0Rel().SetTargets([right_wheel_link.GetPath()])
right_constraint.CreateBody1Rel().SetTargets([right_hole_link.GetPath()])
right_constraint.CreateAxisAttr().Set("X")
right_constraint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
right_constraint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
right_constraint.CreateExcludeFromArticulationAttr().Set(True)
def reset(self, env_ids=None):
# print("running: reset")
if env_ids is None:
env_ids = torch.arange(self.num_envs, device=self._device)
num_resets = len(env_ids)
if self._orders[0] == 0:
self._left_wheel_target = 0.0
self._right_wheel_target = 0.0
self._height_target = 0.2
elif self._orders[0] == 1:
uniform_num = 1.0 * (1.0 - 2.0 * torch.rand(2, device=self._device))
self._left_wheel_target = uniform_num[0]
self._right_wheel_target = uniform_num[1]
self._height_target = 0.2
elif self._orders[0] == 2:
uniform_num = 1.0 * (1.0 - 2.0 * torch.rand(1, device=self._device))
self._left_wheel_target = 0.0
self._right_wheel_target = 0.0
self._height_target = uniform_num[0]
elif self._orders[0] == 3:
uniform_num = 1.0 * (1.0 - 2.0 * torch.rand(2, device=self._device))
self._left_wheel_target = uniform_num[0]
self._right_wheel_target = uniform_num[1]
uniform_num = 1.0 * (1.0 - 2.0 * torch.rand(1, device=self._device))
self._height_target = uniform_num[0]
print(f"left_wheel_target: {self._left_wheel_target}, right_wheel_target: {self._right_wheel_target}, height_target: {self._height_target}")
self._robots.post_reset()
# bookkeeping
self.resets[env_ids] = 0
def pre_physics_step(self, actions) -> None:
# print("running: pre_physics_step")
reset_env_ids = self.resets.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset(reset_env_ids)
# print("actions: ", actions)
forces = torch.zeros((self._robots.count, 6), dtype=torch.float32, device=self._device)
forces[:, 0] = self._effort_leg_limit * actions[0]
forces[:, 1] = self._effort_leg_limit * actions[1]
forces[:, 2] = self._effort_leg_limit * actions[2]
forces[:, 3] = self._effort_leg_limit * actions[3]
forces[:, 4] = self._effort_wheel_limit * actions[4]
forces[:, 5] = self._effort_wheel_limit * actions[5]
indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
self._robots.set_joint_efforts(forces, indices=indices, joint_indices=torch.tensor([self._joint1_idx, self._joint2_idx, self._joint6_idx, self._joint7_idx, self._joint4_idx, self._joint9_idx]))
def get_observations(self):
# print("running: get_observations")
positions, orientations = self._robots.get_world_poses()
# positions_check = torch.where(positions[:, 2] > 10.0, 1, 0)
# if positions_check.item() == 1:
# self.reset()
# return
# if torch.isnan(positions).any() or torch.isnan(orientations).any():
# self.reset()
# return
# print("positions: ", positions)
# print("orientations: ", orientations)
angle = self.quaternion_to_euler_zxy(orientations)
        # print(f"roll_x: {angle[:, 1] * 180 / math.pi}, pitch_y: {angle[:, 2] * 180 / math.pi}")
if torch.isnan(angle).any():
return
# collect joint positions and velocities for observation
dof_pos = self._robots.get_joint_positions()
dof_vel = self._robots.get_joint_velocities()
if torch.isnan(dof_pos).any() or torch.isnan(dof_vel).any():
return
joint1_pos = dof_pos[:, self._joint1_idx]
joint1_vel = dof_vel[:, self._joint1_idx]
joint2_pos = dof_pos[:, self._joint2_idx]
joint2_vel = dof_vel[:, self._joint2_idx]
joint6_pos = dof_pos[:, self._joint6_idx]
joint6_vel = dof_vel[:, self._joint6_idx]
joint7_pos = dof_pos[:, self._joint7_idx]
joint7_vel = dof_vel[:, self._joint7_idx]
joint4_vel = dof_vel[:, self._joint4_idx]
joint9_vel = dof_vel[:, self._joint9_idx]
self.obs_last = self.obs.clone()
self.obs[:, 0] = self._left_wheel_target
self.obs[:, 1] = self._right_wheel_target
self.obs[:, 2] = self._height_target
self.obs[:, 3] = angle[:, 1]
self.obs[:, 4] = angle[:, 2]
self.obs[:, 5] = joint1_pos
self.obs[:, 6] = joint2_pos
self.obs[:, 7] = joint6_pos
self.obs[:, 8] = joint7_pos
self.obs[:, 9] = joint1_vel
self.obs[:, 10] = joint2_vel
self.obs[:, 11] = joint6_vel
self.obs[:, 12] = joint7_vel
self.obs[:, 13] = joint4_vel
self.obs[:, 14] = joint9_vel
# print("obs: ", self.obs)
return self.obs
def quaternion_to_euler_zxy(self, q):
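        # convert [w, x, y, z] quaternions to Euler angles:
        # angle[:, 0] = yaw (Z), angle[:, 1] = roll (X), angle[:, 2] = pitch (Y)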
# q = torch.tensor(q)
quat = torch.zeros((self._robots.count, 4), dtype=torch.float32, device=self._device)
angle = torch.zeros((self._robots.count, 3), dtype=torch.float32, device=self._device)
quat[:, 0] = q[:, 0]
quat[:, 1] = q[:, 1]
quat[:, 2] = q[:, 2]
quat[:, 3] = q[:, 3]
angle[:, 1] = torch.atan2(2.0 * (quat[:, 0] * quat[:, 1] + quat[:, 2] * quat[:, 3]), 1.0 - 2.0 * (quat[:, 1] * quat[:, 1] + quat[:, 2] * quat[:, 2]))
angle[:, 2] = torch.asin(torch.clamp(2.0 * (quat[:, 0] * quat[:, 2] - quat[:, 3] * quat[:, 1]), min=-1.0, max=1.0))
angle[:, 0] = torch.atan2(2.0 * (quat[:, 0] * quat[:, 3] + quat[:, 1] * quat[:, 2]), 1.0 - 2.0 * (quat[:, 2] * quat[:, 2] + quat[:, 3] * quat[:, 3]))
# angle = angle * 180 / math.pi
return angle
def calculate_metrics(self) -> None:
# print("running: calculate_metrics")
left_wheel_target = self.obs[:, 0]
right_wheel_target = self.obs[:, 1]
height_target = self.obs[:, 2]
# print(f"left_wheel_target: {left_wheel_target}, right_wheel_target: {right_wheel_target}, height_target: {height_target}")
roll_x = self.obs[:, 3]
pitch_y = self.obs[:, 4]
joint1_pos = self.obs[:, 5]
joint2_pos = self.obs[:, 6]
joint6_pos = self.obs[:, 7]
joint7_pos = self.obs[:, 8]
joint1_vel = self.obs[:, 9]
joint2_vel = self.obs[:, 10]
joint6_vel = self.obs[:, 11]
joint7_vel = self.obs[:, 12]
joint4_vel = self.obs[:, 13]
joint9_vel = self.obs[:, 14]
# print(f"joint4_vel: {joint4_vel}, joint9_vel: {joint9_vel}")
roll_x_last = self.obs_last[:, 3]
pitch_y_last = self.obs_last[:, 4]
joint1_pos_last = self.obs_last[:, 5]
joint2_pos_last = self.obs_last[:, 6]
joint6_pos_last = self.obs_last[:, 7]
joint7_pos_last = self.obs_last[:, 8]
left_height = self.calc_height(joint1_pos, joint2_pos)
right_height = self.calc_height(joint6_pos, joint7_pos)
height_current = (left_height + right_height) / 2
left_height_last = self.calc_height(joint1_pos_last, joint2_pos_last)
right_height_last = self.calc_height(joint6_pos_last, joint7_pos_last)
height_last = (left_height_last + right_height_last) / 2
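        # reward shaping: penalize base roll/pitch (including their change since the last
        # step), wheel-velocity tracking error, and height tracking error; the constant
        # 1.5 keeps the per-step reward positive when the tracking errors are small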
reward_roll_x = -0.8 * torch.abs(roll_x / self._angle_limit + (roll_x - roll_x_last) / self._angle_limit)
reward_pitch_y = -0.5 * torch.abs(pitch_y / self._angle_limit + (pitch_y - pitch_y_last) / self._angle_limit)
reward_wheel_vel = -0.3 * (torch.abs(left_wheel_target - joint4_vel / self._vel_wheel_limit) + torch.abs(right_wheel_target - joint9_vel / self._vel_wheel_limit))
reward_height = -0.5 * torch.abs(height_target - height_current / (self._height_upper_limit - self._height_lower_limit))
reward = 1.5 + reward_roll_x + reward_pitch_y + reward_wheel_vel + reward_height
# print(f"reward: {reward.item()}")
if reward.item() > 0.0:
self._reward_cnt = self._reward_cnt + int(reward.item() * 10)
# print("reward_cnt: ", self._reward_cnt)
return reward.item()
def calc_height(self, a, b):
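        # estimate chassis height from the two leg joint angles via the leg-linkage
        # geometry: l1 = hip joint offset, l2 = thigh link length, l3 = shank link
        # length, d = 15-degree mounting offset of the leg joints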
l1 = 0.075
l2 = 0.15
l3 = 0.27
d = 15 * math.pi / 180
p1 = torch.zeros((self._robots.count, 2), dtype=torch.float32, device=self._device)
p2 = torch.zeros((self._robots.count, 2), dtype=torch.float32, device=self._device)
p3 = torch.zeros((self._robots.count, 2), dtype=torch.float32, device=self._device)
res = torch.zeros((self._robots.count, 1), dtype=torch.float32, device=self._device)
a = torch.abs(a)
b = torch.abs(b)
p1[:, 0] = -l2 * torch.cos(a - d) - l1
p1[:, 1] = l2 * torch.sin(a - d)
p2[:, 0] = l2 * torch.cos(b - d) + l1
p2[:, 1] = l2 * torch.sin(b - d)
p3[:, 0] = (p1[:, 0] + p2[:, 0]) / 2
p3[:, 1] = (p1[:, 1] + p2[:, 1]) / 2
res[:, 0] = l3 * l3 - torch.pow(p2[:, 0] - p3[:, 0], 2) - torch.pow(p2[:, 1] - p3[:, 1], 2)
        res[:, 0] = torch.sqrt(res[:, 0] * torch.pow(p2[:, 0] - p1[:, 0], 2) / (torch.pow(p2[:, 0] - p1[:, 0], 2) + torch.pow(p2[:, 1] - p1[:, 1], 2)))
return res
def is_done(self) -> None:
# print("running: is_done")
roll_x = self.obs[:, 3]
pitch_y = self.obs[:, 4]
joint1_pos = self.obs[:, 5]
joint2_pos = self.obs[:, 6]
joint6_pos = self.obs[:, 7]
joint7_pos = self.obs[:, 8]
joint1_vel = self.obs[:, 9]
joint2_vel = self.obs[:, 10]
joint6_vel = self.obs[:, 11]
joint7_vel = self.obs[:, 12]
joint4_vel = self.obs[:, 13]
joint9_vel = self.obs[:, 14]
        # reset the robot if roll or pitch exceeds the limit, or once enough reward has been accumulated
resets = torch.where(torch.abs(roll_x) > self._angle_limit, 1, 0)
resets = torch.where(torch.abs(pitch_y) > self._angle_limit, 1, resets)
resets = torch.where(torch.tensor([self._reward_cnt]) > 1000, 1, resets)
if self._reward_cnt > 1000:
order = self._orders.pop(0)
self._orders.append(order)
self._reward_cnt = 0
# print("order: ", order)
self.resets = resets
# print("resets: ", resets.item())
return resets.item()
| 19,987 | Python | 42.546841 | 201 | 0.592835 |
2820207922/isaac_ws/README.md | # Servicer Deployment Guide | 27 | Markdown | 26.999973 | 27 | 0.851852 |
2820207922/isaac_ws/standalone_examples/replicator/amr_navigation.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate synthetic data from an AMR navigating to random locations
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp(launch_config={"headless": False})
import argparse
import builtins
import os
import random
from itertools import cycle
import carb.settings
import omni.client
import omni.kit.app
import omni.replicator.core as rep
import omni.timeline
import omni.usd
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, create_new_stage
from pxr import Gf, PhysxSchema, UsdGeom, UsdLux, UsdPhysics
class NavSDGDemo:
CARTER_URL = "/Isaac/Samples/Replicator/OmniGraph/nova_carter_nav_only.usd"
DOLLY_URL = "/Isaac/Props/Dolly/dolly_physics.usd"
PROPS_URL = "/Isaac/Props/YCB/Axis_Aligned_Physics"
LEFT_CAMERA_PATH = "/NavWorld/CarterNav/chassis_link/front_hawk/left/camera_left"
RIGHT_CAMERA_PATH = "/NavWorld/CarterNav/chassis_link/front_hawk/right/camera_right"
def __init__(self):
self._carter_chassis = None
self._carter_nav_target = None
self._dolly = None
self._dolly_light = None
self._props = []
self._cycled_env_urls = None
self._env_interval = 1
self._timeline = None
self._timeline_sub = None
self._stage_event_sub = None
self._stage = None
self._trigger_distance = 2.0
self._num_frames = 0
self._frame_counter = 0
self._writer = None
self._out_dir = None
self._render_products = []
self._use_temp_rp = False
self._in_running_state = False
def start(
self,
num_frames=10,
out_dir=None,
env_urls=[],
env_interval=3,
use_temp_rp=False,
seed=None,
):
print(f"[NavSDGDemo] Starting")
if seed is not None:
random.seed(seed)
self._num_frames = num_frames
self._out_dir = out_dir if out_dir is not None else os.path.join(os.getcwd(), "_out_nav_sdg_demo")
self._cycled_env_urls = cycle(env_urls)
self._env_interval = env_interval
self._use_temp_rp = use_temp_rp
self._frame_counter = 0
self._trigger_distance = 2.0
self._load_env()
self._randomize_dolly_pose()
self._randomize_dolly_light()
self._randomize_prop_poses()
self._setup_sdg()
self._timeline = omni.timeline.get_timeline_interface()
self._timeline.play()
self._timeline_sub = self._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type(
int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED), self._on_timeline_event
)
self._stage_event_sub = (
omni.usd.get_context()
.get_stage_event_stream()
.create_subscription_to_pop_by_type(int(omni.usd.StageEventType.CLOSING), self._on_stage_closing_event)
)
self._in_running_state = True
def clear(self):
self._cycled_env_urls = None
self._carter_chassis = None
self._carter_nav_target = None
self._dolly = None
self._dolly_light = None
self._timeline = None
self._frame_counter = 0
if self._stage_event_sub:
self._stage_event_sub.unsubscribe()
self._stage_event_sub = None
if self._timeline_sub:
self._timeline_sub.unsubscribe()
self._timeline_sub = None
self._destroy_render_products()
self._stage = None
self._in_running_state = False
def is_running(self):
return self._in_running_state
def _is_running_in_script_editor(self):
return builtins.ISAAC_LAUNCHED_FROM_TERMINAL is True
def _on_stage_closing_event(self, e: carb.events.IEvent):
self.clear()
def _load_env(self):
# Fresh stage with custom physics scene for carter's navigation
create_new_stage()
self._stage = omni.usd.get_context().get_stage()
self._add_physics_scene()
# Environment
assets_root_path = get_assets_root_path()
add_reference_to_stage(usd_path=assets_root_path + next(self._cycled_env_urls), prim_path="/Environment")
# Carter
add_reference_to_stage(usd_path=assets_root_path + self.CARTER_URL, prim_path="/NavWorld/CarterNav")
self._carter_nav_target = self._stage.GetPrimAtPath("/NavWorld/CarterNav/targetXform")
self._carter_chassis = self._stage.GetPrimAtPath("/NavWorld/CarterNav/chassis_link")
# Dolly
add_reference_to_stage(usd_path=assets_root_path + self.DOLLY_URL, prim_path="/NavWorld/Dolly")
self._dolly = self._stage.GetPrimAtPath("/NavWorld/Dolly")
if not self._dolly.GetAttribute("xformOp:translate"):
UsdGeom.Xformable(self._dolly).AddTranslateOp()
if not self._dolly.GetAttribute("xformOp:rotateXYZ"):
UsdGeom.Xformable(self._dolly).AddRotateXYZOp()
# Light
light = UsdLux.SphereLight.Define(self._stage, f"/NavWorld/DollyLight")
light.CreateRadiusAttr(0.5)
light.CreateIntensityAttr(35000)
light.CreateColorAttr(Gf.Vec3f(1.0, 1.0, 1.0))
self._dolly_light = light.GetPrim()
if not self._dolly_light.GetAttribute("xformOp:translate"):
UsdGeom.Xformable(self._dolly_light).AddTranslateOp()
# Props
props_urls = []
props_folder_path = assets_root_path + self.PROPS_URL
result, entries = omni.client.list(props_folder_path)
if result != omni.client.Result.OK:
carb.log_error(f"Could not list assets in path: {props_folder_path}")
return
for entry in entries:
_, ext = os.path.splitext(entry.relative_path)
if ext == ".usd":
props_urls.append(f"{props_folder_path}/{entry.relative_path}")
cycled_props_url = cycle(props_urls)
for i in range(15):
prop_url = next(cycled_props_url)
prop_name = os.path.splitext(os.path.basename(prop_url))[0]
path = f"/NavWorld/Props/Prop_{prop_name}_{i}"
prim = self._stage.DefinePrim(path, "Xform")
prim.GetReferences().AddReference(prop_url)
self._props.append(prim)
def _add_physics_scene(self):
# Physics setup specific for the navigation graph
physics_scene = UsdPhysics.Scene.Define(self._stage, "/physicsScene")
physx_scene = PhysxSchema.PhysxSceneAPI.Apply(self._stage.GetPrimAtPath("/physicsScene"))
physx_scene.GetEnableCCDAttr().Set(True)
physx_scene.GetEnableGPUDynamicsAttr().Set(False)
physx_scene.GetBroadphaseTypeAttr().Set("MBP")
def _randomize_dolly_pose(self):
min_dist_from_carter = 4
carter_loc = self._carter_chassis.GetAttribute("xformOp:translate").Get()
for _ in range(100):
x, y = random.uniform(-6, 6), random.uniform(-6, 6)
dist = (Gf.Vec2f(x, y) - Gf.Vec2f(carter_loc[0], carter_loc[1])).GetLength()
if dist > min_dist_from_carter:
self._dolly.GetAttribute("xformOp:translate").Set((x, y, 0))
self._carter_nav_target.GetAttribute("xformOp:translate").Set((x, y, 0))
break
self._dolly.GetAttribute("xformOp:rotateXYZ").Set((0, 0, random.uniform(-180, 180)))
def _randomize_dolly_light(self):
dolly_loc = self._dolly.GetAttribute("xformOp:translate").Get()
self._dolly_light.GetAttribute("xformOp:translate").Set(dolly_loc + (0, 0, 2.5))
self._dolly_light.GetAttribute("inputs:color").Set(
(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
)
def _randomize_prop_poses(self):
spawn_loc = self._dolly.GetAttribute("xformOp:translate").Get()
spawn_loc[2] = spawn_loc[2] + 0.5
for prop in self._props:
prop.GetAttribute("xformOp:translate").Set(spawn_loc + (random.uniform(-1, 1), random.uniform(-1, 1), 0))
spawn_loc[2] = spawn_loc[2] + 0.2
def _setup_sdg(self):
# Disable capture on play and async rendering
carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
carb.settings.get_settings().set("/omni/replicator/asyncRendering", False)
carb.settings.get_settings().set("/app/asyncRendering", False)
# Set camera sensors fStop to 0.0 to get well lit sharp images
left_camera_prim = self._stage.GetPrimAtPath(self.LEFT_CAMERA_PATH)
left_camera_prim.GetAttribute("fStop").Set(0.0)
right_camera_prim = self._stage.GetPrimAtPath(self.RIGHT_CAMERA_PATH)
right_camera_prim.GetAttribute("fStop").Set(0.0)
self._writer = rep.WriterRegistry.get("BasicWriter")
self._writer.initialize(output_dir=self._out_dir, rgb=True)
# If no temporary render products are requested, create them once here and destroy them only at the end
if not self._use_temp_rp:
self._setup_render_products()
def _setup_render_products(self):
print(f"[NavSDGDemo] Creating render products")
rp_left = rep.create.render_product(
self.LEFT_CAMERA_PATH,
(512, 512),
name="left_sensor",
force_new=True,
)
rp_right = rep.create.render_product(
self.RIGHT_CAMERA_PATH,
(512, 512),
name="right_sensor",
force_new=True,
)
self._render_products = [rp_left, rp_right]
self._writer.attach(self._render_products)
rep.orchestrator.preview()
def _destroy_render_products(self):
print(f"[NavSDGDemo] Destroying render products")
if self._writer:
self._writer.detach()
for rp in self._render_products:
rp.destroy()
self._render_products.clear()
if self._stage.GetPrimAtPath("/Replicator"):
omni.kit.commands.execute("DeletePrimsCommand", paths=["/Replicator"])
def _run_sdg(self):
if self._use_temp_rp:
self._setup_render_products()
rep.orchestrator.step(rt_subframes=16, pause_timeline=False)
rep.orchestrator.wait_until_complete()
if self._use_temp_rp:
self._destroy_render_products()
async def _run_sdg_async(self):
if self._use_temp_rp:
self._setup_render_products()
await rep.orchestrator.step_async(rt_subframes=16, pause_timeline=False)
await rep.orchestrator.wait_until_complete_async()
if self._use_temp_rp:
self._destroy_render_products()
def _load_next_env(self):
if self._stage.GetPrimAtPath("/Environment"):
omni.kit.commands.execute("DeletePrimsCommand", paths=["/Environment"])
assets_root_path = get_assets_root_path()
add_reference_to_stage(usd_path=assets_root_path + next(self._cycled_env_urls), prim_path="/Environment")
def _on_sdg_done(self, task):
self._setup_next_frame()
def _setup_next_frame(self):
self._frame_counter += 1
if self._frame_counter >= self._num_frames:
print(f"[NavSDGDemo] Finished")
self.clear()
return
self._randomize_dolly_pose()
self._randomize_dolly_light()
self._randomize_prop_poses()
if self._frame_counter % self._env_interval == 0:
self._load_next_env()
        # Set a new random distance from which to capture the next frame
self._trigger_distance = random.uniform(1.75, 2.5)
self._timeline.play()
self._timeline_sub = self._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type(
int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED), self._on_timeline_event
)
def _on_timeline_event(self, e: carb.events.IEvent):
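        # capture a frame once the carter gets within the randomized trigger distance of the dolly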
carter_loc = self._carter_chassis.GetAttribute("xformOp:translate").Get()
dolly_loc = self._dolly.GetAttribute("xformOp:translate").Get()
dist = (Gf.Vec2f(dolly_loc[0], dolly_loc[1]) - Gf.Vec2f(carter_loc[0], carter_loc[1])).GetLength()
if dist < self._trigger_distance:
print(f"[NavSDGDemo] Capturing frame no. {self._frame_counter}")
self._timeline.pause()
self._timeline_sub.unsubscribe()
if self._is_running_in_script_editor():
import asyncio
task = asyncio.ensure_future(self._run_sdg_async())
task.add_done_callback(self._on_sdg_done)
else:
self._run_sdg()
self._setup_next_frame()
ENV_URLS = [
"/Isaac/Environments/Grid/default_environment.usd",
"/Isaac/Environments/Simple_Warehouse/warehouse.usd",
"/Isaac/Environments/Grid/gridroom_black.usd",
]
parser = argparse.ArgumentParser()
parser.add_argument("--use_temp_rp", action="store_true", help="Create and destroy render products for each SDG frame")
parser.add_argument("--num_frames", type=int, default=9, help="The number of frames to capture")
parser.add_argument("--env_interval", type=int, default=3, help="Interval at which to change the environments")
args, unknown = parser.parse_known_args()
out_dir = os.path.join(os.getcwd(), "_out_nav_sdg_demo", "")
nav_demo = NavSDGDemo()
nav_demo.start(
num_frames=args.num_frames,
out_dir=out_dir,
env_urls=ENV_URLS,
env_interval=args.env_interval,
use_temp_rp=args.use_temp_rp,
seed=124,
)
while simulation_app.is_running() and nav_demo.is_running():
simulation_app.update()
simulation_app.close()
| 14,064 | Python | 39.768116 | 119 | 0.630475 |
2820207922/isaac_ws/standalone_examples/replicator/online_generation/generate_shapenet.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Dataset with online randomized scene generation for Instance Segmentation training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
adding assets from the user-specified classes with randomized pose and colour.
The camera position is also randomized before capturing groundtruth consisting of
an RGB rendered image, Tight 2D Bounding Boxes and Instance Segmentation masks.
"""
import glob
import os
import signal
import sys
import carb
import numpy as np
import torch
from omni.isaac.kit import SimulationApp
LABEL_TO_SYNSET = {
    "table": "04379243",
    "monitor": "03211117",
    "phone": "04401088",
    "watercraft": "04530566",
    "chair": "03001627",
    "lamp": "03636649",
    "speaker": "03691459",
    "bench": "02828884",
    "plane": "02691156",
    "bathtub": "02808440",
    "bookcase": "02871439",
    "bag": "02773838",
    "basket": "02801938",
    "bowl": "02880940",
    "bus": "02924116",
    "cabinet": "02933112",
    "camera": "02942699",
    "car": "02958343",
    "dishwasher": "03207941",
    "file": "03337140",
    "knife": "03624134",
    "laptop": "03642806",
    "mailbox": "03710193",
    "microwave": "03761084",
    "piano": "03928116",
    "pillow": "03938244",
    "pistol": "03948459",
    "printer": "04004475",
    "rocket": "04099429",
    "sofa": "04256520",
    "washer": "04554684",
    "rifle": "04090263",
    "can": "02946921",
    "bottle": "02876657",
    "earphone": "03261776",
    "mug": "03797390",
}
SYNSET_TO_LABEL = {v: k for k, v in LABEL_TO_SYNSET.items()}
# Setup default variables
RESOLUTION = (1024, 1024)
OBJ_LOC_MIN = (-50, 5, -50)
OBJ_LOC_MAX = (50, 5, 50)
CAM_LOC_MIN = (100, 0, -100)
CAM_LOC_MAX = (100, 100, 100)
SCALE_MIN = 15
SCALE_MAX = 40
# Default rendering parameters
RENDER_CONFIG = {"headless": False}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of random ShapeNet objects.
Objects are randomly chosen from selected categories and are positioned, rotated and coloured
randomly in an empty room. RGB, BoundingBox2DTight and Instance Segmentation are captured by moving a
camera aimed at the centre of the scene which is positioned at random at a fixed distance from the centre.
    This dataset is intended for use with ShapeNet but will function with any dataset of USD models
    structured as `root/category/**/*.usd`. Note that it is designed for assets without materials
    attached, to avoid having to compile MDLs and load textures while training.
Args:
categories (tuple of str): Tuple or list of categories. For ShapeNet, these will be the synset IDs.
max_asset_size (int): Maximum asset file size that will be loaded. This prevents out of memory errors
due to loading large meshes.
num_assets_min (int): Minimum number of assets populated in the scene.
num_assets_max (int): Maximum number of assets populated in the scene.
split (float): Fraction of the USDs found to use for training.
train (bool): If true, use the first training split and generate infinite random scenes.
"""
def __init__(
self, root, categories, max_asset_size=None, num_assets_min=3, num_assets_max=5, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = SimulationApp(RENDER_CONFIG)
import omni.replicator.core as rep
import warp as wp
self.rep = rep
self.wp = wp
from omni.isaac.core.utils.nucleus import get_assets_root_path
self.assets_root_path = get_assets_root_path()
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
# If ShapeNet categories are specified with their names, convert to synset ID
        # Remove this if using a dataset other than ShapeNet
category_ids = [LABEL_TO_SYNSET.get(c, c) for c in categories]
self.categories = category_ids
self.range_num_assets = (num_assets_min, max(num_assets_min, num_assets_max))
try:
self.references = self._find_usd_assets(root, category_ids, max_asset_size, split, train)
except ValueError as err:
carb.log_error(str(err))
self.kit.close()
sys.exit()
# Setup the scene, lights, walls, camera, etc.
self.setup_scene()
# Setup replicator randomizer graph
self.setup_replicator()
self.cur_idx = 0
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
def _get_textures(self):
return [
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/checkered.png",
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/marble_tile.png",
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/picture_a.png",
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/picture_b.png",
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/textured_wall.png",
self.assets_root_path + "/Isaac/Samples/DR/Materials/Textures/checkered_color.png",
]
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def close(self):
self.rep.orchestrator.stop()
self.kit.close()
def setup_scene(self):
from omni.isaac.core.utils.prims import create_prim
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.core.utils.stage import set_stage_up_axis
"""Setup lights, walls, floor, ceiling and camera"""
# Set stage up axis to Y-up
set_stage_up_axis("y")
# In a practical setting, the room parameters should attempt to match those of the
# target domain. Here, we instead opt for simplicity.
create_prim("/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]})
create_prim(
"/World/Ground",
"Cylinder",
position=np.array([0.0, -0.5, 0.0]),
orientation=euler_angles_to_quat(np.array([90.0, 0.0, 0.0]), degrees=True),
attributes={"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]},
)
create_prim("/World/Asset", "Xform")
self.camera = self.rep.create.camera()
self.render_product = self.rep.create.render_product(self.camera, RESOLUTION)
# Setup annotators that will report groundtruth
self.rgb = self.rep.AnnotatorRegistry.get_annotator("rgb")
self.bbox_2d_tight = self.rep.AnnotatorRegistry.get_annotator("bounding_box_2d_tight")
self.instance_seg = self.rep.AnnotatorRegistry.get_annotator("instance_segmentation")
self.rgb.attach(self.render_product)
self.bbox_2d_tight.attach(self.render_product)
self.instance_seg.attach(self.render_product)
self.kit.update()
def _find_usd_assets(self, root, categories, max_asset_size, split, train=True):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
        the first `split * num_assets` assets if `train=True`, otherwise select the
remainder.
"""
references = {}
for category in categories:
all_assets = glob.glob(os.path.join(root, category, "*/*.usd"), recursive=True)
print(os.path.join(root, category, "*/*.usd"))
            # Filter out large files (to help prevent OOM errors during training)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
print(f"{a} skipped as it exceeded the max size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(f"No USDs found for category {category} under max size {max_asset_size} MB.")
if train:
references[category] = assets_filtered[: int(num_assets * split)]
else:
references[category] = assets_filtered[int(num_assets * split) :]
return references
def _instantiate_category(self, category, references):
with self.rep.randomizer.instantiate(references, size=1, mode="reference"):
self.rep.modify.semantics([("class", category)])
self.rep.modify.pose(
position=self.rep.distribution.uniform(OBJ_LOC_MIN, OBJ_LOC_MAX),
rotation=self.rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
scale=self.rep.distribution.uniform(SCALE_MIN, SCALE_MAX),
)
self.rep.randomizer.texture(self._get_textures(), project_uvw=True)
def setup_replicator(self):
"""Setup the replicator graph with various attributes."""
# Create two sphere lights
light1 = self.rep.create.light(light_type="sphere", position=(-450, 350, 350), scale=100, intensity=30000.0)
light2 = self.rep.create.light(light_type="sphere", position=(450, 350, 350), scale=100, intensity=30000.0)
with self.rep.new_layer():
with self.rep.trigger.on_frame():
# Randomize light colors
with self.rep.create.group([light1, light2]):
self.rep.modify.attribute("color", self.rep.distribution.uniform((0.1, 0.1, 0.1), (1.0, 1.0, 1.0)))
# Randomize camera position
with self.camera:
self.rep.modify.pose(
position=self.rep.distribution.uniform(CAM_LOC_MIN, CAM_LOC_MAX), look_at=(0, 0, 0)
)
# Randomize asset positions and textures
for category, references in self.references.items():
self._instantiate_category(category, references)
# Run replicator for a single iteration without triggering any writes
self.rep.orchestrator.preview()
def __iter__(self):
return self
def __next__(self):
# Step - trigger a randomization and a render
self.rep.orchestrator.step(rt_subframes=4)
# Collect Groundtruth
gt = {
"rgb": self.rgb.get_data(device="cuda"),
"boundingBox2DTight": self.bbox_2d_tight.get_data(device="cpu"),
"instanceSegmentation": self.instance_seg.get_data(device="cuda"),
}
# RGB
# Drop alpha channel
image = self.wp.to_torch(gt["rgb"])[..., :3]
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]["data"]
# Create mapping from categories to index
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist(), device="cuda")
id_to_labels = gt["boundingBox2DTight"]["info"]["idToLabels"]
prim_paths = gt["boundingBox2DTight"]["info"]["primPaths"]
# For each bounding box, map semantic label to label index
cat_to_id = {cat: i + 1 for i, cat in enumerate(self.categories)}
semantic_labels_mapping = {int(k): v.get("class", "") for k, v in id_to_labels.items()}
semantic_labels = [cat_to_id[semantic_labels_mapping[i]] for i in gt_bbox["semanticId"]]
labels = torch.tensor(semantic_labels, device="cuda")
        # Calculate the bounding box area for each box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
# Identify invalid bounding boxes to filter final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
# Instance Segmentation
instance_data = self.wp.to_torch(gt["instanceSegmentation"]["data"].view(self.wp.int32)).squeeze()
path_to_instance_id = {v: int(k) for k, v in gt["instanceSegmentation"]["info"]["idToLabels"].items()}
instance_list = [im[0] for im in gt_bbox]
masks = torch.zeros((len(instance_list), *instance_data.shape), dtype=bool, device="cuda")
# Filter for the mask of each object
for i, prim_path in enumerate(prim_paths):
# Merge child instances of prim_path as one instance
for instance in path_to_instance_id:
if prim_path in instance:
masks[i] += torch.isin(instance_data, path_to_instance_id[instance])
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"masks": masks[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
if __name__ == "__main__":
"Typical usage"
import argparse
import struct
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("--categories", type=str, nargs="+", required=True, help="List of object classes to use")
parser.add_argument(
"--max_asset_size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument(
"--num_test_images", type=int, default=10, help="number of test images to generate when executing main"
)
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing USDs. If not specified, use {SHAPENET_LOCAL_DIR}_mat as root.",
)
args, unknown_args = parser.parse_known_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _mat suffix as root
if args.root is None:
if "SHAPENET_LOCAL_DIR" in os.environ:
shapenet_local_dir = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_mat"
if os.path.exists(shapenet_local_dir):
args.root = shapenet_local_dir
if args.root is None:
print(
"root argument not specified and SHAPENET_LOCAL_DIR environment variable was not set or the path did not exist"
)
exit()
dataset = RandomObjects(args.root, args.categories, max_asset_size=args.max_asset_size)
from omni.replicator.core import random_colours
categories = [LABEL_TO_SYNSET.get(c, c) for c in args.categories]
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
# Directory to save the example images to
out_dir = os.path.join(os.getcwd(), "_out_gen_imgs", "")
os.makedirs(out_dir, exist_ok=True)
image_num = 0
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
# Create random colors for each instance as rgb float lists
colours = random_colours(num_instances, num_channels=3)
colours = colours.astype(float) / 255.0
colours = colours.tolist()
overlay = np.zeros_like(np_image)
for mask, colour in zip(target["masks"].cpu().numpy(), colours):
overlay[mask, :3] = colour
axes[1].imshow(overlay)
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [SYNSET_TO_LABEL[mapping[label.item()]] for label in target["labels"]]
for bb, label, colour in zip(target["boxes"].tolist(), labels, colours):
maxint = 2 ** (struct.Struct("i").size * 8 - 1) - 1
# if a bbox is not visible, do not draw
if bb[0] != maxint and bb[1] != maxint:
x = bb[0]
y = bb[1]
w = bb[2] - x
h = bb[3] - y
box = plt.Rectangle((x, y), w, h, fill=False, edgecolor=colour)
                axes[1].add_patch(box)
                axes[1].text(bb[0], bb[1], label, fontdict={"family": "sans-serif", "color": colour, "size": 10})
plt.draw()
plt.pause(0.01)
fig_name = os.path.join(out_dir, f"domain_randomization_test_image_{image_num}.png")
plt.savefig(fig_name)
image_num += 1
if dataset.exiting or (image_num >= args.num_test_images):
break
# cleanup
dataset.close()
| 17,377 | Python | 39.226852 | 127 | 0.61449 |
2820207922/isaac_ws/standalone_examples/replicator/online_generation/train_shapenet.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Instance Segmentation Training Demonstration
Use a PyTorch dataloader together with OmniKit to generate scenes and groundtruth to
train a [Mask-RCNN](https://arxiv.org/abs/1703.06870) model.
"""
import os
import signal
import matplotlib.pyplot as plt
import numpy as np
from generate_shapenet import LABEL_TO_SYNSET, SYNSET_TO_LABEL, RandomObjects
def main(args):
device = "cuda"
train_set = RandomObjects(
args.root, args.categories, num_assets_min=3, num_assets_max=5, max_asset_size=args.max_asset_size
)
def handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
train_set.exiting = True
signal.signal(signal.SIGINT, handle_exit)
import struct
import torch
import torchvision
from omni.replicator.core import random_colours
from torch.utils.data import DataLoader
# Setup data
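    # The lambda collate_fn below turns a list of (image, target) samples into a
    # pair of tuples (images, targets); targets are variable-sized dicts, so the
    # default collate function cannot stack them into a single tensor.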
train_loader = DataLoader(train_set, batch_size=2, collate_fn=lambda x: tuple(zip(*x)))
# Setup Model
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=None, num_classes=1 + len(args.categories))
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.visualize:
plt.ion()
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
# Directory to save the train images to
out_dir = os.path.join(os.getcwd(), "_out_train_imgs", "")
os.makedirs(out_dir, exist_ok=True)
for i, train_batch in enumerate(train_loader):
if i > args.max_iters or train_set.exiting:
print("Exiting ...")
train_set.close()
break
model.train()
images, targets = train_batch
images = [i.to(device) for i in images]
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
loss = sum(loss for loss in loss_dict.values())
print(f"ITER {i} | {loss:.6f}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
model.eval()
with torch.no_grad():
predictions = model(images[:1])
if args.visualize:
idx = 0
score_thresh = 0.5
mask_thresh = 0.5
pred = predictions[idx]
np_image = images[idx].permute(1, 2, 0).cpu().numpy()
                fig.suptitle(f"Iteration {i:05}", fontsize=14)
                for ax in axes:
                    ax.cla()
                    ax.axis("off")
                    ax.imshow(np_image)
axes[0].set_title("Input")
axes[1].set_title("Input + Predictions")
score_filter = [i for i in range(len(pred["scores"])) if pred["scores"][i] > score_thresh]
num_instances = len(score_filter)
# Create random colors for each instance as rgb float lists
colours = random_colours(num_instances, num_channels=3)
colours = colours.astype(float) / 255.0
colours = colours.tolist()
overlay = np.zeros_like(np_image)
for mask, colour in zip(pred["masks"], colours):
overlay[mask.squeeze().cpu().numpy() > mask_thresh, :3] = colour
axes[1].imshow(overlay, alpha=0.5)
# If ShapeNet categories are specified with their names, convert to synset ID
                # Remove this if using a dataset other than ShapeNet
args.categories = [LABEL_TO_SYNSET.get(c, c) for c in args.categories]
mapping = {i + 1: cat for i, cat in enumerate(args.categories)}
labels = [SYNSET_TO_LABEL[mapping[label.item()]] for label in pred["labels"]]
for bb, label, colour in zip(pred["boxes"].cpu().numpy(), labels, colours):
maxint = 2 ** (struct.Struct("i").size * 8 - 1) - 1
# if a bbox is not visible, do not draw
if bb[0] != maxint and bb[1] != maxint:
x = bb[0]
y = bb[1]
w = bb[2] - x
h = bb[3] - y
box = plt.Rectangle((x, y), w, h, fill=False, edgecolor=colour)
                        axes[1].add_patch(box)
                        axes[1].text(bb[0], bb[1], label, fontdict={"family": "sans-serif", "color": colour, "size": 10})
plt.draw()
fig_name = os.path.join(out_dir, f"train_image_{i}.png")
plt.savefig(fig_name)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing ShapeNet USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
parser.add_argument(
"--categories", type=str, nargs="+", required=True, help="List of ShapeNet categories to use (space seperated)."
)
parser.add_argument(
"--max_asset_size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--max_iters", type=float, default=1000, help="Number of training iterations.")
parser.add_argument("--visualize", action="store_true", help="Visualize predicted masks during training.")
args, unknown_args = parser.parse_known_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
if "SHAPENET_LOCAL_DIR" in os.environ:
shapenet_local_dir = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_nomat"
if os.path.exists(shapenet_local_dir):
args.root = shapenet_local_dir
if args.root is None:
print(
"root argument not specified and SHAPENET_LOCAL_DIR environment variable was not set or the path did not exist"
)
exit()
main(args)
| 6,668 | Python | 37.549133 | 127 | 0.586083 |
2820207922/isaac_ws/standalone_examples/replicator/offline_generation/offline_generation_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import random
import numpy as np
import omni
import omni.replicator.core as rep
from omni.isaac.core import World
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils import prims
from omni.isaac.core.utils.bounds import compute_combined_aabb, compute_obb, create_bbox_cache, get_obb_corners
from omni.isaac.core.utils.rotations import euler_angles_to_quat, quat_to_euler_angles
from omni.isaac.core.utils.semantics import remove_all_semantics
from pxr import Gf
# Clear any previous semantic data in the stage
def remove_previous_semantics(stage, recursive: bool = False):
    stage_prims = stage.Traverse()
    for prim in stage_prims:
remove_all_semantics(prim, recursive)
# Run a simulation
def simulate_falling_objects(forklift_prim, assets_root_path, config, max_sim_steps=250, num_boxes=8):
# Create the isaac sim world to run any physics simulations
world = World(physics_dt=1.0 / 90.0, stage_units_in_meters=1.0)
# Set a random relative offset to the pallet using the forklift transform as a base frame
forklift_tf = omni.usd.get_world_transform_matrix(forklift_prim)
pallet_offset_tf = Gf.Matrix4d().SetTranslate(Gf.Vec3d(random.uniform(-1, 1), random.uniform(-4, -3.6), 0))
pallet_pos = (pallet_offset_tf * forklift_tf).ExtractTranslation()
# Spawn pallet prim at a relative random offset to the forklift
pallet_prim_name = "SimulatedPallet"
pallet_prim = prims.create_prim(
prim_path=f"/World/{pallet_prim_name}",
usd_path=assets_root_path + config["pallet"]["url"],
semantic_label=config["pallet"]["class"],
translation=pallet_pos,
orientation=euler_angles_to_quat([0, 0, random.uniform(0, math.pi)]),
)
    # Wrap the pallet as a simulation-ready rigid prim
pallet_rigid_prim = RigidPrim(prim_path=str(pallet_prim.GetPrimPath()), name=pallet_prim_name)
# Enable physics and add to isaacsim world scene
pallet_rigid_prim.enable_rigid_body_physics()
world.scene.add(pallet_rigid_prim)
# Use the height of the pallet as a spawn base for the boxes
bb_cache = create_bbox_cache()
spawn_height = bb_cache.ComputeLocalBound(pallet_prim).GetRange().GetSize()[2] * 1.1
# Spawn boxes falling on the pallet
for i in range(num_boxes):
# Spawn box prim
cardbox_prim_name = f"SimulatedCardbox_{i}"
box_prim = prims.create_prim(
prim_path=f"/World/{cardbox_prim_name}",
usd_path=assets_root_path + config["cardbox"]["url"],
semantic_label=config["cardbox"]["class"],
)
# Get the next spawn height for the box
spawn_height += bb_cache.ComputeLocalBound(box_prim).GetRange().GetSize()[2] * 1.1
# Wrap the cardbox prim into a rigid prim to be able to simulate it
box_rigid_prim = RigidPrim(
prim_path=str(box_prim.GetPrimPath()),
name=cardbox_prim_name,
position=pallet_pos + Gf.Vec3d(random.uniform(-0.2, 0.2), random.uniform(-0.2, 0.2), spawn_height),
orientation=euler_angles_to_quat([0, 0, random.uniform(0, math.pi)]),
)
# Make sure physics are enabled on the rigid prim
box_rigid_prim.enable_rigid_body_physics()
# Register rigid prim with the scene
world.scene.add(box_rigid_prim)
# Reset the world to handle the physics of the newly created rigid prims
world.reset()
# Simulate the world for the given number of steps or until the highest box stops moving
last_box = world.scene.get_object(f"SimulatedCardbox_{num_boxes - 1}")
for i in range(max_sim_steps):
world.step(render=False)
if last_box and np.linalg.norm(last_box.get_linear_velocity()) < 0.001:
print(f"Falling objects simulation finished at step {i}..")
break
# Register the boxes and materials randomizer graph
def register_scatter_boxes(pallet_prim, assets_root_path, config):
# Calculate the bounds of the prim to create a scatter plane of its size
bb_cache = create_bbox_cache()
bbox3d_gf = bb_cache.ComputeLocalBound(pallet_prim)
prim_tf_gf = omni.usd.get_world_transform_matrix(pallet_prim)
    # Transform the bounds of the prim into world space
bbox3d_gf.Transform(prim_tf_gf)
range_size = bbox3d_gf.GetRange().GetSize()
    # Get the quaternion of the prim from USD in scalar-first (w, x, y, z) format
    prim_quat_gf = prim_tf_gf.ExtractRotation().GetQuaternion()
    prim_quat_wxyz = (prim_quat_gf.GetReal(), *prim_quat_gf.GetImaginary())
# Create a plane on the pallet to scatter the boxes on
plane_scale = (range_size[0] * 0.8, range_size[1] * 0.8, 1)
plane_pos_gf = prim_tf_gf.ExtractTranslation() + Gf.Vec3d(0, 0, range_size[2])
    plane_rot_euler_deg = quat_to_euler_angles(np.array(prim_quat_wxyz), degrees=True)
scatter_plane = rep.create.plane(
scale=plane_scale, position=plane_pos_gf, rotation=plane_rot_euler_deg, visible=False
)
cardbox_mats = [
f"{assets_root_path}/Isaac/Environments/Simple_Warehouse/Materials/MI_PaperNotes_01.mdl",
f"{assets_root_path}/Isaac/Environments/Simple_Warehouse/Materials/MI_CardBoxB_05.mdl",
]
def scatter_boxes():
cardboxes = rep.create.from_usd(
assets_root_path + config["cardbox"]["url"], semantics=[("class", config["cardbox"]["class"])], count=5
)
with cardboxes:
rep.randomizer.scatter_2d(scatter_plane, check_for_collisions=True)
rep.randomizer.materials(cardbox_mats)
return cardboxes.node
rep.randomizer.register(scatter_boxes)
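    # Once registered, the randomizer can be triggered per frame via
    # rep.randomizer.scatter_boxes() inside a rep.trigger.on_frame() block
    # (as done in the accompanying offline_generation.py script).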
# Register the place cones randomizer graph
def register_cone_placement(forklift_prim, assets_root_path, config):
# Get the bottom corners of the oriented bounding box (OBB) of the forklift
bb_cache = create_bbox_cache()
centroid, axes, half_extent = compute_obb(bb_cache, forklift_prim.GetPrimPath())
larger_xy_extent = (half_extent[0] * 1.3, half_extent[1] * 1.3, half_extent[2])
obb_corners = get_obb_corners(centroid, axes, larger_xy_extent)
bottom_corners = [
obb_corners[0].tolist(),
obb_corners[2].tolist(),
obb_corners[4].tolist(),
obb_corners[6].tolist(),
]
# Orient the cone using the OBB (Oriented Bounding Box)
obb_quat = Gf.Matrix3d(axes).ExtractRotation().GetQuaternion()
    obb_quat_wxyz = (obb_quat.GetReal(), *obb_quat.GetImaginary())  # scalar-first (w, x, y, z)
    obb_euler = quat_to_euler_angles(np.array(obb_quat_wxyz), degrees=True)
def place_cones():
cones = rep.create.from_usd(
assets_root_path + config["cone"]["url"], semantics=[("class", config["cone"]["class"])]
)
with cones:
rep.modify.pose(position=rep.distribution.sequence(bottom_corners), rotation_z=obb_euler[2])
return cones.node
rep.randomizer.register(place_cones)
# Register light randomization graph
def register_lights_placement(forklift_prim, pallet_prim):
bb_cache = create_bbox_cache()
combined_range_arr = compute_combined_aabb(bb_cache, [forklift_prim.GetPrimPath(), pallet_prim.GetPrimPath()])
pos_min = (combined_range_arr[0], combined_range_arr[1], 6)
pos_max = (combined_range_arr[3], combined_range_arr[4], 7)
def randomize_lights():
lights = rep.create.light(
light_type="Sphere",
color=rep.distribution.uniform((0.2, 0.1, 0.1), (0.9, 0.8, 0.8)),
intensity=rep.distribution.uniform(500, 2000),
position=rep.distribution.uniform(pos_min, pos_max),
scale=rep.distribution.uniform(5, 10),
count=3,
)
return lights.node
rep.randomizer.register(randomize_lights)
| 8,124 | Python | 41.763158 | 115 | 0.679468 |
2820207922/isaac_ws/standalone_examples/replicator/offline_generation/offline_generation.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate offline synthetic dataset
"""
import argparse
import json
import math
import os
import random
import carb
import yaml
from omni.isaac.kit import SimulationApp
# Default config (will be updated/extended by any other passed config arguments)
config = {
"launch_config": {
"renderer": "RayTracedLighting",
"headless": False,
},
"resolution": [1024, 1024],
"rt_subframes": 2,
"num_frames": 20,
"env_url": "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd",
"scope_name": "/MyScope",
"writer": "BasicWriter",
"writer_config": {
"output_dir": "_out_offline_generation",
"rgb": True,
"bounding_box_2d_tight": True,
"semantic_segmentation": True,
"distance_to_image_plane": True,
"bounding_box_3d": True,
"occlusion": True,
},
"clear_previous_semantics": True,
"forklift": {
"url": "/Isaac/Props/Forklift/forklift.usd",
"class": "Forklift",
},
"cone": {
"url": "/Isaac/Environments/Simple_Warehouse/Props/S_TrafficCone.usd",
"class": "TrafficCone",
},
"pallet": {
"url": "/Isaac/Environments/Simple_Warehouse/Props/SM_PaletteA_01.usd",
"class": "Pallet",
},
"cardbox": {
"url": "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_04.usd",
"class": "Cardbox",
},
}
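# Example override file (illustrative values) that could be passed via --config below;
# note that a custom writer_config fully replaces the default one (handled further down):
#   num_frames: 50
#   rt_subframes: 4
#   writer_config:
#     output_dir: _out_my_dataset
#     rgb: true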
# Check if a config file (yaml or json) was passed as an argument
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=False, help="Include specific config parameters (json or yaml))")
args, unknown = parser.parse_known_args()
args_config = {}
if args.config and os.path.isfile(args.config):
print("File exist")
with open(args.config, "r") as f:
if args.config.endswith(".json"):
args_config = json.load(f)
elif args.config.endswith(".yaml"):
args_config = yaml.safe_load(f)
else:
carb.log_warn(f"File {args.config} is not json or yaml, will use default config")
else:
print(f"File {args.config} does not exist, will use default config")
carb.log_warn(f"File {args.config} does not exist, will use default config")
# If there are specific writer parameters in the input config file make sure they are not mixed with the default ones
if "writer_config" in args_config:
config["writer_config"].clear()
# Update the default config dictionary with any new parameters or values from the config file
config.update(args_config)
# Create the simulation app with the given launch_config
simulation_app = SimulationApp(launch_config=config["launch_config"])
import offline_generation_utils
# Late import of runtime modules (the SimulationApp needs to be created before loading the modules)
import omni.replicator.core as rep
import omni.usd
from omni.isaac.core.utils import prims
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.core.utils.stage import get_current_stage, open_stage
from pxr import Gf
# Get server path
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not get nucleus server path, closing application..")
simulation_app.close()
# Open the given environment in a new stage
print(f"Loading Stage {config['env_url']}")
if not open_stage(assets_root_path + config["env_url"]):
carb.log_error(f"Could not open stage{config['env_url']}, closing application..")
simulation_app.close()
# Disable capture on play and async rendering
carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
carb.settings.get_settings().set("/omni/replicator/asyncRendering", False)
carb.settings.get_settings().set("/app/asyncRendering", False)
if config["clear_previous_semantics"]:
stage = get_current_stage()
offline_generation_utils.remove_previous_semantics(stage)
# Spawn a new forklift at a random pose
forklift_prim = prims.create_prim(
prim_path="/World/Forklift",
position=(random.uniform(-20, -2), random.uniform(-1, 3), 0),
orientation=euler_angles_to_quat([0, 0, random.uniform(0, math.pi)]),
usd_path=assets_root_path + config["forklift"]["url"],
semantic_label=config["forklift"]["class"],
)
# Spawn the pallet in front of the forklift with a random offset on the Y (pallet's forward) axis
forklift_tf = omni.usd.get_world_transform_matrix(forklift_prim)
pallet_offset_tf = Gf.Matrix4d().SetTranslate(Gf.Vec3d(0, random.uniform(-1.2, -1.8), 0))
pallet_pos_gf = (pallet_offset_tf * forklift_tf).ExtractTranslation()
forklift_quat_gf = forklift_tf.ExtractRotationQuat()
forklift_quat_wxyz = (forklift_quat_gf.GetReal(), *forklift_quat_gf.GetImaginary())  # scalar-first (w, x, y, z)
pallet_prim = prims.create_prim(
prim_path="/World/Pallet",
position=pallet_pos_gf,
    orientation=forklift_quat_wxyz,
usd_path=assets_root_path + config["pallet"]["url"],
semantic_label=config["pallet"]["class"],
)
# Register randomizers graphs
offline_generation_utils.register_scatter_boxes(pallet_prim, assets_root_path, config)
offline_generation_utils.register_cone_placement(forklift_prim, assets_root_path, config)
offline_generation_utils.register_lights_placement(forklift_prim, pallet_prim)
# Spawn a camera in the driver's location looking at the pallet
forklift_pos_gf = forklift_tf.ExtractTranslation()
driver_cam_pos_gf = forklift_pos_gf + Gf.Vec3d(0.0, 0.0, 1.9)
driver_cam = rep.create.camera(
focus_distance=400.0, focal_length=24.0, clipping_range=(0.1, 10000000.0), name="DriverCam"
)
# Camera looking at the pallet
pallet_cam = rep.create.camera(name="PalletCam")
# Camera looking at the forklift from a top view with large min clipping to see the scene through the ceiling
top_view_cam = rep.create.camera(clipping_range=(6.0, 1000000.0), name="TopCam")
# Generate graph nodes to be triggered every frame
with rep.trigger.on_frame():
rep.randomizer.scatter_boxes()
rep.randomizer.place_cones()
rep.randomizer.randomize_lights()
pallet_cam_min = (pallet_pos_gf[0] - 2, pallet_pos_gf[1] - 2, 2)
pallet_cam_max = (pallet_pos_gf[0] + 2, pallet_pos_gf[1] + 2, 4)
with pallet_cam:
rep.modify.pose(
position=rep.distribution.uniform(pallet_cam_min, pallet_cam_max),
look_at=str(pallet_prim.GetPrimPath()),
)
driver_cam_min = (driver_cam_pos_gf[0], driver_cam_pos_gf[1], driver_cam_pos_gf[2] - 0.25)
driver_cam_max = (driver_cam_pos_gf[0], driver_cam_pos_gf[1], driver_cam_pos_gf[2] + 0.25)
with driver_cam:
rep.modify.pose(
position=rep.distribution.uniform(driver_cam_min, driver_cam_max),
look_at=str(pallet_prim.GetPrimPath()),
)
# Generate graph nodes to be triggered only at the given interval
with rep.trigger.on_frame(interval=4):
    top_view_cam_min = (forklift_pos_gf[0], forklift_pos_gf[1], 9)
    top_view_cam_max = (forklift_pos_gf[0], forklift_pos_gf[1], 11)
with top_view_cam:
rep.modify.pose(
position=rep.distribution.uniform(top_view_cam_min, top_view_cam_max),
rotation=rep.distribution.uniform((0, -90, -30), (0, -90, 30)),
)
# If output directory is relative, set it relative to the current working directory
if config["writer_config"]["output_dir"] and not os.path.isabs(config["writer_config"]["output_dir"]):
config["writer_config"]["output_dir"] = os.path.join(os.getcwd(), config["writer_config"]["output_dir"])
print(f"Output directory={config['writer_config']['output_dir']}")
# Setup the writer
writer = rep.WriterRegistry.get(config["writer"])
writer.initialize(**config["writer_config"])
forklift_rp = rep.create.render_product(top_view_cam, config["resolution"], name="TopView")
driver_rp = rep.create.render_product(driver_cam, config["resolution"], name="DriverView")
pallet_rp = rep.create.render_product(pallet_cam, config["resolution"], name="PalletView")
writer.attach([forklift_rp, driver_rp, pallet_rp])
# Run a simulation before generating data
offline_generation_utils.simulate_falling_objects(forklift_prim, assets_root_path, config)
# Increase subframes if materials are not loaded on time or ghosting rendering artifacts appear on quickly moving objects,
# see: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/subframes_examples.html
if config["rt_subframes"] > 1:
rep.settings.carb_settings("/omni/replicator/RTSubframes", config["rt_subframes"])
else:
carb.log_warn("RTSubframes is set to 1, consider increasing it if materials are not loaded on time")
# Run the SDG
rep.orchestrator.run_until_complete(num_frames=config["num_frames"])
simulation_app.close()
| 9,138 | Python | 39.799107 | 122 | 0.706281 |
2820207922/isaac_ws/standalone_examples/replicator/offline_generation/config/config_kitti_writer.yaml | launch_config:
renderer: RayTracedLighting
headless: true
resolution: [512, 512]
num_frames: 5
clear_previous_semantics: false
writer: KittiWriter
writer_config:
output_dir: _out_kitti
colorize_instance_segmentation: true | 229 | YAML | 21.999998 | 38 | 0.786026 |
2820207922/isaac_ws/standalone_examples/replicator/offline_generation/config/config_basic_writer.yaml | launch_config:
renderer: RayTracedLighting
headless: false
resolution: [512, 512]
env_url: "/Isaac/Environments/Grid/default_environment.usd"
rt_subframes: 32
writer: BasicWriter
writer_config:
output_dir: _out_basicwriter
rgb: true | 240 | YAML | 23.099998 | 59 | 0.775 |
2820207922/isaac_ws/standalone_examples/replicator/augmentation/annotator_augmentation.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate augmented synthetic data from annotators
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp(launch_config={"headless": False})
import argparse
import os
import time
import carb.settings
import numpy as np
import omni.replicator.core as rep
import warp as wp
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import open_stage
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument("--num_frames", type=int, default=25, help="The number of frames to capture")
parser.add_argument("--use_warp", action="store_true", help="Use warp augmentations instead of numpy")
args, unknown = parser.parse_known_args()
NUM_FRAMES = args.num_frames
USE_WARP = args.use_warp
ENV_URL = "/Isaac/Environments/Grid/default_environment.usd"
# Enable scripts
carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True)
# Illustrative augmentation switching red and blue channels in rgb data using numpy (CPU) and warp (GPU)
def rgb_to_bgr_np(data_in):
data_in[:, :, [0, 2]] = data_in[:, :, [2, 0]]
return data_in
@wp.kernel
def rgb_to_bgr_wp(data_in: wp.array3d(dtype=wp.uint8), data_out: wp.array3d(dtype=wp.uint8)):
i, j = wp.tid()
data_out[i, j, 0] = data_in[i, j, 2]
data_out[i, j, 1] = data_in[i, j, 1]
data_out[i, j, 2] = data_in[i, j, 0]
data_out[i, j, 3] = data_in[i, j, 3]
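# Note: when wrapped via Augmentation.from_function, the kernel is assumed to be
# launched by replicator with a 2D grid over the image (height, width), so wp.tid()
# yields per-pixel (row, column) indices.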
# Gaussian noise augmentation on depth data in numpy (CPU) and warp (GPU)
def gaussian_noise_depth_np(data_in, sigma: float, seed: int):
np.random.seed(seed)
return data_in + np.random.randn(*data_in.shape) * sigma
rep.AnnotatorRegistry.register_augmentation(
"gn_depth_np", rep.annotators.Augmentation.from_function(gaussian_noise_depth_np, sigma=0.1, seed=None)
)
@wp.kernel
def gaussian_noise_depth_wp(
data_in: wp.array2d(dtype=wp.float32), data_out: wp.array2d(dtype=wp.float32), sigma: float, seed: int
):
i, j = wp.tid()
state = wp.rand_init(seed, wp.tid())
data_out[i, j] = data_in[i, j] + sigma * wp.randn(state)
rep.AnnotatorRegistry.register_augmentation(
"gn_depth_wp", rep.annotators.Augmentation.from_function(gaussian_noise_depth_wp, sigma=0.1, seed=None)
)
# Helper functions for writing images from annotator data
def write_rgb(data, path):
rgb_img = Image.fromarray(data, mode="RGBA")
rgb_img.save(path + ".png")
def write_depth(data, path):
    # Convert to numpy (if warp), normalize, handle any nan values, and convert from float32 to an 8-bit int array
if isinstance(data, wp.array):
data = data.numpy()
# Replace any -inf and inf values with nan, then calculate the mean value and replace nan with the mean
data[np.isinf(data)] = np.nan
data = np.nan_to_num(data, nan=np.nanmean(data), copy=False)
normalized_array = (data - np.min(data)) / (np.max(data) - np.min(data))
integer_array = (normalized_array * 255).astype(np.uint8)
depth_img = Image.fromarray(integer_array, mode="L")
depth_img.save(path + ".png")
# Setup the environment
assets_root_path = get_assets_root_path()
open_stage(assets_root_path + ENV_URL)
# Disable capture on play and async rendering
carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
carb.settings.get_settings().set("/omni/replicator/asyncRendering", False)
carb.settings.get_settings().set("/app/asyncRendering", False)
# Create a red cube and a render product from a camera looking at the cube from the top
red_mat = rep.create.material_omnipbr(diffuse=(1, 0, 0))
red_cube = rep.create.cube(position=(0, 0, 0.71), material=red_mat)
cam = rep.create.camera(position=(0, 0, 5), look_at=(0, 0, 0))
rp = rep.create.render_product(cam, (512, 512))
# Update the app a couple of times to fully load texture/materials
for _ in range(5):
simulation_app.update()
# Get the local augmentations, either from function or from the registry
rgb_to_bgr_augm = None
gn_depth_augm = None
if USE_WARP:
rgb_to_bgr_augm = rep.annotators.Augmentation.from_function(rgb_to_bgr_wp)
gn_depth_augm = rep.AnnotatorRegistry.get_augmentation("gn_depth_wp")
else:
rgb_to_bgr_augm = rep.annotators.Augmentation.from_function(rgb_to_bgr_np)
gn_depth_augm = rep.AnnotatorRegistry.get_augmentation("gn_depth_np")
# Output directories
out_dir = os.path.join(os.getcwd(), "_out_augm_annot")
print(f"Writing data to: {out_dir}")
os.makedirs(out_dir, exist_ok=True)
# Register the annotator together with its augmentation
rep.annotators.register(
name="rgb_to_bgr_augm",
annotator=rep.annotators.augment(
source_annotator=rep.AnnotatorRegistry.get_annotator("rgb"),
augmentation=rgb_to_bgr_augm,
),
)
rgb_to_bgr_annot = rep.AnnotatorRegistry.get_annotator("rgb_to_bgr_augm")
depth_annot_1 = rep.AnnotatorRegistry.get_annotator("distance_to_camera")
depth_annot_1.augment(gn_depth_augm)
depth_annot_2 = rep.AnnotatorRegistry.get_annotator("distance_to_camera")
depth_annot_2.augment(gn_depth_augm, sigma=0.5)
rgb_to_bgr_annot.attach(rp)
depth_annot_1.attach(rp)
depth_annot_2.attach(rp)
# Generate a replicator graph to rotate the cube every capture frame
with rep.trigger.on_frame():
with red_cube:
rep.randomizer.rotation()
# Evaluate the graph
rep.orchestrator.preview()
# Measure the duration of capturing the data
start_time = time.time()
# The `step()` function will trigger the randomization graph, feed annotators with new data, and trigger the writers
for i in range(NUM_FRAMES):
rep.orchestrator.step()
rgb_data = rgb_to_bgr_annot.get_data()
depth_data_1 = depth_annot_1.get_data()
depth_data_2 = depth_annot_2.get_data()
write_rgb(rgb_data, os.path.join(out_dir, f"annot_rgb_{i}"))
write_depth(depth_data_1, os.path.join(out_dir, f"annot_depth_1_{i}"))
write_depth(depth_data_2, os.path.join(out_dir, f"annot_depth_2_{i}"))
print(
f"The duration for capturing {NUM_FRAMES} frames using '{'warp' if USE_WARP else 'numpy'}' was: {time.time() - start_time:.4f} seconds, with an average of {(time.time() - start_time) / NUM_FRAMES:.4f} seconds per frame."
)
simulation_app.close()
| 6,579 | Python | 35.759776 | 224 | 0.716978 |
2820207922/isaac_ws/standalone_examples/replicator/augmentation/writer_augmentation.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate augmented synthetic from a writer
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp(launch_config={"headless": False})
import argparse
import os
import time
import carb.settings
import numpy as np
import omni.replicator.core as rep
import warp as wp
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import open_stage
parser = argparse.ArgumentParser()
parser.add_argument("--num_frames", type=int, default=25, help="The number of frames to capture")
parser.add_argument("--use_warp", action="store_true", help="Use warp augmentations instead of numpy")
args, unknown = parser.parse_known_args()
NUM_FRAMES = args.num_frames
USE_WARP = args.use_warp
ENV_URL = "/Isaac/Environments/Grid/default_environment.usd"
# Enable scripts
carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True)
# Gaussian noise augmentation on rgba data in numpy (CPU) and warp (GPU)
def gaussian_noise_rgb_np(data_in, sigma: float, seed: int):
np.random.seed(seed)
data_in[:, :, 0] = data_in[:, :, 0] + np.random.randn(*data_in.shape[:-1]) * sigma
data_in[:, :, 1] = data_in[:, :, 1] + np.random.randn(*data_in.shape[:-1]) * sigma
data_in[:, :, 2] = data_in[:, :, 2] + np.random.randn(*data_in.shape[:-1]) * sigma
return data_in
@wp.kernel
def gaussian_noise_rgb_wp(
data_in: wp.array3d(dtype=wp.uint8), data_out: wp.array3d(dtype=wp.uint8), sigma: float, seed: int
):
i, j = wp.tid()
state = wp.rand_init(seed, wp.tid())
data_out[i, j, 0] = wp.uint8(wp.int32(data_in[i, j, 0]) + wp.int32(sigma * wp.randn(state)))
data_out[i, j, 1] = wp.uint8(wp.int32(data_in[i, j, 1]) + wp.int32(sigma * wp.randn(state)))
data_out[i, j, 2] = wp.uint8(wp.int32(data_in[i, j, 2]) + wp.int32(sigma * wp.randn(state)))
data_out[i, j, 3] = data_in[i, j, 3]
# Gaussian noise augmentation on depth data in numpy (CPU) and warp (GPU)
def gaussian_noise_depth_np(data_in, sigma: float, seed: int):
np.random.seed(seed)
return data_in + np.random.randn(*data_in.shape) * sigma
rep.AnnotatorRegistry.register_augmentation(
"gn_depth_np", rep.annotators.Augmentation.from_function(gaussian_noise_depth_np, sigma=0.1, seed=None)
)
@wp.kernel
def gaussian_noise_depth_wp(
data_in: wp.array2d(dtype=wp.float32), data_out: wp.array2d(dtype=wp.float32), sigma: float, seed: int
):
i, j = wp.tid()
state = wp.rand_init(seed, wp.tid())
data_out[i, j] = data_in[i, j] + sigma * wp.randn(state)
rep.AnnotatorRegistry.register_augmentation(
"gn_depth_wp", rep.annotators.Augmentation.from_function(gaussian_noise_depth_wp, sigma=0.1, seed=None)
)
# Setup the environment
assets_root_path = get_assets_root_path()
open_stage(assets_root_path + ENV_URL)
# Disable capture on play and async rendering
carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
carb.settings.get_settings().set("/omni/replicator/asyncRendering", False)
carb.settings.get_settings().set("/app/asyncRendering", False)
# Create a red cube and a render product from a camera looking at the cube from the top
red_mat = rep.create.material_omnipbr(diffuse=(1, 0, 0))
red_cube = rep.create.cube(position=(0, 0, 0.71), material=red_mat)
cam = rep.create.camera(position=(0, 0, 5), look_at=(0, 0, 0))
rp = rep.create.render_product(cam, (512, 512))
# Update the app a couple of times to fully load texture/materials
for _ in range(5):
simulation_app.update()
# Access default annotators from replicator
rgb_to_hsv_augm = rep.annotators.Augmentation.from_function(rep.augmentations_default.aug_rgb_to_hsv)
hsv_to_rgb_augm = rep.annotators.Augmentation.from_function(rep.augmentations_default.aug_hsv_to_rgb)
# Access the custom annotators as functions or from the registry
gn_rgb_augm = None
gn_depth_augm = None
if USE_WARP:
gn_rgb_augm = rep.annotators.Augmentation.from_function(gaussian_noise_rgb_wp, sigma=6.0, seed=None)
gn_depth_augm = rep.AnnotatorRegistry.get_augmentation("gn_depth_wp")
else:
gn_rgb_augm = rep.annotators.Augmentation.from_function(gaussian_noise_rgb_np, sigma=6.0, seed=None)
gn_depth_augm = rep.AnnotatorRegistry.get_augmentation("gn_depth_np")
# Create a writer and apply the augmentations to its corresponding annotators
out_dir = os.path.join(os.getcwd(), "_out_augm_writer")
print(f"Writing data to: {out_dir}")
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir=out_dir, rgb=True, distance_to_camera=True)
augmented_rgb_annot = rep.annotators.get("rgb").augment_compose(
[rgb_to_hsv_augm, gn_rgb_augm, hsv_to_rgb_augm], name="rgb"
)
writer.add_annotator(augmented_rgb_annot)
writer.augment_annotator("distance_to_camera", gn_depth_augm)
# Attach render product to writer
writer.attach([rp])
# Generate a replicator graph randomizing the cube's rotation every frame
with rep.trigger.on_frame():
with red_cube:
rep.randomizer.rotation()
# Evaluate the graph
rep.orchestrator.preview()
# Measure the duration of capturing the data
start_time = time.time()
# The `step()` function will trigger the randomization graph, feed annotators with new data, and trigger the writers
for i in range(NUM_FRAMES):
rep.orchestrator.step()
print(
f"The duration for capturing {NUM_FRAMES} frames using '{'warp' if USE_WARP else 'numpy'}' was: {time.time() - start_time:.4f} seconds, with an average of {(time.time() - start_time) / NUM_FRAMES:.4f} seconds per frame."
)
simulation_app.close()
| 5,949 | Python | 38.144737 | 224 | 0.722642 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/offline_pose_generation.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Generate a [YCBVideo, DOPE] synthetic datasets
"""
import argparse
import os
import signal
import carb
import numpy as np
import torch
import yaml
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("Pose Generation data generator")
parser.add_argument("--num_mesh", type=int, default=30, help="Number of frames to record similar to MESH dataset")
parser.add_argument("--num_dome", type=int, default=30, help="Number of frames to record similar to DOME dataset")
parser.add_argument(
"--dome_interval",
type=int,
default=1,
help="Number of frames to capture before switching DOME background. When generating large datasets, increasing this interval will reduce time taken. A good value to set is 10.",
)
parser.add_argument("--output_folder", "-o", type=str, default="output", help="Output directory.")
parser.add_argument("--use_s3", action="store_true", help="Saves output to s3 bucket. Only supported by DOPE writer.")
parser.add_argument(
"--bucket",
type=str,
default=None,
help="Bucket name to store output in. See naming rules: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html",
)
parser.add_argument("--s3_region", type=str, default="us-east-1", help="s3 region.")
parser.add_argument("--endpoint", type=str, default=None, help="s3 endpoint to write to.")
parser.add_argument(
"--writer",
type=str,
default="YCBVideo",
help="Which writer to use to output data. Choose between: [YCBVideo, DOPE]",
)
parser.add_argument(
"--test",
action="store_true",
help="Generates data for testing. Hardcodes the pose of the object to compare output data with expected data to ensure that generation is correct.",
)
args, unknown_args = parser.parse_known_args()
# Do not write to s3 if in test mode
if args.test:
args.use_s3 = False
if args.use_s3 and (args.endpoint is None or args.bucket is None):
raise Exception("To use s3, --endpoint and --bucket must be specified.")
CONFIG_FILES = {"dope": "config/dope_config.yaml", "ycbvideo": "config/ycb_config.yaml"}
TEST_CONFIG_FILES = {"dope": "tests/dope/test_dope_config.yaml", "ycbvideo": "tests/ycbvideo/test_ycb_config.yaml"}
# Path to config file:
cf_map = TEST_CONFIG_FILES if args.test else CONFIG_FILES
CONFIG_FILE = cf_map[args.writer.lower()]
CONFIG_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), CONFIG_FILE)
with open(CONFIG_FILE_PATH) as f:
config_data = yaml.full_load(f)
OBJECTS_TO_GENERATE = config_data["OBJECTS_TO_GENERATE"]
kit = SimulationApp(launch_config=config_data["CONFIG"])
import math
import omni.replicator.core as rep
from omni.isaac.core import World
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.core.utils.semantics import add_update_semantics
from omni.replicator.isaac.scripts.writers import DOPEWriter, YCBVideoWriter
# Since the simulation is mostly collision checking, a larger physics dt can be used to speed up the object movements
world = World(physics_dt=1.0 / 30.0)
world.reset()
from flying_distractors.collision_box import CollisionBox
from flying_distractors.dynamic_object import DynamicObject
from flying_distractors.dynamic_object_set import DynamicObjectSet
from flying_distractors.dynamic_shape_set import DynamicShapeSet
from flying_distractors.flying_distractors import FlyingDistractors
from omni.isaac.core.utils.random import get_random_world_pose_in_view
from omni.isaac.core.utils.transformations import get_world_pose_from_relative
from tests.test_utils import clean_output_dir, run_pose_generation_test
class RandomScenario(torch.utils.data.IterableDataset):
def __init__(
self,
num_mesh,
num_dome,
dome_interval,
output_folder,
use_s3=False,
endpoint="",
s3_region="us-east-1",
writer="ycbvideo",
bucket="",
test=False,
):
self.test = test
if writer == "ycbvideo":
self.writer_helper = YCBVideoWriter
elif writer == "dope":
self.writer_helper = DOPEWriter
else:
raise Exception("Invalid writer specified. Choose between [DOPE, YCBVideo].")
self.result = True
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self.result = False
return
self.dome_texture_path = assets_root_path + "/NVIDIA/Assets/Skies/"
self.ycb_asset_path = assets_root_path + "/Isaac/Props/YCB/Axis_Aligned/"
self.asset_path = assets_root_path + "/Isaac/Props/YCB/Axis_Aligned/"
self.train_parts = []
self.train_part_mesh_path_to_prim_path_map = {}
self.mesh_distractors = FlyingDistractors()
self.dome_distractors = FlyingDistractors()
self.current_distractors = None
self.data_writer = None
self.num_mesh = max(0, num_mesh) if not self.test else 5
self.num_dome = max(0, num_dome) if not self.test else 0
self.train_size = self.num_mesh + self.num_dome
self.dome_interval = dome_interval
self._output_folder = output_folder if use_s3 else os.path.join(os.getcwd(), output_folder)
self.use_s3 = use_s3
self.endpoint = endpoint
self.s3_region = s3_region
self.bucket = bucket
self.writer_config = {
"output_folder": self._output_folder,
"use_s3": self.use_s3,
"bucket_name": self.bucket,
"endpoint_url": self.endpoint,
"s3_region": self.s3_region,
"train_size": self.train_size,
}
self._setup_world()
self.cur_idx = 0
self.exiting = False
self.last_frame_reached = False
# Clean up output folder ahead of test
if not self.use_s3 and self.test:
clean_output_dir(self._output_folder)
# Disable capture on play and async rendering
self._carb_settings = carb.settings.get_settings()
self._carb_settings.set("/omni/replicator/captureOnPlay", False)
self._carb_settings.set("/app/asyncRendering", False)
self._carb_settings.set("/omni/replicator/asyncRendering", False)
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("Exiting dataset generation..")
self.exiting = True
def _setup_world(self):
"""Populate scene with assets and prepare for synthetic data generation."""
self._setup_camera()
rep.settings.set_render_rtx_realtime()
# Allow flying distractors to float
world.get_physics_context().set_gravity(0.0)
collision_box = self._setup_collision_box()
world.scene.add(collision_box)
self._setup_distractors(collision_box)
self._setup_train_objects()
if not self.test:
self._setup_randomizers()
# Update the app a few times to make sure the materials are fully loaded and world scene objects are registered
for _ in range(5):
kit.app.update()
# Setup writer
self.writer_helper.register_pose_annotator(config_data=config_data)
self.writer = self.writer_helper.setup_writer(config_data=config_data, writer_config=self.writer_config)
self.writer.attach([self.render_product])
self.dome_distractors.set_visible(False)
# Generate the replicator graphs without triggering any writing
rep.orchestrator.preview()
def _setup_camera(self):
focal_length = config_data["HORIZONTAL_APERTURE"] * config_data["F_X"] / config_data["WIDTH"]
# Setup camera and render product
self.camera = rep.create.camera(
position=(0, 0, 0),
rotation=np.array(config_data["CAMERA_RIG_ROTATION"]),
focal_length=focal_length,
clipping_range=(0.01, 10000),
)
self.render_product = rep.create.render_product(self.camera, (config_data["WIDTH"], config_data["HEIGHT"]))
camera_rig_path = str(rep.utils.get_node_targets(self.camera.node, "inputs:primsIn")[0])
self.camera_path = camera_rig_path + "/Camera"
with rep.get.prims(prim_types=["Camera"]):
rep.modify.pose(
rotation=rep.distribution.uniform(
np.array(config_data["CAMERA_ROTATION"]), np.array(config_data["CAMERA_ROTATION"])
)
)
self.rig = XFormPrim(prim_path=camera_rig_path)
def _setup_collision_box(self):
# Create a collision box in view of the camera, allowing distractors placed in the box to be within
# [MIN_DISTANCE, MAX_DISTANCE] of the camera. The collision box will be placed in front of the camera,
# regardless of CAMERA_ROTATION or CAMERA_RIG_ROTATION.
self.fov_x = 2 * math.atan(config_data["WIDTH"] / (2 * config_data["F_X"]))
self.fov_y = 2 * math.atan(config_data["HEIGHT"] / (2 * config_data["F_Y"]))
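        # Standard pinhole relation (illustrative numbers): with WIDTH=512 and
        # F_X=256 pixels, fov_x = 2 * atan(512 / (2 * 256)) = 2 * atan(1) = 90 degrees.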
theta_x = self.fov_x / 2.0
theta_y = self.fov_y / 2.0
# Collision box dimensions lower than 1.3 do not work properly
collision_box_width = max(2 * config_data["MAX_DISTANCE"] * math.tan(theta_x), 1.3)
collision_box_height = max(2 * config_data["MAX_DISTANCE"] * math.tan(theta_y), 1.3)
collision_box_depth = config_data["MAX_DISTANCE"] - config_data["MIN_DISTANCE"]
collision_box_path = "/World/collision_box"
collision_box_name = "collision_box"
        # Collision box is centered between MIN_DISTANCE and MAX_DISTANCE along the camera prim's +z axis.
        # Note that CAMERA_ROTATION in the config flips the default Isaac Sim camera convention
        # (-z out, +y up, +x right) to +z out the face of the camera, so the translation below is positive.
collision_box_translation_from_camera = np.array(
[0, 0, (config_data["MIN_DISTANCE"] + config_data["MAX_DISTANCE"]) / 2.0]
)
# Collision box has no rotation with respect to the camera.
collision_box_rotation_from_camera = np.array([0, 0, 0])
collision_box_orientation_from_camera = euler_angles_to_quat(collision_box_rotation_from_camera, degrees=True)
# Get the desired pose of the collision box from a pose defined locally with respect to the camera.
camera_prim = world.stage.GetPrimAtPath(self.camera_path)
collision_box_center, collision_box_orientation = get_world_pose_from_relative(
camera_prim, collision_box_translation_from_camera, collision_box_orientation_from_camera
)
return CollisionBox(
collision_box_path,
collision_box_name,
position=collision_box_center,
orientation=collision_box_orientation,
width=collision_box_width,
height=collision_box_height,
depth=collision_box_depth,
)
def _setup_distractors(self, collision_box):
# List of distractor objects should not contain objects that are being used for training
        train_objects = [obj["part_name"] for obj in OBJECTS_TO_GENERATE]
distractor_mesh_filenames = [
file_name for file_name in config_data["MESH_FILENAMES"] if file_name not in train_objects
]
usd_path_list = [
f"{self.ycb_asset_path}{usd_filename_prefix}.usd" for usd_filename_prefix in distractor_mesh_filenames
]
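        # Mesh prim names follow the prim_type convention from the config (e.g. "003_cracker_box" ->
        # "_03_cracker_box"): drop the leading character of the USD file name and prepend an underscore.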
mesh_list = [f"_{usd_filename_prefix[1:]}" for usd_filename_prefix in distractor_mesh_filenames]
if self.num_mesh > 0:
# Distractors for the MESH dataset
mesh_shape_set = DynamicShapeSet(
"/World/mesh_shape_set",
"mesh_shape_set",
"mesh_shape",
"mesh_shape",
config_data["NUM_MESH_SHAPES"],
collision_box,
scale=np.array(config_data["SHAPE_SCALE"]),
mass=config_data["SHAPE_MASS"],
fraction_glass=config_data["MESH_FRACTION_GLASS"],
)
self.mesh_distractors.add(mesh_shape_set)
mesh_object_set = DynamicObjectSet(
"/World/mesh_object_set",
"mesh_object_set",
usd_path_list,
mesh_list,
"mesh_object",
"mesh_object",
config_data["NUM_MESH_OBJECTS"],
collision_box,
scale=np.array(config_data["OBJECT_SCALE"]),
mass=config_data["OBJECT_MASS"],
fraction_glass=config_data["MESH_FRACTION_GLASS"],
)
self.mesh_distractors.add(mesh_object_set)
# Set the current distractors to the mesh dataset type
self.current_distractors = self.mesh_distractors
if self.num_dome > 0:
# Distractors for the DOME dataset
dome_shape_set = DynamicShapeSet(
"/World/dome_shape_set",
"dome_shape_set",
"dome_shape",
"dome_shape",
config_data["NUM_DOME_SHAPES"],
collision_box,
scale=np.array(config_data["SHAPE_SCALE"]),
mass=config_data["SHAPE_MASS"],
fraction_glass=config_data["DOME_FRACTION_GLASS"],
)
self.dome_distractors.add(dome_shape_set)
dome_object_set = DynamicObjectSet(
"/World/dome_object_set",
"dome_object_set",
usd_path_list,
mesh_list,
"dome_object",
"dome_object",
config_data["NUM_DOME_OBJECTS"],
collision_box,
scale=np.array(config_data["OBJECT_SCALE"]),
mass=config_data["OBJECT_MASS"],
fraction_glass=config_data["DOME_FRACTION_GLASS"],
)
self.dome_distractors.add(dome_object_set)
def _setup_train_objects(self):
# Add the part to train the network on
train_part_idx = 0
        for obj in OBJECTS_TO_GENERATE:
            for prim_idx in range(obj["num"]):
                part_name = obj["part_name"]
                ref_path = self.asset_path + part_name + ".usd"
                prim_type = obj["prim_type"]
if self.writer_helper == YCBVideoWriter and prim_type not in config_data["CLASS_NAME_TO_INDEX"]:
raise Exception(f"Train object {prim_type} is not in CLASS_NAME_TO_INDEX in config.yaml.")
path = "/World/" + prim_type + f"_{prim_idx}"
mesh_path = path + "/" + prim_type
name = f"train_part_{train_part_idx}"
self.train_part_mesh_path_to_prim_path_map[mesh_path] = path
train_part = DynamicObject(
usd_path=ref_path,
prim_path=path,
mesh_path=mesh_path,
name=name,
position=np.array([0.0, 0.0, 0.0]),
scale=config_data["OBJECT_SCALE"],
mass=1.0,
)
train_part.prim.GetAttribute("physics:rigidBodyEnabled").Set(True)
self.train_parts.append(train_part)
# Add semantic information
mesh_prim = world.stage.GetPrimAtPath(mesh_path)
add_update_semantics(mesh_prim, prim_type)
train_part_idx += 1
if prim_idx == 0 and self.writer_helper == YCBVideoWriter:
# Save the vertices of the part in '.xyz' format. This will be used in one of PoseCNN's loss functions
coord_prim = world.stage.GetPrimAtPath(path)
self.writer_helper.save_mesh_vertices(mesh_prim, coord_prim, prim_type, self._output_folder)
def _setup_randomizers(self):
"""Add domain randomization with Replicator Randomizers"""
# Create and randomize sphere lights
def randomize_sphere_lights():
lights = rep.create.light(
light_type="Sphere",
color=rep.distribution.uniform((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
intensity=rep.distribution.uniform(100000, 3000000),
position=rep.distribution.uniform((-250, -250, -250), (250, 250, 100)),
scale=rep.distribution.uniform(1, 20),
count=config_data["NUM_LIGHTS"],
)
return lights.node
# Randomize prim colors
def randomize_colors(prim_path_regex):
prims = rep.get.prims(path_pattern=prim_path_regex)
mats = rep.create.material_omnipbr(
metallic=rep.distribution.uniform(0.0, 1.0),
roughness=rep.distribution.uniform(0.0, 1.0),
diffuse=rep.distribution.uniform((0, 0, 0), (1, 1, 1)),
count=100,
)
with prims:
rep.randomizer.materials(mats)
return prims.node
rep.randomizer.register(randomize_sphere_lights, override=True)
rep.randomizer.register(randomize_colors, override=True)
with rep.trigger.on_frame():
rep.randomizer.randomize_sphere_lights()
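            # The two lookaheads in the regex below select prim paths containing both "shape" and
            # "nonglass", so only the non-glass primitive shapes get randomized OmniPBR materials.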
rep.randomizer.randomize_colors("(?=.*shape)(?=.*nonglass).*")
def _setup_dome_randomizers(self):
"""Add domain randomization with Replicator Randomizers"""
# Create and randomize a dome light for the DOME dataset
def randomize_domelight(texture_paths):
lights = rep.create.light(
light_type="Dome",
rotation=rep.distribution.uniform((0, 0, 0), (360, 360, 360)),
texture=rep.distribution.choice(texture_paths),
)
return lights.node
rep.randomizer.register(randomize_domelight, override=True)
dome_texture_paths = [
self.dome_texture_path + dome_texture + ".hdr" for dome_texture in config_data["DOME_TEXTURES"]
]
with rep.trigger.on_frame(interval=self.dome_interval):
rep.randomizer.randomize_domelight(dome_texture_paths)
def randomize_movement_in_view(self, prim):
"""Randomly move and rotate prim such that it stays in view of camera.
Args:
prim (DynamicObject): prim to randomly move and rotate.
"""
if not self.test:
camera_prim = world.stage.GetPrimAtPath(self.camera_path)
rig_prim = world.stage.GetPrimAtPath(self.rig.prim_path)
translation, orientation = get_random_world_pose_in_view(
camera_prim,
config_data["MIN_DISTANCE"],
config_data["MAX_DISTANCE"],
self.fov_x,
self.fov_y,
config_data["FRACTION_TO_SCREEN_EDGE"],
rig_prim,
np.array(config_data["MIN_ROTATION_RANGE"]),
np.array(config_data["MAX_ROTATION_RANGE"]),
)
else:
translation, orientation = np.array([0.0, 0.0, 1.0]), np.array([0.0, 0.0, 0.0, 1.0])
prim.set_world_pose(translation, orientation)
def __iter__(self):
return self
def __next__(self):
# First frame of DOME dataset
        if self.cur_idx == self.num_mesh: # MESH dataset generation complete, switch to DOME dataset
print(f"Starting DOME dataset generation of {self.num_dome} frames..")
# Hide the FlyingDistractors used for the MESH dataset
self.mesh_distractors.set_visible(False)
# Show the FlyingDistractors used for the DOME dataset
self.dome_distractors.set_visible(True)
# Switch the distractors to DOME
self.current_distractors = self.dome_distractors
# Randomize the dome backgrounds
self._setup_dome_randomizers()
# Run another preview to generate the replicator graphs for the DOME dataset without triggering any writing
rep.orchestrator.preview()
# Randomize the distractors by applying forces to them and changing their materials
self.current_distractors.apply_force_to_assets(config_data["FORCE_RANGE"])
self.current_distractors.randomize_asset_glass_color()
# Randomize the pose of the object(s) of interest in the camera view
for train_part in self.train_parts:
self.randomize_movement_in_view(train_part)
        # Simulate the applied forces for 50 physics steps before capturing
for _ in range(50):
world.step(render=False)
print(f"ID: {self.cur_idx}/{self.train_size - 1}")
rep.orchestrator.step(rt_subframes=4)
# Check that there was valid training data in the last frame (target object(s) visible to camera)
if self.writer.is_last_frame_valid():
self.cur_idx += 1
# Check if last frame has been reached
if self.cur_idx >= self.train_size:
print(f"Dataset of size {self.train_size} has been reached, generation loop will be stopped..")
print(f"Data outputted to: {self._output_folder}")
self.last_frame_reached = True
dataset = RandomScenario(
num_mesh=args.num_mesh,
num_dome=args.num_dome,
dome_interval=args.dome_interval,
output_folder=args.output_folder,
use_s3=args.use_s3,
bucket=args.bucket,
s3_region=args.s3_region,
endpoint=args.endpoint,
writer=args.writer.lower(),
test=args.test,
)
if dataset.result:
    # Iterate through the dataset to generate the output
print("Loading materials. Will generate data soon...")
import datetime
start_time = datetime.datetime.now()
print("Start timestamp:", start_time.strftime("%m/%d/%Y, %H:%M:%S"))
if dataset.train_size > 0:
print(f"Starting dataset generation of {dataset.train_size} frames..")
if dataset.num_mesh > 0:
print(f"Starting MESH dataset generation of {dataset.num_mesh} frames..")
# Dataset generation loop
for _ in dataset:
if dataset.last_frame_reached:
print(f"Stopping generation loop at index {dataset.cur_idx}..")
break
if dataset.exiting:
break
else:
print(f"Dataset size is set to 0 (num_mesh={dataset.num_mesh} num_dope={dataset.num_dome}), nothing to write..")
print("End timestamp:", datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
print("Total time taken:", str(datetime.datetime.now() - start_time).split(".")[0])
if args.test:
run_pose_generation_test(
writer=args.writer,
output_folder=dataset._output_folder,
test_folder=os.path.join(os.path.dirname(os.path.abspath(__file__)), "tests"),
)
# Close the app
kit.close()
| 23,434 | Python | 39.128425 | 181 | 0.618716 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/__init__.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from flying_distractors.collision_box import CollisionBox
from flying_distractors.dynamic_asset_set import DynamicAssetSet
from flying_distractors.dynamic_object import DynamicObject
from flying_distractors.dynamic_object_set import DynamicObjectSet
from flying_distractors.dynamic_shape_set import DynamicShapeSet
from flying_distractors.flying_distractors import FlyingDistractors
from utils import save_points_xyz
| 848 | Python | 52.062497 | 76 | 0.84434 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/dynamic_object_set.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import random
from typing import List, Optional
import numpy as np
from omni.isaac.core.materials.omni_glass import OmniGlass
from .collision_box import CollisionBox
from .dynamic_asset_set import DynamicAssetSet
from .dynamic_object import DynamicObject
class DynamicObjectSet(DynamicAssetSet):
"""Container class to hold and manage dynamic objects, providing an API to keep objects in motion within a collision
box, and to allow various properties of the assets to be randomized. Please note that this class assumes that
each referenced asset in usd_path_list has only a single mesh prim defining its geometry.
Args:
set_prim_path (str): prim path of the parent Prim to create, which contains all the objects in the object set
as its children.
set_name (str): name of the parent prim in the scene.
usd_path_list (List[str]): list of possible USD reference paths that the prims of each dynamic object in the
dynamic object set refer to.
mesh_list (List[str]): list of prim path base names for underlying mesh prims. Each base name in mesh_list
corresponds to the mesh prim of the referenced asset in usd_path_list.
asset_prim_path_base_prefix (str): prefix of what the objects are called in the stage (prim path base name).
asset_name_prefix (str): prefix of the objects' names in the scene.
num_assets (int): number of objects in the object set.
collision_box (CollisionBox): collision box in which to place objects, and allow objects to move within.
scale (Optional[np.ndarray], optional): local scale to be applied to each object's dimensions. Shape is (3, ).
Defaults to None, which means left unchanged.
mass (Optional[float], optional): mass of each object in kg. Defaults to None.
        fraction_glass (float, optional): fraction of objects for which glass material should be applied. Defaults to 0.0.
"""
def __init__(
self,
set_prim_path: str,
set_name: str,
usd_path_list: List[str],
mesh_list: List[str],
asset_prim_path_base_prefix: str,
asset_name_prefix: str,
num_assets: int,
collision_box: CollisionBox,
scale: Optional[np.ndarray] = None,
mass: Optional[float] = None,
fraction_glass: float = 0.0,
):
self.usd_path_list = usd_path_list
self.mesh_list = mesh_list
self.glass_object_mesh_paths = []
self.nonglass_object_mesh_paths = []
if len(usd_path_list) != len(mesh_list):
raise Exception("usd_path_list and mesh_list must contain the same number of elements")
self.mesh_map = self._create_mesh_map(usd_path_list, mesh_list)
super().__init__(
set_prim_path,
set_name,
asset_prim_path_base_prefix,
asset_name_prefix,
num_assets,
collision_box,
scale,
mass,
fraction_glass,
)
self._create_random_dynamic_asset_set()
def _create_mesh_map(self, usd_path_list, mesh_list):
"""Gets a mapping from USD reference paths to the base name of the corresponding mesh prim in the referenced USD
file.
Args:
usd_path_list (List[str]): List of possible USD reference paths that the prims of each dynamic object in the
dynamic object set refer to.
mesh_list (List[str]): List of prim path base names for underlying mesh prims. Each base name in mesh_list
corresponds to the mesh prim of the referenced asset in usd_path_list.
Returns:
Dict: Mapping from USD reference paths to the base name of the corresponding mesh prim in the referenced USD
file.
"""
mesh_map = {}
for usd_path, mesh_name in zip(usd_path_list, mesh_list):
mesh_map[usd_path] = mesh_name
return mesh_map
def _create_random_dynamic_asset(self, glass=False):
"""Creates a random dynamic object and adds it to the scene. The reference path of the object is randomly chosen
from self.usd_path_list.
Args:
glass (bool, optional): flag to specify whether the created object should have a glass material applied.
Defaults to False.
"""
object_name = f"{self.asset_name_prefix}_{self.asset_count}"
if glass:
object_path = f"{self.set_prim_path}/{self.asset_prim_path_base_prefix}_{self.asset_count}"
else:
object_path = f"{self.set_prim_path}/{self.asset_prim_path_base_prefix}_nonglass_{self.asset_count}"
usd_path = random.choice(self.usd_path_list)
mesh_path = f"{object_path}/{self.mesh_map[usd_path]}"
position = self.collision_box.get_random_position()
dynamic_prim = DynamicObject(
usd_path=usd_path,
prim_path=object_path,
mesh_path=mesh_path,
name=object_name,
position=position,
scale=self.scale,
mass=self.mass,
)
self.asset_names.append(object_name)
if glass:
color = np.random.rand(3)
material = OmniGlass(
object_path + "_glass",
name=object_name + "_glass",
ior=1.25,
depth=0.001,
thin_walled=False,
color=color,
)
self.glass_mats.append(material)
dynamic_prim.apply_visual_material(material)
self.glass_asset_paths.append(object_path)
self.glass_assets.append(dynamic_prim)
self.glass_object_mesh_paths.append(mesh_path)
else:
self.nonglass_asset_paths.append(object_path)
self.nonglass_assets.append(dynamic_prim)
self.nonglass_object_mesh_paths.append(mesh_path)
self.world.scene.add(dynamic_prim)
self.asset_count += 1
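# Example usage (a minimal sketch; assumes a running World and an existing CollisionBox `box`;
# the USD path and mesh name below are illustrative):
#   object_set = DynamicObjectSet(
#       "/World/my_object_set", "my_object_set",
#       usd_path_list=["/path/to/003_cracker_box.usd"], mesh_list=["_03_cracker_box"],
#       asset_prim_path_base_prefix="obj", asset_name_prefix="obj",
#       num_assets=10, collision_box=box, fraction_glass=0.2,
#   )
#   object_set.apply_force_to_assets(30)  # keep the objects drifting inside the box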
| 6,626 | Python | 40.679245 | 120 | 0.616963 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/dynamic_shape_set.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import random
from typing import Optional
import numpy as np
from omni.isaac.core.materials.omni_glass import OmniGlass
from omni.isaac.core.objects import DynamicCapsule, DynamicCone, DynamicCuboid, DynamicCylinder, DynamicSphere
from .collision_box import CollisionBox
from .dynamic_asset_set import DynamicAssetSet
class DynamicShapeSet(DynamicAssetSet):
"""Container class to hold and manage dynamic shapes, providing an API to keep shapes in motion within a collision
box, and to allow various properties of the shapes to be randomized.
Args:
set_prim_path (str): prim path of the parent Prim to create, which contains all the shapes in the shape set
as its children.
set_name (str): name of the parent prim in the scene.
asset_prim_path_base_prefix (str): prefix of what the shapes are called in the stage (prim path base name).
asset_name_prefix (str): prefix of the shapes' names in the scene.
num_assets (int): number of shapes in the shape set.
collision_box (CollisionBox): collision box in which to place shapes, and allow shapes to move within.
scale (Optional[np.ndarray], optional): local scale to be applied to each shape's dimensions. Shape is (3, ).
Defaults to None, which means left unchanged.
mass (Optional[float], optional): mass of each shape in kg. Defaults to None.
        fraction_glass (float, optional): fraction of shapes for which glass material should be applied. Defaults to 0.0.
"""
def __init__(
self,
set_prim_path: str,
set_name: str,
asset_prim_path_base_prefix: str,
asset_name_prefix: str,
num_assets: int,
collision_box: CollisionBox,
scale: Optional[np.ndarray] = None,
mass: Optional[float] = None,
fraction_glass: float = 0.0,
):
super().__init__(
set_prim_path,
set_name,
asset_prim_path_base_prefix,
asset_name_prefix,
num_assets,
collision_box,
scale,
mass,
fraction_glass,
)
self._create_random_dynamic_asset_set()
def _create_random_dynamic_asset(self, glass=False):
"""Creates a random dynamic shape (Cuboid, Sphere, Cylinder, Cone, or Capsule) and adds it to the scene.
Args:
glass (bool, optional): flag to specify whether the created shape should have a glass material applied.
Defaults to False.
"""
        shape_types = [DynamicCapsule, DynamicCone, DynamicCuboid, DynamicCylinder, DynamicSphere]
shape_name = f"{self.asset_name_prefix}_{self.asset_count}"
if glass:
shape_path = f"{self.set_prim_path}/{self.asset_prim_path_base_prefix}_{self.asset_count}"
else:
shape_path = f"{self.set_prim_path}/{self.asset_prim_path_base_prefix}_nonglass_{self.asset_count}"
position = self.collision_box.get_random_position()
        shape_prim = random.choice(shape_types)(
            prim_path=shape_path, # The prim path of the shape in the USD stage
            name=shape_name, # The unique name used to retrieve the object from the scene later on
            position=position, # Using the current stage units, which are meters by default.
scale=self.scale,
mass=self.mass,
)
self.asset_names.append(shape_name)
if glass:
color = np.random.rand(3)
material = OmniGlass(
shape_path + "_glass", name=shape_name + "_glass", ior=1.25, depth=0.001, thin_walled=False, color=color
)
self.glass_mats.append(material)
shape_prim.apply_visual_material(material)
self.glass_asset_paths.append(shape_path)
self.glass_assets.append(shape_prim)
else:
self.nonglass_asset_paths.append(shape_path)
self.nonglass_assets.append(shape_prim)
self.world.scene.add(shape_prim)
self.asset_count += 1
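# Example usage (a minimal sketch; assumes a running World and an existing CollisionBox `box`):
#   shape_set = DynamicShapeSet("/World/my_shape_set", "my_shape_set", "shape", "shape",
#                               num_assets=50, collision_box=box, fraction_glass=0.15)
#   shape_set.randomize_glass_color()  # re-tint the glass-material shapes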
| 4,566 | Python | 40.899082 | 120 | 0.63929 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/collision_box.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Optional
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import FixedCuboid
from omni.isaac.core.prims.xform_prim import XFormPrim
from pxr import Usd, UsdGeom
class CollisionBox(XFormPrim):
"""Creates a fixed box with collisions enabled, and provides an API to determine world coordinates of a random
location in the interior of the collision box.
Args:
prim_path (str): top-level prim path (of the collision box) of the Prim to encapsulate or create.
name (str): shortname to be used as a key by Scene class. Note: needs to be unique if the object is added to the
Scene.
position (Optional[np.ndarray], optional): position in the world frame of the collision box. Shape is (3, ).
Defaults to None, which means left unchanged.
translation (Optional[np.ndarray], optional): translation in the local frame of the collision box (with respect
to its parent prim). Shape is (3, ). Defaults to None, which means
left unchanged.
orientation (Optional[np.ndarray], optional): quaternion orientation in the world/local frame of the collision
box (depends if translation or position is specified). Quaternion
is scalar-first (w, x, y, z). Shape is (4, ). Defaults to None,
which means left unchanged.
scale (Optional[np.ndarray], optional): local scale to be applied to the collision box's dimensions. Shape is
(3, ). Defaults to None, which means left unchanged.
width (float): width of the collision box interior in world units (if unrotated, corresponds to x direction).
Defaults to 1.0.
height (float): height of the collision box interior in world units (if unrotated, corresponds to y direction).
Defaults to 1.0.
depth (float): depth of the collision box interior in world units (if unrotated, corresponds to z direction).
Defaults to 1.0.
thickness (float, optional): thickness of the collision box walls in world units. Defaults to 0.2.
visible (bool, optional): set to false for an invisible prim in the stage while rendering. Defaults to False.
"""
def __init__(
self,
prim_path: str,
name: str,
position: Optional[np.ndarray] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.ndarray] = None,
width: float = 1.0,
height: float = 1.0,
depth: float = 1.0,
thickness: float = 0.2,
visible: bool = False,
):
self.world = World.instance()
XFormPrim.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
)
self.width = width
self.height = height
self.depth = depth
self.thickness = thickness
self.visible = visible
self._create_collision_box()
def _create_face(self, suffix, translation, size):
"""Create a face/wall of the Collision Box, which has collisions enabled.
Args:
suffix (str): suffix used for the name of the face so it can be retrieved from the scene. The name of the
face has the form "{collision_box_name}_{suffix}"
translation (np.ndarray): translation of the center of the face (wall) from the center of the Collision
Box, in stage units. Shape is (3, ).
size (np.ndarray): dimensions of the face (wall) in the X, Y, and Z directions. Dimensions are in stage
units. Shape is (3, ).
"""
face_name = f"{self.name}_{suffix}"
face_path = f"{self.prim_path}/{face_name}"
face_cuboid = FixedCuboid(
prim_path=face_path, # The prim path of the cube in the USD stage
name=face_name, # The unique name used to retrieve the object from the scene later on
            translation=translation, # Using the current stage units, which are meters by default.
            scale=size, # most arguments accept numpy arrays.
size=1.0,
visible=self.visible,
)
self.world.scene.add(face_cuboid)
def _create_collision_box(self):
"""Create a Collision Box. The Collision Box consists of 6 faces/walls forming a static box-like volume. Each
wall of the Collision Box has collisions enabled.
"""
dx = self.width / 2.0 + self.thickness / 2.0
dy = self.height / 2.0 + self.thickness / 2.0
dz = self.depth / 2.0 + self.thickness / 2.0
floor_center = np.array([0, 0, -dz])
floor_dimensions = np.array([self.width, self.height, self.thickness])
self._create_face("floor", floor_center, floor_dimensions)
ceiling_center = np.array([0, 0, +dz])
ceiling_dimensions = np.array([self.width, self.height, self.thickness])
self._create_face("ceiling", ceiling_center, ceiling_dimensions)
left_wall_center = np.array([dx, 0, 0])
left_wall_dimensions = np.array([self.thickness, self.height, self.depth])
self._create_face("left_wall", left_wall_center, left_wall_dimensions)
right_wall_center = np.array([-dx, 0, 0])
right_wall_dimensions = np.array([self.thickness, self.height, self.depth])
self._create_face("right_wall", right_wall_center, right_wall_dimensions)
front_wall_center = np.array([0, dy, 0])
front_wall_dimensions = np.array([self.width, self.thickness, self.depth])
self._create_face("front_wall", front_wall_center, front_wall_dimensions)
back_wall_center = np.array([0, -dy, 0])
back_wall_dimensions = np.array([self.width, self.thickness, self.depth])
self._create_face("back_wall", back_wall_center, back_wall_dimensions)
def get_random_local_translation(self):
"""Get a random translation within the Collision Box in local coordinates. Translations are within the
volumetric region contained by the inner walls of the Collision Box. The local coordinate frame is considered
to be the frame of the prim at self.prim_path (center of the Collision Box).
Returns:
np.ndarray: random translation within the Collision Box in the local frame of the Collision Box. Shape is
(3, ).
"""
dim_fractions = np.random.rand(3)
tx = dim_fractions[0] * self.width - self.width / 2.0
ty = dim_fractions[1] * self.height - self.height / 2.0
tz = dim_fractions[2] * self.depth - self.depth / 2.0
translation = np.array([tx, ty, tz])
return translation
def get_random_position(self):
"""Get a random position within the Collision Box in world coordinates. Positions are within the volumetric
region contained by the inner walls of the Collision Box.
Returns:
np.ndarray: random position within the Collision Box in the world frame. Shape is (3, ).
"""
box_prim = self.world.stage.GetPrimAtPath(self.prim_path)
box_transform_matrix = UsdGeom.Xformable(box_prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
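        # USD Gf matrices use the row-vector convention (p' = p @ M); transposing yields the
        # column-vector form used below (p' = M @ p).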
box_to_world = np.transpose(box_transform_matrix)
random_local_translation = self.get_random_local_translation()
random_local_translation_homogenous = np.pad(random_local_translation, ((0, 1)), constant_values=1.0)
position_homogenous = box_to_world @ random_local_translation_homogenous
position = position_homogenous[:-1]
return position
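# Example usage (a minimal sketch; assumes a running World instance):
#   box = CollisionBox("/World/collision_box", "collision_box", width=2.0, height=2.0, depth=1.0)
#   World.instance().scene.add(box)
#   p = box.get_random_position()  # world-frame point inside the box interior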
| 8,637 | Python | 46.988889 | 120 | 0.618733 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/__init__.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .collision_box import CollisionBox
from .dynamic_asset_set import DynamicAssetSet
from .dynamic_object import DynamicObject
from .dynamic_object_set import DynamicObjectSet
from .dynamic_shape_set import DynamicShapeSet
from .flying_distractors import FlyingDistractors
| 706 | Python | 46.13333 | 76 | 0.830028 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/flying_distractors.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import itertools
from omni.isaac.core import World
from .dynamic_object_set import DynamicObjectSet
from .dynamic_shape_set import DynamicShapeSet
class FlyingDistractors:
"""Container class to hold and manage both dynamic shape sets and dynamic object sets simultaneously. This class
provides an API to keep assets in each asset set in motion within their respective collision boxes, to show/hide
the assets of all the asset sets managed by this class, and to allow various properties of the assets of all the
asset sets managed by this class to be randomized.
"""
def __init__(self):
self.world = World.instance()
self.shape_sets = []
self.object_sets = []
def add(self, asset_set):
"""Add an asset set to be managed by this FlyingDistractors object.
Args:
asset_set (Union[DynamicShapeSet, DynamicObjectSet]): the asset set to add.
Raises:
Exception: if asset_set is neither a DynamicShapeSet nor a DynamicObjectSet.
"""
if isinstance(asset_set, DynamicShapeSet):
self.shape_sets.append(asset_set)
elif isinstance(asset_set, DynamicObjectSet):
self.object_sets.append(asset_set)
else:
raise Exception("The asset set provided is not of type DynamicShapeSet or DynamicObjectSet")
def set_visible(self, visible):
"""Sets the visibility of all assets contained in the managed asset sets.
Args:
visible (bool): flag to set the visibility of all assets contained in the managed asset sets.
"""
for asset_set in itertools.chain(self.shape_sets, self.object_sets):
for asset_name in asset_set.asset_names:
object_xform = self.world.scene.get_object(asset_name)
object_xform.set_visibility(visible=visible)
def reset_asset_positions(self):
"""Reset the positions of all assets contained in the managed asset sets to be within its corresponding
collision box.
"""
for asset_set in itertools.chain(self.shape_sets, self.object_sets):
asset_set.reset_position()
def apply_force_to_assets(self, force_limit):
"""Apply random forces to all assets contained in the managed asset sets.
Args:
force_limit (float): maximum force component to apply.
"""
for asset_set in itertools.chain(self.shape_sets, self.object_sets):
asset_set.apply_force_to_assets(force_limit)
def randomize_asset_glass_color(self):
"""Randomize color of assets in the managed asset sets with glass material applied."""
for asset_set in itertools.chain(self.shape_sets, self.object_sets):
asset_set.randomize_glass_color()
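# Example usage (a minimal sketch; `mesh_shape_set` is an illustrative DynamicShapeSet):
#   distractors = FlyingDistractors()
#   distractors.add(mesh_shape_set)
#   distractors.set_visible(True)
#   distractors.apply_force_to_assets(30)  # keep all managed assets in motion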
| 3,231 | Python | 38.901234 | 116 | 0.686784 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/dynamic_asset_set.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import itertools
import math
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
from omni.isaac.core import World
from .collision_box import CollisionBox
class DynamicAssetSet(ABC):
"""Container class to hold and manage dynamic assets, providing an API to keep assets in motion within a collision
box, and to allow various properties of the assets to be randomized.
Args:
set_prim_path (str): prim path of the parent Prim to create, which contains all the assets in the asset set as
its children.
set_name (str): name of the parent prim in the scene.
asset_prim_path_base_prefix (str): prefix of what the assets are called in the stage (prim path base name).
asset_name_prefix (str): prefix of the assets' names in the scene.
num_assets (int): number of assets in the asset set.
collision_box (CollisionBox): collision box in which to place assets, and allow assets to move within.
scale (Optional[np.ndarray], optional): local scale to be applied to each asset's dimensions. Shape is (3, ).
Defaults to None, which means left unchanged.
mass (Optional[float], optional): mass of each asset in kg. Defaults to None.
        fraction_glass (float, optional): fraction of assets for which glass material should be applied. Defaults to 0.0.
"""
def __init__(
self,
set_prim_path: str,
set_name: str,
asset_prim_path_base_prefix: str,
asset_name_prefix: str,
num_assets: int,
collision_box: CollisionBox,
scale: Optional[np.ndarray] = None,
mass: Optional[float] = None,
fraction_glass: float = 0.0,
):
self.world = World.instance()
self.set_prim_path = set_prim_path
self.set_name = set_name
self.asset_prim_path_base_prefix = asset_prim_path_base_prefix
self.asset_name_prefix = asset_name_prefix
self.num_assets = num_assets
self.collision_box = collision_box
self.scale = scale
self.mass = mass
self.fraction_glass = fraction_glass
self.asset_count = 0
self.asset_names = []
self.glass_asset_paths = []
self.nonglass_asset_paths = []
self.glass_assets = []
self.nonglass_assets = []
self.glass_mats = []
def _create_random_dynamic_asset_set(self):
"""Create self.num_assets assets and add them to the dynamic asset set."""
self.world.stage.DefinePrim(self.set_prim_path, "Xform")
num_glass = math.floor(self.num_assets * self.fraction_glass)
for i in range(self.num_assets):
if i < num_glass:
self._create_random_dynamic_asset(glass=True)
else:
self._create_random_dynamic_asset()
@abstractmethod
def _create_random_dynamic_asset(self, glass=False):
pass
def apply_force_to_assets(self, force_limit):
"""Apply a force in a random direction to each asset in the dynamic asset set.
Args:
force_limit (float): maximum force component to apply.
"""
for path in itertools.chain(self.glass_asset_paths, self.nonglass_asset_paths):
# X, Y, and Z components of the force are constrained to be within [-force_limit, force_limit]
random_force = np.random.uniform(-force_limit, force_limit, 3).tolist()
handle = self.world.dc_interface.get_rigid_body(path)
self.world.dc_interface.apply_body_force(handle, random_force, (0, 0, 0), False)
def randomize_glass_color(self):
"""Randomize the color of the assets in the dynamic asset set with a glass material applied."""
        for asset in self.glass_assets:
glass_mat = asset.get_applied_visual_material()
glass_mat.set_color(np.random.rand(3))
def reset_position(self):
"""Reset the positions of assets in the dynamic asset set. The positions at which to place assets are randomly
chosen such that they are within the collision box.
"""
for asset in itertools.chain(self.glass_assets, self.nonglass_assets):
position = self.collision_box.get_random_position()
asset.set_world_pose(position)
| 4,798 | Python | 39.669491 | 118 | 0.651105 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/flying_distractors/dynamic_object.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Optional
import numpy as np
from omni.isaac.core.prims.geometry_prim import GeometryPrim
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import UsdGeom
class DynamicObject(RigidPrim, GeometryPrim):
"""Creates and adds a prim to stage from USD reference path, and wraps the prim with RigidPrim and GeometryPrim to
provide access to APIs for rigid body attributes, physics materials and collisions. Please note that this class
assumes the object has only a single mesh prim defining its geometry.
Args:
usd_path (str): USD reference path the Prim refers to.
prim_path (str): prim path of the Prim to encapsulate or create.
mesh_path (str): prim path of the underlying mesh Prim.
name (str, optional): shortname to be used as a key by Scene class. Note: needs to be unique if the object is
added to the Scene. Defaults to "dynamic_object".
position (Optional[np.ndarray], optional): position in the world frame of the prim. Shape is (3, ). Defaults to
None, which means left unchanged.
translation (Optional[np.ndarray], optional): translation in the local frame of the prim (with respect to its
parent prim). Shape is (3, ). Defaults to None, which means left
unchanged.
orientation (Optional[np.ndarray], optional): quaternion orientation in the world/local frame of the prim
(depends if translation or position is specified). Quaternion is
scalar-first (w, x, y, z). Shape is (4, ). Defaults to None, which
means left unchanged.
scale (Optional[np.ndarray], optional): local scale to be applied to the prim's dimensions. Shape is (3, ).
Defaults to None, which means left unchanged.
visible (bool, optional): set to false for an invisible prim in the stage while rendering. Defaults to True.
mass (Optional[float], optional): mass in kg. Defaults to None.
linear_velocity (Optional[np.ndarray], optional): linear velocity in the world frame. Defaults to None.
angular_velocity (Optional[np.ndarray], optional): angular velocity in the world frame. Defaults to None.
"""
def __init__(
self,
usd_path: str,
prim_path: str,
mesh_path: str,
name: str = "dynamic_object",
position: Optional[np.ndarray] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.ndarray] = None,
visible: bool = True,
mass: Optional[float] = None,
linear_velocity: Optional[np.ndarray] = None,
angular_velocity: Optional[np.ndarray] = None,
) -> None:
if is_prim_path_valid(mesh_path):
prim = get_prim_at_path(mesh_path)
if not prim.IsA(UsdGeom.Mesh):
raise Exception("The prim at path {} cannot be parsed as a Mesh object".format(mesh_path))
self.usd_path = usd_path
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
GeometryPrim.__init__(
self,
prim_path=mesh_path,
name=name,
translation=translation,
orientation=orientation,
visible=visible,
collision=True,
)
self.set_collision_approximation("convexHull")
RigidPrim.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
mass=mass,
linear_velocity=linear_velocity,
angular_velocity=angular_velocity,
)
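# Example usage (a minimal sketch; the USD path and prim paths are illustrative):
#   part = DynamicObject(
#       usd_path="/path/to/003_cracker_box.usd",
#       prim_path="/World/_03_cracker_box_0",
#       mesh_path="/World/_03_cracker_box_0/_03_cracker_box",
#       name="train_part_0",
#       mass=1.0,
#   )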
| 4,662 | Python | 47.072164 | 120 | 0.614972 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/config/dope_config.yaml | ---
# Default rendering parameters
CONFIG:
renderer: RayTracedLighting
headless: false
width: 512
height: 512
# Index of part in array of classes in PoseCNN training
CLASS_NAME_TO_INDEX:
003_cracker_box: 1
035_power_drill: 2
# prim_type is determined by the usd file.
# To determine, open the usd file in Isaac Sim and see the prim path. If you load it in /World, the path will be /World/<prim_type>
OBJECTS_TO_GENERATE:
- { part_name: 003_cracker_box, num: 1, prim_type: _03_cracker_box }
- { part_name: 035_power_drill, num: 1, prim_type: _35_power_drill }
# Maximum force component to apply to objects to keep them in motion
FORCE_RANGE: 30
# Camera Intrinsics
WIDTH: 512
HEIGHT: 512
F_X: 768.1605834960938
F_Y: 768.1605834960938
C_X: 256
C_Y: 256
# Default Camera Horizontal Aperture
HORIZONTAL_APERTURE: 20.955
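# The generation script derives the USD focal length from these intrinsics as
# HORIZONTAL_APERTURE * F_X / WIDTH (see _setup_camera in the main script).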
# Number of sphere lights added to the scene
NUM_LIGHTS: 6
# Minimum and maximum distances of objects away from the camera (along the optical axis)
MIN_DISTANCE: 0.4
MAX_DISTANCE: 1.4
# Rotation of camera rig with respect to world frame, expressed as XYZ euler angles
CAMERA_RIG_ROTATION:
- 0
- 0
- 0
# Rotation of camera with respect to camera rig, expressed as XYZ euler angles. Please note that in this example, we
# define poses with respect to the camera rig instead of the camera prim. By using the rig's frame as a surrogate for
# the camera's frame, we effectively change the coordinate system of the camera. When
# CAMERA_RIG_ROTATION = np.array([0, 0, 0]) and CAMERA_ROTATION = np.array([0, 0, 0]), this corresponds to the default
# Isaac-Sim camera coordinate system of -z out the face of the camera, +x to the right, and +y up. When
# CAMERA_RIG_ROTATION = np.array([0, 0, 0]) and CAMERA_ROTATION = np.array([180, 0, 0]), this corresponds to
# the YCB Video Dataset camera coordinate system of +z out the face of the camera, +x to the right, and +y down.
CAMERA_ROTATION:
- 180
- 0
- 0
# Minimum and maximum XYZ euler angles for the part being trained on to be rotated, with respect to the camera rig
MIN_ROTATION_RANGE:
- -180
- -90
- -180
# Minimum and maximum XYZ euler angles for the part being trained on to be rotated, with respect to the camera rig
MAX_ROTATION_RANGE:
- 180
- 90
- 180
# How close the center of the part being trained on is allowed to be to the edge of the screen
FRACTION_TO_SCREEN_EDGE: 0.9
# MESH and DOME datasets
SHAPE_SCALE:
- 0.05
- 0.05
- 0.05
SHAPE_MASS: 1
OBJECT_SCALE:
- 1
- 1
- 1
OBJECT_MASS: 1
# MESH dataset
NUM_MESH_SHAPES: 500
NUM_MESH_OBJECTS: 200
MESH_FRACTION_GLASS: 0.15
MESH_FILENAMES:
- 002_master_chef_can
- 004_sugar_box
- 005_tomato_soup_can
- 006_mustard_bottle
- 007_tuna_fish_can
- 008_pudding_box
- 009_gelatin_box
- 010_potted_meat_can
- 011_banana
- 019_pitcher_base
- 021_bleach_cleanser
- 024_bowl
- 025_mug
- 035_power_drill
- 036_wood_block
- 037_scissors
- 040_large_marker
- 051_large_clamp
- 052_extra_large_clamp
- 061_foam_brick
# DOME dataset
NUM_DOME_SHAPES: 30
NUM_DOME_OBJECTS: 20
DOME_FRACTION_GLASS: 0.2
DOME_TEXTURES:
- Clear/evening_road_01_4k
- Clear/kloppenheim_02_4k
- Clear/mealie_road_4k
- Clear/noon_grass_4k
- Clear/qwantani_4k
- Clear/signal_hill_sunrise_4k
- Clear/sunflowers_4k
- Clear/syferfontein_18d_clear_4k
- Clear/venice_sunset_4k
- Clear/white_cliff_top_4k
- Cloudy/abandoned_parking_4k
- Cloudy/champagne_castle_1_4k
- Cloudy/evening_road_01_4k
- Cloudy/kloofendal_48d_partly_cloudy_4k
- Cloudy/lakeside_4k
- Cloudy/sunflowers_4k
- Cloudy/table_mountain_1_4k
- Evening/evening_road_01_4k
- Indoor/adams_place_bridge_4k
- Indoor/autoshop_01_4k
- Indoor/bathroom_4k
- Indoor/carpentry_shop_01_4k
- Indoor/en_suite_4k
- Indoor/entrance_hall_4k
- Indoor/hospital_room_4k
- Indoor/hotel_room_4k
- Indoor/lebombo_4k
- Indoor/old_bus_depot_4k
- Indoor/small_empty_house_4k
- Indoor/studio_small_04_4k
- Indoor/surgery_4k
- Indoor/vulture_hide_4k
- Indoor/wooden_lounge_4k
- Night/kloppenheim_02_4k
- Night/moonlit_golf_4k
- Storm/approaching_storm_4k
| 3,989 | YAML | 25.25 | 131 | 0.740035 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/config/ycb_config.yaml | ---
# Default rendering parameters
CONFIG:
renderer: RayTracedLighting
headless: false
width: 1280
height: 720
# Index of part in array of classes in PoseCNN training
CLASS_NAME_TO_INDEX:
_03_cracker_box: 1
_35_power_drill: 2
# prim_type is determined by the usd file.
# To determine, open the usd file in Isaac Sim and see the prim path. If you load it in /World, the path will be /World/<prim_type>
OBJECTS_TO_GENERATE:
- { part_name: 003_cracker_box, num: 1, prim_type: _03_cracker_box }
- { part_name: 035_power_drill, num: 1, prim_type: _35_power_drill }
# Maximum force component to apply to objects to keep them in motion
FORCE_RANGE: 30
# Camera Intrinsics
WIDTH: 1280
HEIGHT: 720
F_X: 665.80768
F_Y: 665.80754
C_X: 637.642
C_Y: 367.56
# Default Camera Horizontal Aperture
HORIZONTAL_APERTURE: 20.955
# Number of sphere lights added to the scene
NUM_LIGHTS: 6
# Minimum and maximum distances of objects away from the camera (along the optical axis)
MIN_DISTANCE: 0.2
MAX_DISTANCE: 1.2
# Rotation of camera rig with respect to world frame, expressed as XYZ euler angles
CAMERA_RIG_ROTATION:
- 0
- 0
- 0
# Rotation of camera with respect to camera rig, expressed as XYZ euler angles. Please note that in this example, we
# define poses with respect to the camera rig instead of the camera prim. By using the rig's frame as a surrogate for
# the camera's frame, we effectively change the coordinate system of the camera. When
# CAMERA_RIG_ROTATION = np.array([0, 0, 0]) and CAMERA_ROTATION = np.array([0, 0, 0]), this corresponds to the default
# Isaac-Sim camera coordinate system of -z out the face of the camera, +x to the right, and +y up. When
# CAMERA_RIG_ROTATION = np.array([0, 0, 0]) and CAMERA_ROTATION = np.array([180, 0, 0]), this corresponds to
# the YCB Video Dataset camera coordinate system of +z out the face of the camera, +x to the right, and +y down.
CAMERA_ROTATION:
- 180
- 0
- 0
# Minimum and maximum XYZ euler angles for the part being trained on to be rotated, with respect to the camera rig
MIN_ROTATION_RANGE:
- -180
- -90
- -180
# Minimum and maximum XYZ euler angles for the part being trained on to be rotated, with respect to the camera rig
MAX_ROTATION_RANGE:
- 180
- 90
- 180
# How close the center of the part being trained on is allowed to be to the edge of the screen
FRACTION_TO_SCREEN_EDGE: 0.9
# MESH and DOME datasets
SHAPE_SCALE:
- 0.05
- 0.05
- 0.05
SHAPE_MASS: 1
OBJECT_SCALE:
- 1
- 1
- 1
OBJECT_MASS: 1
# MESH dataset
NUM_MESH_SHAPES: 500
NUM_MESH_OBJECTS: 200
MESH_FRACTION_GLASS: 0.15
MESH_FILENAMES:
- 002_master_chef_can
- 004_sugar_box
- 005_tomato_soup_can
- 006_mustard_bottle
- 007_tuna_fish_can
- 008_pudding_box
- 009_gelatin_box
- 010_potted_meat_can
- 011_banana
- 019_pitcher_base
- 021_bleach_cleanser
- 024_bowl
- 025_mug
- 035_power_drill
- 036_wood_block
- 037_scissors
- 040_large_marker
- 051_large_clamp
- 052_extra_large_clamp
- 061_foam_brick
# DOME dataset
NUM_DOME_SHAPES: 30
NUM_DOME_OBJECTS: 20
DOME_FRACTION_GLASS: 0.2
DOME_TEXTURES:
- Clear/evening_road_01_4k
- Clear/kloppenheim_02_4k
- Clear/mealie_road_4k
- Clear/noon_grass_4k
- Clear/qwantani_4k
- Clear/signal_hill_sunrise_4k
- Clear/sunflowers_4k
- Clear/syferfontein_18d_clear_4k
- Clear/venice_sunset_4k
- Clear/white_cliff_top_4k
- Cloudy/abandoned_parking_4k
- Cloudy/champagne_castle_1_4k
- Cloudy/evening_road_01_4k
- Cloudy/kloofendal_48d_partly_cloudy_4k
- Cloudy/lakeside_4k
- Cloudy/sunflowers_4k
- Cloudy/table_mountain_1_4k
- Evening/evening_road_01_4k
- Indoor/adams_place_bridge_4k
- Indoor/autoshop_01_4k
- Indoor/bathroom_4k
- Indoor/carpentry_shop_01_4k
- Indoor/en_suite_4k
- Indoor/entrance_hall_4k
- Indoor/hospital_room_4k
- Indoor/hotel_room_4k
- Indoor/lebombo_4k
- Indoor/old_bus_depot_4k
- Indoor/small_empty_house_4k
- Indoor/studio_small_04_4k
- Indoor/surgery_4k
- Indoor/vulture_hide_4k
- Indoor/wooden_lounge_4k
- Night/kloppenheim_02_4k
- Night/moonlit_golf_4k
- Storm/approaching_storm_4k
| 3,984 | YAML | 25.217105 | 131 | 0.738203 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/tests/test_utils.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import json
import os
import shutil
import numpy as np
import scipy.io as sio
def run_pose_generation_test(writer, output_folder, test_folder):
if writer.lower() == "dope":
run_dope_test(test_folder, output_folder)
elif writer.lower() == "ycbvideo":
run_ycbvideo_tests(test_folder, output_folder)
else:
raise Exception(f"No tests exist for the selected writer: {writer}")
# Cleans up output directory so tests are not reading output files from previous run
def clean_output_dir(output_folder):
if os.path.isdir(output_folder):
shutil.rmtree(output_folder, ignore_errors=True)
# Checks if distance between points is within threshold
def within_threshold(p1, p2, threshold=20):
return np.linalg.norm(np.array(p1) - np.array(p2)) < threshold
def run_dope_test(test_folder, output_folder):
groundtruth_path = os.path.join(test_folder, "dope/000000_groundtruth.json")
    # Look at the output for the 2nd frame because the 1st frame is sometimes not generated properly
output_path = os.path.join(output_folder, "000001.json")
with open(groundtruth_path) as gt_f:
gt_data = json.load(gt_f)
with open(output_path) as op_f:
op_data = json.load(op_f)
gt_objects, op_objects = gt_data["objects"], op_data["objects"]
# Does not work with multiple objects. There should be only one object in testing mode.
if not (len(gt_objects) == 1 and len(op_objects) == 1):
raise Exception(
f"Mismatch in .json files between number of objects. gt_objects: {len(gt_objects)}, op_objects: {len(op_objects)}"
)
for gt_obj, op_obj in zip(gt_objects, op_objects):
if not within_threshold(gt_obj["location"], op_obj["location"], 10):
raise Exception(
f"Distance between groundtruth location and output location exceeds threshold. (location) {gt_pt} and {op_pt}"
)
for gt_pt, op_pt in zip(gt_obj["projected_cuboid"], op_obj["projected_cuboid"]):
if not within_threshold(gt_pt, op_pt, 20.0):
raise Exception(
f"Distance between groundtruth points and output points exceeds threshold. (projected_cuboid) {gt_pt} and {op_pt}"
)
print("Tests pass for DOPE Writer.")
def run_ycbvideo_tests(test_folder, output_folder, threshold=10):
groundtruth_bbox_path = os.path.join(test_folder, "ycbvideo/000000-box_groundtruth.txt")
groundtruth_meta_path = os.path.join(test_folder, "ycbvideo/000000-meta_groundtruth.mat")
    # Look at the output for the 2nd frame because the 1st frame is sometimes not generated properly
output_bbox_path = os.path.join(output_folder, "data/YCB_Video/data/0000", "000001-box.txt")
output_meta_path = os.path.join(output_folder, "data/YCB_Video/data/0000", "000001-meta.mat")
# Compare BBox
gt_bb = open(groundtruth_bbox_path, "r")
op_bb = open(output_bbox_path, "r")
for l1, l2 in zip(gt_bb, op_bb):
for gt_point, bb_point in zip(l1.strip().split()[1:5], l2.strip().split()[1:5]):
if not within_threshold([int(gt_point)], [int(bb_point)], 10):
raise Exception(f"Mismatch between files {groundtruth_bbox_path} and {output_bbox_path}")
gt_bb.close()
op_bb.close()
# Compare Meta File
gt_meta = sio.loadmat(groundtruth_meta_path)
op_meta = sio.loadmat(output_meta_path)
keys_to_compare = ["poses", "intrinsic_matrix", "center"]
print(f"gt_meta:\n{gt_meta}")
print(f"op_meta:\n{op_meta}")
for key in keys_to_compare:
gt = gt_meta[key].flatten()
op = op_meta[key].flatten()
if not len(gt) == len(op):
raise Exception(f"Mismatch between length of pose in {groundtruth_meta_path} and {output_meta_path}")
for i in range(len(gt)):
if abs(gt[i] - op[i]) > threshold:
raise Exception(
f"Mismatch between {key} values in groundtruth and output at index {i}. Groundtruth: {gt[i]} Output: {op[i]}"
)
print(f"{key} matches between groundtruth and output.")
print("Tests pass for YCBVideo Writer.")
| 4,599 | Python | 38.316239 | 134 | 0.661666 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/tests/ycbvideo/test_ycb_config.yaml | # DO NOT MODIFY OR TESTS WILL FAIL
---
CONFIG:
renderer: RayTracedLighting
headless: false
width: 1280
height: 720
CLASS_NAME_TO_INDEX:
_03_cracker_box: 1
OBJECTS_TO_GENERATE:
- { part_name: 003_cracker_box, num: 1, prim_type: _03_cracker_box }
FORCE_RANGE: 30
WIDTH: 1280
HEIGHT: 720
F_X: 665.80768
F_Y: 665.80754
C_X: 637.642
C_Y: 367.56
HORIZONTAL_APERTURE: 20.955
NUM_LIGHTS: 6
MIN_DISTANCE: 1.0
MAX_DISTANCE: 1.0
CAMERA_RIG_ROTATION:
- 0
- 0
- 0
CAMERA_ROTATION:
- 180
- 0
- 0
MIN_ROTATION_RANGE:
- 100
- 100
- 100
MAX_ROTATION_RANGE:
- 100
- 100
- 100
FRACTION_TO_SCREEN_EDGE: 0.0
SHAPE_SCALE:
- 0.05
- 0.05
- 0.05
SHAPE_MASS: 1
OBJECT_SCALE:
- 1
- 1
- 1
OBJECT_MASS: 1
NUM_MESH_SHAPES: 0
NUM_MESH_OBJECTS: 0
MESH_FRACTION_GLASS: 0.15
NUM_DOME_SHAPES: 0
NUM_DOME_OBJECTS: 0
DOME_FRACTION_GLASS: 0.2
DOME_TEXTURES: []
MESH_FILENAMES: []
| 853 | YAML | 13.724138 | 68 | 0.691676 |
2820207922/isaac_ws/standalone_examples/replicator/offline_pose_generation/tests/dope/test_dope_config.yaml | # DO NOT MODIFY OR TESTS WILL FAIL
---
CONFIG:
renderer: RayTracedLighting
headless: false
width: 512
height: 512
CLASS_NAME_TO_INDEX:
_03_cracker_box: 1
OBJECTS_TO_GENERATE:
- { part_name: 003_cracker_box, num: 1, prim_type: _03_cracker_box }
FORCE_RANGE: 30
WIDTH: 512
HEIGHT: 512
F_X: 768.1605834960938
F_Y: 768.1605834960938
C_X: 256
C_Y: 256
HORIZONTAL_APERTURE: 20.955
NUM_LIGHTS: 6
MIN_DISTANCE: 1.0
MAX_DISTANCE: 1.0
CAMERA_RIG_ROTATION:
- 0
- 0
- 0
CAMERA_ROTATION:
- 180
- 0
- 0
MIN_ROTATION_RANGE:
- 100
- 100
- 100
MAX_ROTATION_RANGE:
- 100
- 100
- 100
FRACTION_TO_SCREEN_EDGE: 0.0
SHAPE_SCALE:
- 0.05
- 0.05
- 0.05
SHAPE_MASS: 1
OBJECT_SCALE:
- 1
- 1
- 1
OBJECT_MASS: 1
NUM_MESH_SHAPES: 0
NUM_MESH_OBJECTS: 0
MESH_FRACTION_GLASS: 0.15
NUM_DOME_SHAPES: 0
NUM_DOME_OBJECTS: 0
DOME_FRACTION_GLASS: 0.2
DOME_TEXTURES: []
MESH_FILENAMES: []
| 860 | YAML | 13.844827 | 68 | 0.696512 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.gym/cartpole_train.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# import stable baselines
import carb
try:
from stable_baselines3 import PPO
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3 "
)
exit()
try:
import tensorboard
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install tensorboard in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install tensorboard"
)
exit()
# create isaac environment
from omni.isaac.gym.vec_env import VecEnvBase
env = VecEnvBase(headless=True)
# create task and register task
from cartpole_task import CartpoleTask
task = CartpoleTask(name="Cartpole")
env.set_task(task, backend="torch")
# create agent from stable baselines
model = PPO(
"MlpPolicy",
env,
n_steps=1000,
batch_size=1000,
n_epochs=20,
learning_rate=0.001,
gamma=0.99,
device="cuda:0",
ent_coef=0.0,
vf_coef=0.5,
max_grad_norm=1.0,
verbose=1,
tensorboard_log="./cartpole_tensorboard",
)
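# Training progress (rewards, losses) can be monitored with: tensorboard --logdir ./cartpole_tensorboard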
model.learn(total_timesteps=100000)
model.save("ppo_cartpole")
env.close()
| 1,721 | Python | 26.774193 | 190 | 0.732714 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.gym/cartpole_play.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# import stable baselines
import carb
try:
from stable_baselines3 import PPO
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3 "
)
exit()
# create isaac environment
from omni.isaac.gym.vec_env import VecEnvBase
env = VecEnvBase(headless=False)
# create task and register task
from cartpole_task import CartpoleTask
task = CartpoleTask(name="Cartpole")
env.set_task(task, backend="torch")
# Run inference on the trained policy
model = PPO.load("ppo_cartpole")
env._world.reset()
obs, _ = env.reset()
while env._simulation_app.is_running():
action, _states = model.predict(obs)
obs, rewards, terminated, truncated, info = env.step(action)
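    # env.step follows the gymnasium-style 5-tuple API (obs, reward, terminated, truncated, info);
    # episode resets are handled inside the task via is_done()/reset(), so no explicit reset is needed here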
env.close()
| 1,312 | Python | 30.261904 | 190 | 0.759909 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.gym/cartpole_task.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import numpy as np
import omni.kit
import torch
from gymnasium import spaces
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import create_prim
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.viewports import set_camera_view
class CartpoleTask(BaseTask):
def __init__(self, name, offset=None) -> None:
# task-specific parameters
self._cartpole_position = [0.0, 0.0, 2.0]
self._reset_dist = 3.0
self._max_push_effort = 400.0
# values used for defining RL buffers
self._num_observations = 4
self._num_actions = 1
self._device = "cpu"
self.num_envs = 1
# a few class buffers to store RL-related states
self.obs = torch.zeros((self.num_envs, self._num_observations))
self.resets = torch.zeros((self.num_envs, 1))
# set the action and observation space for RL
self.action_space = spaces.Box(
np.ones(self._num_actions, dtype=np.float32) * -1.0, np.ones(self._num_actions, dtype=np.float32) * 1.0
)
self.observation_space = spaces.Box(
            np.ones(self._num_observations, dtype=np.float32) * -np.inf,
            np.ones(self._num_observations, dtype=np.float32) * np.inf,
)
# trigger __init__ of parent class
BaseTask.__init__(self, name=name, offset=offset)
def set_up_scene(self, scene) -> None:
# retrieve file path for the Cartpole USD file
assets_root_path = get_assets_root_path()
usd_path = assets_root_path + "/Isaac/Robots/Cartpole/cartpole.usd"
# add the Cartpole USD to our stage
create_prim(prim_path="/World/Cartpole", prim_type="Xform", position=self._cartpole_position)
add_reference_to_stage(usd_path, "/World/Cartpole")
# Get stage handle
stage = omni.usd.get_context().get_stage()
if not stage:
print("Stage could not be used.")
else:
for prim in stage.Traverse():
prim_path = prim.GetPath()
prim_type = prim.GetTypeName()
print(f"prim_path: {prim_path}, prim_type: {prim_type}")
# create an ArticulationView wrapper for our cartpole - this can be extended towards accessing multiple cartpoles
self._cartpoles = ArticulationView(prim_paths_expr="/World/Cartpole*", name="cartpole_view")
# add Cartpole ArticulationView and ground plane to the Scene
scene.add(self._cartpoles)
scene.add_default_ground_plane()
# set default camera viewport position and target
self.set_initial_camera_params()
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
set_camera_view(eye=camera_position, target=camera_target, camera_prim_path="/OmniverseKit_Persp")
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
# randomize all envs
indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device)
self.reset(indices)
def reset(self, env_ids=None):
if env_ids is None:
env_ids = torch.arange(self.num_envs, device=self._device)
num_resets = len(env_ids)
# randomize DOF positions
dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
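        # with these ranges the cart starts uniformly within +/- 1.0 m of center and the pole
        # within +/- 0.125*pi rad (22.5 degrees) of upright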
# randomize DOF velocities
dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._cartpoles.set_joint_positions(dof_pos, indices=indices)
self._cartpoles.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.resets[env_ids] = 0
def pre_physics_step(self, actions) -> None:
reset_env_ids = self.resets.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset(reset_env_ids)
actions = torch.tensor(actions)
forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device)
forces[:, self._cart_dof_idx] = self._max_push_effort * actions[0]
indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device)
self._cartpoles.set_joint_efforts(forces, indices=indices)
def get_observations(self):
dof_pos = self._cartpoles.get_joint_positions()
dof_vel = self._cartpoles.get_joint_velocities()
# collect pole and cart joint positions and velocities for observation
cart_pos = dof_pos[:, self._cart_dof_idx]
cart_vel = dof_vel[:, self._cart_dof_idx]
pole_pos = dof_pos[:, self._pole_dof_idx]
pole_vel = dof_vel[:, self._pole_dof_idx]
self.obs[:, 0] = cart_pos
self.obs[:, 1] = cart_vel
self.obs[:, 2] = pole_pos
self.obs[:, 3] = pole_vel
return self.obs
    def calculate_metrics(self) -> float:
cart_pos = self.obs[:, 0]
cart_vel = self.obs[:, 1]
pole_angle = self.obs[:, 2]
pole_vel = self.obs[:, 3]
# compute reward based on angle of pole and cart velocity
reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)
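        # e.g., an upright, stationary pole on a stationary cart (all observations zero) yields the maximum reward of 1.0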
# apply a penalty if cart is too far from center
reward = torch.where(torch.abs(cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
# apply a penalty if pole is too far from upright
reward = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
return reward.item()
    def is_done(self) -> bool:
cart_pos = self.obs[:, 0]
pole_pos = self.obs[:, 2]
# reset the robot if cart has reached reset_dist or pole is too far from upright
resets = torch.where(torch.abs(cart_pos) > self._reset_dist, 1, 0)
resets = torch.where(torch.abs(pole_pos) > math.pi / 2, 1, resets)
self.resets = resets
return resets.item()
| 7,209 | Python | 41.662722 | 121 | 0.636288 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.dofbot/pick_place.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.dofbot.controllers.pick_place_controller import PickPlaceController
from omni.isaac.dofbot.tasks import PickPlace
my_world = World(stage_units_in_meters=1.0)
my_task = PickPlace()
my_world.add_task(my_task)
my_world.reset()
task_params = my_task.get_params()
dofbot_name = task_params["robot_name"]["value"]
my_dofbot = my_world.scene.get_object(dofbot_name)
my_controller = PickPlaceController(
name="pick_place_controller", gripper=my_dofbot.gripper, robot_articulation=my_dofbot
)
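# PickPlaceController internally steps through a sequence of phases (roughly: move above the cube, descend,
# close the gripper, lift, move over the target, release); is_done() below reports when the final phase completes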
articulation_controller = my_dofbot.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=observations[task_params["cube_name"]["value"]]["position"],
placing_position=observations[task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[dofbot_name]["joint_positions"],
end_effector_offset=np.array([0, -0.06, 0]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,954 | Python | 38.897958 | 96 | 0.72262 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.dofbot/follow_target_with_rmpflow.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import World
from omni.isaac.dofbot.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.dofbot.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task")
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
dofbot_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_dofbot = my_world.scene.get_object(dofbot_name)
my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=my_dofbot)
articulation_controller = my_dofbot.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
target_end_effector_position=observations[target_name]["position"],
target_end_effector_orientation=observations[target_name]["orientation"],
)
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,764 | Python | 42.048779 | 98 | 0.747732 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.dofbot/follow_target_with_ik.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.dofbot import KinematicsSolver
from omni.isaac.dofbot.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.dofbot.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task")
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
dofbot_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_dofbot = my_world.scene.get_object(dofbot_name)
my_controller = KinematicsSolver(my_dofbot)
articulation_controller = my_dofbot.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
observations = my_world.get_observations()
# IK does not work well on dofbot with orientation targets
actions, succ = my_controller.compute_inverse_kinematics(target_position=observations[target_name]["position"])
# actions,succ = my_controller.compute_inverse_kinematics(target_position=observations[target_name]["position"],
# target_orientation=observations[target_name]["orientation"], orientation_tolerance = np.pi/2)
if succ:
articulation_controller.apply_action(actions)
else:
carb.log_warn("IK did not converge to a solution. No action is being taken.")
simulation_app.close()
| 2,079 | Python | 43.255318 | 120 | 0.746513 |
2820207922/isaac_ws/standalone_examples/api/omni.kit.asset_converter/asset_usd_converter.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import asyncio
import os
import omni
from omni.isaac.kit import SimulationApp
async def convert(in_file, out_file, load_materials=False):
    # This import causes conflicts when done at module scope
import omni.kit.asset_converter
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.ignore_materials = not load_materials
# converter_context.ignore_animation = False
# converter_context.ignore_cameras = True
# converter_context.single_mesh = True
# converter_context.smooth_normals = True
# converter_context.preview_surface = False
# converter_context.support_point_instancer = False
# converter_context.embed_mdl_in_usd = False
# converter_context.use_meter_as_world_unit = True
# converter_context.create_world_as_default_root_prim = False
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
def asset_convert(args):
supported_file_formats = ["stl", "obj", "fbx"]
for folder in args.folders:
local_asset_output = folder + "_converted"
result = omni.client.create_folder(f"{local_asset_output}")
for folder in args.folders:
print(f"\nConverting folder {folder}...")
(result, models) = omni.client.list(folder)
for i, entry in enumerate(models):
if i >= args.max_models:
print(f"max models ({args.max_models}) reached, exiting conversion")
break
model = str(entry.relative_path)
model_name = os.path.splitext(model)[0]
model_format = (os.path.splitext(model)[1])[1:]
# Supported input file formats
if model_format in supported_file_formats:
input_model_path = folder + "/" + model
converted_model_path = folder + "_converted/" + model_name + "_" + model_format + ".usd"
if not os.path.exists(converted_model_path):
status = asyncio.get_event_loop().run_until_complete(
convert(input_model_path, converted_model_path, True)
)
if not status:
print(f"ERROR Status is {status}")
print(f"---Added {converted_model_path}")
if __name__ == "__main__":
kit = SimulationApp()
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.kit.asset_converter")
parser = argparse.ArgumentParser("Convert OBJ/STL assets to USD")
parser.add_argument(
"--folders", type=str, nargs="+", default=None, help="List of folders to convert (space seperated)."
)
parser.add_argument(
"--max-models", type=int, default=50, help="If specified, convert up to `max-models` per folder."
)
parser.add_argument(
"--load-materials", action="store_true", help="If specified, materials will be loaded from meshes"
)
args, unknown_args = parser.parse_known_args()
if args.folders is not None:
# Ensure Omniverse Kit is launched via SimulationApp before asset_convert() is called
asset_convert(args)
else:
print(f"No folders specified via --folders argument, exiting")
# cleanup
kit.close()
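    # Example invocation (the folder path is illustrative):
    #   ./python.sh asset_usd_converter.py --folders /path/to/models --max-models 10 --load-materials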
| 4,049 | Python | 36.850467 | 108 | 0.652013 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cloner/clone_ants.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import sys
import carb
import numpy as np
from omni.isaac.cloner import GridCloner
from omni.isaac.core import World
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
# create initial robot
asset_path = assets_root_path + "/Isaac/Robots/Ant/ant.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Ants/Ant_0")
# create GridCloner instance
cloner = GridCloner(spacing=2)
# generate paths for clones
target_paths = cloner.generate_paths("/World/Ants/Ant", 4)
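# for 4 clones this returns ["/World/Ants/Ant_0", "/World/Ants/Ant_1", "/World/Ants/Ant_2", "/World/Ants/Ant_3"]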
# clone
position_offsets = np.array([[0, 0, 1]] * 4)
cloner.clone(
source_prim_path="/World/Ants/Ant_0",
prim_paths=target_paths,
position_offsets=position_offsets,
replicate_physics=True,
base_env_path="/World/Ants",
)
# create ArticulationView
ants = ArticulationView(prim_paths_expr="/World/Ants/.*/torso", name="ants_view")
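# the regular expression matches the "torso" articulation root of every cloned ant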
my_world.scene.add(ants)
my_world.reset()
for i in range(1000):
print(ants.get_world_poses())
my_world.step()
simulation_app.close()
| 1,909 | Python | 29.806451 | 81 | 0.756417 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kit/load_stage.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
import omni
from omni.isaac.kit import SimulationApp
# This sample loads a usd stage and starts simulation
CONFIG = {"width": 1280, "height": 720, "sync_loads": True, "headless": False, "renderer": "RayTracedLighting"}
# Set up command line arguments
parser = argparse.ArgumentParser("Usd Load sample")
parser.add_argument(
"--usd_path", type=str, help="Path to usd file, should be relative to your default assets folder", required=True
)
parser.add_argument("--headless", default=False, action="store_true", help="Run stage headless")
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
# Start the omniverse application
CONFIG["headless"] = args.headless
kit = SimulationApp(launch_config=CONFIG)
# Locate Isaac Sim assets folder to load sample
from omni.isaac.core.utils.nucleus import get_assets_root_path, is_file
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
kit.close()
sys.exit()
usd_path = assets_root_path + args.usd_path
# make sure the file exists before we try to open it
try:
result = is_file(usd_path)
except Exception:
result = False
if result:
omni.usd.get_context().open_stage(usd_path)
else:
carb.log_error(
f"the usd path {usd_path} could not be opened, please make sure that {args.usd_path} is a valid usd file in {assets_root_path}"
)
kit.close()
sys.exit()
# Wait two frames so that stage starts loading
kit.update()
kit.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
kit.update()
print("Loading Complete")
omni.timeline.get_timeline_interface().play()
# Run in test mode, exit after a fixed number of steps
if args.test is True:
for i in range(10):
# Run in realtime mode, we don't specify the step size
kit.update()
else:
while kit.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
omni.timeline.get_timeline_interface().stop()
kit.close()
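# Example invocation (the usd path below is illustrative and relative to the assets root):
#   ./python.sh load_stage.py --usd_path /Isaac/Environments/Simple_Room/simple_room.usd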
| 2,599 | Python | 31.5 | 135 | 0.727972 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kit/hello_world.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni
from omni.isaac.kit import SimulationApp
# The most basic usage for creating a simulation app
kit = SimulationApp()
for i in range(100):
kit.update()
omni.kit.app.get_app().print_and_log("Hello World!")
kit.close() # Cleanup application
| 690 | Python | 30.40909 | 76 | 0.781159 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kit/livestream.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
# This sample enables a livestream server to connect to when running headless
CONFIG = {
"width": 1280,
"height": 720,
"window_width": 1920,
"window_height": 1080,
"headless": True,
"renderer": "RayTracedLighting",
"display_options": 3286, # Set display options to show default grid
}
# Start the omniverse application
kit = SimulationApp(launch_config=CONFIG)
from omni.isaac.core.utils.extensions import enable_extension
# Default Livestream settings
kit.set_setting("/app/window/drawMouse", True)
kit.set_setting("/app/livestream/proto", "ws")
kit.set_setting("/app/livestream/websocket/framerate_limit", 120)
kit.set_setting("/ngx/enabled", False)
# Note: Only one livestream extension can be enabled at a time
# Enable Native Livestream extension
# Default App: Streaming Client from the Omniverse Launcher
enable_extension("omni.kit.livestream.native")
# Enable WebSocket Livestream extension (deprecated)
# Default URL: http://localhost:8211/streaming/client/
# enable_extension("omni.services.streamclient.websocket")
# Enable WebRTC Livestream extension
# Default URL: http://localhost:8211/streaming/webrtc-client/
# enable_extension("omni.services.streamclient.webrtc")
# Run until closed
while kit._app.is_running() and not kit.is_exiting():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.close()
| 1,853 | Python | 33.333333 | 77 | 0.764166 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kit/change_resolution.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import random
from omni.isaac.kit import SimulationApp
# Simple example showing how to change resolution
kit = SimulationApp({"headless": True})
kit.update()
for i in range(100):
width = random.randint(128, 1980)
height = random.randint(128, 1980)
kit.set_setting("/app/renderer/resolution/width", width)
kit.set_setting("/app/renderer/resolution/height", height)
kit.update()
print(f"resolution set to: {width}, {height}")
# cleanup
kit.close()
| 905 | Python | 32.555554 | 76 | 0.758011 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/add_cubes.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
my_world = World(stage_units_in_meters=1.0)
cube_1 = my_world.scene.add(
VisualCuboid(
prim_path="/new_cube_1",
name="visual_cube",
position=np.array([0, 0, 0.5]),
size=0.3,
color=np.array([255, 255, 255]),
)
)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_2",
name="cube_1",
position=np.array([0, 0, 1.0]),
scale=np.array([0.6, 0.5, 0.2]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
cube_3 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_3",
name="cube_2",
position=np.array([0, 0, 3.0]),
scale=np.array([0.1, 0.1, 0.1]),
size=1.0,
color=np.array([0, 0, 255]),
linear_velocity=np.array([0, 0, 0.4]),
)
)
my_world.scene.add_default_ground_plane()
for i in range(5):
my_world.reset()
for i in range(500):
my_world.step(render=True)
print(cube_2.get_angular_velocity())
print(cube_2.get_world_pose())
simulation_app.close()
| 1,702 | Python | 26.031746 | 76 | 0.63631 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/cloth.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
import torch
from omni.isaac.core import World
from omni.isaac.core.materials.particle_material import ParticleMaterial
from omni.isaac.core.prims.soft.cloth_prim import ClothPrim
from omni.isaac.core.prims.soft.cloth_prim_view import ClothPrimView
from omni.isaac.core.prims.soft.particle_system import ParticleSystem
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.physx.scripts import deformableUtils, physicsUtils
from pxr import Gf, UsdGeom
# The example shows how to create and manipulate environments with particle cloth through the ClothPrimView
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
class ParticleClothExample:
def __init__(self):
self._array_container = torch.Tensor
self.my_world = World(stage_units_in_meters=1.0, backend="torch", device="cuda")
self.stage = simulation_app.context.get_stage()
self.num_envs = 10
self.dimx = 5
self.dimy = 5
self.my_world.scene.add_default_ground_plane()
self.initial_positions = None
self.makeEnvs()
def makeEnvs(self):
for i in range(self.num_envs):
env_path = "/World/Env" + str(i)
env = UsdGeom.Xform.Define(self.stage, env_path)
# set up the geometry
cloth_path = env.GetPrim().GetPath().AppendChild("cloth")
plane_mesh = UsdGeom.Mesh.Define(self.stage, cloth_path)
tri_points, tri_indices = deformableUtils.create_triangle_mesh_square(dimx=5, dimy=5, scale=1.0)
if self.initial_positions is None:
self.initial_positions = torch.zeros((self.num_envs, len(tri_points), 3))
plane_mesh.GetPointsAttr().Set(tri_points)
plane_mesh.GetFaceVertexIndicesAttr().Set(tri_indices)
plane_mesh.GetFaceVertexCountsAttr().Set([3] * (len(tri_indices) // 3))
init_loc = Gf.Vec3f(i * 2, 0.0, 2.0)
physicsUtils.setup_transform_as_scale_orient_translate(plane_mesh)
physicsUtils.set_or_add_translate_op(plane_mesh, init_loc)
physicsUtils.set_or_add_orient_op(plane_mesh, Gf.Rotation(Gf.Vec3d([1, 0, 0]), 15 * i).GetQuat())
self.initial_positions[i] = torch.tensor(init_loc) + torch.tensor(plane_mesh.GetPointsAttr().Get())
particle_system_path = env.GetPrim().GetPath().AppendChild("particleSystem")
particle_material_path = env.GetPrim().GetPath().AppendChild("particleMaterial")
self.particle_material = ParticleMaterial(
prim_path=particle_material_path, drag=0.1, lift=0.3, friction=0.6
)
radius = 0.5 * (0.6 / 5.0)
restOffset = radius
contactOffset = restOffset * 1.5
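            # PhysX particles require the contact offset to be strictly larger than the rest offset,
            # hence the 1.5 factor above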
self.particle_system = ParticleSystem(
prim_path=particle_system_path,
simulation_owner=self.my_world.get_physics_context().prim_path,
rest_offset=restOffset,
contact_offset=contactOffset,
solid_rest_offset=restOffset,
fluid_rest_offset=restOffset,
particle_contact_offset=contactOffset,
)
# note that no particle material is applied to the particle system at this point.
# this can be done manually via self.particle_system.apply_particle_material(self.particle_material)
            # or by passing the material to the ClothPrim, which binds it internally to the particle system
self.cloth = ClothPrim(
name="clothPrim" + str(i),
prim_path=str(cloth_path),
particle_system=self.particle_system,
particle_material=self.particle_material,
)
self.my_world.scene.add(self.cloth)
# create a view to deal with all the cloths
self.clothView = ClothPrimView(prim_paths_expr="/World/Env*/cloth", name="clothView1")
self.my_world.scene.add(self.clothView)
self.my_world.reset(soft=False)
def play(self):
while simulation_app.is_running():
if self.my_world.is_playing():
# deal with sim re-initialization after restarting sim
if self.my_world.current_time_step_index == 0:
# initialize simulation views
self.my_world.reset(soft=False)
self.my_world.step(render=True)
if self.my_world.current_time_step_index % 50 == 1:
for i in range(self.num_envs):
print(
"cloth {} average height = {:.2f}".format(
i, self.clothView.get_world_positions()[i, :, 2].mean()
)
)
# reset some random environments
if self.my_world.current_time_step_index % 200 == 1:
indices = torch.tensor(
np.random.choice(range(self.num_envs), self.num_envs // 2, replace=False), dtype=torch.long
)
new_positions = self.initial_positions[indices] + torch.tensor([0, 0, 5])
self.clothView.set_world_positions(new_positions, indices)
updated_positions = self.clothView.get_world_positions()
for i in indices:
print("reset index {} average height = {:.2f}".format(i, updated_positions[i, :, 2].mean()))
simulation_app.close()
ParticleClothExample().play()
| 6,331 | Python | 45.558823 | 112 | 0.632128 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/add_frankas.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
from omni.isaac.core.utils.types import ArticulationAction
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Franka_1")
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Franka_2")
articulated_system_1 = my_world.scene.add(Robot(prim_path="/World/Franka_1", name="my_franka_1"))
articulated_system_2 = my_world.scene.add(Robot(prim_path="/World/Franka_2", name="my_franka_2"))
for i in range(5):
print("resetting...")
my_world.reset()
articulated_system_1.set_world_pose(position=np.array([0.0, 2.0, 0.0]) / get_stage_units())
articulated_system_2.set_world_pose(position=np.array([0.0, -2.0, 0.0]) / get_stage_units())
articulated_system_1.set_joint_positions(np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]))
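    # the 9 joint targets cover the Franka's 7 arm joints plus the 2 gripper finger joints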
for j in range(500):
my_world.step(render=True)
if j == 100:
articulated_system_2.get_articulation_controller().apply_action(
ArticulationAction(joint_positions=np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]))
)
if j == 400:
print("Franka 1's joint positions are: ", articulated_system_1.get_joint_positions())
print("Franka 2's joint positions are: ", articulated_system_2.get_joint_positions())
if args.test is True:
break
simulation_app.close()
| 2,637 | Python | 41.548386 | 107 | 0.711794 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/simulation_callbacks.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
from omni.isaac.core import SimulationContext
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
assets_root_path = get_assets_root_path()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
simulation_context = SimulationContext()
add_reference_to_stage(asset_path, "/Franka")
# need to initialize physics getting any articulation..etc
simulation_context.initialize_physics()
art = Articulation("/Franka")
art.initialize()
dof_ptr = art.get_dof_index("panda_joint2")
simulation_context.play()
def step_callback_1(step_size):
art.set_joint_positions([-1.5], [dof_ptr])
return
def step_callback_2(step_size):
print(
"Current joint 2 position @ step "
+ str(simulation_context.current_time_step_index)
+ " : "
+ str(art.get_joint_positions([dof_ptr])[0])
)
print("TIME: ", simulation_context.current_time)
return
def render_callback(event):
print("Render Frame")
simulation_context.add_physics_callback("physics_callback_1", step_callback_1)
simulation_context.add_physics_callback("physics_callback_2", step_callback_2)
simulation_context.add_render_callback("render_callback", render_callback)
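# physics callbacks fire once per physics step and receive the step size; render callbacks fire on
# app/render updates and receive an event (whose payload typically includes the rendering dt)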
# Simulate 60 timesteps
for i in range(60):
print("step", i)
simulation_context.step(render=False)
# Render one frame
simulation_context.render()
simulation_context.stop()
simulation_app.close()
| 2,052 | Python | 30.10606 | 78 | 0.752437 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/data_logging.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
from omni.isaac.core import World
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Franka_1")
articulated_system_1 = my_world.scene.add(Robot(prim_path="/World/Franka_1", name="my_franka_1"))
my_world.reset()
data_logger = my_world.get_data_logger()
def frame_logging_func(tasks, scene):
return {
"joint_positions": scene.get_object("my_franka_1").get_joint_positions().tolist(),
"applied_joint_positions": scene.get_object("my_franka_1").get_applied_action().joint_positions.tolist(),
}
data_logger.add_data_frame_logging_func(frame_logging_func)
data_logger.start()
for j in range(100):
my_world.step(render=True)
data_logger.save(log_path="./isaac_sim_data.json")
data_logger.reset()
data_logger.load(log_path="./isaac_sim_data.json")
print(data_logger.get_data_frame(data_frame_index=2))
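# each logged data frame records the time step index, the simulation time, and the dict
# produced by frame_logging_func for that frame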
simulation_app.close()
| 1,922 | Python | 32.736842 | 113 | 0.752341 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/deformable.py | from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils
import torch
from omni.isaac.core import World
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim
from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.physx.scripts import deformableUtils, physicsUtils
from pxr import Gf, UsdGeom, UsdLux
# The example shows how to create and manipulate environments with deformable bodies through the DeformablePrimView
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
class DeformableExample:
def __init__(self):
self._array_container = torch.Tensor
self.my_world = World(stage_units_in_meters=1.0, backend="torch", device="cuda")
self.stage = simulation_app.context.get_stage()
self.num_envs = 10
self.dimx = 5
self.dimy = 5
self.my_world.scene.add_default_ground_plane()
self.initial_positions = None
self.makeEnvs()
def makeEnvs(self):
for i in range(self.num_envs):
init_loc = Gf.Vec3f(i * 2 - self.num_envs, 0.0, 0.0)
env_scope = UsdGeom.Scope.Define(self.stage, "/World/Envs")
env_path = "/World/Envs/Env" + str(i)
env = UsdGeom.Xform.Define(self.stage, env_path)
physicsUtils.set_or_add_translate_op(UsdGeom.Xformable(env), init_loc)
mesh_path = env.GetPrim().GetPath().AppendChild("deformable")
skin_mesh = UsdGeom.Mesh.Define(self.stage, mesh_path)
tri_points, tri_indices = deformableMeshUtils.createTriangleMeshCube(8)
skin_mesh.GetPointsAttr().Set(tri_points)
skin_mesh.GetFaceVertexIndicesAttr().Set(tri_indices)
skin_mesh.GetFaceVertexCountsAttr().Set([3] * (len(tri_indices) // 3))
physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh)
physicsUtils.set_or_add_translate_op(skin_mesh, (0.0, 0.0, 2.0))
physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([1, 0, 0]), 15 * i).GetQuat())
deformable_material_path = env.GetPrim().GetPath().AppendChild("deformableMaterial")
self.deformable_material = DeformableMaterial(
prim_path=deformable_material_path,
dynamic_friction=0.5,
youngs_modulus=5e4,
poissons_ratio=0.4,
damping_scale=0.1,
elasticity_damping=0.1,
)
self.deformable = DeformablePrim(
name="deformablePrim" + str(i),
prim_path=str(mesh_path),
deformable_material=self.deformable_material,
vertex_velocity_damping=0.0,
sleep_damping=1.0,
sleep_threshold=0.05,
settling_threshold=0.1,
self_collision=True,
self_collision_filter_distance=0.05,
solver_position_iteration_count=20,
kinematic_enabled=False,
simulation_hexahedral_resolution=2,
collision_simplification=True,
)
self.my_world.scene.add(self.deformable)
# create a view to deal with all the deformables
self.deformableView = DeformablePrimView(prim_paths_expr="/World/Envs/Env*/deformable", name="deformableView1")
self.my_world.scene.add(self.deformableView)
self.my_world.reset(soft=False)
# mesh data is available only after cooking
        # rest_points are represented with respect to the env positions, but simulation_mesh_nodal_positions can be either global or local
        # However, because subspace root paths are not currently considered during World/SimulationContext initialization, the environment
        # xforms are not identified below, so the following call returns positions w.r.t. the global frame.
self.initial_positions = self.deformableView.get_simulation_mesh_nodal_positions().cpu()
self.initial_velocities = self.deformableView.get_simulation_mesh_nodal_velocities().cpu()
# print(self.initial_positions)
# self.initial_positions = self.deformableView.get_simulation_mesh_rest_points().cpu()
# for i in range(self.num_envs):
# self.initial_positions[i] += torch.tensor([i * 2, 0.0, 2.0])
# print(self.initial_positions[i])
def play(self):
while simulation_app.is_running():
if self.my_world.is_playing():
# deal with sim re-initialization after restarting sim
if self.my_world.current_time_step_index == 1:
# initialize simulation views
self.my_world.reset(soft=False)
self.my_world.step(render=True)
if self.my_world.current_time_step_index == 200:
for i in range(self.num_envs):
print(
"deformable {} average height = {:.2f}".format(
i, self.deformableView.get_simulation_mesh_nodal_positions()[i, :, 2].mean()
)
)
print(
"deformable {} average vertical speed = {:.2f}".format(
i, self.deformableView.get_simulation_mesh_nodal_velocities()[i, :, 2].mean()
)
)
# reset some random environments
if self.my_world.current_time_step_index % 500 == 1:
indices = torch.tensor(
np.random.choice(range(self.num_envs), self.num_envs // 2, replace=False), dtype=torch.long
)
new_positions = self.initial_positions[indices] + torch.tensor([0, 0, 5])
new_velocities = self.initial_velocities[indices] + torch.tensor([0, 0, 3])
self.deformableView.set_simulation_mesh_nodal_positions(new_positions, indices)
self.deformableView.set_simulation_mesh_nodal_velocities(new_velocities, indices)
updated_positions = self.deformableView.get_simulation_mesh_nodal_positions()
updated_velocities = self.deformableView.get_simulation_mesh_nodal_velocities()
for i in indices:
print("reset index {} average height = {:.2f}".format(i, updated_positions[i, :, 2].mean()))
print(
"reset index {} average vertical speed = {:.2f}".format(i, updated_velocities[i, :, 2].mean())
)
simulation_app.close()
DeformableExample().play()
| 7,196 | Python | 47.959183 | 160 | 0.617982 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/visual_materials.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import random
import sys
import carb
from omni.isaac.core import World
from omni.isaac.core.materials.omni_glass import OmniGlass
from omni.isaac.core.materials.omni_pbr import OmniPBR
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Materials/Textures/Synthetic/bubbles_2.png"
my_world = World(stage_units_in_meters=1.0)
textured_material = OmniPBR(
prim_path="/World/visual_cube_material",
name="omni_pbr",
color=np.array([1, 0, 0]),
texture_path=asset_path,
texture_scale=[1.0, 1.0],
texture_translate=[0.5, 0],
)
glass = OmniGlass(
prim_path=f"/World/visual_cube_material_2",
ior=1.25,
depth=0.001,
thin_walled=False,
color=np.array([random.random(), random.random(), random.random()]),
)
cube_1 = my_world.scene.add(
VisualCuboid(
prim_path="/new_cube_1",
name="visual_cube",
position=np.array([0, 0, 0.5]),
size=1.0,
color=np.array([255, 255, 255]),
visual_material=textured_material,
)
)
cube_2 = my_world.scene.add(
VisualCuboid(
prim_path="/new_cube_2",
name="visual_cube_2",
position=np.array([2, 0.39, 0.5]),
size=1.0,
color=np.array([255, 255, 255]),
visual_material=glass,
)
)
visual_material = cube_2.get_applied_visual_material()
visual_material.set_color(np.array([1.0, 0.5, 0.0]))
my_world.scene.add_default_ground_plane()
my_world.reset()
for i in range(10000):
my_world.step(render=True)
if args.test is True:
break
simulation_app.close()
| 2,549 | Python | 27.333333 | 90 | 0.695175 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/time_stepping.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
assets_root_path = get_assets_root_path()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
simulation_context = SimulationContext(stage_units_in_meters=1.0)
add_reference_to_stage(asset_path, "/Franka")
# need to initialize physics getting any articulation..etc
simulation_context.initialize_physics()
def step_callback(step_size):
print("simulate with step: ", step_size)
return
def render_callback(event):
print("update app with step: ", event.payload["dt"])
simulation_context.add_physics_callback("physics_callback", step_callback)
simulation_context.add_render_callback("render_callback", render_callback)
simulation_context.stop()
simulation_context.play()
print("step physics once with a step size of 1/60 second, these are the default settings")
simulation_context.step(render=False)
print("step physics & rendering once with a step size of 1/60 second, these are the default settings")
simulation_context.step(render=True)
print("step physics & rendering once with a step size of 1/60 second")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0)
simulation_context.step(render=True)
print("step physics 10 steps at a 1/600s per step and rendering at 1.0/60s")
simulation_context.set_simulation_dt(physics_dt=1.0 / 600.0, rendering_dt=1.0 / 60.0)
simulation_context.step(render=True)
print("step physics once at 600Hz without rendering")
simulation_context.set_simulation_dt(physics_dt=1.0 / 600.0, rendering_dt=1.0 / 60.0)
simulation_context.step(render=False)
print("step physics 10 steps at a 1/600s per step and rendering at 1.0/60s")
simulation_context.set_simulation_dt(physics_dt=1.0 / 600.0, rendering_dt=1.0 / 60.0)
for step in range(10):
simulation_context.step(render=False)
simulation_context.render()
print("render a frame, moving editor timeline forward by 1.0/60s, physics does not simulate")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0)
simulation_context.render()
print("render a frame, moving editor timeline forward by 1.0/60s, physics does not simulate")
simulation_context.set_simulation_dt(physics_dt=0.0, rendering_dt=1.0 / 60)
simulation_context.step(render=True)
print("step physics once 1/60s per step and rendering 10 times at 1.0/600s")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 600.0)
for step in range(10):
simulation_context.step(render=True)
print("step physics once 1/60s per step and rendering once at 1.0/600s by explicitly calling step and render")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 600.0)
simulation_context.step(render=False)
simulation_context.render()
print("step physics once 1/60s per step, rendering a frame does not move editor timeline forward")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=0.0)
simulation_context.step(render=False)
simulation_context.render()
print("step physics once 1/60s per step, rendering a frame does not move editor timeline forward")
simulation_context.set_simulation_dt(physics_dt=1.0 / 60.0, rendering_dt=0.0)
simulation_context.step(render=True)
print("render a new frame with simulation stopped, editor timeline does not move forward")
simulation_context.stop()
simulation_context.render()
print("cleanup and exit")
simulation_context.stop()
simulation_app.close()
| 4,101 | Python | 40.434343 | 110 | 0.774201 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/control_robot.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import SimulationContext
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
assets_root_path = get_assets_root_path()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
simulation_context = SimulationContext()
add_reference_to_stage(asset_path, "/Franka")
# need to initialize physics getting any articulation..etc
simulation_context.initialize_physics()
art = Articulation("/Franka")
art.initialize()
dof_ptr = art.get_dof_index("panda_joint2")
simulation_context.play()
# NOTE: before interacting with dc directly you need to step physics at least once,
# e.g. simulation_context.step(render=True), which happens inside .play()
for i in range(1000):
art.set_joint_positions([-1.5], [dof_ptr])
simulation_context.step(render=True)
simulation_context.stop()
simulation_app.close()
| 1,500 | Python | 36.524999 | 90 | 0.786667 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/rigid_contact_view.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
import torch
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims.geometry_prim import GeometryPrim
from omni.isaac.core.prims.geometry_prim_view import GeometryPrimView
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
class RigidViewExample:
def __init__(self):
self._array_container = torch.Tensor
self.my_world = World(stage_units_in_meters=1.0, backend="torch")
self.stage = simulation_app.context.get_stage()
def makeEnv(self):
self.cube_height = 1.0
self.top_cube_height = self.cube_height + 3.0
self.cube_dx = 5.0
self.cube_y = 2.0
self.top_cube_y = self.cube_y + 0.0
self.my_world._physics_context.set_gravity(-10)
self.my_world.scene.add_default_ground_plane()
for i in range(3):
DynamicCuboid(
prim_path=f"/World/Box_{i+1}", name=f"box_{i}", size=1.0, color=np.array([0.5, 0, 0]), mass=1.0
)
DynamicCuboid(
prim_path=f"/World/TopBox_{i+1}",
name=f"top_box_{i}",
size=1.0,
color=np.array([0.0, 0.0, 0.5]),
mass=1.0,
)
        # add the top boxes as filters to the view to receive contacts between the bottom boxes and the top boxes
self._box_view = RigidPrimView(
prim_paths_expr="/World/Box_*",
name="box_view",
positions=self._array_container(
[
[0, self.cube_y, self.cube_height],
[-self.cube_dx, self.cube_y, self.cube_height],
[self.cube_dx, self.cube_y, self.cube_height],
]
),
contact_filter_prim_paths_expr=["/World/TopBox_*"],
)
# a view just to manipulate the top boxes
self._top_box_view = RigidPrimView(
prim_paths_expr="/World/TopBox_*",
name="top_box_view",
positions=self._array_container(
[
[0.0, self.top_cube_y, self.top_cube_height],
[-self.cube_dx, self.top_cube_y, self.top_cube_height],
[self.cube_dx, self.top_cube_y, self.top_cube_height],
]
),
track_contact_forces=True,
)
        # contact forces with non-rigid-body prims such as geometry prims can be obtained either via the single-prim class GeometryPrim or the view class GeometryPrimView
self._geom_prim = GeometryPrim(
prim_path="/World/defaultGroundPlane",
name="groundPlane",
collision=True,
track_contact_forces=True,
prepare_contact_sensor=True,
contact_filter_prim_paths_expr=["/World/Box_1", "/World/Box_2"],
)
self._geom_view = GeometryPrimView(
prim_paths_expr="/World/defaultGroundPlane*",
name="groundPlaneView",
collisions=self._array_container([True]),
track_contact_forces=True,
prepare_contact_sensors=True,
contact_filter_prim_paths_expr=["/World/Box_1", "/World/Box_2", "/World/Box_3"],
)
self.my_world.scene.add(self._box_view)
self.my_world.scene.add(self._top_box_view)
self.my_world.scene.add(self._geom_prim)
self.my_world.scene.add(self._geom_view)
self.my_world.reset(soft=False)
def play(self):
self.makeEnv()
while simulation_app.is_running():
if self.my_world.is_playing():
# deal with sim re-initialization after restarting sim
if self.my_world.current_time_step_index == 0:
# initialize simulation views
self.my_world.reset(soft=False)
self.my_world.step(render=True)
if self.my_world.current_time_step_index % 100 == 1:
states = self._box_view.get_current_dynamic_state()
top_states = self._top_box_view.get_current_dynamic_state()
net_forces = self._box_view.get_net_contact_forces(None, dt=1 / 60)
forces_matrix = self._box_view.get_contact_force_matrix(None, dt=1 / 60)
top_net_forces = self._top_box_view.get_net_contact_forces(None, dt=1 / 60)
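                    # the dt argument converts the contact impulses accumulated over the step into
                    # average forces (force = impulse / dt)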
print("==================================================================")
print("Bottom box net forces: \n", net_forces)
print("Top box net forces: \n", top_net_forces)
print("Bottom box forces from top ones: \n", forces_matrix)
print("Bottom box positions: \n", states.positions)
print("Top box positions: \n", top_states.positions)
print("Bottom box velocities: \n", states.linear_velocities)
print("Top box velocities: \n", top_states.linear_velocities)
print("ground net force from GeometryPrimView : \n", self._geom_view.get_net_contact_forces(dt=1 / 60))
print(
"ground force matrix from GeometryPrimView: \n", self._geom_view.get_contact_force_matrix(dt=1 / 60)
)
print("ground net force from GeometryPrim : \n", self._geom_prim.get_net_contact_forces(dt=1 / 60))
print("ground force matrix from GeometryPrim: \n", self._geom_prim.get_contact_force_matrix(dt=1 / 60))
simulation_app.close()
RigidViewExample().play()
| 6,241 | Python | 42.347222 | 163 | 0.585002 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.core/simulate_robot.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import create_prim
from omni.isaac.core.utils.stage import add_reference_to_stage, is_stage_loading
assets_root_path = get_assets_root_path()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
simulation_context = SimulationContext()
add_reference_to_stage(asset_path, "/Franka")
create_prim("/DistantLight", "DistantLight")
# wait for things to load
simulation_app.update()
while is_stage_loading():
simulation_app.update()
# physics needs to be initialized before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
for i in range(1000):
simulation_context.step(render=True)
simulation_context.stop()
simulation_app.close()
| 1,366 | Python | 34.973683 | 80 | 0.791362 |
2820207922/isaac_ws/standalone_examples/api/omni.replicator.isaac/randomization_demo.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.cloner import GridCloner
from omni.isaac.core import World
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
# create the world
world = World(stage_units_in_meters=1.0, physics_prim_path="/physicsScene", backend="numpy")
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder, closing app..")
simulation_app.close()
usd_path = assets_root_path + "/Isaac/Environments/Grid/default_environment.usd"
add_reference_to_stage(usd_path=usd_path, prim_path="/World/defaultGroundPlane")
# set up grid cloner
cloner = GridCloner(spacing=1.5)
cloner.define_base_env("/World/envs")
define_prim("/World/envs/env_0")
# set up the first environment
DynamicSphere(prim_path="/World/envs/env_0/object", radius=0.1, position=np.array([0.75, 0.0, 0.2]))
add_reference_to_stage(
usd_path=assets_root_path + "/Isaac/Robots/Franka/franka.usd", prim_path="/World/envs/env_0/franka"
)
# clone environments
num_envs = 4
prim_paths = cloner.generate_paths("/World/envs/env", num_envs)
env_pos = cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=prim_paths)
# create the views and set up the world
object_view = RigidPrimView(prim_paths_expr="/World/envs/*/object", name="object_view")
franka_view = ArticulationView(prim_paths_expr="/World/envs/*/franka", name="franka_view")
world.scene.add(object_view)
world.scene.add(franka_view)
world.reset()
num_dof = franka_view.num_dof
import omni.replicator.core as rep
# set up randomization with omni.replicator.isaac, imported as dr
import omni.replicator.isaac as dr
dr.physics_view.register_simulation_context(world)
dr.physics_view.register_rigid_prim_view(object_view)
dr.physics_view.register_articulation_view(franka_view)
with dr.trigger.on_rl_frame(num_envs=num_envs):
with dr.gate.on_interval(interval=20):
dr.physics_view.randomize_simulation_context(
operation="scaling", gravity=rep.distribution.uniform((1, 1, 0.0), (1, 1, 2.0))
)
with dr.gate.on_interval(interval=50):
dr.physics_view.randomize_rigid_prim_view(
view_name=object_view.name, operation="direct", force=rep.distribution.uniform((0, 0, 2.5), (0, 0, 5.0))
)
with dr.gate.on_interval(interval=10):
dr.physics_view.randomize_articulation_view(
view_name=franka_view.name,
operation="direct",
joint_velocities=rep.distribution.uniform(tuple([-2] * num_dof), tuple([2] * num_dof)),
)
with dr.gate.on_env_reset():
dr.physics_view.randomize_rigid_prim_view(
view_name=object_view.name,
operation="additive",
position=rep.distribution.normal((0.0, 0.0, 0.0), (0.2, 0.2, 0.0)),
velocity=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
)
dr.physics_view.randomize_articulation_view(
view_name=franka_view.name,
operation="additive",
joint_positions=rep.distribution.uniform(tuple([-0.5] * num_dof), tuple([0.5] * num_dof)),
position=rep.distribution.normal((0.0, 0.0, 0.0), (0.2, 0.2, 0.0)),
)
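# The gates above are driven by step_randomization() in the loop below: on_interval
# gates fire every N calls, while the on_env_reset gate fires only for the environment
# indices passed in as reset_inds.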
frame_idx = 0
while simulation_app.is_running():
if world.is_playing():
reset_inds = list()
if frame_idx % 200 == 0:
# triggers reset every 200 steps
reset_inds = np.arange(num_envs)
dr.physics_view.step_randomization(reset_inds)
world.step(render=True)
frame_idx += 1
simulation_app.close()
| 4,393 | Python | 38.945454 | 116 | 0.700433 |
2820207922/isaac_ws/standalone_examples/api/omni.kit.app/app_framework.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import os
import omni.kit.app
from omni.isaac.kit import AppFramework
argv = [
"--empty",
"--ext-folder",
f'{os.path.abspath(os.environ["ISAAC_PATH"])}/exts',
"--no-window",
"--/app/asyncRendering=False",
"--/app/fastShutdown=True",
"--enable",
"omni.usd",
"--enable",
"omni.kit.uiapp",
]
# startup
app = AppFramework("test_app", argv)
import omni.usd
stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
while not stage_task.done():
app.update()
print("exiting")
app.close()
| 997 | Python | 23.949999 | 76 | 0.711133 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.dynamic_control/franka_articulation.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
# This sample loads an articulation and prints its information
import omni
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.dynamic_control import _dynamic_control
stage = simulation_app.context.get_stage()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
omni.usd.get_context().open_stage(asset_path)
# start simulation
omni.timeline.get_timeline_interface().play()
# perform timestep
simulation_app.update()
dc = _dynamic_control.acquire_dynamic_control_interface()
# Get handle to articulation
art = dc.get_articulation("/panda")
if art == _dynamic_control.INVALID_HANDLE:
print("*** '%s' is not an articulation" % "/panda")
else:
# Print information about articulation
root = dc.get_articulation_root_body(art)
print(str("Got articulation handle %d \n" % art) + str("--- Hierarchy\n"))
body_states = dc.get_articulation_body_states(art, _dynamic_control.STATE_ALL)
print(str("--- Body states:\n") + str(body_states) + "\n")
dof_states = dc.get_articulation_dof_states(art, _dynamic_control.STATE_ALL)
print(str("--- DOF states:\n") + str(dof_states) + "\n")
dof_props = dc.get_articulation_dof_properties(art)
print(str("--- DOF properties:\n") + str(dof_props) + "\n")
# Simulate robot coming to a rest configuration
for i in range(100):
simulation_app.update()
# Simulate robot for a fixed number of frames and specify a joint position target
for i in range(100):
dof_ptr = dc.find_articulation_dof(art, "panda_joint2")
# This should be called each frame of simulation if state on the articulation is being changed.
dc.wake_up_articulation(art)
# Set joint position target
dc.set_dof_position_target(dof_ptr, -1.5)
simulation_app.update()
simulation_app.close()
| 2,529 | Python | 35.142857 | 99 | 0.732305 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.shapenet/usd_convertor.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Convert ShapeNetCore V2 to USD without materials.
By only converting the ShapeNet geometry, we can more quickly load assets into scenes for the purpose of creating
large datasets or for online training of Deep Learning models.
"""
import argparse
import os
import carb
from omni.isaac.kit import SimulationApp
if "SHAPENET_LOCAL_DIR" not in os.environ:
carb.log_error("SHAPENET_LOCAL_DIR not defined:")
carb.log_error(
"Please specify the SHAPENET_LOCAL_DIR environment variable to the location of your local shapenet database, exiting"
)
exit()
kit = SimulationApp()
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.shapenet")
from omni.isaac.shapenet import utils
parser = argparse.ArgumentParser("Convert ShapeNet assets to USD")
parser.add_argument(
"--categories", type=str, nargs="+", default=None, help="List of ShapeNet categories to convert (space seperated)."
)
parser.add_argument(
"--max_models", type=int, default=50, help="If specified, convert up to `max_models` per category, default is 50"
)
parser.add_argument(
"--load_materials", action="store_true", help="If specified, materials will be loaded from shapenet meshes"
)
args, unknown_args = parser.parse_known_args()
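# Example invocation (a sketch; assumes running via Isaac Sim's python.sh and that the
# named categories exist in your local ShapeNet database):
#   ./python.sh usd_convertor.py --categories plane watercraft --max_models 100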
# Ensure Omniverse Kit is launched via SimulationApp before shapenet_convert() is called
utils.shapenet_convert(args.categories, args.max_models, args.load_materials)
# cleanup
kit.close()
| 1,900 | Python | 34.203703 | 125 | 0.769474 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/franka_pick_up.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.franka.controllers.pick_place_controller import PickPlaceController
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Franka")
gripper = ParallelGripper(
end_effector_prim_path="/World/Franka/panda_rightfinger",
joint_prim_names=["panda_finger_joint1", "panda_finger_joint2"],
joint_opened_positions=np.array([0.05, 0.05]),
joint_closed_positions=np.array([0.0, 0.0]),
action_deltas=np.array([0.05, 0.05]),
)
my_franka = my_world.scene.add(
SingleManipulator(
prim_path="/World/Franka", name="my_franka", end_effector_prim_name="panda_rightfinger", gripper=gripper
)
)
cube = my_world.scene.add(
DynamicCuboid(
name="cube",
position=np.array([0.3, 0.3, 0.3]),
prim_path="/World/Cube",
scale=np.array([0.0515, 0.0515, 0.0515]),
size=1.0,
color=np.array([0, 0, 1]),
)
)
my_franka.gripper.set_default_state(my_franka.gripper.joint_opened_positions)
my_world.reset()
my_controller = PickPlaceController(
name="pick_place_controller", gripper=my_franka.gripper, robot_articulation=my_franka
)
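# PickPlaceController is a scripted multi-phase state machine (roughly: move above the
# cube, descend, close the gripper, lift, translate to the target, lower, release);
# each forward() call below advances it by one time step.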
articulation_controller = my_franka.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=cube.get_local_pose()[0],
placing_position=np.array([-0.3, -0.3, 0.0515 / 2.0]),
current_joint_positions=my_franka.get_joint_positions(),
end_effector_offset=np.array([0, 0.005, 0]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 3,410 | Python | 34.53125 | 112 | 0.708798 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/ur10_pick_up.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import SurfaceGripper
from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/UR10/ur10.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/UR10")
gripper_usd = assets_root_path + "/Isaac/Robots/UR10/Props/short_gripper.usd"
add_reference_to_stage(usd_path=gripper_usd, prim_path="/World/UR10/ee_link")
gripper = SurfaceGripper(end_effector_prim_path="/World/UR10/ee_link", translate=0.1611, direction="x")
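# SurfaceGripper emulates a suction-cup style gripper: `translate` offsets the grip
# point from the end-effector prim along the axis given by `direction`.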
ur10 = my_world.scene.add(
SingleManipulator(prim_path="/World/UR10", name="my_ur10", end_effector_prim_name="ee_link", gripper=gripper)
)
ur10.set_joints_default_state(positions=np.array([-np.pi / 2, -np.pi / 2, -np.pi / 2, -np.pi / 2, np.pi / 2, 0]))
cube = my_world.scene.add(
DynamicCuboid(
name="cube",
position=np.array([0.3, 0.3, 0.3]),
prim_path="/World/Cube",
scale=np.array([0.0515, 0.0515, 0.0515]),
size=1.0,
color=np.array([0, 0, 1]),
)
)
ur10.gripper.set_default_state(opened=True)
my_world.reset()
my_controller = PickPlaceController(name="pick_place_controller", gripper=ur10.gripper, robot_articulation=ur10)
articulation_controller = ur10.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=cube.get_local_pose()[0],
placing_position=np.array([0.7, 0.7, 0.0515 / 2.0]),
current_joint_positions=ur10.get_joint_positions(),
end_effector_offset=np.array([0, 0, 0.02]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 3,376 | Python | 37.375 | 113 | 0.712085 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/rmpflow_supported_robots/supported_robot_follow_target_example.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
from pprint import pprint
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import cuboid
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import create_prim
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.motion_generation.articulation_motion_policy import ArticulationMotionPolicy
from omni.isaac.motion_generation.interface_config_loader import (
get_supported_robot_policy_pairs,
load_supported_motion_policy_config,
)
from omni.isaac.motion_generation.lula import RmpFlow
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=True,
help="Print useful runtime information such as the list of supported robots",
)
parser.add_argument(
"--robot-name",
type=str,
default="Cobotta_Pro_900",
help="Key to use to access RMPflow config files for a specific robot.",
)
parser.add_argument(
"--usd-path",
type=str,
default="/Isaac/Robots/Denso/cobotta_pro_900.usd",
help="Path to supported robot on Nucleus Server",
)
parser.add_argument("--add-orientation-target", action="store_true", default=False, help="Add orientation target")
args, unknown = parser.parse_known_args()
robot_name = args.robot_name
usd_path = get_assets_root_path() + args.usd_path
prim_path = "/my_robot"
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
light_prim = create_prim("/DistantLight", "DistantLight")
light_prim.GetAttribute("inputs:intensity").Set(5000)
my_world = World(stage_units_in_meters=1.0)
robot = my_world.scene.add(Robot(prim_path=prim_path, name=robot_name))
if args.verbose:
print("Names of supported robots with provided RMPflow config")
print("\t", list(get_supported_robot_policy_pairs().keys()))
print()
# The load_supported_motion_policy_config() function is currently the simplest way to load supported robots.
# In the future, Isaac Sim will provide a centralized registry of robots with Lula robot description files
# and RMP configuration files stored alongside the robot USD.
rmp_config = load_supported_motion_policy_config(robot_name, "RMPflow")
if args.verbose:
print(
f"Successfully referenced RMPflow config for {robot_name}. Using the following parameters to initialize RmpFlow class:"
)
pprint(rmp_config)
print()
# Initialize an RmpFlow object
rmpflow = RmpFlow(**rmp_config)
physics_dt = 1 / 60.0
articulation_rmpflow = ArticulationMotionPolicy(robot, rmpflow, physics_dt)
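# ArticulationMotionPolicy wraps the RMPflow policy so that its output is integrated
# over physics_dt and returned as joint targets consumable by the articulation controller.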
articulation_controller = robot.get_articulation_controller()
# Make a target to follow
target_cube = cuboid.VisualCuboid(
"/World/target", position=np.array([0.5, 0, 0.5]), color=np.array([1.0, 0, 0]), size=0.1
)
# Make an obstacle to avoid
obstacle = cuboid.VisualCuboid(
"/World/obstacle", position=np.array([0.8, 0, 0.5]), color=np.array([0, 1.0, 0]), size=0.1
)
rmpflow.add_obstacle(obstacle)
my_world.reset()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
# Set rmpflow target to be the current position of the target cube.
if args.add_orientation_target:
target_orientation = target_cube.get_world_pose()[1]
else:
target_orientation = None
rmpflow.set_end_effector_target(
target_position=target_cube.get_world_pose()[0], target_orientation=target_orientation
)
# Query the current obstacle position
rmpflow.update_world()
actions = articulation_rmpflow.get_next_articulation_action()
articulation_controller.apply_action(actions)
simulation_app.close()
| 4,364 | Python | 33.642857 | 128 | 0.730981 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/rmpflow_supported_robots/README.md | This standalone example provides a generic script for running a follow-target example on any supported robot that uses RMPflow to reach a target while avoiding obstacles. The purpose of this script is to show only how to use RMPflow, and for the sake of simplicity, it does not use the task/controller paradigm that is typical in other Isaac Sim examples.
The ./supported_robot_follow_target_example.py script takes in at runtime the path to the robot USD asset (which is assumed to be stored on the Nucleus Server) and the name of the robot. By running the script with the default command-line arguments, the list of supported robot names will be printed in the terminal.
###### Command Line Arguments
The supported command-line arguments are as follows:
-v,--verbose: If True, prints out useful runtime information such as the list of supported robot names that map to RMPflow config files. Defaults to 'True'
--robot-name: Name of robot that maps to the stored RMPflow config. Defaults to "Cobotta_Pro_900"
--usd-path: Path to robot USD asset on the Nucleus Server. Defaults to "/Isaac/Robots/Denso/cobotta_pro_900.usd". The typical location of a specific robot is under
"/Isaac/Robots/{manufacturer_name}/{robot_name}/{robot_name}.usd"
--add-orientation-target: Add the orientation of the target cube to the RMPflow target. Defaults to False.
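For example, running with all default arguments (via Isaac Sim's bundled Python launcher) exercises the Cobotta Pro 900:
python.sh supported_robot_follow_target_example.py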
###### Choosing Correct Robot Name Argument
With the default arguments, the above script will run using the Cobotta Pro 900 robot and will produce the following output:
Names of supported robots with provided RMPflow config
['Franka', 'UR3', 'UR3e', 'UR5', 'UR5e', 'UR10', 'UR10e', 'UR16e', 'Rizon4', 'DofBot', 'Cobotta_Pro_900', 'Cobotta_Pro_1300', 'RS007L', 'RS007N', 'RS013N', 'RS025N', 'RS080N', 'FestoCobot']
Successfully referenced RMPflow config for Cobotta_Pro_900. Using the following parameters to initialize RmpFlow class:
{
'end_effector_frame_name': 'gripper_center',
'ignore_robot_state_updates': False,
'maximum_substep_size': 0.00334,
'rmpflow_config_path': '/path/to/omni_isaac_sim/_build/linux-x86_64/release/exts/omni.isaac.motion_generation/motion_policy_configs/./Denso/cobotta_pro_900/rmpflow/cobotta_rmpflow_common.yaml',
'robot_description_path': '/path/to/omni_isaac_sim/_build/linux-x86_64/release/exts/omni.isaac.motion_generation/motion_policy_configs/./Denso/cobotta_pro_900/rmpflow/robot_descriptor.yaml',
'urdf_path': '/path/to/omni_isaac_sim/_build/linux-x86_64/release/exts/omni.isaac.motion_generation/motion_policy_configs/./Denso/cobotta_pro_900/rmpflow/../cobotta_pro_900_gripper_frame.urdf'
}
The names of supported robots are suitable for the `--robot-name` argument, and each must correctly correspond to the robot USD path. In a future release, configuration data for supported robots will be centralized such that only a single argument will be required. The specific method of accessing supported robot RMPflow configs provided here will then be deprecated.
The remaining output shows the RMPflow configuration information that is found under the name "Cobotta_Pro_900". This configuration is used to initialize the `RmpFlow` class.
###### Examples of loading other robots
Multiple valid combinations of command-line arguments are shown below for different supported robots:
python.sh supported_robot_follow_target_example.py --robot-name RS080N --usd-path "/Isaac/Robots/Kawasaki/RS080N/rs080n_onrobot_rg2.usd"
python.sh supported_robot_follow_target_example.py --robot-name UR16e --usd-path "/Isaac/Robots/UniversalRobots/ur16e/ur16e.usd"
python.sh supported_robot_follow_target_example.py --robot-name FestoCobot --usd-path "/Isaac/Robots/Festo/FestoCobot/festo_cobot.usd"
| 3,811 | Markdown | 72.307691 | 371 | 0.755445 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/ik_solver.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import Optional
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from omni.isaac.motion_generation.articulation_kinematics_solver import ArticulationKinematicsSolver
from omni.isaac.motion_generation.lula.kinematics import LulaKinematicsSolver
class KinematicsSolver(ArticulationKinematicsSolver):
def __init__(self, robot_articulation: Articulation, end_effector_frame_name: Optional[str] = None) -> None:
urdf_extension_path = get_extension_path_from_name("omni.isaac.urdf")
self._kinematics = LulaKinematicsSolver(
robot_description_path=os.path.join(os.path.dirname(__file__), "../rmpflow/robot_descriptor.yaml"),
urdf_path=os.path.join(urdf_extension_path, "data/urdf/robots/cobotta_pro_900/cobotta_pro_900.urdf"),
)
if end_effector_frame_name is None:
end_effector_frame_name = "onrobot_rg6_base_link"
ArticulationKinematicsSolver.__init__(self, robot_articulation, self._kinematics, end_effector_frame_name)
return
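# Example usage (a sketch; assumes `robot` is an initialized Articulation and numpy is
# imported as np):
#   solver = KinematicsSolver(robot)
#   action, success = solver.compute_inverse_kinematics(target_position=np.array([0.4, 0.0, 0.3]))
#   if success:
#       robot.get_articulation_controller().apply_action(action)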
| 1,547 | Python | 50.599998 | 114 | 0.76212 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/pick_up_example.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from controllers.pick_place import PickPlaceController
from omni.isaac.core import World
from tasks.pick_place import PickPlace
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
target_position = np.array([-0.3, 0.6, 0])
target_position[2] = 0.0515 / 2.0
my_task = PickPlace(name="denso_pick_place", target_position=target_position)
my_world.add_task(my_task)
my_world.reset()
my_denso = my_world.scene.get_object("cobotta_robot")
# initialize the controller
my_controller = PickPlaceController(name="controller", robot_articulation=my_denso, gripper=my_denso.gripper)
task_params = my_world.get_task("denso_pick_place").get_params()
articulation_controller = my_denso.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
# forward the observation values to the controller to get the actions
actions = my_controller.forward(
picking_position=observations[task_params["cube_name"]["value"]]["position"],
placing_position=observations[task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, 0, 0.25]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 2,371 | Python | 38.533333 | 109 | 0.719106 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/follow_target_example.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from controllers.rmpflow import RMPFlowController
from omni.isaac.core import World
from tasks.follow_target import FollowTarget
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
# Initialize the Follow Target task with a target location for the cube to be followed by the end effector
my_task = FollowTarget(name="denso_follow_target", target_position=np.array([0.5, 0, 0.5]))
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("denso_follow_target").get_params()
target_name = task_params["target_name"]["value"]
denso_name = task_params["robot_name"]["value"]
my_denso = my_world.scene.get_object(denso_name)
# initialize the controller
my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=my_denso)
# make RmpFlow aware of the ground plane
ground_plane = my_world.scene.get_object(name="default_ground_plane")
my_controller.add_obstacle(ground_plane)
articulation_controller = my_denso.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
target_end_effector_position=observations[target_name]["position"],
target_end_effector_orientation=observations[target_name]["orientation"],
)
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 2,270 | Python | 38.842105 | 106 | 0.748899 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/gripper_control.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Could not find Isaac Sim assets folder")
asset_path = assets_root_path + "/Isaac/Robots/Denso/cobotta_pro_900.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/cobotta")
# define the gripper
gripper = ParallelGripper(
# We chose the following values while inspecting the articulation
end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link",
joint_prim_names=["finger_joint", "right_outer_knuckle_joint"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.628, -0.628]),
action_deltas=np.array([-0.628, 0.628]),
)
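# action_deltas are the joint increments applied per step by the gripper's open/close
# actions; the signs are opposite because the two knuckle joints mirror each other.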
# define the manipulator
my_denso = my_world.scene.add(
SingleManipulator(
prim_path="/World/cobotta",
name="cobotta_robot",
end_effector_prim_name="onrobot_rg6_base_link",
gripper=gripper,
)
)
# set the default positions of the other gripper joints to be opened so
# that they are out of the way of the joints we want to control when gripping an object, for instance
joints_default_positions = np.zeros(12)
joints_default_positions[7] = 0.628
joints_default_positions[8] = 0.628
my_denso.set_joints_default_state(positions=joints_default_positions)
my_world.scene.add_default_ground_plane()
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
gripper_positions = my_denso.gripper.get_joint_positions()
if i < 500:
# close the gripper slowly
my_denso.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] + 0.1, gripper_positions[1] - 0.1])
)
if i > 500:
# open the gripper slowly
my_denso.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] - 0.1, gripper_positions[1] + 0.1])
)
if i == 1000:
i = 0
if args.test is True:
break
simulation_app.close()
| 3,250 | Python | 37.247058 | 108 | 0.707692 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/rmpflow/robot_descriptor.yaml | # The robot description defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF.
# Lula will only use these joints to control the robot position.
cspace:
- joint_1
- joint_2
- joint_3
- joint_4
- joint_5
- joint_6
default_q: [0.0, 0.3, 1.2, 0.0, 0.0, 0.0]
# Most dimensions of the cspace have a direct corresponding element
# in the URDF. This list of rules defines how unspecified coordinates
# should be extracted or how values in the URDF should be overwritten.
cspace_to_urdf_rules:
- {name: finger_joint, rule: fixed, value: 0.0}
- {name: left_inner_knuckle_joint, rule: fixed, value: 0.0}
- {name: right_inner_knuckle_joint, rule: fixed, value: 0.0}
- {name: right_outer_knuckle_joint, rule: fixed, value: 0.0}
- {name: left_inner_finger_joint, rule: fixed, value: 0.0}
- {name: right_inner_finger_joint, rule: fixed, value: 0.0}
# Lula uses collision spheres to define the robot geometry in order to avoid
# collisions with external obstacles. If no spheres are specified, Lula will
# not be able to avoid obstacles.
collision_spheres:
- J1:
- "center": [0.0, 0.0, 0.1]
"radius": 0.08
- "center": [0.0, 0.0, 0.15]
"radius": 0.08
- "center": [0.0, 0.0, 0.2]
"radius": 0.08
- J2:
- "center": [0.0, 0.08, 0.0]
"radius": 0.08
- "center": [0.0, 0.174, 0.0]
"radius": 0.08
- "center": [-0.0, 0.186, 0.05]
"radius": 0.065
- "center": [0.0, 0.175, 0.1]
"radius": 0.065
- "center": [-0.0, 0.18, 0.15]
"radius": 0.065
- "center": [0.0, 0.175, 0.2]
"radius": 0.065
- "center": [0.0, 0.175, 0.25]
"radius": 0.065
- "center": [0.0, 0.175, 0.3]
"radius": 0.065
- "center": [0.0, 0.175, 0.35]
"radius": 0.065
- "center": [0.0, 0.175, 0.4]
"radius": 0.065
- "center": [0.0, 0.175, 0.45]
"radius": 0.065
- "center": [0.0, 0.175, 0.5]
"radius": 0.065
- "center": [-0.002, 0.1, 0.507]
"radius": 0.07
- J3:
- "center": [0.0, 0.025, 0.0]
"radius": 0.065
- "center": [0.0, -0.025, 0.0]
"radius": 0.065
- "center": [0.0, -0.025, 0.05]
"radius": 0.065
- "center": [0.0, -0.025, 0.1]
"radius": 0.065
- "center": [0.0, -0.025, 0.15]
"radius": 0.06
- "center": [0.0, -0.025, 0.2]
"radius": 0.06
- "center": [0.0, -0.025, 0.25]
"radius": 0.06
- "center": [0.0, -0.025, 0.3]
"radius": 0.06
- "center": [0.0, -0.025, 0.35]
"radius": 0.055
- "center": [0.0, -0.025, 0.4]
"radius": 0.055
- J5:
- "center": [0.0, 0.05, 0.0]
"radius": 0.055
- "center": [0.0, 0.1, 0.0]
"radius": 0.055
- J6:
- "center": [0.0, 0.0, -0.05]
"radius": 0.05
- "center": [0.0, 0.0, -0.1]
"radius": 0.05
- "center": [0.0, 0.0, -0.15]
"radius": 0.05
- "center": [0.0, 0.0, 0.04]
"radius": 0.035
- "center": [0.0, 0.0, 0.08]
"radius": 0.035
- "center": [0.0, 0.0, 0.12]
"radius": 0.035
- right_inner_knuckle:
- "center": [0.0, 0.0, 0.0]
"radius": 0.02
- "center": [0.0, -0.03, 0.025]
"radius": 0.02
- "center": [0.0, -0.05, 0.05]
"radius": 0.02
- right_inner_finger:
- "center": [0.0, 0.02, 0.0]
"radius": 0.015
- "center": [0.0, 0.02, 0.015]
"radius": 0.015
- "center": [0.0, 0.02, 0.03]
"radius": 0.015
- "center": [0.0, 0.025, 0.04]
"radius": 0.01
- left_inner_knuckle:
- "center": [0.0, 0.0, 0.0]
"radius": 0.02
- "center": [0.0, -0.03, 0.025]
"radius": 0.02
- "center": [0.0, -0.05, 0.05]
"radius": 0.02
- left_inner_finger:
- "center": [0.0, 0.02, 0.0]
"radius": 0.015
- "center": [0.0, 0.02, 0.015]
"radius": 0.015
- "center": [0.0, 0.02, 0.03]
"radius": 0.015
- "center": [0.0, 0.025, 0.04]
"radius": 0.01
| 4,073 | YAML | 27.690141 | 80 | 0.51289 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/rmpflow/denso_rmpflow_common.yaml | # Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Artificially limit the robot joints. For example:
# A joint with range +-pi would be limited to +-(pi-.01)
joint_limit_buffers: [.01, .01, .01, .01, .01, .01]
# RMPflow has many modifiable parameters, but these serve as a great start.
# Most parameters will not need to be modified
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 1.
cspace_trajectory_rmp:
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp:
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp:
max_velocity: 1.
velocity_damping_region: .3
damping_gain: 1000.0
metric_weight: 100.
target_rmp:
accel_p_gain: 30.
accel_d_gain: 85.
accel_norm_eps: .075
metric_alpha_length_scale: .05
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
proximity_metric_boost_length_scale: .02
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
axis_target_rmp:
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
proximity_metric_boost_length_scale: .08
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp:
damping_gain: 50.
damping_std_dev: .04
damping_robustness_eps: 1e-2
damping_velocity_gate_length_scale: .01
repulsion_gain: 800.
repulsion_std_dev: .01
metric_modulation_radius: .5
metric_scalar: 10000.
metric_exploder_std_dev: .02
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 100.
canonical_resolve:
max_acceleration_norm: 50.
projection_tolerance: .01
verbose: false
# body_cylinders are used to promote self-collision avoidance between the robot and its base
# The example below defines the robot base to be a capsule defined by the absolute coordinates pt1 and pt2.
# The semantic name provided for each body_cylinder does not need to be present in the robot URDF.
body_cylinders:
- name: base
pt1: [0,0,.12]
pt2: [0,0,0.]
radius: .08
- name: second_link
pt1: [0,0,.12]
pt2: [0,0,.12]
radius: .16
# body_collision_controllers defines spheres located at specified frames in the robot URDF
# These spheres will not be allowed to collide with the capsules enumerated under body_cylinders
# By design, most frames in industrial robots are kinematically unable to collide with the robot base.
# It is often only necessary to define body_collision_controllers near the end effector
body_collision_controllers:
- name: J5
radius: .05
- name: J6
radius: .05
- name: right_inner_finger
radius: .02
- name: left_inner_finger
radius: .02
- name: right_inner_knuckle
radius: .02
- name: left_inner_knuckle
radius: .02
| 3,947 | YAML | 32.743589 | 107 | 0.64682 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/tasks/pick_place.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import Optional
import numpy as np
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
class PickPlace(tasks.PickPlace):
def __init__(
self,
name: str = "denso_pick_place",
cube_initial_position: Optional[np.ndarray] = None,
cube_initial_orientation: Optional[np.ndarray] = None,
target_position: Optional[np.ndarray] = None,
offset: Optional[np.ndarray] = None,
) -> None:
tasks.PickPlace.__init__(
self,
name=name,
cube_initial_position=cube_initial_position,
cube_initial_orientation=cube_initial_orientation,
target_position=target_position,
cube_size=np.array([0.0515, 0.0515, 0.0515]),
offset=offset,
)
return
def set_robot(self) -> SingleManipulator:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Could not find Isaac Sim assets folder")
asset_path = assets_root_path + "/Isaac/Robots/Denso/cobotta_pro_900.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/cobotta")
gripper = ParallelGripper(
end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link",
joint_prim_names=["finger_joint", "right_outer_knuckle_joint"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.628, -0.628]),
action_deltas=np.array([-0.2, 0.2]),
)
manipulator = SingleManipulator(
prim_path="/World/cobotta",
name="cobotta_robot",
end_effector_prim_name="onrobot_rg6_base_link",
gripper=gripper,
)
joints_default_positions = np.zeros(12)
joints_default_positions[7] = 0.628
joints_default_positions[8] = 0.628
manipulator.set_joints_default_state(positions=joints_default_positions)
return manipulator
| 2,648 | Python | 39.753846 | 81 | 0.66352 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/tasks/follow_target.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import Optional
import numpy as np
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
# Inheriting from the base class FollowTarget
class FollowTarget(tasks.FollowTarget):
def __init__(
self,
name: str = "denso_follow_target",
target_prim_path: Optional[str] = None,
target_name: Optional[str] = None,
target_position: Optional[np.ndarray] = None,
target_orientation: Optional[np.ndarray] = None,
offset: Optional[np.ndarray] = None,
) -> None:
tasks.FollowTarget.__init__(
self,
name=name,
target_prim_path=target_prim_path,
target_name=target_name,
target_position=target_position,
target_orientation=target_orientation,
offset=offset,
)
return
def set_robot(self) -> SingleManipulator:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Could not find Isaac Sim assets folder")
asset_path = assets_root_path + "/Isaac/Robots/Denso/cobotta_pro_900.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/cobotta")
gripper = ParallelGripper(
end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link",
joint_prim_names=["finger_joint", "right_outer_knuckle_joint"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.628, -0.628]),
action_deltas=np.array([-0.628, 0.628]),
)
manipulator = SingleManipulator(
prim_path="/World/cobotta",
name="cobotta_robot",
end_effector_prim_name="onrobot_rg6_base_link",
gripper=gripper,
)
joints_default_positions = np.zeros(12)
joints_default_positions[7] = 0.628
joints_default_positions[8] = 0.628
manipulator.set_joints_default_state(positions=joints_default_positions)
return manipulator
| 2,693 | Python | 39.208955 | 81 | 0.664686 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/controllers/pick_place.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.manipulators.controllers as manipulators_controllers
from omni.isaac.core.articulations import Articulation
from omni.isaac.manipulators.grippers import ParallelGripper
from .rmpflow import RMPFlowController
class PickPlaceController(manipulators_controllers.PickPlaceController):
def __init__(self, name: str, gripper: ParallelGripper, robot_articulation: Articulation, events_dt=None) -> None:
if events_dt is None:
events_dt = [0.005, 0.002, 1, 0.05, 0.0008, 0.005, 0.0008, 0.1, 0.0008, 0.008]
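        # events_dt sets the time step used to advance each phase of the controller's
        # internal pick-and-place state machine; larger values make a phase finish in
        # fewer forward() calls.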
manipulators_controllers.PickPlaceController.__init__(
self,
name=name,
cspace_controller=RMPFlowController(
name=name + "_cspace_controller", robot_articulation=robot_articulation
),
gripper=gripper,
events_dt=events_dt,
end_effector_initial_height=0.6,
)
return
| 1,362 | Python | 41.593749 | 118 | 0.713656 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/controllers/rmpflow.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import omni.isaac.motion_generation as mg
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.extensions import get_extension_path_from_name
class RMPFlowController(mg.MotionPolicyController):
def __init__(self, name: str, robot_articulation: Articulation, physics_dt: float = 1.0 / 60.0) -> None:
self.rmpflow = mg.lula.motion_policies.RmpFlow(
robot_description_path=os.path.join(os.path.dirname(__file__), "../rmpflow/robot_descriptor.yaml"),
rmpflow_config_path=os.path.join(os.path.dirname(__file__), "../rmpflow/denso_rmpflow_common.yaml"),
urdf_path=os.path.join(os.path.dirname(__file__), "../rmpflow/cobotta_pro_900.urdf"),
end_effector_frame_name="gripper_center",
maximum_substep_size=0.00334,
)
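        # maximum_substep_size bounds RMPflow's internal integration step (in seconds);
        # smaller values yield smoother, more stable motion at additional compute cost.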
self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmpflow, physics_dt)
mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp)
(
self._default_position,
self._default_orientation,
) = self._articulation_motion_policy._robot_articulation.get_world_pose()
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
return
def reset(self):
mg.MotionPolicyController.reset(self)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
| 2,038 | Python | 45.340908 | 112 | 0.705103 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import matplotlib.pyplot as plt
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
my_world = World(stage_units_in_meters=1.0)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_2",
name="cube_1",
position=np.array([5.0, 3, 1.0]),
scale=np.array([0.6, 0.5, 0.2]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
cube_3 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_3",
name="cube_2",
position=np.array([-5, 1, 3.0]),
scale=np.array([0.1, 0.1, 0.1]),
size=1.0,
color=np.array([0, 0, 255]),
linear_velocity=np.array([0, 0, 0.4]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 25.0]),
frequency=20,
resolution=(256, 256),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
my_world.scene.add_default_ground_plane()
my_world.reset()
camera.initialize()
i = 0
camera.add_motion_vectors_to_frame()
while simulation_app.is_running():
my_world.step(render=True)
print(camera.get_current_frame())
if i == 100:
points_2d = camera.get_image_coords_from_world_points(
np.array([cube_3.get_world_pose()[0], cube_2.get_world_pose()[0]])
)
points_3d = camera.get_world_points_from_image_coords(points_2d, np.array([24.94, 24.9]))
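        # The second argument gives per-point depths (distance from the camera along the
        # view axis); ~24.9 m matches the cubes once settled on the ground, since the
        # camera sits at z = 25 m.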
print(points_2d)
print(points_3d)
imgplot = plt.imshow(camera.get_rgba()[:, :, :3])
plt.show()
print(camera.get_current_frame()["motion_vectors"])
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
simulation_app.close()
| 2,399 | Python | 29 | 97 | 0.64777 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/imu_sensor.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.sensor import IMUSensor
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Carter/nova_carter_sensors.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Carter")
my_carter = my_world.scene.add(
Articulation(prim_path="/World/Carter", name="my_carter", position=np.array([0, 0.0, 0.5]))
)
wheel_dof_names = ["joint_wheel_left", "joint_wheel_right"]
my_controller = DifferentialController(name="simple_control", wheel_radius=0.04295, wheel_base=0.4132)
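# DifferentialController converts a [linear velocity (m/s), angular velocity (rad/s)]
# command into left/right wheel joint velocities using the wheel radius and wheel base.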
imu_sensor = my_world.scene.add(
IMUSensor(
prim_path="/World/Carter/caster_wheel_left/imu_sensor",
name="imu",
frequency=60,
translation=np.array([0, 0, 0]),
)
)
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
wheel_dof_indices = [my_carter.get_dof_index(wheel_dof_names[i]) for i in range(len(wheel_dof_names))]
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
print(imu_sensor.get_current_frame())
actions = ArticulationAction()
if i >= 0 and i < 1000:
# forward
actions = my_controller.forward(command=[0.05, 0])
elif i >= 1000 and i < 1265:
# rotate
actions = my_controller.forward(command=[0.0, np.pi / 12])
elif i >= 1265 and i < 2000:
# forward
actions = my_controller.forward(command=[0.05, 0])
elif i == 2000:
i = 0
i += 1
joint_actions = ArticulationAction()
joint_actions.joint_velocities = np.zeros(my_carter.num_dof)
if actions.joint_velocities is not None:
for j in range(len(wheel_dof_indices)):
joint_actions.joint_velocities[wheel_dof_indices[j]] = actions.joint_velocities[j]
my_carter.apply_action(joint_actions)
simulation_app.close()
| 3,133 | Python | 35.022988 | 110 | 0.687201 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/effort_sensor.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# In this example, please drag the cube along the arm and see how the effort measurement from the effort sensor changes
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.sensor.scripts.effort_sensor import EffortSensor
from pxr import UsdPhysics
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0, physics_dt=1.0 / 60, rendering_dt=1.0 / 60)
my_world.scene.add_default_ground_plane(z_position=-1)
asset_path = assets_root_path + "/Isaac/Robots/Simple/simple_articulation.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/Articulation")
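# grab the arm's revolute joint and set its rotation axis to Y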
arm_joint = "/Articulation/Arm/RevoluteJoint"
prim = get_prim_at_path(arm_joint)
joint = UsdPhysics.RevoluteJoint(prim)
joint.CreateAxisAttr("Y")
DynamicCuboid(
prim_path="/World/Cube",
name="cube_1",
position=np.array([1.5, 0, 0.2]),
color=np.array([255, 0, 0]),
size=0.1,
mass=1,
)
my_world.reset()
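# the effort sensor reports the measured joint effort; each reading carries a timestamp, value, and validity flag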
effort_sensor = EffortSensor(prim_path=arm_joint)
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
reading = effort_sensor.get_sensor_reading()
print(f"Sensor Time: {reading.time} Value: {reading.value} Validity: {reading.is_valid}")
if my_world.current_time_step_index == 0:
my_world.reset()
simulation_app.close()
| 2,253 | Python | 33.151515 | 119 | 0.743009 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/contact_sensor.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.sensor import ContactSensor
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/Ant/ant.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Ant")
ant = my_world.scene.add(Articulation(prim_path="/World/Ant/torso", name="ant", translation=np.array([0, 0, 1.5])))
ant_foot_prim_names = ["right_back_foot", "left_back_foot", "front_right_foot", "front_left_foot"]
translations = np.array(
[[0.38202, -0.40354, -0.0887], [-0.4, -0.40354, -0.0887], [-0.4, 0.4, -0.0887], [0.4, 0.4, -0.0887]]
)
ant_sensors = []
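# attach a contact sensor to each of the four feet, offset to sit at the foot geometry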
for i in range(4):
ant_sensors.append(
my_world.scene.add(
ContactSensor(
prim_path="/World/Ant/" + ant_foot_prim_names[i] + "/contact_sensor",
name="ant_contact_sensor_{}".format(i),
min_threshold=0,
max_threshold=10000000,
radius=0.1,
translation=translations[i],
)
)
)
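# also stream raw per-contact data (positions, normals, impulses) in the first sensor's frames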
ant_sensors[0].add_raw_contact_data_to_frame()
my_world.reset()
i = 0
while simulation_app.is_running():
    my_world.step(render=True)
    if my_world.is_playing():
        print(ant_sensors[0].get_current_frame())
        if my_world.current_time_step_index == 0:
            my_world.reset()
        i += 1
        # when --test is passed, exit after a short fixed number of steps
        if args.test and i > 100:
            break
simulation_app.close()
| 2,504 | Python | 32.4 | 115 | 0.686102 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/rotating_lidar_rtx.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.sensor import LidarRtx
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Carter/nova_carter_sensors.usd"
my_carter = my_world.scene.add(
WheeledRobot(
prim_path="/World/Carter",
name="my_carter",
wheel_dof_names=["joint_wheel_left", "joint_wheel_right"],
create_robot=True,
usd_path=asset_path,
position=np.array([0, 0.0, 0.5]),
)
)
# optionally pass config_file_name="Example_Rotary" to LidarRtx to select a sensor configuration
my_lidar = my_world.scene.add(
LidarRtx(prim_path="/World/Carter/chassis_link/front_hawk/right/lidar_rig/lidar", name="lidar")
)
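# two long thin walls on either side of the robot give the lidar surfaces to hit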
cube_1 = my_world.scene.add(
DynamicCuboid(prim_path="/World/cube", name="cube_1", position=np.array([2, 2, 2.5]), scale=np.array([20, 0.2, 5]))
)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/World/cube_2", name="cube_2", position=np.array([2, -2, 2.5]), scale=np.array([20, 0.2, 5])
)
)
my_controller = DifferentialController(name="simple_control", wheel_radius=0.04295, wheel_base=0.4132)
my_world.reset()
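# include range and point-cloud data in each lidar frame and draw the point cloud in the viewport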
my_lidar.add_range_data_to_frame()
my_lidar.add_point_cloud_data_to_frame()
my_lidar.enable_visualization()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
        if 0 <= i < 1000:
            print(my_lidar.get_current_frame())
            # drive forward at 0.05 m/s
            my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
        elif 1000 <= i < 1265:
            # rotate in place at pi/12 rad/s
            my_carter.apply_wheel_actions(my_controller.forward(command=[0.0, np.pi / 12]))
        elif 1265 <= i < 2000:
            # drive forward again
            my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
        elif i == 2000:
            # restart the drive cycle
            i = 0
i += 1
        if args.test and i > 100:
break
my_world.stop()
simulation_app.update()
simulation_app.close()
| 3,309 | Python | 33.123711 | 119 | 0.684195 |