file_path
stringlengths
20
207
content
stringlengths
5
3.85M
size
int64
5
3.85M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.26
0.93
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments with velocity-tracking commands. These environments are based on the `legged_gym` environments provided by Rudin et al. Reference: https://github.com/leggedrobotics/legged_gym """
336
Python
24.923075
86
0.764881
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the locomotion environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .curriculums import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
370
Python
29.916664
94
0.732432
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/mdp/curriculums.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to create curriculum for the learning environment. The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable the curriculum introduced by the function. """ from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.terrains import TerrainImporter if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def terrain_levels_vel( env: RLTaskEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Curriculum based on the distance the robot walked when commanded to move at a desired velocity. This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the difficulty when the robot walks less than half of the distance required by the commanded velocity. .. note:: It is only possible to use this term with the terrain type ``generator``. For further information on different terrain types, check the :class:`omni.isaac.orbit.terrains.TerrainImporter` class. Returns: The mean terrain level for the given environment ids. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] terrain: TerrainImporter = env.scene.terrain command = env.command_manager.get_command("base_velocity") # compute the distance the robot walked distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) # robots that walked far enough progress to harder terrains move_up = distance > terrain.cfg.terrain_generator.size[0] / 2 # robots that walked less than half of their required distance go to simpler terrains move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 move_down *= ~move_up # update terrain levels terrain.update_env_origins(env_ids, move_up, move_down) # return the mean terrain level return torch.mean(terrain.terrain_levels.float())
2,376
Python
41.446428
112
0.742424
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.sensors import ContactSensor if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def feet_air_time(env: RLTaskEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> torch.Tensor: """Reward long steps taken by the feet using L2-kernel. This function rewards the agent for taking steps that are longer than a threshold. This helps ensure that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of the time for which the feet are in the air. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. """ # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids] last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids] reward = torch.sum((last_air_time - threshold) * first_contact, dim=1) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Reward long steps taken by the feet for bipeds. This function rewards the agent for taking steps up to a specified threshold and also keep one foot at a time in the air. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. """ contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids] contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids] in_contact = contact_time > 0.0 in_mode_time = torch.where(in_contact, contact_time, air_time) single_stance = torch.sum(in_contact.int(), dim=1) == 1 reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0] reward = torch.clamp(reward, max=threshold) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward
2,595
Python
43.75862
119
0.717148
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for velocity-based locomotion environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
363
Python
35.399996
94
0.763085
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from orbit.ext_template.tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.anymal import ANYMAL_D_CFG # isort: skip @configclass class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to anymal-d self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") @configclass class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing self.randomization.base_external_force_torque = None self.randomization.push_robot = None
1,608
Python
33.234042
103
0.687189
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import AnymalDRoughEnvCfg @configclass class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -5.0 self.rewards.dof_torques_l2.weight = -2.5e-5 self.rewards.feet_air_time.weight = 0.5 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing self.randomization.base_external_force_torque = None self.randomization.push_robot = None
1,382
Python
30.431817
60
0.656295
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Template-Velocity-Flat-Anymal-D-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg, }, ) gym.register( id="Template-Velocity-Flat-Anymal-D-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg, }, ) gym.register( id="Template-Velocity-Rough-Anymal-D-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg, }, ) gym.register( id="Template-Velocity-Rough-Anymal-D-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg, }, )
1,474
Python
26.830188
77
0.685889
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "anymal_d_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.005, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "anymal_d_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
1,417
Python
26.26923
58
0.645025
jonasmaximilian/orbit.test/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
168
Python
23.142854
56
0.720238
jonasmaximilian/orbit.test/docs/CHANGELOG.rst
Changelog --------- 0.1.0 (2024-01-29) ~~~~~~~~~~~~~~~~~~ Added ^^^^^ * Created an initial template for building an extension or project based on Orbit
155
reStructuredText
13.181817
81
0.593548
j3soon/omni-nerf-extension/compose.yaml
version: "3" services: nerfstudio-renderer: image: j3soon/nerfstudio-renderer build: context: nerfstudio_renderer args: - CUDA_VERSION=11.8.0 - CUDA_ARCHITECTURES=86 - OS_VERSION=22.04 - SERVER_PORT=10001 container_name: nerfstudio-renderer ports: - "10001:10001" environment: - DISPLAY=$DISPLAY volumes: - /tmp/.X11-unix:/tmp/.X11-unix - ./nerfstudio_renderer/src:/src:ro - ./assets:/workspace:ro - cache:/home/user/.cache shm_size: '6gb' deploy: resources: reservations: devices: - driver: nvidia count: 1 capabilities: [gpu] pygame-viewer: image: j3soon/pygame-viewer build: context: pygame_viewer container_name: pygame-viewer stdin_open: true tty: true network_mode: host environment: - DISPLAY=$DISPLAY volumes: - /tmp/.X11-unix:/tmp/.X11-unix - ./pygame_viewer:/src:ro isaac-sim-viewer: # Ref: https://github.com/j3soon/isaac-extended?tab=readme-ov-file#docker-container-with-display image: j3soon/isaac-sim-viewer build: context: extension container_name: isaac-sim-viewer entrypoint: [bash] stdin_open: true tty: true network_mode: host environment: - ACCEPT_EULA=Y - PRIVACY_CONSENT=Y - DISPLAY=$DISPLAY volumes: - ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw - ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw - ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw - ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw - ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw - ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw - ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw - ~/docker/isaac-sim/documents:/root/Documents:rw - /tmp/.X11-unix:/tmp/.X11-unix - ./assets:/workspace - ./extension:/src deploy: resources: reservations: devices: - driver: nvidia count: all capabilities: [gpu] volumes: cache:
2,191
YAML
27.102564
100
0.599726
j3soon/omni-nerf-extension/extension/README.md
# Extension Project Template This project was automatically generated. - `app` - It is a folder link to the location of your *Omniverse Kit* based app. - `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path). Open this folder using Visual Studio Code. It will suggest you to install few extensions that will make python experience better. Look for "omni.nerf.viewport" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately. Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.: ``` > app\omni.code.bat --ext-folder exts --enable company.hello.world ``` # App Link Setup If `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included. Run: ``` > link_app.bat ``` If successful you should see `app` folder link in the root of this repo. If multiple Omniverse apps is installed script will select recommended one. Or you can explicitly pass an app: ``` > link_app.bat --app create ``` You can also just pass a path to create link to: ``` > link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4" ``` # Sharing Your Extensions This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths. Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts` Notice `exts` is repo subfolder with extensions. More information can be found in "Git URL as Extension Search Paths" section of developers manual. To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
2,042
Markdown
37.547169
258
0.757101
j3soon/omni-nerf-extension/extension/tools/scripts/link_app.py
import argparse import json import os import sys import packmanapi import urllib3 def find_omniverse_apps(): http = urllib3.PoolManager() try: r = http.request("GET", "http://127.0.0.1:33480/components") except Exception as e: print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}") sys.exit(1) apps = {} for x in json.loads(r.data.decode("utf-8")): latest = x.get("installedVersions", {}).get("latest", "") if latest: for s in x.get("settings", []): if s.get("version", "") == latest: root = s.get("launch", {}).get("root", "") apps[x["slug"]] = (x["name"], root) break return apps def create_link(src, dst): print(f"Creating a link '{src}' -> '{dst}'") packmanapi.link(src, dst) APP_PRIORITIES = ["code", "create", "view"] if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher") parser.add_argument( "--path", help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'", required=False, ) parser.add_argument( "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False ) args = parser.parse_args() path = args.path if not path: print("Path is not specified, looking for Omniverse Apps...") apps = find_omniverse_apps() if len(apps) == 0: print( "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers." ) sys.exit(0) print("\nFound following Omniverse Apps:") for i, slug in enumerate(apps): name, root = apps[slug] print(f"{i}: {name} ({slug}) at: '{root}'") if args.app: selected_app = args.app.lower() if selected_app not in apps: choices = ", ".join(apps.keys()) print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}") sys.exit(0) else: selected_app = next((x for x in APP_PRIORITIES if x in apps), None) if not selected_app: selected_app = next(iter(apps)) print(f"\nSelected app: {selected_app}") _, path = apps[selected_app] if not os.path.exists(path): print(f"Provided path doesn't exist: {path}") else: SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__)) create_link(f"{SCRIPT_ROOT}/../../app", path) print("Success!")
2,814
Python
32.117647
133
0.562189
j3soon/omni-nerf-extension/extension/tools/packman/config.packman.xml
<config remotes="cloudfront"> <remote2 name="cloudfront"> <transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" /> </remote2> </config>
211
XML
34.333328
123
0.691943
j3soon/omni-nerf-extension/extension/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import shutil import sys import tempfile import zipfile __author__ = "hfannar" logging.basicConfig(level=logging.WARNING, format="%(message)s") logger = logging.getLogger("install_package") class TemporaryDirectory: def __init__(self): self.path = None def __enter__(self): self.path = tempfile.mkdtemp() return self.path def __exit__(self, type, value, traceback): # Remove temporary data created shutil.rmtree(self.path) def install_package(package_src_path, package_dst_path): with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir: zip_file.extractall(temp_dir) # Recursively copy (temp_dir will be automatically cleaned up on exit) try: # Recursive copy is needed because both package name and version folder could be missing in # target directory: shutil.copytree(temp_dir, package_dst_path) except OSError as exc: logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path) else: logger.info("Package successfully installed to %s" % package_dst_path) install_package(sys.argv[1], sys.argv[2])
1,844
Python
33.166666
108
0.703362
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/config/extension.toml
[package] # Semantic Versioning is used: https://semver.org/ version = "0.0.2" # Lists people or organizations that are considered the "authors" of the package. authors = ["Johnson Sun", "Yan-Bin Diau"] # The title and description fields are primarily for displaying extension info in UI title = "omni nerf viewport" description="A simple python extension example to use as a starting point for your extensions." # Path (relative to the root) or content of readme markdown file for UI. readme = "docs/README.md" # URL of the extension source repository. repository = "" # One of categories for UI. category = "Example" # Keywords for the extension keywords = ["kit", "example"] # Location of change log file in target (final) folder of extension, relative to the root. # More info on writing changelog: https://keepachangelog.com/en/1.0.0/ changelog="docs/CHANGELOG.md" # Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file). # Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image. preview_image = "data/preview.png" # Icon is shown in Extensions window, it is recommended to be square, of size 256x256. icon = "data/icon.png" # Use omni.ui to build simple UI [dependencies] "omni.kit.uiapp" = {} # Main python module this extension provides, it will be publicly available as "import omni.nerf.viewport". [[python.module]] name = "omni.nerf.viewport" [[test]] # Extra dependencies only to be used during test run dependencies = [ "omni.kit.ui_test" # UI testing extension ]
1,601
TOML
32.374999
118
0.74391
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/omni/nerf/viewport/extension.py
import platform import cv2 import numpy as np import omni.ext import omni.ui as ui import omni.usd import rpyc from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, Usd, UsdGeom # Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)` def some_public_function(x: int): print("[omni.nerf.viewport] some_public_function was called with x: ", x) return x ** x # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class OmniNerfViewportExtension(omni.ext.IExt): def __init__(self): super().__init__() self.is_python_supported: bool = platform.python_version().startswith("3.10") """The Python version must match the backend version for RPyC to work.""" self.camera_position: Gf.Vec3d = None self.camera_rotation: Gf.Vec3d = None # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): # To see the Python print output in Omniverse Code, open the `Script Editor`. # In Isaac Sim, see the startup console instead. print("[omni.nerf.viewport] omni nerf viewport startup") # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd/stage/get-current-stage.html self.usd_context = omni.usd.get_context() # Subscribe to event streams # Ref: https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/event_streams.html # Ref: https://docs.omniverse.nvidia.com/kit/docs/kit-manual/104.0/carb.events/carb.events.IEventStream.html#carb.events.IEventStream.create_subscription_to_pop_by_type # Listen to rendering events. Only triggered when the viewport is rendering is updated. # Will not be triggered when no viewport is visible on the screen. # Examples on using `get_rendering_event_stream` can be found by installing Isaac Sim # and searching for `get_rendering_event_stream` under `~/.local/share/ov/pkg/isaac_sim-2023.1.1`. self.rendering_event_stream = self.usd_context.get_rendering_event_stream() self.rendering_event_delegate = self.rendering_event_stream.create_subscription_to_pop( self._on_rendering_event, name="NeRF Viewport Update" ) # TODO: Consider subscribing to update events # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/events.html#subscribe-to-update-events # Allocate memory self.rgba_w, self.rgba_h = 1280, 720 # Follow default camera resolution 1280x720 self.rgba = np.ones((self.rgba_h, self.rgba_w, 4), dtype=np.uint8) * 128 """RGBA image buffer. The shape is (H, W, 4), following the NumPy convention.""" self.rgba[:,:,3] = 255 # Init RPyC connection if self.is_python_supported: self.init_rpyc() # Build UI self.build_ui() def init_rpyc(self): # TODO: Make the following configurable host = 'localhost' port = 10001 model_config_path = '/workspace/outputs/poster/nerfacto/DATE_TIME/config.yml' model_checkpoint_path = '/workspace/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt' device = 'cuda' self.rpyc_conn = rpyc.classic.connect(host, port) self.rpyc_conn.execute('from nerfstudio_renderer import NerfStudioRenderQueue') self.rpyc_conn.execute('from pathlib import Path') self.rpyc_conn.execute('import torch') self.rpyc_conn.execute(f'rq = NerfStudioRenderQueue(model_config_path=Path("{model_config_path}"), checkpoint_path="{model_checkpoint_path}", device=torch.device("{device}"))') def build_ui(self): """Build the UI. Should be called upon startup.""" # Please refer to the `Omni::UI Doc` tab in Omniverse Code for efficient development. # Ref: https://youtu.be/j1Pwi1KRkhk # Ref: https://github.com/NVIDIA-Omniverse # Ref: https://youtu.be/dNLFpVhBrGs self.ui_window = ui.Window("NeRF Viewport", width=self.rgba_w, height=self.rgba_h) with self.ui_window.frame: with ui.ZStack(): # Camera Viewport # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.kit.viewport.docs/latest/overview.html#simplest-example # Don't create a new viewport widget as below, since the viewport widget will often flicker. # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/release-notes/known-limits.html # ``` # from omni.kit.widget.viewport import ViewportWidget # self.ui_viewport_widget = ViewportWidget( # resolution = (640, 360), # width = 640, # height = 360, # ) # self.viewport_api = self.ui_viewport_widget.viewport_api # ```` # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/python-snippets/viewport/change-viewport-active-camera.html # Instead, the viewport is obtained from the active viewport in new renderings. # NeRF Viewport # Examples on using ByteImageProvider can be found by installing Isaac Sim # and searching for `set_bytes_data` under `~/.local/share/ov/pkg/isaac_sim-2023.1.1`. # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ImageWithProvider.html self.ui_nerf_provider = ui.ByteImageProvider() # TODO: Potentially optimize with `set_bytes_data_from_gpu` self.ui_nerf_img = ui.ImageWithProvider( self.ui_nerf_provider, width=ui.Percent(100), height=ui.Percent(100), ) # TODO: Larger image size? with ui.VStack(height=0): self.ui_lbl_py = ui.Label("(To Be Updated)") state = "supported" if platform.python_version().startswith("3.10") else "NOT supported" self.ui_lbl_py.text = f"Python {platform.python_version()} is {state}" # UI for setting the NeRF mesh # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/scatter_tool.html with ui.HStack(): self.ui_lbl_mesh = ui.Label("NeRF Mesh", width=65) # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/ui/widgets/stringfield.html self._mesh_prim_model = ui.SimpleStringModel() ui.StringField(model=self._mesh_prim_model) ui.Button( " S ", width=0, height=0, clicked_fn=self._on_btn_set_click, tooltip="Get From Selection", ) ui.Button("Reset Camera", width=20, clicked_fn=self.on_btn_reset_click) self.update_ui() def update_ui(self): print("[omni.nerf.viewport] Updating UI") # Ref: https://forums.developer.nvidia.com/t/refresh-window-ui/221200 self.ui_window.frame.rebuild() def _on_btn_set_click(self): self._mesh_prim_model.as_string = self._get_selected_prim_path() def on_btn_reset_click(self): # TODO: Allow resetting the camera to a specific position # Below doesn't seem to work # stage: Usd.Stage = self.usd_context.get_stage() # prim: Usd.Prim = stage.GetPrimAtPath('/OmniverseKit_Persp') # # `UsdGeom.Xformable(prim).SetTranslateOp` doesn't seem to exist # prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(0, 0, 0.1722)) # prim.GetAttribute("xformOp:rotateXYZ").Set(Gf.Vec3d(0, -152, 0)) # print("translateOp", prim.GetAttribute("xformOp:translate").Get()) # print("rotateXYZOp", prim.GetAttribute("xformOp:rotateXYZ").Get()) print("[omni.nerf.viewport] (TODO) Reset Camera") def _get_selected_prim_path(self): """Get the selected prim. Return '' if no prim is selected.""" # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/object_info.html#step-5-get-the-selected-prims-data selected_prim_paths = self.usd_context.get_selection().get_selected_prim_paths() if not selected_prim_paths: return '' return selected_prim_paths[0] def _on_rendering_event(self, event): """Called by rendering_event_stream.""" # No need to check event type, since there is only one event type: `NEW_FRAME`. if self.is_python_supported and self._mesh_prim_model.as_string != '': viewport_api = get_active_viewport() # We chose to use Viewport instead of Isaac Sim's Camera Sensor to avoid dependency on Isaac Sim. # We want the extension to work with any Omniverse app, not just Isaac Sim. # Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/features/sensors_simulation/isaac_sim_sensors_camera.html camera_to_world_mat: Gf.Matrix4d = viewport_api.transform object_to_world_mat: Gf.Matrix4d = Gf.Matrix4d() if self._mesh_prim_model.as_string != '': stage: Usd.Stage = self.usd_context.get_stage() selected_prim: Usd.Prim = stage.GetPrimAtPath(self._mesh_prim_model.as_string) selected_xform: UsdGeom.Xformable = UsdGeom.Xformable(selected_prim) object_to_world_mat = selected_xform.GetLocalTransformation() # In USD, pre-multiplication is used for matrices. # Ref: https://openusd.org/dev/api/usd_geom_page_front.html#UsdGeom_LinAlgBasics world_to_object_mat: Gf.Matrix4d = object_to_world_mat.GetInverse() camera_to_object_mat: Gf.Matrix4d = camera_to_world_mat * world_to_object_mat camera_to_object_pos: Gf.Vec3d = camera_to_object_mat.ExtractTranslation() # I suspect that the `Decompose` function will extract the rotation in the order of the input axes. # So for EulerXYZ, we want to first extract and remove the Z rotation, then Y, then X. # Then we reverse the order to get the XYZ rotation. # I haven't spend time looking into the source code to confirm this hypothesis though. # Ref: https://forums.developer.nvidia.com/t/how-to-get-euler-angle-of-the-prim-through-script-with-script-editor/269704/3 # Ref: https://github.com/PixarAnimationStudios/OpenUSD/blob/2864f3d04f396432f22ec5d6928fc37d34bb4c90/pxr/base/gf/rotation.cpp#L108 # must remove scale before rotation camera_to_object_mat.Orthonormalize() camera_to_object_rot: Gf.Vec3d = Gf.Vec3d(*reversed(camera_to_object_mat.ExtractRotation().Decompose(*reversed(Gf.Matrix3d())))) # TODO: Consider using viewport camera projection matrix `viewport_api.projection`? # Not same as below due to the potential difference in rotation matrix representation # ``` # from scipy.spatial.transform import Rotation as R # camera_rotation: Gf.Vec3d = R.from_matrix(camera_mat.ExtractRotationMatrix()).as_euler('xyz', degrees=True) # in degrees # ``` # TODO: Consider object transform (if it is moved or rotated) # No need to transform from Isaac Sim space to Nerfstudio space, since they are both in the same space. # Ref: https://github.com/j3soon/coordinate-system-conventions if camera_to_object_pos != self.camera_position or camera_to_object_rot != self.camera_rotation: self.camera_position = camera_to_object_pos self.camera_rotation = camera_to_object_rot print("[omni.nerf.viewport] New camera position:", camera_to_object_pos) print("[omni.nerf.viewport] New camera rotation:", camera_to_object_rot) self.rpyc_conn.execute(f'rq.update_camera({list(camera_to_object_pos)}, {list(np.deg2rad(camera_to_object_rot))})') image = self.rpyc_conn.eval('rq.get_rgb_image()') if image is None: return print("[omni.nerf.viewport] NeRF viewport updated") image = np.array(image) # received with shape (H*, W*, 3) image = cv2.resize(image, (self.rgba_w, self.rgba_h), interpolation=cv2.INTER_LINEAR) # resize to (H, W, 3) self.rgba[:,:,:3] = image * 255 else: # If python version is not supported, render the dummy image. self.rgba[:,:,:3] = (self.rgba[:,:,:3] + np.ones((self.rgba_h, self.rgba_w, 3), dtype=np.uint8)) % 256 self.ui_nerf_provider.set_bytes_data(self.rgba.flatten().tolist(), (self.rgba_w, self.rgba_h)) def on_shutdown(self): print("[omni.nerf.viewport] omni nerf viewport shutdown") if self.is_python_supported: self.rpyc_conn.execute('del rq') def destroy(self): # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/object_info.html#step-3-4-use-usdcontext-to-listen-for-selection-changes self.stage_event_stream = None self.stage_event_delegate.unsubscribe()
13,852
Python
59.49345
184
0.632472
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/omni/nerf/viewport/__init__.py
from .extension import *
25
Python
11.999994
24
0.76
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/omni/nerf/viewport/tests/__init__.py
from .test_hello_world import *
31
Python
30.999969
31
0.774194
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/omni/nerf/viewport/tests/test_hello_world.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import omni.kit.test # Extnsion for writing UI tests (simulate UI interaction) import omni.kit.ui_test as ui_test # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.nerf.viewport # Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class Test(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): pass # After running each test async def tearDown(self): pass # Actual test, notice it is "async" function, so "await" can be used if needed async def test_hello_public_function(self): result = omni.nerf.viewport.some_public_function(4) self.assertEqual(result, 256) async def test_window_button(self): # Find a label in our window label = ui_test.find("My Window//Frame/**/Label[*]") # Find buttons in our window add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'") reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'") # Click reset button await reset_button.click() self.assertEqual(label.widget.text, "empty") await add_button.click() self.assertEqual(label.widget.text, "count: 1") await add_button.click() self.assertEqual(label.widget.text, "count: 2")
1,672
Python
34.595744
142
0.681818
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/docs/CHANGELOG.md
# Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [0.0.1] - 2021-04-26 - Initial version of extension UI template with a window
178
Markdown
18.888887
80
0.702247
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/docs/README.md
# Python Extension Example [omni.nerf.viewport] This is an example of pure python Kit extension. It is intended to be copied and serve as a template to create new extensions.
177
Markdown
34.599993
126
0.785311
j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/docs/index.rst
omni.nerf.viewport ############################# Example of Python only extension .. toctree:: :maxdepth: 1 README CHANGELOG .. automodule::"omni.nerf.viewport" :platform: Windows-x86_64, Linux-x86_64 :members: :undoc-members: :show-inheritance: :imported-members: :exclude-members: contextmanager
337
reStructuredText
15.095237
43
0.617211
j3soon/omni-nerf-extension/nerfstudio_renderer/README.md
# Nerfstudio Renderer The following instructions assume you are in the `/nerfstudio_renderer` directory under the git repository root: ```sh git clone https://github.com/j3soon/omni-nerf-extension.git cd omni-nerf-extension cd nerfstudio_renderer ``` ## Launching NeRF Studio > You can skip this section if you want to download the example poster model checkpoint and mesh. Follow the [installation](https://docs.nerf.studio/quickstart/installation.html#use-docker-image) guide, specifically: ```sh mkdir data docker run --rm -it --gpus all \ -u $(id -u) \ -v $(pwd)/data:/workspace/ \ -v $HOME/.cache/:/home/user/.cache/ \ -p 7007:7007 \ --shm-size=12gb \ dromni/nerfstudio:0.3.4 ``` The following subsections assume you have launched the container and using its interactive shell. ### Training a NeRF Model Follow the [training model](https://docs.nerf.studio/quickstart/first_nerf.html) guide, specifically: ```sh # in the container # Download some test data: ns-download-data nerfstudio --capture-name=poster # Train model without normal prediction (used in the provided example poster assets for simplicity) ns-train nerfacto --data data/nerfstudio/poster # or train model with normal prediction (preferred) ns-train nerfacto --data data/nerfstudio/poster --pipeline.model.predict-normals True # wait for training to finish ``` > If you have trouble downloading the dataset, please refer to [this pull request](https://github.com/nerfstudio-project/nerfstudio/pull/3045). ### View the NeRF Model ```sh # in the container # change the DATE_TIME to the actual value DATE_TIME=2023-12-30_111633 # View model ns-viewer --load-config outputs/poster/nerfacto/$DATE_TIME/config.yml # open the printed URL ``` ### Exporting a Mesh Follow the [export geometry](https://docs.nerf.studio/quickstart/export_geometry.html) guide, specifically: ```sh # in the container # change the DATE_TIME to the actual value DATE_TIME=2023-12-30_111633 # Export mesh # center is (-0.2, 0.1, -0.2) ns-export tsdf --load-config outputs/poster/nerfacto/$DATE_TIME/config.yml --output-dir exports/mesh/ --target-num-faces 50000 --num-pixels-per-side 2048 --use-bounding-box True --bounding-box-min -0.55 -0.25 -0.55 --bounding-box-max 0.15 0.45 0.15 ``` > Or use [Poisson Surface Reconstruction](https://docs.nerf.studio/quickstart/export_geometry.html#poisson-surface-reconstruction) instead, if the network supports predicting normals. ### View the Mesh Open the mesh (`mesh.obj`) in Blender or any other 3D viewer. ## Download Model Checkpoint and Mesh > You can skip this section if you want to train the example poster model checkpoint and extract mesh by yourself. (TODO: Add link to a download a pre-trained model in release) ## Rename the Model Directory and the Checkpoint File Rename the timestamp and checkpoint files to the same name as the placeholder for simplicity: ```sh # change the DATE_TIME to the name of the placeholder DATE_TIME=2023-12-30_111633 CHECKPOINT_NAME=step-000029999 cp -r ./data/outputs/poster/nerfacto/$DATE_TIME ./data/outputs/poster/nerfacto/DATE_TIME mv ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/$CHECKPOINT_NAME.ckpt ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt ``` You can check if the renaming succeeded with the following commands: ```sh ls ./data/outputs/poster/nerfacto/DATE_TIME/config.yml ls ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt ``` ## Running with Docker Compose Run the PyGame test window with the following commands: ```sh xhost +local:docker docker compose up # in new shell docker exec -it pygame-window /workspace/run.sh # the initial execution might result in a delay due to the download of the pre-trained torch model. # please re-run the script if the script times out. ``` > There seems to be an issue in `nerfstudio-renderer` that uses old code > upon restart. I'm not aware of a reliable fix for this issue yet. > However, running `docker compose down && docker rm $(docker ps -aq)` > seems to fix the issue (`docker compose down` isn't enough). I believe it is due to the `pip install` in docker entrypoint. Please keep this in > mind when modifying the renderer code. For development purposes, you can run the following command to run the PyGame test window directly in the `nerfstudio-renderer` container: ```sh docker compose build xhost +local:docker docker compose up # in new shell docker exec -it nerfstudio-renderer /workspace/tests/run_local.sh ``` The `run_local.sh` script will re-copy and re-install the package before launching the PyGame window, so this method will not encounter the old code issue mentioned above. ## Running Inside Docker Alternatively, it is possible to connect to the server with [rpyc](https://github.com/tomerfiliba-org/rpyc) in the `pygame-window` container. ```python import rpyc import random import time # Make connection conn = rpyc.classic.connect('localhost', port=10001) # Imports conn.execute('import nerfstudio_renderer') conn.execute('from pathlib import Path') conn.execute('import torch') # Create a NerfStudioRenderQueue # For some reason, netref-based methods keep resulting in timeouts. conn.execute('rq = nerfstudio_renderer.NerfStudioRenderQueue(model_config_path=Path("/workspace/outputs/poster/nerfacto/DATE_TIME/config.yml"), checkpoint_path="/workspace/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt", device=torch.device("cuda"))') # Update camera pose position = [random.random() for _ in range(3)] rotation = [0., -152, 0.] conn.execute(f'rq.update_camera({position}, {rotation})') # Wait for some time... time.sleep(3) # Obtain a rendered image image = conn.eval('rq.get_rgb_image()') # Delete remote render queue conn.execute('del rq') ``` Please note that the use of rpyc does not perfectly decouple the client and server. The client must be using the same Python version as the server, otherwise, there will be compatibility issues. ## Notes - `NerfStudioRenderQueue.update_camera` can be called whenever needed. The renderer will progressively render better images serially. Each update to the camera will result in an asynchronous rendering series. - `NerfStudioRenderQueue.get_rgb_image` will always return a newly rendered image. - These two calls need not to be paired. - After a call to `NerfStudioRenderQueue.get_rgb_image`, its return value will become `None` until: 1. Another image from a *newer* camera update is completed. 2. Another image from the same camera update is completed, in higher quality than the previous ones, and no images from newer updates have been ready at that point. - **No-Way-Back Guarantee**: If an image from a newer update (say, the 10-th update) is ready at `NerfStudioRenderQueue.get_rgb_image` (even if it is never retrieved), it is guaranteed no image from the 1-st to 9-th updates will be given by future calls. - Therefore, it is safe to call `NerfStudioRenderQueue.get_rgb_image` multiple times just to check if a newer render is done between these calls. - You may not immediately get newest renders, but you will never get two renders in reversed time ordering.
7,197
Markdown
37.698925
277
0.764624
j3soon/omni-nerf-extension/nerfstudio_renderer/src/setup.py
from setuptools import setup setup( name='nerfstudio_renderer', version='0.1', packages=['nerfstudio_renderer'], install_requires=['rpyc'], )
159
Python
16.777776
37
0.666667
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/renderer.py
from collections import defaultdict from typing import Dict import torch import yaml from nerfstudio.cameras.rays import RayBundle from nerfstudio.data.scene_box import SceneBox from nerfstudio_renderer.utils import * class NerfStudioRenderer(): """ The class is responsible for giving rendered images, given the position, rotation, width, height, and fov of a camera. """ def __init__(self, model_config_path, checkpoint_path, device): """ Parameters ---------- model_config_path : Path The path to model configuration .yml file. checkpoint_path : Path or str The path to model checkpoint .ckpt file. device : torch.device Device for the model to run on. Usually CUDA or CPU. """ # Originally, `nerfstudio.utils.eval_setup` is used to load the entire Pipeline, which takes as input a TrainerConfig yml file. # During the TrainerConfig setup (`nerfstudio.configs.base_config`) process, the constructor of VanillaPipeline is called. # It will set up several components to form a complete pipeline, including a DataManager. # The DataManager (VanillaDataManager) will perform operations to obtain DataParser outputs. # During the setup process of the DataParser (NerfstudioDataParser), an assert is made, which forces the presence of training dataset. # See: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L86 # Thus, even when performing inference, training dataset is needed. # The following code is a workaround that doesn't require to set up the entire Pipeline. # It load solely the model checkpoint with regard to its TrainerConfig YAML, without needing to set up the entire Pipeline. # Note that all code below are based on the v0.3.4 tag: https://github.com/nerfstudio-project/nerfstudio/tree/v0.3.4 self.device = device # 1. Entrypoint `eval_setup` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L68 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L88 config = yaml.load(model_config_path.read_text(), Loader=yaml.Loader) # Using zero or average appearance embedding is a inference-time choice, # not a training-time choice (that would require inference-time to match such a choice). # Therefore, we simply choose to use zero appearance embedding # See Section B of the NeRF-W paper's supplementary material # Ref: https://arxiv.org/abs/2008.02268v3 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L247-L254 if config.pipeline.model.use_average_appearance_embedding: print("WARNING: Forcing zero appearance embedding, although model config specifies to use average appearance embedding.") config.pipeline.model.use_average_appearance_embedding = False # Disable predict normals # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L116 if config.pipeline.model.predict_normals: print("WARNING: Forcing not predicting normals.") config.pipeline.model.predict_normals = False # TODO: Support configuring `eval_num_rays_per_chunk` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L92-L93 # 1.1. Call to `VanillaPipelineConfig.setup`, which inherits `InstantiateConfig.setup` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L103 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L52 # 1.2. Call to `VanillaPipelineConfig._target` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L224 # 1.3. Call to `VanillaPipeline.__init__` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L224 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L251 # 1.3.1. Call to `VanillaDataManagerConfig.setup`, which inherits `InstantiateConfig.setup` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L263-L265 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54 # 1.3.2. Call to `VanillaDataManagerConfig._target` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L320 # 1.3.3. Call to `VanillaDataManager.__init__` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L320 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L378 # 1.3.4. Call to `get_dataparser_outputs` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L403 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/base_dataparser.py#L155 # 1.3.5. `_generate_dataparser_outputs` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/base_dataparser.py#L165 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L85 # Gather model-related arguments # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L256-L263 # in x,y,z order # assumes that the scene is centered at the origin aabb_scale = config.pipeline.datamanager.dataparser.scene_scale scene_box = SceneBox( aabb=torch.tensor( [[-aabb_scale, -aabb_scale, -aabb_scale], [aabb_scale, aabb_scale, aabb_scale]], dtype=torch.float32 ) ) # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L319-L322 metadata={ "depth_filenames": None, # depth filenames are only required during training "depth_unit_scale_factor": config.pipeline.datamanager.dataparser.depth_unit_scale_factor, } # 1.4. Call to `VanillaPipeline.setup` # Setting num_train_data to 0 is fine, since we are not using average appearance embedding. # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L272 num_train_data = 0 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L275 grad_scaler = None # only required during training # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L270-L276 self.model = config.pipeline.model.setup( scene_box=scene_box, num_train_data=num_train_data, metadata=metadata, device=device, grad_scaler=grad_scaler, ) # Move model to device self.model.to(device) # 2. Call to `pipeline.eval()` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L105 self.model.eval() # 3. Call to `eval_load_checkpoint` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L108 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L35 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L62 loaded_state = torch.load(checkpoint_path, map_location="cpu") # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L63 loaded_state, step = loaded_state["pipeline"], loaded_state["step"] # 4. Call to `VanillaPipeline.load_pipeline` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L63 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L423 # Alter loaded model state dict for loading and update model to step # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L430-L433 state = { (key[len("module.") :] if key.startswith("module.") else key): value for key, value in loaded_state.items() } self.model.update_to_step(step) # 5. Call to `Pipeline.load_state_dict` # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L434 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L109 # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L110-L119 is_ddp_model_state = True model_state = {} for key, value in state.items(): if key.startswith("_model."): # remove the "_model." prefix from key model_state[key[len("_model.") :]] = value # make sure that the "module." prefix comes from DDP, # rather than an attribute of the model named "module" if not key.startswith("_model.module."): is_ddp_model_state = False # Drop the embedding layer for appearance embedding that requires the number of training images, # since we are not using average appearance embedding. # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L112 model_state = { key: value for key, value in model_state.items() if 'embedding_appearance' not in key } # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L120-L122 # remove "module." prefix added by DDP if is_ddp_model_state: model_state = { key[len("module.") :]: value for key, value in model_state.items() } # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L130 self.model.load_state_dict(model_state, strict=False) # Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/models/base_model.py#L175 @torch.no_grad() def get_outputs_for_camera_ray_bundle(self, model, camera_ray_bundle: RayBundle, invalidated_fn) -> Dict[str, torch.Tensor]: """Takes in camera parameters and computes the output of the model. Args: camera_ray_bundle: ray bundle to calculate outputs over """ num_rays_per_chunk = model.config.eval_num_rays_per_chunk image_height, image_width = camera_ray_bundle.origins.shape[:2] num_rays = len(camera_ray_bundle) outputs_lists = defaultdict(list) for i in range(0, num_rays, num_rays_per_chunk): if invalidated_fn(): return None start_idx = i end_idx = i + num_rays_per_chunk ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx) outputs = model.forward(ray_bundle=ray_bundle) for output_name, output in outputs.items(): # type: ignore if not torch.is_tensor(output): # TODO: handle lists of tensors as well continue outputs_lists[output_name].append(output) outputs = {} for output_name, outputs_list in outputs_lists.items(): outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore return outputs def render_at(self, position, rotation, width, height, fov, invalidated_fn): """ Parameters ---------- position : list[float] A 3-element list specifying the camera position. rotation : list[float] A 3-element list specifying the camera rotation, in euler angles. width : int The width of the camera. height : int The height of the camera. fov : float The vertical field-of-view of the camera. invalidated_fn : Callable[[], bool] Function that returns whether the request is invalidated. Returns ---------- np.array An np array of rgb values. """ # Obtain a Cameras object, and transform it to the same device as the model. c2w_matrix = camera_to_world_matrix(position, rotation) cameras = create_cameras(c2w_matrix, width, height, fov).to(self.device) # Obtain a ray bundle with this Cameras ray_bundle = cameras.generate_rays(camera_indices=0, aabb_box=None) # Inference with torch.no_grad(): # See: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/models/base_model.py#L175 outputs = self.get_outputs_for_camera_ray_bundle(self.model, ray_bundle, invalidated_fn) if outputs is None: # Allow early return between the calculation of ray bundles if the request is invalidated. return None # Return results return outputs['rgb'].cpu().numpy()
15,697
Python
60.803149
172
0.707524
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/__init__.py
from .renderer import NerfStudioRenderer from .render_queue import RendererCameraConfig, NerfStudioRenderQueue
111
Python
36.333321
69
0.882883
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/render_queue.py
import json import threading import time from collections import deque from nerfstudio_renderer.renderer import * class RendererCameraConfig: """ This class contains functions used to load camera configurations for the NerfStudioRenderQueue to use. The configuration is a list of dicts. The NerfStudioRenderQueue is then able to render differently sized images with respect to each configuration, for performance considerations for example. """ def __init__(self, cameras_config): """ Parameters ---------- cameras_config : list[dict] A list of dicts that describes different camera configurations. Each element is of the form { 'width': int, # The rendered image width (in pixels) 'height': int, # The rendered image height (in pixels) 'fov': float, # The vertical field-of-view of the camera } """ self.cameras = cameras_config def default_config(): """ Returns a default configuration, where there are 3 cameras, two for accelerated and estimated rendering, and the other for high-resolution display. Returns ---------- RendererCameraConfig A default config. """ # These configurations are chosen empirically, and may be subject to change. # Nerfstudio camera defaults: # - vertical FoV: 50 degrees # Isaac Sim camera defaults: # - Size: 1280x720 # - Focal Length: 18.14756 # - Horizontal Aperture: 20.955 # - Vertical Aperture: (Value Unused) # - (Calculated) horizontal FoV = math.degrees(2 * math.atan(20.955 / (2 * 18.14756))) = 60 # The following vertical FoV is chosen to follow the Isaac Sim camera defaults. # Some useful equations: # - focal_length = width / (2 * math.tan(math.radians(fov_horizontal) / 2)) # - focal_length = height / (2 * math.tan(math.radians(fov_vertical) / 2)) # - fov_vertical = math.degrees(2 * math.atan(height / (2 * focal_length))) # - fov_horizontal = math.degrees(2 * math.atan(width / (2 * focal_length))) # - fov_horizontal = math.degrees(2 * math.atan(horiz_aperture / (2 * focal_length))) # Ref: https://forums.developer.nvidia.com/t/change-intrinsic-camera-parameters/180309/6 # - aspect_ratio = width / height # - fov_vertical = math.degrees(2 * math.atan((height / width) * math.tan(math.radians(fov_horizontal) / 2))) return RendererCameraConfig([ # fov_vertical = math.degrees(2 * math.atan((height / width) * math.tan(math.radians(fov_horizontal) / 2))) # = 35.98339777135764 # 0.05x resolution { 'width': 64, 'height': 36, 'fov': 35.98339777135764 }, # 0.1x resolution { 'width': 128, 'height': 72, 'fov': 35.98339777135764 }, # 0.25x resolution { 'width': 320, 'height': 180, 'fov': 35.98339777135764 }, # 0.5x resolution { 'width': 640, 'height': 360, 'fov': 35.98339777135764 }, # 1x resolution { 'width': 1280, 'height': 720, 'fov': 35.98339777135764 }, ]) def load_config(file_path=None): """ Returns a configuration defined by a json-formatted file. Parameters ---------- file_path : str, optional The path to the config file. Returns ---------- RendererCameraConfig A config specified by `file_path`, or a default one. """ if file_path is None: return RendererCameraConfig.default_config() with open(file_path, 'r') as f: return RendererCameraConfig(json.load(f)) class NerfStudioRenderQueue(): """ The class encapsulates NerfStudioRenderer and provides a mechanism that aims at minimizing rendering latency, via an interface that allows registration of rendering requests. The render queue attempts to deliver rendering results of the latest request in time, so requests are not guaranteed to be served. Attributes ---------- camera_config : RendererCameraConfig The different configurations of cameras (different qualities, etc.). renderer : NerfStudioRenderer The NerfStudioRenderer used to actually give rendered images. """ def __init__(self, model_config_path, checkpoint_path, device, thread_count=3, camera_config_path=None): """ Parameters ---------- model_config_path : str The path to model configuration .yml file. camera_config_path : str, optional The path to the config file. Uses `RendererCameraConfig.default_config()` when not assigned. """ # Construct camera config and renderer self.camera_config = RendererCameraConfig.load_config(camera_config_path) self.renderer = NerfStudioRenderer(model_config_path, checkpoint_path, device) # Data maintained for optimization: self._last_request_camera_position = (-np.inf, -np.inf, -np.inf) """The camera position of the last accepted request.""" self._last_request_camera_rotation = (-np.inf, -np.inf, -np.inf) """The camera rotation of the last accepted request.""" self._request_deque = deque(maxlen=thread_count) """The queue/buffer of render requests. Since we want to drop stale requests/responses, the max size of the deque is simply set as the thread count. The deque acts like a request buffer instead of a task queue, which drops older requests when full. """ self._request_deque_pop_lock = threading.Lock() """The lock for the request deque. Although deque is thread-safe, we still need to lock it when popping the deque while empty to create blocking behavior. """ self._last_request_timestamp = time.time() """The timestamp of the last accepted request.""" self._last_request_timestamp_lock = threading.Lock() """The timestamp lock for the last request timestamp.""" self._last_response_timestamp = time.time() """The timestamp of the last sent response.""" self._last_response_timestamp_lock = threading.Lock() """The timestamp lock for the last response timestamp.""" self._image_response_buffer = None """The latest rendered image buffer, which will be cleared immediately after retrieval.""" self._image_response_buffer_lock = threading.Lock() """The image lock for the image response buffer.""" for i in range(thread_count): t = threading.Thread(target=self._render_task) t.daemon = True t.start() # We choose to use threading here instead of multiprocessing # due to lower overhead. We are aware of the GIL, but since # the bottleneck should lie in the rendering process, which # is implemented in C++ by PyTorch, the GIL should be released # during PyTorch function calls. # Ref: https://discuss.pytorch.org/t/can-pytorch-by-pass-python-gil/55498 # After going through some documents, we conclude that switching # to multiprocessing may not be a good idea, since the overhead # of inter-process communication may be high, and the # implementation is not trivial. def get_rgb_image(self): """ Retrieve the most recently ready rgb image. If no rgb images have been rendered since last call of `get_rgb_image`, returns None. Returns ---------- np.array or None If applicable, returns an np array of size (width, height, 3) and with values ranging from 0 to 1. Otherwise, returns None. """ with self._image_response_buffer_lock: image = self._image_response_buffer self._image_response_buffer = None return image def update_camera(self, position, rotation): """ Notifies an update to the camera pose. This may or may not result in a new render request. Parameters ---------- position : list[float] A 3-element list specifying the camera position. rotation : list[float] A 3-element list specifying the camera rotation, in euler angles. """ if self._is_input_similar(position, rotation): return self._last_request_camera_position = position.copy() self._last_request_camera_rotation = rotation.copy() now = time.time() with self._last_request_timestamp_lock: self._last_request_timestamp = now # Queue this render request, with request timestamp attached. self._request_deque.append((position, rotation, now)) def _render_task(self): while True: with self._request_deque_pop_lock: if len(self._request_deque) == 0: time.sleep(0.05) continue task = self._request_deque.pop() position, rotation, timestamp = task # For each render request, render lower quality images first, and then higher quality ones. # This rendering request and response may be dropped, as newer requests/responses invalidate older ones. for camera in self.camera_config.cameras: # A request can be invalidated if there are newer requests. with self._last_request_timestamp_lock: if timestamp - self._last_request_timestamp < 0: continue # Render the image # Early return if the request is invalidated. def request_invalidated(): with self._last_request_timestamp_lock: return timestamp - self._last_request_timestamp < 0 image = self.renderer.render_at(position, rotation, camera['width'], camera['height'], camera['fov'], request_invalidated) if image is None: continue # A response must be dropped if there are newer responses. with self._last_response_timestamp_lock: if timestamp - self._last_response_timestamp < 0: continue self._last_response_timestamp = timestamp with self._image_response_buffer_lock: self._image_response_buffer = image # Checks if camera pose is similar to what was recorded. def _is_input_similar(self, position, rotation): return position == self._last_request_camera_position and rotation == self._last_request_camera_rotation
11,091
Python
41.992248
138
0.600126
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/utils.py
import numpy as np import torch from nerfstudio.cameras.cameras import Cameras, CameraType from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length from scipy.spatial.transform import Rotation as R def camera_to_world_matrix(position, rotation): """ Constructs a camera-to-world (c2w) transformation matrix, based on the position and rotation of the camera. Parameters ---------- position : list[float] A 3-element list of floats representing the position of the camera. rotation : list[float] A 3-element list of floats representing the rotation of the camera, in euler angles. Returns ---------- np.array A 4x4 camera-to-world matrix. """ camera_to_world_matrix = np.eye(4) rot_matrix = R.from_euler('xyz', rotation).as_matrix() camera_to_world_matrix[:3, :3] = rot_matrix camera_to_world_matrix[:3, 3] = position return camera_to_world_matrix def create_cameras(camera_to_world_matrix, width, height, fov): """ Constructs a Cameras object based on a c2w matrix, and a camera configuration from RendererCameraConfig. Parameters ---------- camera_to_world_matrix : np.array A 3-element list of floats representing the position of the camera. width : int The width of the camera. height : int The height of the camera. fov : float The vertical field-of-view of the camera. Returns ---------- Cameras A Cameras object. """ # Compute camera focal length focal_length = three_js_perspective_camera_focal_length(fov, height) # Only use the first 3 rows of the c2w matrix, as the last row is always [0 0 0 1]. camera_to_worlds = torch.tensor(camera_to_world_matrix)[:3].view(1, 3, 4) return Cameras( fx=torch.tensor([focal_length]), fy=torch.tensor([focal_length]), cx=width/2, cy=height/2, camera_to_worlds=camera_to_worlds, camera_type=CameraType.PERSPECTIVE, times=None, )
2,071
Python
28.183098
92
0.654273
j3soon/omni-nerf-extension/pygame_viewer/pygame_test.py
import argparse import time import cv2 import numpy as np import pygame import rpyc def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=10001) parser.add_argument("--model_config_path", type=str, required=True) parser.add_argument("--model_checkpoint_path", type=str, required=True) parser.add_argument("--device", type=str, choices=['cpu', 'cuda'], default='cuda') parser.add_argument("--rpyc", type=bool, default=False) args = parser.parse_args() return args def main(args): if not args.rpyc: # Remote: Make Connection & Import conn = rpyc.classic.connect(args.host, args.port) conn.execute('from nerfstudio_renderer import NerfStudioRenderQueue') conn.execute('from pathlib import Path') conn.execute('import torch') else: from nerfstudio_renderer import NerfStudioRenderQueue from pathlib import Path import torch if not args.rpyc: # Create a Remote NerfStudioRenderQueue conn.execute(f'rq = NerfStudioRenderQueue(model_config_path=Path("{args.model_config_path}"), checkpoint_path="{args.model_checkpoint_path}", device=torch.device("{args.device}"))') else: rq = NerfStudioRenderQueue( model_config_path=Path(args.model_config_path), checkpoint_path=args.model_checkpoint_path, device=torch.device(args.device), ) # Initialize Pygame pygame.init() # Set the width and height of the window width, height = 640, 360 window_size = (width, height) # Create a Pygame window screen = pygame.display.set_mode(window_size) # Create a clock to control the frame rate clock = pygame.time.Clock() # Camera curve time & global screen buffer camera_curve_time = 0 screen_buffer = np.zeros((width, height, 3), dtype=np.uint8) # Camera pose for the poster NeRF model camera_position = [0, 0, 0] camera_rotation = [0, 0, 0] running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False # Retrieve image if not args.rpyc: image = conn.eval('rq.get_rgb_image()') else: image = rq.get_rgb_image() if image is not None: image = np.array(image) # received with shape (H*, W*, 3) image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR) # resize to (H, W, 3) image = np.transpose(image, (1, 0, 2)) screen_buffer[:] = image * 255 animation_progress = (np.sin(camera_curve_time) + 1) / 2 # Cover the screen buffer with an indicator of camera position hud_width, hud_height = 100, 50 bar_x, bar_y = 20, 24 bar_w, bar_h = 60, 2 # white background camera_position_indicator = np.ones((hud_width, hud_height, 3)) * 255 # horizontal line camera_position_indicator[bar_x:bar_x+bar_w, bar_y:bar_y+bar_h, :] = 0 # square indicator of current position hud_x = round(bar_x + bar_w * animation_progress) camera_position_indicator[hud_x-5:hud_x+5, 20:30, :] = 0 screen_buffer[width-hud_width:, height-hud_height:, :] = camera_position_indicator # Convert the NumPy array to a Pygame surface image_surface = pygame.surfarray.make_surface(screen_buffer) # Blit the surface to the screen screen.blit(image_surface, (0, 0)) pygame.display.flip() # Control the frame rate clock.tick(30) # Move Camera camera_position[2] = animation_progress # Update Camera if not args.rpyc: conn.execute(f'rq.update_camera({camera_position}, {camera_rotation})') else: rq.update_camera(camera_position, camera_rotation) if int(time.time()) % 5 == 0: camera_curve_time += 1.0 / 30.0 if not args.rpyc: # Delete remote render queue conn.execute('del rq') # Quit Pygame pygame.quit() if __name__ == '__main__': main(parse_args())
4,242
Python
32.409449
189
0.615747
j3soon/OmniIsaacGymEnvs-KukaReacher/setup.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Installation script for the 'isaacgymenvs' python package.""" from __future__ import absolute_import from __future__ import print_function from __future__ import division from setuptools import setup, find_packages import os # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ "numpy==1.23.5", "protobuf==3.20.2", "omegaconf==2.3.0", "hydra-core==1.3.2", "urllib3==1.26.16", "psutil==5.9.3", "rl-games==1.6.1" ] # Installation operation setup( name="omniisaacgymenvs", author="NVIDIA", version="2023.1.0a", description="RL environments for robot learning in NVIDIA Isaac Sim.", keywords=["robotics", "rl"], include_package_data=True, install_requires=INSTALL_REQUIRES, packages=find_packages("."), classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7, 3.8"], zip_safe=False, ) # EOF
2,473
Python
36.484848
94
0.740801
j3soon/OmniIsaacGymEnvs-KukaReacher/README.md
# Kuka Reacher Reinforcement Learning Sim2Real Environment for Omniverse Isaac Gym/Sim This repository adds a KukaReacher environment based on [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) (commit [cc1aab0](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/tree/cc1aab0f904ade860fc0761d62edb6e706ab89ec)), and plan to include Sim2Real code to control a real-world [Kuka](https://www.kuka.com/en-us/products/robotics-systems/industrial-robots) with the policy learned by reinforcement learning in Omniverse Isaac Gym/Sim. The RL code in this branch is only tested on Linux using Isaac Sim 2023.1.0. The Sim2Real code isn't implemented yet. This repo is compatible with the following repositories: - [OmniIsaacGymEnvs-DofbotReacher](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher) - [OmniIsaacGymEnvs-UR10Reacher](https://github.com/j3soon/OmniIsaacGymEnvs-UR10Reacher) - [OmniIsaacGymEnvs-KukaReacher](https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher) - [OmniIsaacGymEnvs-HiwinReacher](https://github.com/j3soon/OmniIsaacGymEnvs-HiwinReacher) ## Preview ![](docs/media/KukaKR120R2500ProReacher-Vectorized.gif) (KukaKR120R2500Pro) ## Installation Prerequisites: - Before starting, please make sure your hardware and software meet the [system requirements](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html#system-requirements). - [Install Omniverse Isaac Sim 2023.1.0](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) (Must setup Cache and Nucleus) - You may try out newer versions of Isaac Sim along with [their corresponding patch](https://github.com/j3soon/isaac-extended#conda-issue-on-linux), but it is not guaranteed to work. - Double check that Nucleus is correctly installed by [following these steps](https://github.com/j3soon/isaac-extended#nucleus). - Your computer & GPU should be able to run the Cartpole example in [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) - (Optional) [Set up a Kuka](https://www.kuka.com/en-us/products/robotics-systems/industrial-robots) in the real world Make sure to install Isaac Sim in the default directory and clone this repository to the home directory. Otherwise, you will encounter issues if you didn't modify the commands below accordingly. We will use Anaconda to manage our virtual environment: 1. Clone this repository and the patches repo: - Linux ```sh cd ~ git clone https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher.git git clone https://github.com/j3soon/isaac-extended.git ``` - Windows ```sh cd %USERPROFILE% git clone https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher.git git clone https://github.com/j3soon/isaac-extended.git ``` 2. Generate [instanceable](https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html) Kuka assets for training: [Launch the Script Editor](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gui_interactive_scripting.html#script-editor) in Isaac Sim. Copy the content in `omniisaacgymenvs/utils/usd_utils/create_instanceable_${ROBOT_NAME_LOWER}_from_urdf.py` and execute it inside the Script Editor window. Wait until you see the text `Done!`. The `${ROBOT_NAME_LOWER}` should be replaced with one of the following: - `kukakr120r2500pro` The URDF files in `/thirdparty/*` are provided by [ROS-Industrial](https://github.com/ros-industrial/kuka_experimental/tree/melodic-devel). The details on how to download this file can be found in the commit message of [fa39cbf](https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher/commit/fa39cbf37da53a7f96f3979b0a0a1f9e9a9cd103). 3. [Download and Install Anaconda](https://www.anaconda.com/products/distribution#Downloads). ```sh # For 64-bit Linux (x86_64/x64/amd64/intel64) wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh bash Anaconda3-2022.10-Linux-x86_64.sh ``` For Windows users, make sure to use `Anaconda Prompt` instead of `Anaconda Powershell Prompt`, `Command Prompt`, or `Powershell` for the following commands. 4. Patch Isaac Sim 2023.1.0 - Linux ```sh export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0" cp $ISAAC_SIM/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh.bak cp ~/isaac-extended/isaac_sim-2023.1.0-patch/linux/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh ``` - Windows > (To be updated) 5. [Set up conda environment for Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html#advanced-running-with-anaconda) - Linux ```sh # conda remove --name isaac-sim --all export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0" cd $ISAAC_SIM conda env create -f environment.yml conda activate isaac-sim cd ~/OmniIsaacGymEnvs-KukaReacher pip install -e . ``` - Windows > (To be updated) 6. Activate conda environment - Linux ```sh export ROBOT_NAME="KukaKR120R2500Pro" export ROBOT_NAME_LOWER="kukakr120r2500pro" export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0" cd $ISAAC_SIM conda activate isaac-sim source setup_conda_env.sh ``` - Windows ```sh set ROBOT_NAME="KukaKR120R2500Pro" set ROBOT_NAME_LOWER="kukakr120r2500pro" set ISAAC_SIM="%LOCALAPPDATA%\ov\pkg\isaac_sim-2023.1.0" cd %ISAAC_SIM% conda activate isaac-sim call setup_conda_env.bat ``` Please note that you should execute the commands in Step 6 for every new shell. For Windows users, replace `~` to `%USERPROFILE%` for all the following commands. ## Dummy Policy This is a sample to make sure you have setup the environment correctly. You should see a single Kuka in Isaac Sim. ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/dummy_${ROBOT_NAME_LOWER}_policy.py task=${ROBOT_NAME}Reacher test=True num_envs=1 ``` Alternatively, you can replace the dummy policy with a random policy with `omniisaacgymenvs/scripts/random_policy.py`. ## Training You can launch the training in `headless` mode as follows: ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/rlgames_train.py task=${ROBOT_NAME}Reacher headless=True ``` The number of environments is set to 2048 by default. If your GPU has small memory, you can decrease the number of environments by changing the arguments `num_envs` as below: ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/rlgames_train.py task=${ROBOT_NAME}Reacher headless=True num_envs=2048 ``` You can also skip training by downloading the pre-trained model checkpoint by: ```sh cd ~/OmniIsaacGymEnvs-KukaReacher wget https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher/releases/download/v1.0.0/runs.zip unzip runs.zip ``` The learning curve of the pre-trained model: ![](docs/media/KukaKR120R2500ProReacher-Learning-Curve.png) (KukaKR120R2500Pro) ## Testing Make sure you have stored the model checkpoints at `~/OmniIsaacGymEnvs-KukaReacher/runs`, you can check it with the following command: ```sh ls ~/OmniIsaacGymEnvs-KukaReacher/runs/${ROBOT_NAME}Reacher/nn/ ``` In order to achieve the highest rewards, you may not want to use the latest checkpoint `./runs/${ROBOT_NAME}Reacher/nn/${ROBOT_NAME}Reacher.pth`. Instead, use the checkpoint with highest rewards such as `./runs/${ROBOT_NAME}Reacher/nn/last_${ROBOT_NAME}Reacher_ep_1000_rew_XXX.pth`. You can replace `${ROBOT_NAME}Reacher.pth` with the latest checkpoint before following the steps below, or simply modify the commands below to use the latest checkpoint. You can visualize the learned policy by the following command: ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/rlgames_train.py task=${ROBOT_NAME}Reacher test=True num_envs=512 checkpoint=./runs/${ROBOT_NAME}Reacher/nn/${ROBOT_NAME}Reacher.pth ``` Likewise, you can decrease the number of environments by modifying the parameter `num_envs=512`. ## Sim2Real > (To be updated) ## Demo We provide an interactable demo based on the `${ROBOT_NAME}Reacher` RL example. In this demo, you can click on any of the Kuka in the scene to manually control the robot with your keyboard as follows: - `Q`/`A`: Control Joint 0. - `W`/`S`: Control Joint 1. - `E`/`D`: Control Joint 2. - `R`/`F`: Control Joint 3. - `T`/`G`: Control Joint 4. - `Y`/`H`: Control Joint 5. - `ESC`: Unselect a selected Kuka and yields manual control Launch this demo with the following command. Note that this demo limits the maximum number of Kuka in the scene to 128. ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/rlgames_demo.py task=${ROBOT_NAME}Reacher num_envs=64 ``` ## Running in Docker If you have a [NVIDIA Enterprise subscription](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/planning.html), you can run all services with Docker Compose. For users without a subscription, you can pull the [Isaac Docker image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim), but should still install Omniverse Nucleus beforehand. (only Isaac itself is dockerized) Follow [this tutorial](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_container.html#isaac-sim-setup-remote-headless-container) to generate your NGC API Key. Please note that you should clone this repositories in your home directory and generate instanceable assets beforehand as mentioned in the [Installation](#installation) section. We will now set up the docker environment. 1. Build the docker image ```sh docker pull nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1 docker build . -t j3soon/isaac-sim ``` 2. Launch an Isaac Container in Headless mode: ```sh scripts/run_docker_headless.sh ./runheadless.native.sh ``` Alternatively, launch an Isaac Container with GUI (The host machine should include a desktop environment): ```sh scripts/run_docker.sh ./runapp.sh ``` 3. Install this repository ```sh cd ~/OmniIsaacGymEnvs-KukaReacher pip install -e . ``` 4. Run any command in the docker container > Make sure to add `headless=True` if the container is launched in headless mode. For an example, running the training script: ```sh cd ~/OmniIsaacGymEnvs-KukaReacher python omniisaacgymenvs/scripts/rlgames_train.py task=${ROBOT_NAME}Reacher headless=True num_envs=2048 ``` You can watch the training progress with: ```sh docker exec -it isaac-sim /bin/bash cd ~/OmniIsaacGymEnvs-KukaReacher tensorboard --logdir=./runs ``` ## Acknowledgement This project has been made possible through the support of [ElsaLab][elsalab], [Raccoon][rccn], and [NVIDIA AI Technology Center (NVAITC)][nvaitc]. For a complete list of contributors to the code of this repository, please visit the [contributor list](https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher/graphs/contributors). [![](docs/media/logos/elsalab.png)][elsalab] [![](docs/media/logos/rccn.png)][rccn] [![](docs/media/logos/nvaitc.png)][nvaitc] [elsalab]: https://github.com/elsa-lab [rccn]: http://rccn.dev/ [nvaitc]: https://github.com/NVAITC Disclaimer: this is not an official NVIDIA product. > **Note**: below are the original README of [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs). # Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim ## About this repository This repository contains Reinforcement Learning examples that can be run with the latest release of [Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html). RL examples are trained using PPO from [rl_games](https://github.com/Denys88/rl_games) library and examples are built on top of Isaac Sim's `omni.isaac.core` and `omni.isaac.gym` frameworks. Please see [release notes](docs/release_notes.md) for the latest updates. <img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="614" height="307"/> ## Installation Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release. *Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2023.1.0, to ensure examples work as expected.* Note that the 2022.2.1 OmniIsaacGymEnvs release will no longer work with the latest Isaac Sim 2023.1.0 release. Due to a change in USD APIs, line 138 in rl_task.py is no longer valid. To run the previous OIGE release with the latest Isaac Sim release, please comment out lines 137 and 138 in rl_task.py or set `add_distant_light` to `False` in the task config file. No changes are required if running with the latest release of OmniIsaacGymEnvs. Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim. To install `omniisaacgymenvs`, first clone this repository: ```bash git clone https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs.git ``` Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`. To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path. ``` For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $* For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh ``` Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`: ```bash PYTHON_PATH -m pip install -e . ``` The following error may appear during the initial installation. This error is harmless and can be ignored. ``` ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. ``` ### Running the examples *Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.* To train your first policy, run: ```bash PYTHON_PATH scripts/rlgames_train.py task=Cartpole ``` An Isaac Sim app window should be launched. Once Isaac Sim initialization completes, the Cartpole scene will be constructed and simulation will start running automatically. The process will terminate once training finishes. Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar. To achieve maximum performance, launch training in `headless` mode as follows: ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True ``` #### A Note on the Startup Time of the Simulation Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually be optimized in future releases. ### Extension Workflow The extension workflow provides a simple user interface for creating and launching RL tasks. To launch Isaac Sim for the extension workflow, run: ```bash ./<isaac_sim_root>/isaac-sim.gym.sh --ext-folder </parent/directory/to/OIGE> ``` Note: `isaac_sim_root` should be located in the same directory as `python.sh`. The UI window can be activated from `Isaac Examples > RL Examples` by navigating the top menu bar. For more details on the extension workflow, please refer to the [documentation](docs/extension_workflow.md). ### Loading trained models // Checkpoints Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME` defaults to the task name, but can also be overridden via the `experiment` argument. To load a trained checkpoint and continue training, use the `checkpoint` argument: ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth ``` To load a trained checkpoint and only perform inference (no training), pass `test=True` as an argument, along with the checkpoint name. To avoid rendering overhead, you may also want to run with fewer environments using `num_envs=64`: ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64 ``` Note that if there are special characters such as `[` or `=` in the checkpoint names, you will need to escape them and put quotes around the string. For example, `checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"` We provide pre-trained checkpoints on the [Nucleus](https://docs.omniverse.nvidia.com/nucleus/latest/index.html) server under `Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints`. Run the following command to launch inference with pre-trained checkpoint: Localhost (To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html)): ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64 ``` Production server: ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64 ``` When running with a pre-trained checkpoint for the first time, we will automatically download the checkpoint file to `omniisaacgymenvs/checkpoints`. For subsequent runs, we will re-use the file that has already been downloaded, and will not overwrite existing checkpoints with the same name in the `checkpoints` folder. ## Runing from Docker Latest Isaac Sim Docker image can be found on [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim). A utility script is provided at `docker/run_docker.sh` to help initialize this repository and launch the Isaac Sim docker container. The script can be run with: ```bash ./docker/run_docker.sh ``` Then, training can be launched from the container with: ```bash /isaac-sim/python.sh scripts/rlgames_train.py headless=True task=Ant ``` To run the Isaac Sim docker with UI, use the following script: ```bash ./docker/run_docker_viewer.sh ``` Then, training can be launched from the container with: ```bash /isaac-sim/python.sh scripts/rlgames_train.py task=Ant ``` To avoid re-installing OIGE each time a container is launched, we also provide a dockerfile that can be used to build an image with OIGE installed. To build the image, run: ```bash docker build -t isaac-sim-oige -f docker/dockerfile . ``` Then, start a container with the built image: ```bash ./docker/run_dockerfile.sh ``` Then, training can be launched from the container with: ```bash /isaac-sim/python.sh scripts/rlgames_train.py task=Ant headless=True ``` ## Livestream OmniIsaacGymEnvs supports livestream through the [Omniverse Streaming Client](https://docs.omniverse.nvidia.com/app_streaming-client/app_streaming-client/overview.html). To enable this feature, add the commandline argument `enable_livestream=True`: ```bash PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True enable_livestream=True ``` Connect from the Omniverse Streaming Client once the SimulationApp has been created. Note that enabling livestream is equivalent to training with the viewer enabled, thus the speed of training/inferencing will decrease compared to running in headless mode. ## Training Scripts All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`. To test out a task without RL in the loop, run the random policy script with: ```bash PYTHON_PATH scripts/random_policy.py task=Cartpole ``` This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated. To run a simple form of PPO from `rl_games`, use the single-threaded training script: ```bash PYTHON_PATH scripts/rlgames_train.py task=Cartpole ``` This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations have been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI. ### Configuration and command line arguments We use [Hydra](https://hydra.cc/docs/intro/) to manage the config. Common arguments for the training scripts are: * `task=TASK` - Selects which task to use. Any of `AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `CartpoleCamera`, `Crazyflie`, `FactoryTaskNutBoltPick`, `FactoryTaskNutBoltPlace`, `FactoryTaskNutBoltScrew`, `FrankaCabinet`, `FrankaDeformable`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM` (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task`) * `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`). * `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config). * `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config * `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step. * `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU. * `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU. * `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax. * `multi_gpu=MULTI_GPU` - Whether to train using multiple GPUs. Defaults to `False`. Note that this option is only available with `rlgames_train.py`. * `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training. * `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing. * `headless=HEADLESS` - Whether to run in headless mode. * `enable_livestream=ENABLE_LIVESTREAM` - Whether to enable Omniverse streaming. * `experiment=EXPERIMENT` - Sets the name of the experiment. * `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments. * `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation). * `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used. Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`. #### Hydra Notes Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file. The way that the `task` and `train` portions of the config works are through the use of config groups. You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/) The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`. In some places in the config you will find other variables referenced (for example, `num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy. This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation). ### Tensorboard Tensorboard can be launched during training via the following command: ```bash PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries ``` ## WandB support You can run (WandB)[https://wandb.ai/] with OmniIsaacGymEnvs by setting `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating. ## Training with Multiple GPUs To train with multiple GPUs, use the following command, where `--proc_per_node` represents the number of available GPUs: ```bash PYTHON_PATH -m torch.distributed.run --nnodes=1 --nproc_per_node=2 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True ``` ## Multi-Node Training To train across multiple nodes/machines, it is required to launch an individual process on each node. For the master node, use the following command, where `--proc_per_node` represents the number of available GPUs, and `--nnodes` represents the number of nodes: ```bash PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True ``` Note that the port (`5555`) can be replaced with any other available port. For non-master nodes, use the following command, replacing `--node_rank` with the index of each machine: ```bash PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True ``` For more details on multi-node training with PyTorch, please visit [here](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, it is possible multi-node training will perform worse than running on a single node instance. ## Tasks Source code for tasks can be found in `omniisaacgymenvs/tasks`. Each task follows the frameworks provided in `omni.isaac.core` and `omni.isaac.gym` in Isaac Sim. Refer to [docs/framework.md](docs/framework.md) for how to create your own tasks. Full details on each of the tasks available can be found in the [RL examples documentation](docs/rl_examples.md). ## Demo We provide an interactable demo based on the `AnymalTerrain` RL example. In this demo, you can click on any of the ANYmals in the scene to go into third-person mode and manually control the robot with your keyboard as follows: - `Up Arrow`: Forward linear velocity command - `Down Arrow`: Backward linear velocity command - `Left Arrow`: Leftward linear velocity command - `Right Arrow`: Rightward linear velocity command - `Z`: Counterclockwise yaw angular velocity command - `X`: Clockwise yaw angular velocity command - `C`: Toggles camera view between third-person and scene view while maintaining manual control - `ESC`: Unselect a selected ANYmal and yields manual control Launch this demo with the following command. Note that this demo limits the maximum number of ANYmals in the scene to 128. ``` PYTHON_PATH scripts/rlgames_demo.py task=AnymalTerrain num_envs=64 checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth ``` <img src="https://user-images.githubusercontent.com/34286328/184688654-6e7899b2-5847-4184-8944-2a96b129b1ff.gif" width="600" height="300"/>
29,919
Markdown
50.586207
469
0.76804
j3soon/OmniIsaacGymEnvs-KukaReacher/config/extension.toml
[gym] reloadable = true [package] version = "0.0.0" category = "Simulation" title = "Isaac Gym Envs" description = "RL environments" authors = ["Isaac Sim Team"] repository = "https://gitlab-master.nvidia.com/carbon-gym/omniisaacgymenvs" keywords = ["isaac"] changelog = "docs/CHANGELOG.md" readme = "docs/README.md" icon = "data/icon.png" writeTarget.kit = true [dependencies] "omni.isaac.gym" = {} "omni.isaac.core" = {} "omni.isaac.cloner" = {} "omni.isaac.ml_archive" = {} # torch [[python.module]] name = "omniisaacgymenvs"
532
TOML
20.319999
75
0.693609
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/extension.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import asyncio import inspect import os import traceback import weakref from abc import abstractmethod import hydra import omni.ext import omni.timeline import omni.ui as ui import omni.usd from hydra import compose, initialize from omegaconf import OmegaConf from omni.isaac.cloner import GridCloner from omni.isaac.core.utils.extensions import disable_extension, enable_extension from omni.isaac.core.utils.torch.maths import set_seed from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.core.world import World from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.rlgames.rlgames_train_mt import RLGTrainer, Trainer from omniisaacgymenvs.utils.task_util import import_tasks, initialize_task from omni.isaac.ui.callbacks import on_open_folder_clicked, on_open_IDE_clicked from omni.isaac.ui.menu import make_menu_item_description from omni.isaac.ui.ui_utils import ( btn_builder, dropdown_builder, get_style, int_builder, multi_btn_builder, multi_cb_builder, scrolling_frame_builder, setup_ui_headers, str_builder, ) from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items from omni.kit.viewport.utility import get_active_viewport, get_viewport_from_window_name from omni.kit.viewport.utility.camera_state import ViewportCameraState from pxr import Gf ext_instance = None class RLExtension(omni.ext.IExt): def on_startup(self, ext_id: str): self._render_modes = ["Full render", "UI only", "None"] self._env = None self._task = None self._ext_id = ext_id ext_manager = omni.kit.app.get_app().get_extension_manager() extension_path = ext_manager.get_extension_path(ext_id) self._ext_path = os.path.dirname(extension_path) if os.path.isfile(extension_path) else extension_path self._ext_file_path = os.path.abspath(__file__) self._initialize_task_list() self.start_extension( "", "", "RL Examples", "RL Examples", "", "A set of reinforcement learning examples.", self._ext_file_path, ) self._task_initialized = False self._task_changed = False self._is_training = False self._render = True self._resume = False self._test = False self._evaluate = False self._checkpoint_path = "" self._timeline = omni.timeline.get_timeline_interface() self._viewport = get_active_viewport() self._viewport.updates_enabled = True global ext_instance ext_instance = self def _initialize_task_list(self): self._task_map, _ = import_tasks() self._task_list = list(self._task_map.keys()) self._task_list.sort() self._task_list.remove("CartpoleCamera") # we cannot run camera-based training from extension workflow for now. it requires a specialized app file. self._task_name = self._task_list[0] self._parse_config(self._task_name) self._update_task_file_paths(self._task_name) def _update_task_file_paths(self, task): self._task_file_path = os.path.abspath(inspect.getfile(self._task_map[task])) self._task_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/task/{task}.yaml") self._train_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/train/{task}PPO.yaml") def _parse_config(self, task, num_envs=None, overrides=None): hydra.core.global_hydra.GlobalHydra.instance().clear() initialize(version_base=None, config_path="cfg") overrides_list = [f"task={task}"] if overrides is not None: overrides_list += overrides if num_envs is None: self._cfg = compose(config_name="config", overrides=overrides_list) else: self._cfg = compose(config_name="config", overrides=overrides_list + [f"num_envs={num_envs}"]) self._cfg_dict = omegaconf_to_dict(self._cfg) self._sim_config = SimConfig(self._cfg_dict) def start_extension( self, menu_name: str, submenu_name: str, name: str, title: str, doc_link: str, overview: str, file_path: str, number_of_extra_frames=1, window_width=550, keep_window_open=False, ): window = ui.Workspace.get_window("Property") if window: window.visible = False window = ui.Workspace.get_window("Render Settings") if window: window.visible = False menu_items = [make_menu_item_description(self._ext_id, name, lambda a=weakref.proxy(self): a._menu_callback())] if menu_name == "" or menu_name is None: self._menu_items = menu_items elif submenu_name == "" or submenu_name is None: self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)] else: self._menu_items = [ MenuItemDescription( name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)] ) ] add_menu_items(self._menu_items, "Isaac Examples") self._task_dropdown = None self._cbs = None self._build_ui( name=name, title=title, doc_link=doc_link, overview=overview, file_path=file_path, number_of_extra_frames=number_of_extra_frames, window_width=window_width, keep_window_open=keep_window_open, ) return def _build_ui( self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width, keep_window_open ): self._window = omni.ui.Window( name, width=window_width, height=0, visible=keep_window_open, dockPreference=ui.DockPreference.LEFT_BOTTOM ) with self._window.frame: self._main_stack = ui.VStack(spacing=5, height=0) with self._main_stack: setup_ui_headers(self._ext_id, file_path, title, doc_link, overview) self._controls_frame = ui.CollapsableFrame( title="World Controls", width=ui.Fraction(1), height=0, collapsed=False, style=get_style(), horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with self._controls_frame: with ui.VStack(style=get_style(), spacing=5, height=0): with ui.HStack(style=get_style()): with ui.VStack(style=get_style(), width=ui.Fraction(20)): dict = { "label": "Select Task", "type": "dropdown", "default_val": 0, "items": self._task_list, "tooltip": "Select a task", "on_clicked_fn": self._on_task_select, } self._task_dropdown = dropdown_builder(**dict) with ui.Frame(tooltip="Open Source Code"): ui.Button( name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) with ui.Frame(tooltip="Open Task Config"): ui.Button( name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_cfg_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) with ui.Frame(tooltip="Open Training Config"): ui.Button( name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._train_cfg_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) dict = { "label": "Number of environments", "tooltip": "Enter the number of environments to construct", "min": 0, "max": 8192, "default_val": self._cfg.task.env.numEnvs, } self._num_envs_int = int_builder(**dict) dict = { "label": "Load Environment", "type": "button", "text": "Load", "tooltip": "Load Environment and Task", "on_clicked_fn": self._on_load_world, } self._load_env_button = btn_builder(**dict) dict = { "label": "Rendering Mode", "type": "dropdown", "default_val": 0, "items": self._render_modes, "tooltip": "Select a rendering mode", "on_clicked_fn": self._on_render_mode_select, } self._render_dropdown = dropdown_builder(**dict) dict = { "label": "Configure Training", "count": 3, "text": ["Resume from Checkpoint", "Test", "Evaluate"], "default_val": [False, False, False], "tooltip": [ "", "Resume training from checkpoint", "Play a trained policy", "Evaluate a policy during training", ], "on_clicked_fn": [ self._on_resume_cb_update, self._on_test_cb_update, self._on_evaluate_cb_update, ], } self._cbs = multi_cb_builder(**dict) dict = { "label": "Load Checkpoint", "tooltip": "Enter path to checkpoint file", "on_clicked_fn": self._on_checkpoint_update, } self._checkpoint_str = str_builder(**dict) dict = { "label": "Train/Test", "count": 2, "text": ["Start", "Stop"], "tooltip": [ "", "Launch new training/inference run", "Terminate current training/inference run", ], "on_clicked_fn": [self._on_train, self._on_train_stop], } self._buttons = multi_btn_builder(**dict) return def create_task(self): headless = self._cfg.headless enable_viewport = "enable_cameras" in self._cfg.task.sim and self._cfg.task.sim.enable_cameras self._env = VecEnvRLGamesMT( headless=headless, sim_device=self._cfg.device_id, enable_livestream=self._cfg.enable_livestream, enable_viewport=enable_viewport, launch_simulation_app=False, ) self._task = initialize_task(self._cfg_dict, self._env, init_sim=False) self._task_initialized = True def _on_task_select(self, value): if self._task_initialized and value != self._task_name: self._task_changed = True self._task_initialized = False self._task_name = value self._parse_config(self._task_name) self._num_envs_int.set_value(self._cfg.task.env.numEnvs) self._update_task_file_paths(self._task_name) def _on_render_mode_select(self, value): if value == self._render_modes[0]: self._viewport.updates_enabled = True window = ui.Workspace.get_window("Viewport") window.visible = True if self._env: self._env._update_viewport = True self._env._render_mode = 0 elif value == self._render_modes[1]: self._viewport.updates_enabled = False window = ui.Workspace.get_window("Viewport") window.visible = False if self._env: self._env._update_viewport = False self._env._render_mode = 1 elif value == self._render_modes[2]: self._viewport.updates_enabled = False window = ui.Workspace.get_window("Viewport") window.visible = False if self._env: self._env._update_viewport = False self._env._render_mode = 2 def _on_render_cb_update(self, value): self._render = value print("updates enabled", value) self._viewport.updates_enabled = value if self._env: self._env._update_viewport = value if value: window = ui.Workspace.get_window("Viewport") window.visible = True else: window = ui.Workspace.get_window("Viewport") window.visible = False def _on_single_env_cb_update(self, value): visibility = "invisible" if value else "inherited" stage = omni.usd.get_context().get_stage() env_root = stage.GetPrimAtPath("/World/envs") if env_root.IsValid(): for i, p in enumerate(env_root.GetChildren()): p.GetAttribute("visibility").Set(visibility) if value: stage.GetPrimAtPath("/World/envs/env_0").GetAttribute("visibility").Set("inherited") env_pos = self._task._env_pos[0].cpu().numpy().tolist() camera_pos = [env_pos[0] + 10, env_pos[1] + 10, 3] camera_target = [env_pos[0], env_pos[1], env_pos[2]] else: camera_pos = [10, 10, 3] camera_target = [0, 0, 0] camera_state = ViewportCameraState("/OmniverseKit_Persp", get_active_viewport()) camera_state.set_position_world(Gf.Vec3d(*camera_pos), True) camera_state.set_target_world(Gf.Vec3d(*camera_target), True) def _on_test_cb_update(self, value): self._test = value if value is True and self._checkpoint_path.strip() == "": self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth") def _on_resume_cb_update(self, value): self._resume = value if value is True and self._checkpoint_path.strip() == "": self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth") def _on_evaluate_cb_update(self, value): self._evaluate = value def _on_checkpoint_update(self, value): self._checkpoint_path = value.get_value_as_string() async def _on_load_world_async(self, use_existing_stage): # initialize task if not initialized if not self._task_initialized or not omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid(): self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int()) self.create_task() else: # update config self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int()) self._task.update_config(self._sim_config) # clear scene # self._env._world.scene.clear() self._env._world._sim_params = self._sim_config.get_physics_params() await self._env._world.initialize_simulation_context_async() set_camera_view(eye=[10, 10, 3], target=[0, 0, 0], camera_prim_path="/OmniverseKit_Persp") if not use_existing_stage: # clear scene self._env._world.scene.clear() # clear environments added to world omni.usd.get_context().get_stage().RemovePrim("/World/collisions") omni.usd.get_context().get_stage().RemovePrim("/World/envs") # create scene await self._env._world.reset_async_set_up_scene() # update num_envs in envs self._env.update_task_params() else: self._task.initialize_views(self._env._world.scene) def _on_load_world(self): # stop simulation before updating stage self._timeline.stop() asyncio.ensure_future(self._on_load_world_async(use_existing_stage=False)) def _on_train_stop(self): if self._task_initialized: asyncio.ensure_future(self._env._world.stop_async()) async def _on_train_async(self, overrides=None): try: # initialize task if not initialized print("task initialized:", self._task_initialized) if not self._task_initialized: # if this is the first launch of the extension, we do not want to re-create stage if stage already exists use_existing_stage = False if omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid(): use_existing_stage = True print(use_existing_stage) await self._on_load_world_async(use_existing_stage) # update config self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int(), overrides=overrides) sim_config = SimConfig(self._cfg_dict) self._task.update_config(sim_config) cfg_dict = omegaconf_to_dict(self._cfg) # sets seed. if seed is -1 will pick a random one self._cfg.seed = set_seed(self._cfg.seed, torch_deterministic=self._cfg.torch_deterministic) cfg_dict["seed"] = self._cfg.seed self._checkpoint_path = self._checkpoint_str.get_value_as_string() if self._resume or self._test: self._cfg.checkpoint = self._checkpoint_path self._cfg.test = self._test self._cfg.evaluation = self._evaluate cfg_dict["checkpoint"] = self._cfg.checkpoint cfg_dict["test"] = self._cfg.test cfg_dict["evaluation"] = self._cfg.evaluation rlg_trainer = RLGTrainer(self._cfg, cfg_dict) if not rlg_trainer._bad_checkpoint: trainer = Trainer(rlg_trainer, self._env) await self._env._world.reset_async_no_set_up_scene() self._env._render_mode = self._render_dropdown.get_item_value_model().as_int await self._env.run(trainer) await omni.kit.app.get_app().next_update_async() except Exception as e: print(traceback.format_exc()) finally: self._is_training = False def _on_train(self): # stop simulation if still running self._timeline.stop() self._on_render_mode_select(self._render_modes[self._render_dropdown.get_item_value_model().as_int]) if not self._is_training: self._is_training = True asyncio.ensure_future(self._on_train_async()) return def _menu_callback(self): self._window.visible = not self._window.visible return def _on_window(self, status): return def on_shutdown(self): self._extra_frames = [] if self._menu_items is not None: self._sample_window_cleanup() self.shutdown_cleanup() global ext_instance ext_instance = None return def shutdown_cleanup(self): return def _sample_window_cleanup(self): remove_menu_items(self._menu_items, "Isaac Examples") self._window = None self._menu_items = None self._buttons = None self._load_env_button = None self._task_dropdown = None self._cbs = None self._checkpoint_str = None return def get_instance(): return ext_instance
22,236
Python
42.262646
155
0.533189
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/__init__.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import traceback try: from .extension import RLExtension, get_instance # import omniisaacgymenvs.tests except Exception as e: pass # print(e) # print(traceback.format_exc())
1,753
Python
46.405404
80
0.775242
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/envs/vec_env_rlgames_mt.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import torch from omni.isaac.gym.vec_env import TaskStopException, VecEnvMT from .vec_env_rlgames import VecEnvRLGames # VecEnv Wrapper for RL training class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT): def _parse_data(self, data): self._obs = data["obs"] self._rew = data["rew"].to(self._task.rl_device) self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device) self._resets = data["reset"].to(self._task.rl_device) self._extras = data["extras"] def step(self, actions): if self._stop: raise TaskStopException() if self._task.randomize_actions: actions = self._task._dr_randomizer.apply_actions_randomization( actions=actions, reset_buf=self._task.reset_buf ) actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device) self.send_actions(actions) data = self.get_data() if self._task.randomize_observations: self._obs = self._task._dr_randomizer.apply_observations_randomization( observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf ) self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device) obs_dict = {} obs_dict["obs"] = self._obs obs_dict["states"] = self._states return obs_dict, self._rew, self._resets, self._extras
3,109
Python
42.194444
118
0.705693
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/envs/vec_env_rlgames.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from datetime import datetime import numpy as np import torch from omni.isaac.gym.vec_env import VecEnvBase # VecEnv Wrapper for RL training class VecEnvRLGames(VecEnvBase): def _process_data(self): self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device) self._rew = self._rew.to(self._task.rl_device) self._states = torch.clamp(self._states, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device) self._resets = self._resets.to(self._task.rl_device) self._extras = self._extras def set_task(self, task, backend="numpy", sim_params=None, init_sim=True, rendering_dt=1.0 / 60.0) -> None: super().set_task(task, backend, sim_params, init_sim, rendering_dt) self.num_states = self._task.num_states self.state_space = self._task.state_space def step(self, actions): if self._task.randomize_actions: actions = self._task._dr_randomizer.apply_actions_randomization( actions=actions, reset_buf=self._task.reset_buf ) actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device) self._task.pre_physics_step(actions) if (self.sim_frame_count + self._task.control_frequency_inv) % self._task.rendering_interval == 0: for _ in range(self._task.control_frequency_inv - 1): self._world.step(render=False) self.sim_frame_count += 1 self._world.step(render=self._render) self.sim_frame_count += 1 else: for _ in range(self._task.control_frequency_inv): self._world.step(render=False) self.sim_frame_count += 1 self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step() if self._task.randomize_observations: self._obs = self._task._dr_randomizer.apply_observations_randomization( observations=self._obs.to(device=self._task.rl_device), reset_buf=self._task.reset_buf ) self._states = self._task.get_states() self._process_data() obs_dict = {"obs": self._obs, "states": self._states} return obs_dict, self._rew, self._resets, self._extras def reset(self, seed=None, options=None): """Resets the task and applies default zero actions to recompute observations and states.""" now = datetime.now().strftime("%Y-%m-%d %H:%M:%S") print(f"[{now}] Running RL reset") self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.rl_device) obs_dict, _, _, _ = self.step(actions) return obs_dict
4,328
Python
43.628866
116
0.677218
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/allegro_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.allegro_hand import AllegroHand from omniisaacgymenvs.robots.articulations.views.allegro_hand_view import AllegroHandView from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask class AllegroHandTask(InHandManipulationTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) InHandManipulationTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self.object_type = self._task_cfg["env"]["objectType"] assert self.object_type in ["block"] self.obs_type = self._task_cfg["env"]["observationType"] if not (self.obs_type in ["full_no_vel", "full"]): raise Exception("Unknown type of observations!\nobservationType should be one of: [full_no_vel, full]") print("Obs type:", self.obs_type) self.num_obs_dict = { "full_no_vel": 50, "full": 72, } self.object_scale = torch.tensor([1.0, 1.0, 1.0]) self._num_observations = self.num_obs_dict[self.obs_type] self._num_actions = 16 self._num_states = 0 InHandManipulationTask.update_config(self) def get_starting_positions(self): self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device) self.hand_start_orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782], device=self.device) self.pose_dy, self.pose_dz = -0.2, 0.06 def get_hand(self): allegro_hand = AllegroHand( prim_path=self.default_zero_env_path + "/allegro_hand", name="allegro_hand", translation=self.hand_start_translation, orientation=self.hand_start_orientation, ) self._sim_config.apply_articulation_settings( "allegro_hand", get_prim_at_path(allegro_hand.prim_path), self._sim_config.parse_actor_config("allegro_hand"), ) allegro_hand_prim = self._stage.GetPrimAtPath(allegro_hand.prim_path) allegro_hand.set_allegro_hand_properties(stage=self._stage, allegro_hand_prim=allegro_hand_prim) allegro_hand.set_motor_control_mode( stage=self._stage, allegro_hand_path=self.default_zero_env_path + "/allegro_hand" ) def get_hand_view(self, scene): return AllegroHandView(prim_paths_expr="/World/envs/.*/allegro_hand", name="allegro_hand_view") def get_observations(self): self.get_object_goal_observations() self.hand_dof_pos = self._hands.get_joint_positions(clone=False) self.hand_dof_vel = self._hands.get_joint_velocities(clone=False) if self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() else: print("Unkown observations type!") observations = {self._hands.name: {"obs_buf": self.obs_buf}} return observations def compute_full_observations(self, no_vel=False): if no_vel: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, 16:19] = self.object_pos self.obs_buf[:, 19:23] = self.object_rot self.obs_buf[:, 23:26] = self.goal_pos self.obs_buf[:, 26:30] = self.goal_rot self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 34:50] = self.actions else: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel self.obs_buf[:, 32:35] = self.object_pos self.obs_buf[:, 35:39] = self.object_rot self.obs_buf[:, 39:42] = self.object_linvel self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 45:48] = self.goal_pos self.obs_buf[:, 48:52] = self.goal_rot self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 56:72] = self.actions
6,329
Python
42.655172
115
0.658872
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/ball_balance.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.maths import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.balance_bot import BalanceBot from pxr import PhysxSchema class BallBalanceTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 12 + 12 self._num_actions = 3 self.anchored = False RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._dt = self._task_cfg["sim"]["dt"] self._table_position = torch.tensor([0, 0, 0.56]) self._ball_position = torch.tensor([0.0, 0.0, 1.0]) self._ball_radius = 0.1 self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] def set_up_scene(self, scene) -> None: self.get_balance_table() self.add_ball() super().set_up_scene(scene, replicate_physics=False) self.set_up_table_anchors() self._balance_bots = ArticulationView( prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False ) scene.add(self._balance_bots) self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False ) scene.add(self._balls) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("balance_bot_view"): scene.remove_object("balance_bot_view", registry_only=True) if scene.object_exists("ball_view"): scene.remove_object("ball_view", registry_only=True) self._balance_bots = ArticulationView( prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False ) scene.add(self._balance_bots) self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False ) scene.add(self._balls) def get_balance_table(self): balance_table = BalanceBot( prim_path=self.default_zero_env_path + "/BalanceBot", name="BalanceBot", translation=self._table_position ) self._sim_config.apply_articulation_settings( "table", get_prim_at_path(balance_table.prim_path), self._sim_config.parse_actor_config("table") ) def add_ball(self): ball = DynamicSphere( prim_path=self.default_zero_env_path + "/Ball/ball", translation=self._ball_position, name="ball_0", radius=self._ball_radius, color=torch.tensor([0.9, 0.6, 0.2]), ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) def set_up_table_anchors(self): from pxr import Gf height = 0.08 stage = get_current_stage() for i in range(self._num_envs): base_path = f"{self.default_base_env_path}/env_{i}/BalanceBot" for j, leg_offset in enumerate([(0.4, 0, height), (-0.2, 0.34641, 0), (-0.2, -0.34641, 0)]): # fix the legs to ground leg_path = f"{base_path}/lower_leg{j}" ground_joint_path = leg_path + "_ground" env_pos = stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}").GetAttribute("xformOp:translate").Get() anchor_pos = env_pos + Gf.Vec3d(*leg_offset) self.fix_to_ground(stage, ground_joint_path, leg_path, anchor_pos) def fix_to_ground(self, stage, joint_path, prim_path, anchor_pos): from pxr import UsdPhysics, Gf # D6 fixed joint d6FixedJoint = UsdPhysics.Joint.Define(stage, joint_path) d6FixedJoint.CreateBody0Rel().SetTargets(["/World/defaultGroundPlane"]) d6FixedJoint.CreateBody1Rel().SetTargets([prim_path]) d6FixedJoint.CreateLocalPos0Attr().Set(anchor_pos) d6FixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0))) d6FixedJoint.CreateLocalPos1Attr().Set(Gf.Vec3f(0, 0, 0.18)) d6FixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0))) # lock all DOF (lock - low is greater than high) d6Prim = stage.GetPrimAtPath(joint_path) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) def get_observations(self) -> dict: ball_positions, ball_orientations = self._balls.get_world_poses(clone=False) ball_positions = ball_positions[:, 0:3] - self._env_pos ball_velocities = self._balls.get_velocities(clone=False) ball_linvels = ball_velocities[:, 0:3] ball_angvels = ball_velocities[:, 3:6] dof_pos = self._balance_bots.get_joint_positions(clone=False) dof_vel = self._balance_bots.get_joint_velocities(clone=False) sensor_force_torques = self._balance_bots.get_measured_joint_forces(joint_indices=self._sensor_indices) # (num_envs, num_sensors, 6) self.obs_buf[..., 0:3] = dof_pos[..., self.actuated_dof_indices] self.obs_buf[..., 3:6] = dof_vel[..., self.actuated_dof_indices] self.obs_buf[..., 6:9] = ball_positions self.obs_buf[..., 9:12] = ball_linvels self.obs_buf[..., 12:15] = sensor_force_torques[..., 0] / 20.0 self.obs_buf[..., 15:18] = sensor_force_torques[..., 3] / 20.0 self.obs_buf[..., 18:21] = sensor_force_torques[..., 4] / 20.0 self.obs_buf[..., 21:24] = sensor_force_torques[..., 5] / 20.0 self.ball_positions = ball_positions self.ball_linvels = ball_linvels observations = {"ball_balance": {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) # update position targets from actions self.dof_position_targets[..., self.actuated_dof_indices] += ( self._dt * self._action_speed_scale * actions.to(self.device) ) self.dof_position_targets[:] = tensor_clamp( self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits ) # reset position targets for reset envs self.dof_position_targets[reset_env_ids] = 0 self._balance_bots.set_joint_position_targets(self.dof_position_targets) # .clone()) def reset_idx(self, env_ids): num_resets = len(env_ids) env_ids_32 = env_ids.type(torch.int32) env_ids_64 = env_ids.type(torch.int64) min_d = 0.001 # min horizontal dist from origin max_d = 0.4 # max horizontal dist from origin min_height = 1.0 max_height = 2.0 min_horizontal_speed = 0 max_horizontal_speed = 2 dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device) dirs = torch_random_dir_2((num_resets, 1), self._device) hpos = dists * dirs speedscales = (dists - min_d) / (max_d - min_d) hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device) hvels = -speedscales * hspeeds * dirs vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze() ball_pos = self.initial_ball_pos.clone() ball_rot = self.initial_ball_rot.clone() # position ball_pos[env_ids_64, 0:2] += hpos[..., 0:2] ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze() # rotation ball_rot[env_ids_64, 0] = 1 ball_rot[env_ids_64, 1:] = 0 ball_velocities = self.initial_ball_velocities.clone() # linear ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2] ball_velocities[env_ids_64, 2] = vspeeds # angular ball_velocities[env_ids_64, 3:6] = 0 # reset root state for bbots and balls in selected envs self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32) self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32) # reset root pose and velocity self._balance_bots.set_world_poses( self.initial_bot_pos[env_ids_64].clone(), self.initial_bot_rot[env_ids_64].clone(), indices=env_ids_32 ) self._balance_bots.set_velocities(self.initial_bot_velocities[env_ids_64].clone(), indices=env_ids_32) # reset DOF states for bbots in selected envs self._balance_bots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): dof_limits = self._balance_bots.get_dof_limits() self.bbot_dof_lower_limits, self.bbot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device)) self.initial_dof_positions = self._balance_bots.get_joint_positions() self.initial_bot_pos, self.initial_bot_rot = self._balance_bots.get_world_poses() # self.initial_bot_pos[..., 2] = 0.559 # tray_height self.initial_bot_velocities = self._balance_bots.get_velocities() self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses() self.initial_ball_velocities = self._balls.get_velocities() self.dof_position_targets = torch.zeros( (self.num_envs, self._balance_bots.num_dof), dtype=torch.float32, device=self._device, requires_grad=False ) actuated_joints = ["lower_leg0", "lower_leg1", "lower_leg2"] self.actuated_dof_indices = torch.tensor( [self._balance_bots._dof_indices[j] for j in actuated_joints], device=self._device, dtype=torch.long ) force_links = ["upper_leg0", "upper_leg1", "upper_leg2"] self._sensor_indices = torch.tensor( [self._balance_bots._body_indices[j] for j in force_links], device=self._device, dtype=torch.long ) def calculate_metrics(self) -> None: ball_dist = torch.sqrt( self.ball_positions[..., 0] * self.ball_positions[..., 0] + (self.ball_positions[..., 2] - 0.7) * (self.ball_positions[..., 2] - 0.7) + (self.ball_positions[..., 1]) * self.ball_positions[..., 1] ) ball_speed = torch.sqrt( self.ball_linvels[..., 0] * self.ball_linvels[..., 0] + self.ball_linvels[..., 1] * self.ball_linvels[..., 1] + self.ball_linvels[..., 2] * self.ball_linvels[..., 2] ) pos_reward = 1.0 / (1.0 + ball_dist) speed_reward = 1.0 / (1.0 + ball_speed) self.rew_buf[:] = pos_reward * speed_reward def is_done(self) -> None: reset = torch.where( self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf ) reset = torch.where( self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset ) self.reset_buf[:] = reset
13,958
Python
44.174757
140
0.630391
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/cartpole_camera.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from gym import spaces import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.tasks.cartpole import CartpoleTask from omniisaacgymenvs.robots.articulations.cartpole import Cartpole class CartpoleCameraTask(CartpoleTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._max_episode_length = 500 self._num_observations = 4 self._num_actions = 1 # use multi-dimensional observation for camera RGB self.observation_space = spaces.Box( np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * -np.Inf, np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * np.Inf) RLTask.__init__(self, name, env) def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0]) self._reset_dist = self._task_cfg["env"]["resetDist"] self._max_push_effort = self._task_cfg["env"]["maxEffort"] self.camera_type = self._task_cfg["env"].get("cameraType", 'rgb') self.camera_width = self._task_cfg["env"]["cameraWidth"] self.camera_height = self._task_cfg["env"]["cameraHeight"] self.camera_channels = 3 self._export_images = self._task_cfg["env"]["exportImages"] def cleanup(self) -> None: # initialize remaining buffers RLTask.cleanup(self) # override observation buffer for camera data self.obs_buf = torch.zeros( (self.num_envs, self.camera_width, self.camera_height, 3), device=self.device, dtype=torch.float) def set_up_scene(self, scene) -> None: self.get_cartpole() RLTask.set_up_scene(self, scene) # start replicator to capture image data self.rep.orchestrator._orchestrator._is_started = True # set up cameras self.render_products = [] env_pos = self._env_pos.cpu() for i in range(self._num_envs): camera = self.rep.create.camera( position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55)) render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height)) self.render_products.append(render_product) # initialize pytorch writer for vectorized collection self.pytorch_listener = self.PytorchListener() self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter") self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda") self.pytorch_writer.attach(self.render_products) self._cartpoles = ArticulationView( prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False ) scene.add(self._cartpoles) return def get_observations(self) -> dict: dof_pos = self._cartpoles.get_joint_positions(clone=False) dof_vel = self._cartpoles.get_joint_velocities(clone=False) self.cart_pos = dof_pos[:, self._cart_dof_idx] self.cart_vel = dof_vel[:, self._cart_dof_idx] self.pole_pos = dof_pos[:, self._pole_dof_idx] self.pole_vel = dof_vel[:, self._pole_dof_idx] # retrieve RGB data from all render products images = self.pytorch_listener.get_rgb_data() if images is not None: if self._export_images: from torchvision.utils import save_image, make_grid img = images/255 save_image(make_grid(img, nrows = 2), 'cartpole_export.png') self.obs_buf = torch.swapaxes(images, 1, 3).clone().float()/255.0 else: print("Image tensor is NONE!") return self.obs_buf
5,824
Python
41.518248
119
0.67342
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/anymal_terrain.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.anymal import Anymal from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import * from omniisaacgymenvs.utils.terrain_utils.terrain_utils import * from pxr import UsdLux, UsdPhysics class AnymalTerrainTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.height_samples = None self.custom_origins = False self.init_done = False self._env_spacing = 0.0 self._num_observations = 188 self._num_actions = 12 self.update_config(sim_config) RLTask.__init__(self, name, env) self.height_points = self.init_height_points() self.measured_heights = None # joint positions offsets self.default_dof_pos = torch.zeros( (self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False ) # reward episode sums torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = { "lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(), "orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(), "air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros(), } return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config # normalization self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"] self.height_meas_scale = self._task_cfg["env"]["learn"]["heightMeasurementScale"] self.action_scale = self._task_cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["termination"] = self._task_cfg["env"]["learn"]["terminalReward"] self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"] self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["ang_vel_xy"] = self._task_cfg["env"]["learn"]["angularVelocityXYRewardScale"] self.rew_scales["orient"] = self._task_cfg["env"]["learn"]["orientationRewardScale"] self.rew_scales["torque"] = self._task_cfg["env"]["learn"]["torqueRewardScale"] self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["base_height"] = self._task_cfg["env"]["learn"]["baseHeightRewardScale"] self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["hip"] = self._task_cfg["env"]["learn"]["hipRewardScale"] self.rew_scales["fallen_over"] = self._task_cfg["env"]["learn"]["fallenOverRewardScale"] # command ranges self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"] # base init state pos = self._task_cfg["env"]["baseInitState"]["pos"] rot = self._task_cfg["env"]["baseInitState"]["rot"] v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"] v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"] self.base_init_state = pos + rot + v_lin + v_ang # default joint positions self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"] # other self.decimation = self._task_cfg["env"]["control"]["decimation"] self.dt = self.decimation * self._task_cfg["sim"]["dt"] self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) self.push_interval = int(self._task_cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5) self.Kp = self._task_cfg["env"]["control"]["stiffness"] self.Kd = self._task_cfg["env"]["control"]["damping"] self.curriculum = self._task_cfg["env"]["terrain"]["curriculum"] self.base_threshold = 0.2 self.knee_threshold = 0.1 for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt self._num_envs = self._task_cfg["env"]["numEnvs"] self._task_cfg["sim"]["default_physics_material"]["static_friction"] = self._task_cfg["env"]["terrain"][ "staticFriction" ] self._task_cfg["sim"]["default_physics_material"]["dynamic_friction"] = self._task_cfg["env"]["terrain"][ "dynamicFriction" ] self._task_cfg["sim"]["default_physics_material"]["restitution"] = self._task_cfg["env"]["terrain"][ "restitution" ] self._task_cfg["sim"]["add_ground_plane"] = False def _get_noise_scale_vec(self, cfg): noise_vec = torch.zeros_like(self.obs_buf[0]) self.add_noise = self._task_cfg["env"]["learn"]["addNoise"] noise_level = self._task_cfg["env"]["learn"]["noiseLevel"] noise_vec[:3] = self._task_cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale noise_vec[3:6] = self._task_cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale noise_vec[6:9] = self._task_cfg["env"]["learn"]["gravityNoise"] * noise_level noise_vec[9:12] = 0.0 # commands noise_vec[12:24] = self._task_cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale noise_vec[24:36] = self._task_cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale noise_vec[36:176] = ( self._task_cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale ) noise_vec[176:188] = 0.0 # previous actions return noise_vec def init_height_points(self): # 1mx1.6m rectangle (without center line) y = 0.1 * torch.tensor( [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False ) # 10-50cm on each side x = 0.1 * torch.tensor( [-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False ) # 20-80cm on each side grid_x, grid_y = torch.meshgrid(x, y, indexing='ij') self.num_height_points = grid_x.numel() points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False) points[:, :, 0] = grid_x.flatten() points[:, :, 1] = grid_y.flatten() return points def _create_trimesh(self, create_mesh=True): self.terrain = Terrain(self._task_cfg["env"]["terrain"], num_robots=self.num_envs) vertices = self.terrain.vertices triangles = self.terrain.triangles position = torch.tensor([-self.terrain.border_size, -self.terrain.border_size, 0.0]) if create_mesh: add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position) self.height_samples = ( torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device) ) def set_up_scene(self, scene) -> None: self._stage = get_current_stage() self.get_terrain() self.get_anymal() super().set_up_scene(scene, collision_filter_global_paths=["/World/terrain"]) self._anymals = AnymalView( prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True ) scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) def initialize_views(self, scene): # initialize terrain variables even if we do not need to re-create the terrain mesh self.get_terrain(create_mesh=False) super().initialize_views(scene) if scene.object_exists("anymal_view"): scene.remove_object("anymal_view", registry_only=True) if scene.object_exists("knees_view"): scene.remove_object("knees_view", registry_only=True) if scene.object_exists("base_view"): scene.remove_object("base_view", registry_only=True) self._anymals = AnymalView( prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True ) scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) def get_terrain(self, create_mesh=True): self.env_origins = torch.zeros((self.num_envs, 3), device=self.device, requires_grad=False) if not self.curriculum: self._task_cfg["env"]["terrain"]["maxInitMapLevel"] = self._task_cfg["env"]["terrain"]["numLevels"] - 1 self.terrain_levels = torch.randint( 0, self._task_cfg["env"]["terrain"]["maxInitMapLevel"] + 1, (self.num_envs,), device=self.device ) self.terrain_types = torch.randint( 0, self._task_cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device ) self._create_trimesh(create_mesh=create_mesh) self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float) def get_anymal(self): anymal_translation = torch.tensor([0.0, 0.0, 0.66]) anymal_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) anymal = Anymal( prim_path=self.default_zero_env_path + "/anymal", name="anymal", translation=anymal_translation, orientation=anymal_orientation, ) self._sim_config.apply_articulation_settings( "anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("anymal") ) anymal.set_anymal_properties(self._stage, anymal.prim) anymal.prepare_contacts(self._stage, anymal.prim) self.dof_names = anymal.dof_names for i in range(self.num_actions): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle def post_reset(self): self.base_init_state = torch.tensor( self.base_init_state, dtype=torch.float, device=self.device, requires_grad=False ) self.timeout_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) # initialize some data used later on self.up_axis_idx = 2 self.common_step_counter = 0 self.extras = {} self.noise_scale_vec = self._get_noise_scale_vec(self._task_cfg) self.commands = torch.zeros( self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False ) # x vel, y vel, yaw vel, heading self.commands_scale = torch.tensor( [self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False, ) self.gravity_vec = torch.tensor( get_axis_params(-1.0, self.up_axis_idx), dtype=torch.float, device=self.device ).repeat((self.num_envs, 1)) self.forward_vec = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float, device=self.device).repeat( (self.num_envs, 1) ) self.torques = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.actions = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.last_actions = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) self.last_dof_vel = torch.zeros((self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.num_envs): self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]] self.num_dof = self._anymals.num_dof self.dof_pos = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device) self.dof_vel = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device) self.base_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device) self.base_quat = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device) self.base_velocities = torch.zeros((self.num_envs, 6), dtype=torch.float, device=self.device) self.knee_pos = torch.zeros((self.num_envs * 4, 3), dtype=torch.float, device=self.device) self.knee_quat = torch.zeros((self.num_envs * 4, 4), dtype=torch.float, device=self.device) indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) self.init_done = True def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities self.update_terrain_level(env_ids) self.base_pos[env_ids] = self.base_init_state[0:3] self.base_pos[env_ids, 0:3] += self.env_origins[env_ids] self.base_pos[env_ids, 0:2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device) self.base_quat[env_ids] = self.base_init_state[3:7] self.base_velocities[env_ids] = self.base_init_state[7:] self._anymals.set_world_poses( positions=self.base_pos[env_ids].clone(), orientations=self.base_quat[env_ids].clone(), indices=indices ) self._anymals.set_velocities(velocities=self.base_velocities[env_ids].clone(), indices=indices) self._anymals.set_joint_positions(positions=self.dof_pos[env_ids].clone(), indices=indices) self._anymals.set_joint_velocities(velocities=self.dof_vel[env_ids].clone(), indices=indices) self.commands[env_ids, 0] = torch_rand_float( self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids, 1] = torch_rand_float( self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids, 3] = torch_rand_float( self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze( 1 ) # set small commands to zero self.last_actions[env_ids] = 0.0 self.last_dof_vel[env_ids] = 0.0 self.feet_air_time[env_ids] = 0.0 self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): self.extras["episode"]["rew_" + key] = ( torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s ) self.episode_sums[key][env_ids] = 0.0 self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float()) def update_terrain_level(self, env_ids): if not self.init_done or not self.curriculum: # do not change on initial reset return root_pos, _ = self._anymals.get_world_poses(clone=False) distance = torch.norm(root_pos[env_ids, :2] - self.env_origins[env_ids, :2], dim=1) self.terrain_levels[env_ids] -= 1 * ( distance < torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s * 0.25 ) self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2) self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]] def refresh_dof_state_tensors(self): self.dof_pos = self._anymals.get_joint_positions(clone=False) self.dof_vel = self._anymals.get_joint_velocities(clone=False) def refresh_body_state_tensors(self): self.base_pos, self.base_quat = self._anymals.get_world_poses(clone=False) self.base_velocities = self._anymals.get_velocities(clone=False) self.knee_pos, self.knee_quat = self._anymals._knees.get_world_poses(clone=False) def pre_physics_step(self, actions): if not self._env._world.is_playing(): return self.actions = actions.clone().to(self.device) for i in range(self.decimation): if self._env._world.is_playing(): torques = torch.clip( self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos) - self.Kd * self.dof_vel, -80.0, 80.0, ) self._anymals.set_joint_efforts(torques) self.torques = torques SimulationContext.step(self._env._world, render=False) self.refresh_dof_state_tensors() def post_physics_step(self): self.progress_buf[:] += 1 if self._env._world.is_playing(): self.refresh_dof_state_tensors() self.refresh_body_state_tensors() self.common_step_counter += 1 if self.common_step_counter % self.push_interval == 0: self.push_robots() # prepare quantities self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3]) self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6]) self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec) forward = quat_apply(self.base_quat, self.forward_vec) heading = torch.atan2(forward[:, 1], forward[:, 0]) self.commands[:, 2] = torch.clip(0.5 * wrap_to_pi(self.commands[:, 3] - heading), -1.0, 1.0) self.check_termination() self.get_states() self.calculate_metrics() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.get_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] return self.obs_buf, self.rew_buf, self.reset_buf, self.extras def push_robots(self): self.base_velocities[:, 0:2] = torch_rand_float( -1.0, 1.0, (self.num_envs, 2), device=self.device ) # lin vel x/y self._anymals.set_velocities(self.base_velocities) def check_termination(self): self.timeout_buf = torch.where( self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.timeout_buf), torch.zeros_like(self.timeout_buf), ) knee_contact = ( torch.norm(self._anymals._knees.get_net_contact_forces(clone=False).view(self._num_envs, 4, 3), dim=-1) > 1.0 ) self.has_fallen = (torch.norm(self._anymals._base.get_net_contact_forces(clone=False), dim=1) > 1.0) | ( torch.sum(knee_contact, dim=-1) > 1.0 ) self.reset_buf = self.has_fallen.clone() self.reset_buf = torch.where(self.timeout_buf.bool(), torch.ones_like(self.reset_buf), self.reset_buf) def calculate_metrics(self): # velocity tracking reward lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"] # other base velocity penalties rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"] rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"] # orientation penalty rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"] # base height penalty rew_base_height = torch.square(self.base_pos[:, 2] - 0.52) * self.rew_scales["base_height"] # torque penalty rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"] # joint acc penalty rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"] # fallen over penalty rew_fallen_over = self.has_fallen * self.rew_scales["fallen_over"] # action rate penalty rew_action_rate = ( torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"] ) # cosmetic penalty for hip motion rew_hip = ( torch.sum(torch.abs(self.dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["hip"] ) # total reward self.rew_buf = ( rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height + rew_torque + rew_joint_acc + rew_action_rate + rew_hip + rew_fallen_over ) self.rew_buf = torch.clip(self.rew_buf, min=0.0, max=None) # add termination reward self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf # log episode reward sums self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy self.episode_sums["ang_vel_z"] += rew_ang_vel_z self.episode_sums["lin_vel_z"] += rew_lin_vel_z self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy self.episode_sums["orient"] += rew_orient self.episode_sums["torques"] += rew_torque self.episode_sums["joint_acc"] += rew_joint_acc self.episode_sums["action_rate"] += rew_action_rate self.episode_sums["base_height"] += rew_base_height self.episode_sums["hip"] += rew_hip def get_observations(self): self.measured_heights = self.get_heights() heights = ( torch.clip(self.base_pos[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.0) * self.height_meas_scale ) self.obs_buf = torch.cat( ( self.base_lin_vel * self.lin_vel_scale, self.base_ang_vel * self.ang_vel_scale, self.projected_gravity, self.commands[:, :3] * self.commands_scale, self.dof_pos * self.dof_pos_scale, self.dof_vel * self.dof_vel_scale, heights, self.actions, ), dim=-1, ) def get_ground_heights_below_knees(self): points = self.knee_pos.reshape(self.num_envs, 4, 3) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale def get_ground_heights_below_base(self): points = self.base_pos.reshape(self.num_envs, 1, 3) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale def get_heights(self, env_ids=None): if env_ids: points = quat_apply_yaw( self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids] ) + (self.base_pos[env_ids, 0:3]).unsqueeze(1) else: points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + ( self.base_pos[:, 0:3] ).unsqueeze(1) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale @torch.jit.script def quat_apply_yaw(quat, vec): quat_yaw = quat.clone().view(-1, 4) quat_yaw[:, 1:3] = 0.0 quat_yaw = normalize(quat_yaw) return quat_apply(quat_yaw, vec) @torch.jit.script def wrap_to_pi(angles): angles %= 2 * np.pi angles -= 2 * np.pi * (angles > np.pi) return angles def get_axis_params(value, axis_idx, x_value=0.0, dtype=float, n_dims=3): """construct arguments to `Vec` according to axis index.""" zs = np.zeros((n_dims,)) assert axis_idx < n_dims, "the axis dim should be within the vector dimensions" zs[axis_idx] = 1.0 params = np.where(zs == 1.0, value, zs) params[0] = x_value return list(params.astype(dtype))
29,337
Python
45.568254
120
0.609128
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/shadow_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.shadow_hand import ShadowHand from omniisaacgymenvs.robots.articulations.views.shadow_hand_view import ShadowHandView from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask class ShadowHandTask(InHandManipulationTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) InHandManipulationTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self.object_type = self._task_cfg["env"]["objectType"] assert self.object_type in ["block"] self.obs_type = self._task_cfg["env"]["observationType"] if not (self.obs_type in ["openai", "full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]" ) print("Obs type:", self.obs_type) self.num_obs_dict = { "openai": 42, "full_no_vel": 77, "full": 157, "full_state": 187, } self.asymmetric_obs = self._task_cfg["env"]["asymmetric_observations"] self.use_vel_obs = False self.fingertip_obs = True self.fingertips = [ "robot0:ffdistal", "robot0:mfdistal", "robot0:rfdistal", "robot0:lfdistal", "robot0:thdistal", ] self.num_fingertips = len(self.fingertips) self.object_scale = torch.tensor([1.0, 1.0, 1.0]) self.force_torque_obs_scale = 10.0 num_states = 0 if self.asymmetric_obs: num_states = 187 self._num_observations = self.num_obs_dict[self.obs_type] self._num_actions = 20 self._num_states = num_states InHandManipulationTask.update_config(self) def get_starting_positions(self): self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device) self.hand_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.pose_dy, self.pose_dz = -0.39, 0.10 def get_hand(self): shadow_hand = ShadowHand( prim_path=self.default_zero_env_path + "/shadow_hand", name="shadow_hand", translation=self.hand_start_translation, orientation=self.hand_start_orientation, ) self._sim_config.apply_articulation_settings( "shadow_hand", get_prim_at_path(shadow_hand.prim_path), self._sim_config.parse_actor_config("shadow_hand"), ) shadow_hand.set_shadow_hand_properties(stage=self._stage, shadow_hand_prim=shadow_hand.prim) shadow_hand.set_motor_control_mode(stage=self._stage, shadow_hand_path=shadow_hand.prim_path) def get_hand_view(self, scene): hand_view = ShadowHandView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view") scene.add(hand_view._fingers) return hand_view def get_observations(self): self.get_object_goal_observations() self.fingertip_pos, self.fingertip_rot = self._hands._fingers.get_world_poses(clone=False) self.fingertip_pos -= self._env_pos.repeat((1, self.num_fingertips)).reshape( self.num_envs * self.num_fingertips, 3 ) self.fingertip_velocities = self._hands._fingers.get_velocities(clone=False) self.hand_dof_pos = self._hands.get_joint_positions(clone=False) self.hand_dof_vel = self._hands.get_joint_velocities(clone=False) if self.obs_type == "full_state" or self.asymmetric_obs: self.vec_sensor_tensor = self._hands.get_measured_joint_forces( joint_indices=self._hands._sensor_indices ).view(self._num_envs, -1) if self.obs_type == "openai": self.compute_fingertip_observations(True) elif self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() elif self.obs_type == "full_state": self.compute_full_state(False) else: print("Unkown observations type!") if self.asymmetric_obs: self.compute_full_state(True) observations = {self._hands.name: {"obs_buf": self.obs_buf}} return observations def compute_fingertip_observations(self, no_vel=False): if no_vel: # Per https://arxiv.org/pdf/1808.00177.pdf Table 2 # Fingertip positions # Object Position, but not orientation # Relative target orientation # 3*self.num_fingertips = 15 self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15) self.obs_buf[:, 15:18] = self.object_pos self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 22:42] = self.actions else: # 13*self.num_fingertips = 65 self.obs_buf[:, 0:65] = self.fingertip_state.reshape(self.num_envs, 65) self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[:, 15:35] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.obs_buf[:, 35:65] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.obs_buf[:, 65:68] = self.object_pos self.obs_buf[:, 68:72] = self.object_rot self.obs_buf[:, 72:75] = self.object_linvel self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 78:81] = self.goal_pos self.obs_buf[:, 81:85] = self.goal_rot self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 89:109] = self.actions def compute_full_observations(self, no_vel=False): if no_vel: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, 24:37] = self.object_pos self.obs_buf[:, 27:31] = self.object_rot self.obs_buf[:, 31:34] = self.goal_pos self.obs_buf[:, 34:38] = self.goal_rot self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[:, 57:77] = self.actions else: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel self.obs_buf[:, 48:51] = self.object_pos self.obs_buf[:, 51:55] = self.object_rot self.obs_buf[:, 55:58] = self.object_linvel self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 61:64] = self.goal_pos self.obs_buf[:, 64:68] = self.goal_rot self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # (7+6)*self.num_fingertips = 65 self.obs_buf[:, 72:87] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[:, 87:107] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.obs_buf[:, 107:137] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.obs_buf[:, 137:157] = self.actions def compute_full_state(self, asymm_obs=False): if asymm_obs: self.states_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.states_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel # self.states_buf[:, 2*self.num_hand_dofs:3*self.num_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 2 * self.num_hand_dofs # 48 self.states_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos self.states_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot self.states_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel self.states_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.states_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos self.states_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot self.states_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul( self.object_rot, quat_conjugate(self.goal_rot) ) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 72 self.states_buf[ :, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips ] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips ] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips ] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques ] = (self.force_torque_obs_scale * self.vec_sensor_tensor) # obs_end = 72 + 65 + 30 = 167 # obs_total = obs_end + num_actions = 187 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.states_buf[:, obs_end : obs_end + self.num_actions] = self.actions else: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel self.obs_buf[:, 2 * self.num_hand_dofs : 3 * self.num_hand_dofs] = ( self.force_torque_obs_scale * self.dof_force_tensor ) obj_obs_start = 3 * self.num_hand_dofs # 48 self.obs_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos self.obs_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot self.obs_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel self.obs_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.obs_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos self.obs_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot self.obs_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul( self.object_rot, quat_conjugate(self.goal_rot) ) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 72 self.obs_buf[ :, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips ] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips ] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips ] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques ] = (self.force_torque_obs_scale * self.vec_sensor_tensor) # obs_end = 96 + 65 + 30 = 167 # obs_total = obs_end + num_actions = 187 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.obs_buf[:, obs_end : obs_end + self.num_actions] = self.actions
15,107
Python
48.211726
129
0.609188
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/franka_cabinet.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import math import numpy as np import torch from omni.isaac.cloner import Cloner from omni.isaac.core.objects import DynamicCuboid from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.rotations import * from omni.isaac.core.utils.torch.transformations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.cabinet import Cabinet from omniisaacgymenvs.robots.articulations.franka import Franka from omniisaacgymenvs.robots.articulations.views.cabinet_view import CabinetView from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView from pxr import Usd, UsdGeom class FrankaCabinetTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self.distX_offset = 0.04 self.dt = 1 / 60.0 self._num_observations = 23 self._num_actions = 9 RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.num_props = self._task_cfg["env"]["numProps"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] def set_up_scene(self, scene) -> None: self.get_franka() self.get_cabinet() if self.num_props > 0: self.get_props() super().set_up_scene(scene, filter_collisions=False) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view") scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._cabinets) scene.add(self._cabinets._drawers) if self.num_props > 0: self._props = RigidPrimView( prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False ) scene.add(self._props) self.init_data() return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("franka_view"): scene.remove_object("franka_view", registry_only=True) if scene.object_exists("hands_view"): scene.remove_object("hands_view", registry_only=True) if scene.object_exists("lfingers_view"): scene.remove_object("lfingers_view", registry_only=True) if scene.object_exists("rfingers_view"): scene.remove_object("rfingers_view", registry_only=True) if scene.object_exists("cabinet_view"): scene.remove_object("cabinet_view", registry_only=True) if scene.object_exists("drawers_view"): scene.remove_object("drawers_view", registry_only=True) if scene.object_exists("prop_view"): scene.remove_object("prop_view", registry_only=True) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view") scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._cabinets) scene.add(self._cabinets._drawers) if self.num_props > 0: self._props = RigidPrimView( prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False ) scene.add(self._props) self.init_data() def get_franka(self): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka") self._sim_config.apply_articulation_settings( "franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka") ) def get_cabinet(self): cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet") self._sim_config.apply_articulation_settings( "cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet") ) def get_props(self): prop_cloner = Cloner() drawer_pos = torch.tensor([0.0515, 0.0, 0.7172]) prop_color = torch.tensor([0.2, 0.4, 0.6]) props_per_row = int(math.ceil(math.sqrt(self.num_props))) prop_size = 0.08 prop_spacing = 0.09 xmin = -0.5 * prop_spacing * (props_per_row - 1) zmin = -0.5 * prop_spacing * (props_per_row - 1) prop_count = 0 prop_pos = [] for j in range(props_per_row): prop_up = zmin + j * prop_spacing for k in range(props_per_row): if prop_count >= self.num_props: break propx = xmin + k * prop_spacing prop_pos.append([propx, prop_up, 0.0]) prop_count += 1 prop = DynamicCuboid( prim_path=self.default_zero_env_path + "/prop/prop_0", name="prop", color=prop_color, size=prop_size, density=100.0, ) self._sim_config.apply_articulation_settings( "prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop") ) prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)] prop_cloner.clone( source_prim_path=self.default_zero_env_path + "/prop/prop_0", prim_paths=prop_paths, positions=np.array(prop_pos) + drawer_pos.numpy(), replicate_physics=False, ) def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device, ) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device, ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device, ) finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3]) grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine( hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3] ) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device) self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1)) self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat( (self._num_envs, 1) ) self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat( (self._num_envs, 1) ) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat( (self._num_envs, 1) ) self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat( (self._num_envs, 1) ) self.franka_default_dof_pos = torch.tensor( [1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False) self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos ( self.franka_grasp_rot, self.franka_grasp_pos, self.drawer_grasp_rot, self.drawer_grasp_pos, ) = self.compute_grasp_transforms( hand_rot, hand_pos, self.franka_local_grasp_rot, self.franka_local_grasp_pos, drawer_rot, drawer_pos, self.drawer_local_grasp_rot, self.drawer_local_grasp_pos, ) self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) to_target = self.drawer_grasp_pos - self.franka_grasp_pos self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, to_target, self.cabinet_dof_pos[:, 3].unsqueeze(-1), self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = {self._frankas.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos # reset cabinet self._cabinets.set_joint_positions( torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices ) self._cabinets.set_joint_velocities( torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices ) # reset props if self.num_props > 0: self._props.set_world_poses( self.default_prop_pos[self.prop_indices[env_ids].flatten()], self.default_prop_rot[self.prop_indices[env_ids].flatten()], self.prop_indices[env_ids].flatten().to(torch.int32), ) self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) if self.num_props > 0: self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses() self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view( self._num_envs, self.num_props ) # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: self.rew_buf[:] = self.compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos, self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot, self.franka_lfinger_pos, self.franka_rfinger_pos, self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis, self._num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale, self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self._max_episode_length, self.franka_dof_pos, self.finger_close_reward_scale, ) def is_done(self) -> None: # reset if drawer is open or max length reached self.reset_buf = torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where( self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf ) def compute_grasp_transforms( self, hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos, drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos, ): global_franka_rot, global_franka_pos = tf_combine( hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos ) global_drawer_rot, global_drawer_pos = tf_combine( drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos ) return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos def compute_franka_reward( self, reset_buf, progress_buf, actions, cabinet_dof_pos, franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot, franka_lfinger_pos, franka_rfinger_pos, gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis, num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale, finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length, joint_positions, finger_close_reward_scale, ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor) -> Tuple[Tensor, Tensor] # distance from hand to the drawer d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1) dist_reward = 1.0 / (1.0 + d**2) dist_reward *= dist_reward dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward) axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis) axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis) dot1 = ( torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) ) # alignment of forward axis for gripper dot2 = ( torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) ) # alignment of up axis for gripper # reward for matching the orientation of the hand to the drawer (fingers wrapped) rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2) # bonus if left finger is above the drawer handle and right below around_handle_reward = torch.zeros_like(rot_reward) around_handle_reward = torch.where( franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where( franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward ), around_handle_reward, ) # reward for distance of each finger from the drawer finger_dist_reward = torch.zeros_like(rot_reward) lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) finger_dist_reward = torch.where( franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where( franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], (0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward, ), finger_dist_reward, ) finger_close_reward = torch.zeros_like(rot_reward) finger_close_reward = torch.where( d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward ) # regularization on the actions (summed for each environment) action_penalty = torch.sum(actions**2, dim=-1) # how far the cabinet has been opened out open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint rewards = ( dist_reward_scale * dist_reward + rot_reward_scale * rot_reward + around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward + finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty + finger_close_reward * finger_close_reward_scale ) # bonus for opening drawer properly rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards) # # prevent bad style in opening drawer # rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, # torch.ones_like(rewards) * -1, rewards) # rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, # torch.ones_like(rewards) * -1, rewards) return rewards
22,939
Python
41.324723
222
0.599895
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/crazyflie.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import torch from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.crazyflie import Crazyflie from omniisaacgymenvs.robots.articulations.views.crazyflie_view import CrazyflieView EPS = 1e-6 # small constant to avoid divisions by 0 and log(0) class CrazyflieTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 18 self._num_actions = 4 self._crazyflie_position = torch.tensor([0, 0, 1.0]) self._ball_position = torch.tensor([0, 0, 1.0]) RLTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] self.dt = self._task_cfg["sim"]["dt"] # parameters for the crazyflie self.arm_length = 0.05 # parameters for the controller self.motor_damp_time_up = 0.15 self.motor_damp_time_down = 0.15 # I use the multiplier 4, since 4*T ~ time for a step response to finish, where # T is a time constant of the first-order filter self.motor_tau_up = 4 * self.dt / (self.motor_damp_time_up + EPS) self.motor_tau_down = 4 * self.dt / (self.motor_damp_time_down + EPS) # thrust max self.mass = 0.028 self.thrust_to_weight = 1.9 self.motor_assymetry = np.array([1.0, 1.0, 1.0, 1.0]) # re-normalizing to sum-up to 4 self.motor_assymetry = self.motor_assymetry * 4.0 / np.sum(self.motor_assymetry) self.grav_z = -1.0 * self._task_cfg["sim"]["gravity"][2] def set_up_scene(self, scene) -> None: self.get_crazyflie() self.get_target() RLTask.set_up_scene(self, scene) self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view") scene.add(self._copters) scene.add(self._balls) for i in range(4): scene.add(self._copters.physics_rotors[i]) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("crazyflie_view"): scene.remove_object("crazyflie_view", registry_only=True) if scene.object_exists("ball_view"): scene.remove_object("ball_view", registry_only=True) for i in range(1, 5): scene.remove_object(f"m{i}_prop_view", registry_only=True) self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view") scene.add(self._copters) scene.add(self._balls) for i in range(4): scene.add(self._copters.physics_rotors[i]) def get_crazyflie(self): copter = Crazyflie( prim_path=self.default_zero_env_path + "/Crazyflie", name="crazyflie", translation=self._crazyflie_position ) self._sim_config.apply_articulation_settings( "crazyflie", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("crazyflie") ) def get_target(self): radius = 0.2 color = torch.tensor([1, 0, 0]) ball = DynamicSphere( prim_path=self.default_zero_env_path + "/ball", translation=self._ball_position, name="target_0", radius=radius, color=color, ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) ball.set_collision_enabled(False) def get_observations(self) -> dict: self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) root_positions = self.root_pos - self._env_pos root_quats = self.root_rot rot_x = quat_axis(root_quats, 0) rot_y = quat_axis(root_quats, 1) rot_z = quat_axis(root_quats, 2) root_linvels = self.root_velocities[:, :3] root_angvels = self.root_velocities[:, 3:] self.obs_buf[..., 0:3] = self.target_positions - root_positions self.obs_buf[..., 3:6] = rot_x self.obs_buf[..., 6:9] = rot_y self.obs_buf[..., 9:12] = rot_z self.obs_buf[..., 12:15] = root_linvels self.obs_buf[..., 15:18] = root_angvels observations = {self._copters.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1) if len(set_target_ids) > 0: self.set_targets(set_target_ids) actions = actions.clone().to(self._device) self.actions = actions # clamp to [-1.0, 1.0] thrust_cmds = torch.clamp(actions, min=-1.0, max=1.0) # scale to [0.0, 1.0] thrust_cmds = (thrust_cmds + 1.0) / 2.0 # filtering the thruster and adding noise motor_tau = self.motor_tau_up * torch.ones((self._num_envs, 4), dtype=torch.float32, device=self._device) motor_tau[thrust_cmds < self.thrust_cmds_damp] = self.motor_tau_down motor_tau[motor_tau > 1.0] = 1.0 # Since NN commands thrusts we need to convert to rot vel and back thrust_rot = thrust_cmds**0.5 self.thrust_rot_damp = motor_tau * (thrust_rot - self.thrust_rot_damp) + self.thrust_rot_damp self.thrust_cmds_damp = self.thrust_rot_damp**2 ## Adding noise thrust_noise = 0.01 * torch.randn(4, dtype=torch.float32, device=self._device) thrust_noise = thrust_cmds * thrust_noise self.thrust_cmds_damp = torch.clamp(self.thrust_cmds_damp + thrust_noise, min=0.0, max=1.0) thrusts = self.thrust_max * self.thrust_cmds_damp # thrusts given rotation root_quats = self.root_rot rot_x = quat_axis(root_quats, 0) rot_y = quat_axis(root_quats, 1) rot_z = quat_axis(root_quats, 2) rot_matrix = torch.cat((rot_x, rot_y, rot_z), 1).reshape(-1, 3, 3) force_x = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device) force_y = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device) force_xy = torch.cat((force_x, force_y), 1).reshape(-1, 4, 2) thrusts = thrusts.reshape(-1, 4, 1) thrusts = torch.cat((force_xy, thrusts), 2) thrusts_0 = thrusts[:, 0] thrusts_0 = thrusts_0[:, :, None] thrusts_1 = thrusts[:, 1] thrusts_1 = thrusts_1[:, :, None] thrusts_2 = thrusts[:, 2] thrusts_2 = thrusts_2[:, :, None] thrusts_3 = thrusts[:, 3] thrusts_3 = thrusts_3[:, :, None] mod_thrusts_0 = torch.matmul(rot_matrix, thrusts_0) mod_thrusts_1 = torch.matmul(rot_matrix, thrusts_1) mod_thrusts_2 = torch.matmul(rot_matrix, thrusts_2) mod_thrusts_3 = torch.matmul(rot_matrix, thrusts_3) self.thrusts[:, 0] = torch.squeeze(mod_thrusts_0) self.thrusts[:, 1] = torch.squeeze(mod_thrusts_1) self.thrusts[:, 2] = torch.squeeze(mod_thrusts_2) self.thrusts[:, 3] = torch.squeeze(mod_thrusts_3) # clear actions for reset envs self.thrusts[reset_env_ids] = 0 # spin spinning rotors prop_rot = self.thrust_cmds_damp * self.prop_max_rot self.dof_vel[:, 0] = prop_rot[:, 0] self.dof_vel[:, 1] = -1.0 * prop_rot[:, 1] self.dof_vel[:, 2] = prop_rot[:, 2] self.dof_vel[:, 3] = -1.0 * prop_rot[:, 3] self._copters.set_joint_velocities(self.dof_vel) # apply actions for i in range(4): self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices) def post_reset(self): thrust_max = self.grav_z * self.mass * self.thrust_to_weight * self.motor_assymetry / 4.0 self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device) self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.thrust_max = torch.tensor(thrust_max, device=self._device, dtype=torch.float32) self.motor_linearity = 1.0 self.prop_max_rot = 433.3 self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32) self.target_positions[:, 2] = 1 self.actions = torch.zeros((self._num_envs, 4), device=self._device, dtype=torch.float32) self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device) # Extra info self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = { "rew_pos": torch_zeros(), "rew_orient": torch_zeros(), "rew_effort": torch_zeros(), "rew_spin": torch_zeros(), "raw_dist": torch_zeros(), "raw_orient": torch_zeros(), "raw_effort": torch_zeros(), "raw_spin": torch_zeros(), } self.root_pos, self.root_rot = self._copters.get_world_poses() self.root_velocities = self._copters.get_velocities() self.dof_pos = self._copters.get_joint_positions() self.dof_vel = self._copters.get_joint_velocities() self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses(clone=False) self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone() # control parameters self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device) self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.set_targets(self.all_indices) def set_targets(self, env_ids): num_sets = len(env_ids) envs_long = env_ids.long() # set target position randomly with x, y in (0, 0) and z in (2) self.target_positions[envs_long, 0:2] = torch.zeros((num_sets, 2), device=self._device) self.target_positions[envs_long, 2] = torch.ones(num_sets, device=self._device) * 2.0 # shift the target up so it visually aligns better ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long] ball_pos[:, 2] += 0.0 self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids) def reset_idx(self, env_ids): num_resets = len(env_ids) self.dof_pos[env_ids, :] = torch_rand_float(-0.0, 0.0, (num_resets, self._copters.num_dof), device=self._device) self.dof_vel[env_ids, :] = 0 root_pos = self.initial_root_pos.clone() root_pos[env_ids, 0] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 1] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 2] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1) root_velocities = self.root_velocities.clone() root_velocities[env_ids] = 0 # apply resets self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids) self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids) self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids) self._copters.set_velocities(root_velocities[env_ids], indices=env_ids) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self.thrust_cmds_damp[env_ids] = 0 self.thrust_rot_damp[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length self.episode_sums[key][env_ids] = 0.0 def calculate_metrics(self) -> None: root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_angvels = self.root_velocities[:, 3:] # pos reward target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1)) pos_reward = 1.0 / (1.0 + target_dist) self.target_dist = target_dist self.root_positions = root_positions # orient reward ups = quat_axis(root_quats, 2) self.orient_z = ups[..., 2] up_reward = torch.clamp(ups[..., 2], min=0.0, max=1.0) # effort reward effort = torch.square(self.actions).sum(-1) effort_reward = 0.05 * torch.exp(-0.5 * effort) # spin reward spin = torch.square(root_angvels).sum(-1) spin_reward = 0.01 * torch.exp(-1.0 * spin) # combined reward self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spin_reward) - effort_reward # log episode reward sums self.episode_sums["rew_pos"] += pos_reward self.episode_sums["rew_orient"] += up_reward self.episode_sums["rew_effort"] += effort_reward self.episode_sums["rew_spin"] += spin_reward # log raw info self.episode_sums["raw_dist"] += target_dist self.episode_sums["raw_orient"] += ups[..., 2] self.episode_sums["raw_effort"] += effort self.episode_sums["raw_spin"] += spin def is_done(self) -> None: # resets due to misbehavior ones = torch.ones_like(self.reset_buf) die = torch.zeros_like(self.reset_buf) die = torch.where(self.target_dist > 5.0, ones, die) # z >= 0.5 & z <= 5.0 & up > 0 die = torch.where(self.root_positions[..., 2] < 0.5, ones, die) die = torch.where(self.root_positions[..., 2] > 5.0, ones, die) die = torch.where(self.orient_z < 0.0, ones, die) # resets due to episode length self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
16,830
Python
41.502525
120
0.61937
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/humanoid.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.humanoid import Humanoid from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask from pxr import PhysxSchema class HumanoidLocomotionTask(LocomotionTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 87 self._num_actions = 21 self._humanoid_positions = torch.tensor([0, 0, 1.34]) LocomotionTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config LocomotionTask.update_config(self) def set_up_scene(self, scene) -> None: self.get_humanoid() RLTask.set_up_scene(self, scene) self._humanoids = ArticulationView( prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False ) scene.add(self._humanoids) return def initialize_views(self, scene): RLTask.initialize_views(self, scene) if scene.object_exists("humanoid_view"): scene.remove_object("humanoid_view", registry_only=True) self._humanoids = ArticulationView( prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False ) scene.add(self._humanoids) def get_humanoid(self): humanoid = Humanoid( prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions ) self._sim_config.apply_articulation_settings( "Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid") ) def get_robot(self): return self._humanoids def post_reset(self): self.joint_gears = torch.tensor( [ 67.5000, # lower_waist 67.5000, # lower_waist 67.5000, # right_upper_arm 67.5000, # right_upper_arm 67.5000, # left_upper_arm 67.5000, # left_upper_arm 67.5000, # pelvis 45.0000, # right_lower_arm 45.0000, # left_lower_arm 45.0000, # right_thigh: x 135.0000, # right_thigh: y 45.0000, # right_thigh: z 45.0000, # left_thigh: x 135.0000, # left_thigh: y 45.0000, # left_thigh: z 90.0000, # right_knee 90.0000, # left_knee 22.5, # right_foot 22.5, # right_foot 22.5, # left_foot 22.5, # left_foot ], device=self._device, ) self.max_motor_effort = torch.max(self.joint_gears) self.motor_effort_ratio = self.joint_gears / self.max_motor_effort dof_limits = self._humanoids.get_dof_limits() self.dof_limits_lower = dof_limits[0, :, 0].to(self._device) self.dof_limits_upper = dof_limits[0, :, 1].to(self._device) force_links = ["left_foot", "right_foot"] self._sensor_indices = torch.tensor( [self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=torch.long ) LocomotionTask.post_reset(self) def get_dof_at_limit_cost(self): return get_dof_at_limit_cost(self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale) @torch.jit.script def get_dof_at_limit_cost(obs_buf, motor_effort_ratio, joints_at_limit_cost_scale): # type: (Tensor, Tensor, float) -> Tensor scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02 dof_at_limit_cost = torch.sum( (torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1 ) return dof_at_limit_cost
5,980
Python
41.119718
117
0.651003
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/franka_deformable.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.franka import Franka from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils from omni.isaac.core.materials.deformable_material import DeformableMaterial from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView from omni.physx.scripts import deformableUtils, physicsUtils import numpy as np import torch import math from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema class FrankaDeformableTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self.update_config(sim_config) self.dt = 1/60. self._num_observations = 39 self._num_actions = 9 RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.action_scale = self._task_cfg["env"]["actionScale"] def set_up_scene(self, scene) -> None: self.stage = get_current_stage() self.assets_root_path = get_assets_root_path() if self.assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") self.get_franka() self.get_beaker() self.get_deformable_tube() super().set_up_scene(scene=scene, replicate_physics=False) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") self.deformableView = DeformablePrimView( prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view" ) scene.add(self.deformableView) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("franka_view"): scene.remove_object("franka_view", registry_only=True) if scene.object_exists("hands_view"): scene.remove_object("hands_view", registry_only=True) if scene.object_exists("lfingers_view"): scene.remove_object("lfingers_view", registry_only=True) if scene.object_exists("rfingers_view"): scene.remove_object("rfingers_view", registry_only=True) if scene.object_exists("deformabletube_view"): scene.remove_object("deformabletube_view", registry_only=True) self._frankas = FrankaView( prim_paths_expr="/World/envs/.*/franka", name="franka_view" ) self.deformableView = DeformablePrimView( prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view" ) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self.deformableView) def get_franka(self): franka = Franka( prim_path=self.default_zero_env_path + "/franka", name="franka", orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]), translation=torch.tensor([0.0, 0.0, 0.0]), ) self._sim_config.apply_articulation_settings( "franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka") ) franka.set_franka_properties(stage=self.stage, prim=franka.prim) def get_beaker(self): _usd_path = self.assets_root_path + "/Isaac/Props/Beaker/beaker_500ml.usd" mesh_path = self.default_zero_env_path + "/beaker" add_reference_to_stage(_usd_path, mesh_path) beaker = RigidPrim( prim_path=mesh_path+"/beaker", name="beaker", position=torch.tensor([0.5, 0.2, 0.095]), ) self._sim_config.apply_articulation_settings("beaker", beaker.prim, self._sim_config.parse_actor_config("beaker")) def get_deformable_tube(self): _usd_path = self.assets_root_path + "/Isaac/Props/DeformableTube/tube.usd" mesh_path = self.default_zero_env_path + "/deformableTube/tube" add_reference_to_stage(_usd_path, mesh_path) skin_mesh = get_prim_at_path(mesh_path) physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh) physicsUtils.set_or_add_translate_op(skin_mesh, (0.6, 0.0, 0.005)) physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([0, 0, 1]), 90).GetQuat()) def get_observations(self) -> dict: franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False) self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False) self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos tube_positions = self.deformableView.get_simulation_mesh_nodal_positions(clone=False) tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities(clone=False) self.tube_front_positions = tube_positions[:, 200, :] - self._env_pos self.tube_front_velocities = tube_velocities[:, 200, :] self.tube_back_positions = tube_positions[:, -1, :] - self._env_pos self.tube_back_velocities = tube_velocities[:, -1, :] front_to_gripper = self.tube_front_positions - self.gripper_site_pos to_front_goal = self.front_goal_pos - self.tube_front_positions to_back_goal = self.back_goal_pos - self.tube_back_positions self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, front_to_gripper, to_front_goal, to_back_goal, self.tube_front_positions, self.tube_front_velocities, self.tube_back_positions, self.tube_back_velocities, ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) self.franka_dof_targets[:, -1] = self.franka_dof_targets[:, -2] env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) pos = self.franka_default_dof_pos dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) self.deformableView.set_simulation_mesh_nodal_positions(self.initial_tube_positions[env_ids], indices) self.deformableView.set_simulation_mesh_nodal_velocities(self.initial_tube_velocities[env_ids], indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): self.franka_default_dof_pos = torch.tensor( [0.00, 0.63, 0.00, -2.15, 0.00, 2.76, 0.75, 0.02, 0.02], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) self.front_goal_pos = torch.tensor([0.36, 0.0, 0.23], device=self._device).repeat((self._num_envs, 1)) self.back_goal_pos = torch.tensor([0.5, 0.2, 0.0], device=self._device).repeat((self._num_envs, 1)) self.goal_hand_rot = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self._device).repeat((self.num_envs, 1)) self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False) self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False) self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos self.initial_tube_positions = self.deformableView.get_simulation_mesh_nodal_positions() self.initial_tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities() self.tube_front_positions = self.initial_tube_positions[:, 0, :] - self._env_pos self.tube_front_velocities = self.initial_tube_velocities[:, 0, :] self.tube_back_positions = self.initial_tube_positions[:, -1, :] - self._env_pos self.tube_back_velocities = self.initial_tube_velocities[:, -1, :] self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: goal_distance_error = torch.norm(self.tube_back_positions[:, 0:2] - self.back_goal_pos[:, 0:2], p = 2, dim = -1) goal_dist_reward = 1.0 / (5*goal_distance_error + .025) current_z_level = self.tube_back_positions[:, 2:3] z_lift_level = torch.where( goal_distance_error < 0.07, torch.zeros_like(current_z_level), torch.ones_like(current_z_level)*0.18 ) front_lift_error = torch.norm(current_z_level - z_lift_level, p = 2, dim = -1) front_lift_reward = 1.0 / (5*front_lift_error + .025) rewards = goal_dist_reward + 4*front_lift_reward self.rew_buf[:] = rewards def is_done(self) -> None: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where(self.tube_front_positions[:, 0] < 0, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where(self.tube_front_positions[:, 0] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where(self.tube_front_positions[:, 1] < -1.0, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where(self.tube_front_positions[:, 1] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
13,322
Python
42.825658
136
0.641045
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/ant.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.ant import Ant from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask from pxr import PhysxSchema class AntLocomotionTask(LocomotionTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) LocomotionTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_observations = 60 self._num_actions = 8 self._ant_positions = torch.tensor([0, 0, 0.5]) LocomotionTask.update_config(self) def set_up_scene(self, scene) -> None: self.get_ant() RLTask.set_up_scene(self, scene) self._ants = ArticulationView( prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False ) scene.add(self._ants) return def initialize_views(self, scene): RLTask.initialize_views(self, scene) if scene.object_exists("ant_view"): scene.remove_object("ant_view", registry_only=True) self._ants = ArticulationView( prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False ) scene.add(self._ants) def get_ant(self): ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions) self._sim_config.apply_articulation_settings( "Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant") ) def get_robot(self): return self._ants def post_reset(self): self.joint_gears = torch.tensor([15, 15, 15, 15, 15, 15, 15, 15], dtype=torch.float32, device=self._device) dof_limits = self._ants.get_dof_limits() self.dof_limits_lower = dof_limits[0, :, 0].to(self._device) self.dof_limits_upper = dof_limits[0, :, 1].to(self._device) self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self._device) force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"] self._sensor_indices = torch.tensor( [self._ants._body_indices[j] for j in force_links], device=self._device, dtype=torch.long ) LocomotionTask.post_reset(self) def get_dof_at_limit_cost(self): return get_dof_at_limit_cost(self.obs_buf, self._ants.num_dof) @torch.jit.script def get_dof_at_limit_cost(obs_buf, num_dof): # type: (Tensor, int) -> Tensor return torch.sum(obs_buf[:, 12 : 12 + num_dof] > 0.99, dim=-1)
4,691
Python
41.654545
115
0.69708
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/cartpole.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.cartpole import Cartpole class CartpoleTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._max_episode_length = 500 self._num_observations = 4 self._num_actions = 1 RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0]) self._reset_dist = self._task_cfg["env"]["resetDist"] self._max_push_effort = self._task_cfg["env"]["maxEffort"] def set_up_scene(self, scene) -> None: self.get_cartpole() super().set_up_scene(scene) self._cartpoles = ArticulationView( prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False ) scene.add(self._cartpoles) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("cartpole_view"): scene.remove_object("cartpole_view", registry_only=True) self._cartpoles = ArticulationView( prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False ) scene.add(self._cartpoles) def get_cartpole(self): cartpole = Cartpole( prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions ) # applies articulation settings from the task configuration yaml file self._sim_config.apply_articulation_settings( "Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole") ) def get_observations(self) -> dict: dof_pos = self._cartpoles.get_joint_positions(clone=False) dof_vel = self._cartpoles.get_joint_velocities(clone=False) self.cart_pos = dof_pos[:, self._cart_dof_idx] self.cart_vel = dof_vel[:, self._cart_dof_idx] self.pole_pos = dof_pos[:, self._pole_dof_idx] self.pole_vel = dof_vel[:, self._pole_dof_idx] self.obs_buf[:, 0] = self.cart_pos self.obs_buf[:, 1] = self.cart_vel self.obs_buf[:, 2] = self.pole_pos self.obs_buf[:, 3] = self.pole_vel observations = {self._cartpoles.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = actions.to(self._device) forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device) forces[:, self._cart_dof_idx] = self._max_push_effort * actions[:, 0] indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device) self._cartpoles.set_joint_efforts(forces, indices=indices) def reset_idx(self, env_ids): num_resets = len(env_ids) # randomize DOF positions dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device) dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device)) dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device)) # randomize DOF velocities dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device) dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device)) dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device)) # apply resets indices = env_ids.to(dtype=torch.int32) self._cartpoles.set_joint_positions(dof_pos, indices=indices) self._cartpoles.set_joint_velocities(dof_vel, indices=indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint") self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint") # randomize all envs indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: reward = 1.0 - self.pole_pos * self.pole_pos - 0.01 * torch.abs(self.cart_vel) - 0.005 * torch.abs(self.pole_vel) reward = torch.where(torch.abs(self.cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward) reward = torch.where(torch.abs(self.pole_pos) > np.pi / 2, torch.ones_like(reward) * -2.0, reward) self.rew_buf[:] = reward def is_done(self) -> None: resets = torch.where(torch.abs(self.cart_pos) > self._reset_dist, 1, 0) resets = torch.where(torch.abs(self.pole_pos) > math.pi / 2, 1, resets) resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets) self.reset_buf[:] = resets
7,256
Python
42.981818
121
0.659179
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/kukakr120r2500pro_reacher.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Ref: /omniisaacgymenvs/tasks/shadow_hand.py import math import numpy as np import torch # from omniisaacgymenvs.sim2real.kukakr120r2500pro import RealWorldKukaKR120R2500Pro from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig from omniisaacgymenvs.robots.articulations.views.kukakr120r2500pro_view import KukaKR120R2500ProView from omniisaacgymenvs.robots.articulations.kukakr120r2500pro import KukaKR120R2500Pro from omniisaacgymenvs.tasks.shared.reacher import ReacherTask from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch import * from omni.isaac.gym.vec_env import VecEnvBase class KukaKR120R2500ProReacherTask(ReacherTask): def __init__( self, name: str, sim_config: SimConfig, env: VecEnvBase, offset=None ) -> None: self.update_config(sim_config) ReacherTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self.obs_type = self._task_cfg["env"]["observationType"] if not (self.obs_type in ["full"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [full]") print("Obs type:", self.obs_type) self.num_obs_dict = { "full": 29, # 6: Kuka joints position (action space) # 6: Kuka joints velocity # 3: goal position # 4: goal rotation # 4: goal relative rotation # 6: previous action } self.object_scale = torch.tensor([1.0] * 3) self.goal_scale = torch.tensor([2.0] * 3) self._num_observations = self.num_obs_dict[self.obs_type] self._num_actions = 6 self._num_states = 0 pi = math.pi # For actions # The dof limits follow those defined in: # thirdparty/kuka_kr120_support/urdf/kr120r2500pro.urdf self._dof_limits = torch.tensor([[ [np.deg2rad(-185), np.deg2rad(185)], [np.deg2rad(-155), np.deg2rad(35)], [np.deg2rad(-130), np.deg2rad(154)], [np.deg2rad(-350), np.deg2rad(350)], [np.deg2rad(-130), np.deg2rad(130)], [np.deg2rad(-350), np.deg2rad(350)], ]], dtype=torch.float32, device=self._cfg["sim_device"]) self.useURDF = self._task_cfg["env"]["useURDF"] # Setup Sim2Real sim2real_config = self._task_cfg['sim2real'] if sim2real_config['enabled'] and self.test and self.num_envs == 1: raise NotImplementedError("Sim2Real is not implemented for Kuka KR120R2500Pro yet!") ReacherTask.update_config(self) def get_num_dof(self): return self._arms.num_dof def get_arm(self): if not self.useURDF: usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Kuka/KR120_R2500_Pro/kr120r2500pro_urdf_instanceable.usd" else: raise NotImplementedError("Only URDF is supported for Kuka KR120R2500Pro now!") kuka = KukaKR120R2500Pro( prim_path=self.default_zero_env_path + "/Kuka", name="Kuka", usd_path=usd_path ) self._sim_config.apply_articulation_settings( "kuka", get_prim_at_path(kuka.prim_path), self._sim_config.parse_actor_config("kuka"), ) def get_arm_view(self, scene): end_effector_prim_paths_expr = "/World/envs/.*/Kuka/tool0" arm_view = KukaKR120R2500ProView( prim_paths_expr="/World/envs/.*/Kuka", end_effector_prim_paths_expr=end_effector_prim_paths_expr, name="kuka_view" ) scene.add(arm_view._end_effectors) return arm_view def get_object_displacement_tensor(self): return torch.tensor([0.05, 0.0, 0.0], device=self.device).repeat((self.num_envs, 1)) def get_observations(self): self.arm_dof_pos = self._arms.get_joint_positions() self.arm_dof_vel = self._arms.get_joint_velocities() if self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() else: print("Unkown observations type!") observations = {self._arms.name: {"obs_buf": self.obs_buf}} return observations def get_reset_target_new_pos(self, n_reset_envs): # Randomly generate goal positions, although the resulting goal may still not be reachable. new_pos = torch_rand_float(-1, 1, (n_reset_envs, 3), device=self.device) new_pos[:, 0] = new_pos[:, 0] * 0.5 + 1.0 * torch.sign(new_pos[:, 0]) new_pos[:, 1] = new_pos[:, 1] * 0.5 + 1.0 * torch.sign(new_pos[:, 1]) new_pos[:, 2] = torch.abs(new_pos[:, 2] * 1.3) + 0.2 return new_pos def compute_full_observations(self, no_vel=False): if no_vel: raise NotImplementedError() else: # There are many redundant information for the simple Reacher task, but we'll keep them for now. self.obs_buf[:, 0:self.num_arm_dofs] = unscale(self.arm_dof_pos[:, :self.num_arm_dofs], self.arm_dof_lower_limits, self.arm_dof_upper_limits) self.obs_buf[:, self.num_arm_dofs:2*self.num_arm_dofs] = self.vel_obs_scale * self.arm_dof_vel[:, :self.num_arm_dofs] base = 2 * self.num_arm_dofs self.obs_buf[:, base+0:base+3] = self.goal_pos self.obs_buf[:, base+3:base+7] = self.goal_rot self.obs_buf[:, base+7:base+11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, base+11:base+17] = self.actions def send_joint_pos(self, joint_pos): # self.real_world_kukakr120r2500pro.send_joint_pos(joint_pos) pass
7,583
Python
41.606741
147
0.64605
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/quadcopter.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.quadcopter import Quadcopter from omniisaacgymenvs.robots.articulations.views.quadcopter_view import QuadcopterView class QuadcopterTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 21 self._num_actions = 12 self._copter_position = torch.tensor([0, 0, 1.0]) RLTask.__init__(self, name=name, env=env) max_thrust = 2.0 self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32) self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32) self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] self.dt = self._task_cfg["sim"]["dt"] def set_up_scene(self, scene) -> None: self.get_copter() self.get_target() RLTask.set_up_scene(self, scene) self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view") self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False ) self._balls._non_root_link = True # do not set states for kinematics scene.add(self._copters) scene.add(self._copters.rotors) scene.add(self._balls) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("quadcopter_view"): scene.remove_object("quadcopter_view", registry_only=True) if scene.object_exists("rotors_view"): scene.remove_object("rotors_view", registry_only=True) if scene.object_exists("targets_view"): scene.remove_object("targets_view", registry_only=True) self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view") self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False ) scene.add(self._copters) scene.add(self._copters.rotors) scene.add(self._balls) def get_copter(self): copter = Quadcopter( prim_path=self.default_zero_env_path + "/Quadcopter", name="quadcopter", translation=self._copter_position ) self._sim_config.apply_articulation_settings( "copter", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("copter") ) def get_target(self): radius = 0.05 color = torch.tensor([1, 0, 0]) ball = DynamicSphere( prim_path=self.default_zero_env_path + "/ball", name="target_0", radius=radius, color=color, ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) ball.set_collision_enabled(False) def get_observations(self) -> dict: self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) self.dof_pos = self._copters.get_joint_positions(clone=False) root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_linvels = self.root_velocities[:, :3] root_angvels = self.root_velocities[:, 3:] self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3 self.obs_buf[..., 3:7] = root_quats self.obs_buf[..., 7:10] = root_linvels / 2 self.obs_buf[..., 10:13] = root_angvels / math.pi self.obs_buf[..., 13:21] = self.dof_pos observations = {self._copters.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = actions.clone().to(self._device) dof_action_speed_scale = 8 * math.pi self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8] self.dof_position_targets[:] = tensor_clamp( self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits ) thrust_action_speed_scale = 100 self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12] self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits) self.forces[:, 0, 2] = self.thrusts[:, 0] self.forces[:, 1, 2] = self.thrusts[:, 1] self.forces[:, 2, 2] = self.thrusts[:, 2] self.forces[:, 3, 2] = self.thrusts[:, 3] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids] # apply actions self._copters.set_joint_position_targets(self.dof_position_targets) self._copters.rotors.apply_forces(self.forces, is_global=False) def post_reset(self): # control tensors self.dof_position_targets = torch.zeros( (self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False ) self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False) self.forces = torch.zeros( (self._num_envs, self._copters.rotors.count // self._num_envs, 3), dtype=torch.float32, device=self._device, requires_grad=False, ) self.target_positions = torch.zeros((self._num_envs, 3), device=self._device) self.target_positions[:, 2] = 1.0 self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) self.dof_pos = self._copters.get_joint_positions(clone=False) self.dof_vel = self._copters.get_joint_velocities(clone=False) self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone() dof_limits = self._copters.get_dof_limits() self.dof_lower_limits = dof_limits[0][:, 0].to(device=self._device) self.dof_upper_limits = dof_limits[0][:, 1].to(device=self._device) def reset_idx(self, env_ids): num_resets = len(env_ids) self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device) self.dof_vel[env_ids, :] = 0 root_pos = self.initial_root_pos.clone() root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1) root_velocities = self.root_velocities.clone() root_velocities[env_ids] = 0 # apply resets self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids) self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids) self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids) self._copters.set_velocities(root_velocities[env_ids], indices=env_ids) self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def calculate_metrics(self) -> None: root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_angvels = self.root_velocities[:, 3:] # distance to target target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1)) pos_reward = 1.0 / (1.0 + 3 * target_dist * target_dist) # 2 self.target_dist = target_dist self.root_positions = root_positions # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage) rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1 / 400)) rew = torch.clip(rew, 0.0, None) self.rew_buf[:] = rew def is_done(self) -> None: # resets due to misbehavior ones = torch.ones_like(self.reset_buf) die = torch.zeros_like(self.reset_buf) die = torch.where(self.target_dist > 3.0, ones, die) die = torch.where(self.root_positions[..., 2] < 0.3, ones, die) # resets due to episode length self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
11,498
Python
42.889313
120
0.640633
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/ingenuity.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.robots.articulations.ingenuity import Ingenuity from omniisaacgymenvs.robots.articulations.views.ingenuity_view import IngenuityView from omni.isaac.core.utils.torch.rotations import * from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omniisaacgymenvs.tasks.base.rl_task import RLTask import numpy as np import torch import math class IngenuityTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self.update_config(sim_config) self.thrust_limit = 2000 self.thrust_lateral_component = 0.2 self._num_observations = 13 self._num_actions = 6 self._ingenuity_position = torch.tensor([0, 0, 1.0]) self._ball_position = torch.tensor([0, 0, 1.0]) RLTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] self.dt = self._task_cfg["sim"]["dt"] def set_up_scene(self, scene) -> None: self.get_ingenuity() self.get_target() RLTask.set_up_scene(self, scene) self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False) self._balls._non_root_link = True # do not set states for kinematics scene.add(self._copters) scene.add(self._balls) for i in range(2): scene.add(self._copters.physics_rotors[i]) scene.add(self._copters.visual_rotors[i]) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("ingenuity_view"): scene.remove_object("ingenuity_view", registry_only=True) for i in range(2): if scene.object_exists(f"physics_rotor_{i}_view"): scene.remove_object(f"physics_rotor_{i}_view", registry_only=True) if scene.object_exists(f"visual_rotor_{i}_view"): scene.remove_object(f"visual_rotor_{i}_view", registry_only=True) if scene.object_exists("targets_view"): scene.remove_object("targets_view", registry_only=True) self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False) scene.add(self._copters) scene.add(self._balls) for i in range(2): scene.add(self._copters.physics_rotors[i]) scene.add(self._copters.visual_rotors[i]) def get_ingenuity(self): copter = Ingenuity(prim_path=self.default_zero_env_path + "/Ingenuity", name="ingenuity", translation=self._ingenuity_position) self._sim_config.apply_articulation_settings("ingenuity", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("ingenuity")) def get_target(self): radius = 0.1 color = torch.tensor([1, 0, 0]) ball = DynamicSphere( prim_path=self.default_zero_env_path + "/ball", translation=self._ball_position, name="target_0", radius=radius, color=color, ) self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")) ball.set_collision_enabled(False) def get_observations(self) -> dict: self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_linvels = self.root_velocities[:, :3] root_angvels = self.root_velocities[:, 3:] self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3 self.obs_buf[..., 3:7] = root_quats self.obs_buf[..., 7:10] = root_linvels / 2 self.obs_buf[..., 10:13] = root_angvels / math.pi observations = { self._copters.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1) if len(set_target_ids) > 0: self.set_targets(set_target_ids) actions = actions.clone().to(self._device) vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * self.thrust_limit, -self.thrust_limit, self.thrust_limit) vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * self.thrust_limit, -self.thrust_limit, self.thrust_limit) lateral_fraction_prop_0 = torch.clamp( actions[:, 0:2] * self.thrust_lateral_component, -self.thrust_lateral_component, self.thrust_lateral_component, ) lateral_fraction_prop_1 = torch.clamp( actions[:, 3:5] * self.thrust_lateral_component, -self.thrust_lateral_component, self.thrust_lateral_component, ) self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0 self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0 self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1 self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1 # clear actions for reset envs self.thrusts[reset_env_ids] = 0 # spin spinning rotors self.dof_vel[:, self.spinning_indices[0]] = 50 self.dof_vel[:, self.spinning_indices[1]] = -50 self._copters.set_joint_velocities(self.dof_vel) # apply actions for i in range(2): self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices) def post_reset(self): self.spinning_indices = torch.tensor([1, 3], device=self._device) self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device) self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32) self.target_positions[:, 2] = 1 self.root_pos, self.root_rot = self._copters.get_world_poses() self.root_velocities = self._copters.get_velocities() self.dof_pos = self._copters.get_joint_positions() self.dof_vel = self._copters.get_joint_velocities() self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses() self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone() # control tensors self.thrusts = torch.zeros((self._num_envs, 2, 3), dtype=torch.float32, device=self._device) def set_targets(self, env_ids): num_sets = len(env_ids) envs_long = env_ids.long() # set target position randomly with x, y in (-1, 1) and z in (1, 2) self.target_positions[envs_long, 0:2] = torch.rand((num_sets, 2), device=self._device) * 2 - 1 self.target_positions[envs_long, 2] = torch.rand(num_sets, device=self._device) + 1 # shift the target up so it visually aligns better ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long] ball_pos[:, 2] += 0.4 self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids) def reset_idx(self, env_ids): num_resets = len(env_ids) self.dof_pos[env_ids, 1] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze() self.dof_pos[env_ids, 3] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze() self.dof_vel[env_ids, :] = 0 root_pos = self.initial_root_pos.clone() root_pos[env_ids, 0] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 1] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1) root_pos[env_ids, 2] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1) root_velocities = self.root_velocities.clone() root_velocities[env_ids] = 0 # apply resets self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids) self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids) self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids) self._copters.set_velocities(root_velocities[env_ids], indices=env_ids) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def calculate_metrics(self) -> None: root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_angvels = self.root_velocities[:, 3:] # distance to target target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1)) pos_reward = 1.0 / (1.0 + 2.5 * target_dist * target_dist) self.target_dist = target_dist self.root_positions = root_positions # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 1.0 / (1.0 + 30 * tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + 10 * spinnage * spinnage) # combined reward # uprightness and spinning only matter when close to the target self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spinnage_reward) def is_done(self) -> None: # resets due to misbehavior ones = torch.ones_like(self.reset_buf) die = torch.zeros_like(self.reset_buf) die = torch.where(self.target_dist > 20.0, ones, die) die = torch.where(self.root_positions[..., 2] < 0.5, ones, die) # resets due to episode length self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
12,391
Python
42.943262
151
0.635138
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/anymal.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.anymal import Anymal from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView from omniisaacgymenvs.tasks.utils.usd_utils import set_drive class AnymalTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 48 self._num_actions = 12 RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config # normalization self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"] self.action_scale = self._task_cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"] self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["cosmetic"] = self._task_cfg["env"]["learn"]["cosmeticRewardScale"] # command ranges self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"] # base init state pos = self._task_cfg["env"]["baseInitState"]["pos"] rot = self._task_cfg["env"]["baseInitState"]["rot"] v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"] v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"] state = pos + rot + v_lin + v_ang self.base_init_state = state # default joint positions self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"] # other self.dt = 1 / 60 self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) self.Kp = self._task_cfg["env"]["control"]["stiffness"] self.Kd = self._task_cfg["env"]["control"]["damping"] for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt self._num_envs = self._task_cfg["env"]["numEnvs"] self._anymal_translation = torch.tensor([0.0, 0.0, 0.62]) self._env_spacing = self._task_cfg["env"]["envSpacing"] def set_up_scene(self, scene) -> None: self.get_anymal() super().set_up_scene(scene) self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview") scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("anymalview"): scene.remove_object("anymalview", registry_only=True) if scene.object_exists("knees_view"): scene.remove_object("knees_view", registry_only=True) if scene.object_exists("base_view"): scene.remove_object("base_view", registry_only=True) self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview") scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) def get_anymal(self): anymal = Anymal( prim_path=self.default_zero_env_path + "/anymal", name="Anymal", translation=self._anymal_translation ) self._sim_config.apply_articulation_settings( "Anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("Anymal") ) # Configure joint properties joint_paths = [] for quadrant in ["LF", "LH", "RF", "RH"]: for component, abbrev in [("HIP", "H"), ("THIGH", "K")]: joint_paths.append(f"{quadrant}_{component}/{quadrant}_{abbrev}FE") joint_paths.append(f"base/{quadrant}_HAA") for joint_path in joint_paths: set_drive(f"{anymal.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000) def get_observations(self) -> dict: torso_position, torso_rotation = self._anymals.get_world_poses(clone=False) root_velocities = self._anymals.get_velocities(clone=False) dof_pos = self._anymals.get_joint_positions(clone=False) dof_vel = self._anymals.get_joint_velocities(clone=False) velocity = root_velocities[:, 0:3] ang_velocity = root_velocities[:, 3:6] base_lin_vel = quat_rotate_inverse(torso_rotation, velocity) * self.lin_vel_scale base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity) * self.ang_vel_scale projected_gravity = quat_rotate(torso_rotation, self.gravity_vec) dof_pos_scaled = (dof_pos - self.default_dof_pos) * self.dof_pos_scale commands_scaled = self.commands * torch.tensor( [self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], requires_grad=False, device=self.commands.device, ) obs = torch.cat( ( base_lin_vel, base_ang_vel, projected_gravity, commands_scaled, dof_pos_scaled, dof_vel * self.dof_vel_scale, self.actions, ), dim=-1, ) self.obs_buf[:] = obs observations = {self._anymals.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) indices = torch.arange(self._anymals.count, dtype=torch.int32, device=self._device) self.actions[:] = actions.clone().to(self._device) current_targets = self.current_targets + self.action_scale * self.actions * self.dt self.current_targets[:] = tensor_clamp( current_targets, self.anymal_dof_lower_limits, self.anymal_dof_upper_limits ) self._anymals.set_joint_position_targets(self.current_targets, indices) def reset_idx(self, env_ids): num_resets = len(env_ids) # randomize DOF velocities velocities = torch_rand_float(-0.1, 0.1, (num_resets, self._anymals.num_dof), device=self._device) dof_pos = self.default_dof_pos[env_ids] dof_vel = velocities self.current_targets[env_ids] = dof_pos[:] root_vel = torch.zeros((num_resets, 6), device=self._device) # apply resets indices = env_ids.to(dtype=torch.int32) self._anymals.set_joint_positions(dof_pos, indices) self._anymals.set_joint_velocities(dof_vel, indices) self._anymals.set_world_poses( self.initial_root_pos[env_ids].clone(), self.initial_root_rot[env_ids].clone(), indices ) self._anymals.set_velocities(root_vel, indices) self.commands_x[env_ids] = torch_rand_float( self.command_x_range[0], self.command_x_range[1], (num_resets, 1), device=self._device ).squeeze() self.commands_y[env_ids] = torch_rand_float( self.command_y_range[0], self.command_y_range[1], (num_resets, 1), device=self._device ).squeeze() self.commands_yaw[env_ids] = torch_rand_float( self.command_yaw_range[0], self.command_yaw_range[1], (num_resets, 1), device=self._device ).squeeze() # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self.last_actions[env_ids] = 0.0 self.last_dof_vel[env_ids] = 0.0 def post_reset(self): self.default_dof_pos = torch.zeros( (self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False ) dof_names = self._anymals.dof_names for i in range(self.num_actions): name = dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle self.initial_root_pos, self.initial_root_rot = self._anymals.get_world_poses() self.current_targets = self.default_dof_pos.clone() dof_limits = self._anymals.get_dof_limits() self.anymal_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.anymal_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.commands = torch.zeros(self._num_envs, 3, dtype=torch.float, device=self._device, requires_grad=False) self.commands_y = self.commands.view(self._num_envs, 3)[..., 1] self.commands_x = self.commands.view(self._num_envs, 3)[..., 0] self.commands_yaw = self.commands.view(self._num_envs, 3)[..., 2] # initialize some data used later on self.extras = {} self.gravity_vec = torch.tensor([0.0, 0.0, -1.0], device=self._device).repeat((self._num_envs, 1)) self.actions = torch.zeros( self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False ) self.last_dof_vel = torch.zeros( (self._num_envs, 12), dtype=torch.float, device=self._device, requires_grad=False ) self.last_actions = torch.zeros( self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False ) self.time_out_buf = torch.zeros_like(self.reset_buf) # randomize all envs indices = torch.arange(self._anymals.count, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: torso_position, torso_rotation = self._anymals.get_world_poses(clone=False) root_velocities = self._anymals.get_velocities(clone=False) dof_pos = self._anymals.get_joint_positions(clone=False) dof_vel = self._anymals.get_joint_velocities(clone=False) velocity = root_velocities[:, 0:3] ang_velocity = root_velocities[:, 3:6] base_lin_vel = quat_rotate_inverse(torso_rotation, velocity) base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity) # velocity tracking reward lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"] rew_lin_vel_z = torch.square(base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"] rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - dof_vel), dim=1) * self.rew_scales["joint_acc"] rew_action_rate = ( torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"] ) rew_cosmetic = ( torch.sum(torch.abs(dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["cosmetic"] ) total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_joint_acc + rew_action_rate + rew_cosmetic + rew_lin_vel_z total_reward = torch.clip(total_reward, 0.0, None) self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = dof_vel[:] self.fallen_over = self._anymals.is_base_below_threshold(threshold=0.51, ground_heights=0.0) total_reward[torch.nonzero(self.fallen_over)] = -1 self.rew_buf[:] = total_reward.detach() def is_done(self) -> None: # reset agents time_out = self.progress_buf >= self.max_episode_length - 1 self.reset_buf[:] = time_out | self.fallen_over
14,350
Python
44.55873
118
0.630941
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/warp/humanoid.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask from omniisaacgymenvs.robots.articulations.humanoid import Humanoid from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp import numpy as np import torch import warp as wp import math class HumanoidLocomotionTask(LocomotionTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_observations = 87 self._num_actions = 21 self._humanoid_positions = torch.tensor([0, 0, 1.34]) LocomotionTask.__init__(self, name=name, env=env) return def set_up_scene(self, scene) -> None: self.get_humanoid() RLTaskWarp.set_up_scene(self, scene) self._humanoids = ArticulationView(prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False) scene.add(self._humanoids) return def get_humanoid(self): humanoid = Humanoid(prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions) self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid")) def get_robot(self): return self._humanoids def post_reset(self): self.joint_gears = wp.array( [ 67.5000, # lower_waist 67.5000, # lower_waist 67.5000, # right_upper_arm 67.5000, # right_upper_arm 67.5000, # left_upper_arm 67.5000, # left_upper_arm 67.5000, # pelvis 45.0000, # right_lower_arm 45.0000, # left_lower_arm 45.0000, # right_thigh: x 135.0000, # right_thigh: y 45.0000, # right_thigh: z 45.0000, # left_thigh: x 135.0000, # left_thigh: y 45.0000, # left_thigh: z 90.0000, # right_knee 90.0000, # left_knee 22.5, # right_foot 22.5, # right_foot 22.5, # left_foot 22.5, # left_foot ], device=self._device, dtype=wp.float32 ) self.max_motor_effort = 135.0 self.motor_effort_ratio = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device) wp.launch(compute_effort_ratio, dim=self._humanoids._num_dof, inputs=[self.motor_effort_ratio, self.joint_gears, self.max_motor_effort], device=self._device) dof_limits = self._humanoids.get_dof_limits().to(self._device) self.dof_limits_lower = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device) self.dof_limits_upper = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device) wp.launch(parse_dof_limits, dim=self._humanoids._num_dof, inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device) self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) force_links = ["left_foot", "right_foot"] self._sensor_indices = wp.array([self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32) LocomotionTask.post_reset(self) def get_dof_at_limit_cost(self): wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._humanoids._num_dof), inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale]) return self.dof_at_limit_cost @wp.kernel def compute_effort_ratio(motor_effort_ratio: wp.array(dtype=wp.float32), joint_gears: wp.array(dtype=wp.float32), max_motor_effort: float): tid = wp.tid() motor_effort_ratio[tid] = joint_gears[tid] / max_motor_effort @wp.kernel def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32), dof_limits_upper: wp.array(dtype=wp.float32), dof_limits: wp.array(dtype=wp.float32, ndim=3)): tid = wp.tid() dof_limits_lower[tid] = dof_limits[0, tid, 0] dof_limits_upper[tid] = dof_limits[0, tid, 1] @wp.kernel def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32), obs_buf: wp.array(dtype=wp.float32, ndim=2), motor_effort_ratio: wp.array(dtype=wp.float32), joints_at_limit_cost_scale: float): i, j = wp.tid() dof_i = j + 12 scaled_cost = joints_at_limit_cost_scale * (wp.abs(obs_buf[i, dof_i]) - 0.98) / 0.02 cost = 0.0 if wp.abs(obs_buf[i, dof_i]) > 0.98: cost = scaled_cost * motor_effort_ratio[j] dof_at_limit_cost[i] = cost
6,686
Python
42.422078
143
0.639994
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/warp/ant.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.robots.articulations.ant import Ant from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp import numpy as np import torch import warp as wp class AntLocomotionTask(LocomotionTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_observations = 60 self._num_actions = 8 self._ant_positions = wp.array([0, 0, 0.5], dtype=wp.float32, device="cpu") LocomotionTask.__init__(self, name=name, env=env) return def set_up_scene(self, scene) -> None: self.get_ant() RLTaskWarp.set_up_scene(self, scene) self._ants = ArticulationView(prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False) scene.add(self._ants) return def get_ant(self): ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions) self._sim_config.apply_articulation_settings("Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant")) def get_robot(self): return self._ants def post_reset(self): self.joint_gears = wp.array([15, 15, 15, 15, 15, 15, 15, 15], dtype=wp.float32, device=self._device) dof_limits = self._ants.get_dof_limits().to(self._device) self.dof_limits_lower = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device) self.dof_limits_upper = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device) wp.launch(parse_dof_limits, dim=self._ants._num_dof, inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device) self.motor_effort_ratio = wp.array([1, 1, 1, 1, 1, 1, 1, 1], dtype=wp.float32, device=self._device) self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"] self._sensor_indices = wp.array([self._ants._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32) LocomotionTask.post_reset(self) def get_dof_at_limit_cost(self): wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._ants._num_dof), inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio]) return self.dof_at_limit_cost @wp.kernel def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32), obs_buf: wp.array(dtype=wp.float32, ndim=2), motor_effort_ratio: wp.array(dtype=wp.float32)): i, j = wp.tid() dof_i = j + 12 cost = 0.0 if wp.abs(obs_buf[i, dof_i]) > 0.99: cost = 1.0 dof_at_limit_cost[i] = cost @wp.kernel def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32), dof_limits_upper: wp.array(dtype=wp.float32), dof_limits: wp.array(dtype=wp.float32, ndim=3)): tid = wp.tid() dof_limits_lower[tid] = dof_limits[0, tid, 0] dof_limits_upper[tid] = dof_limits[0, tid, 1]
5,221
Python
44.807017
136
0.685309
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/warp/cartpole.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omniisaacgymenvs.robots.articulations.cartpole import Cartpole from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path import omni.isaac.core.utils.warp as warp_utils from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp import numpy as np import torch import warp as wp import math class CartpoleTask(RLTaskWarp): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._cartpole_positions = wp.array([0.0, 0.0, 2.0], dtype=wp.float32) self._reset_dist = self._task_cfg["env"]["resetDist"] self._max_push_effort = self._task_cfg["env"]["maxEffort"] self._max_episode_length = 500 self._num_observations = 4 self._num_actions = 1 RLTaskWarp.__init__(self, name, env) return def set_up_scene(self, scene) -> None: self.get_cartpole() super().set_up_scene(scene) self._cartpoles = ArticulationView(prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False) scene.add(self._cartpoles) return def get_cartpole(self): cartpole = Cartpole(prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions) # applies articulation settings from the task configuration yaml file self._sim_config.apply_articulation_settings("Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole")) def get_observations(self) -> dict: dof_pos = self._cartpoles.get_joint_positions(clone=False) dof_vel = self._cartpoles.get_joint_velocities(clone=False) wp.launch(get_observations, dim=self._num_envs, inputs=[self.obs_buf, dof_pos, dof_vel, self._cart_dof_idx, self._pole_dof_idx], device=self._device) observations = { self._cartpoles.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: self.reset_idx() actions_wp = wp.from_torch(actions) forces = wp.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=wp.float32, device=self._device) wp.launch(compute_forces, dim=self._num_envs, inputs=[forces, actions_wp, self._cart_dof_idx, self._max_push_effort], device=self._device) self._cartpoles.set_joint_efforts(forces) def reset_idx(self): reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1) num_resets = len(reset_env_ids) indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32) if num_resets > 0: wp.launch(reset_idx, num_resets, inputs=[self.dof_pos, self.dof_vel, indices, self.reset_buf, self.progress_buf, self._cart_dof_idx, self._pole_dof_idx, self._rand_seed], device=self._device) # apply resets self._cartpoles.set_joint_positions(self.dof_pos[indices], indices=indices) self._cartpoles.set_joint_velocities(self.dof_vel[indices], indices=indices) def post_reset(self): self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint") self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint") self.dof_pos = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32) self.dof_vel = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32) # randomize all envs self.reset_idx() def calculate_metrics(self) -> None: wp.launch(calculate_metrics, dim=self._num_envs, inputs=[self.obs_buf, self.rew_buf, self._reset_dist], device=self._device) def is_done(self) -> None: wp.launch(is_done, dim=self._num_envs, inputs=[self.obs_buf, self.reset_buf, self.progress_buf, self._reset_dist, self._max_episode_length], device=self._device) @wp.kernel def reset_idx(dof_pos: wp.array(dtype=wp.float32, ndim=2), dof_vel: wp.array(dtype=wp.float32, ndim=2), indices: wp.array(dtype=wp.int32), reset_buf: wp.array(dtype=wp.int32), progress_buf: wp.array(dtype=wp.int32), cart_dof_idx: int, pole_dof_idx: int, rand_seed: int): i = wp.tid() idx = indices[i] rand_state = wp.rand_init(rand_seed, i) # randomize DOF positions dof_pos[idx, cart_dof_idx] = 1.0 * (1.0 - 2.0 * wp.randf(rand_state)) dof_pos[idx, pole_dof_idx] = 0.125 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state)) # randomize DOF velocities dof_vel[idx, cart_dof_idx] = 0.5 * (1.0 - 2.0 * wp.randf(rand_state)) dof_vel[idx, pole_dof_idx] = 0.25 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state)) # bookkeeping progress_buf[idx] = 0 reset_buf[idx] = 0 @wp.kernel def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2), actions: wp.array(dtype=wp.float32, ndim=2), cart_dof_idx: int, max_push_effort: float): i = wp.tid() forces[i, cart_dof_idx] = max_push_effort * actions[i, 0] @wp.kernel def get_observations(obs_buf: wp.array(dtype=wp.float32, ndim=2), dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2), dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2), cart_dof_idx: int, pole_dof_idx: int): i = wp.tid() obs_buf[i, 0] = dof_pos[i, cart_dof_idx] obs_buf[i, 1] = dof_vel[i, cart_dof_idx] obs_buf[i, 2] = dof_pos[i, pole_dof_idx] obs_buf[i, 3] = dof_vel[i, pole_dof_idx] @wp.kernel def calculate_metrics(obs_buf: wp.array(dtype=wp.float32, ndim=2), rew_buf: wp.array(dtype=wp.float32), reset_dist: float): i = wp.tid() cart_pos = obs_buf[i, 0] cart_vel = obs_buf[i, 1] pole_angle = obs_buf[i, 2] pole_vel = obs_buf[i, 3] rew_buf[i] = 1.0 - pole_angle * pole_angle - 0.01 * wp.abs(cart_vel) - 0.005 * wp.abs(pole_vel) if wp.abs(cart_pos) > reset_dist or wp.abs(pole_angle) > warp_utils.PI / 2.0: rew_buf[i] = -2.0 @wp.kernel def is_done(obs_buf: wp.array(dtype=wp.float32, ndim=2), reset_buf: wp.array(dtype=wp.int32), progress_buf: wp.array(dtype=wp.int32), reset_dist: float, max_episode_length: int): i = wp.tid() cart_pos = obs_buf[i, 0] pole_pos = obs_buf[i, 2] if wp.abs(cart_pos) > reset_dist or wp.abs(pole_pos) > warp_utils.PI / 2.0 or progress_buf[i] > max_episode_length: reset_buf[i] = 1 else: reset_buf[i] = 0
8,665
Python
38.390909
154
0.635661
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/warp/shared/locomotion.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from abc import abstractmethod from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path import omni.isaac.core.utils.warp as warp_utils from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp import numpy as np import torch import warp as wp class LocomotionTask(RLTaskWarp): def __init__( self, name, env, offset=None ) -> None: self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"] self.contact_force_scale = self._task_cfg["env"]["contactForceScale"] self.power_scale = self._task_cfg["env"]["powerScale"] self.heading_weight = self._task_cfg["env"]["headingWeight"] self.up_weight = self._task_cfg["env"]["upWeight"] self.actions_cost_scale = self._task_cfg["env"]["actionsCost"] self.energy_cost_scale = self._task_cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"] self.death_cost = self._task_cfg["env"]["deathCost"] self.termination_height = self._task_cfg["env"]["terminationHeight"] self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"] self._num_sensors = 2 RLTaskWarp.__init__(self, name, env) return @abstractmethod def set_up_scene(self, scene) -> None: pass @abstractmethod def get_robot(self): pass def get_observations(self) -> dict: torso_position, torso_rotation = self._robots.get_world_poses(clone=False) velocities = self._robots.get_velocities(clone=False) dof_pos = self._robots.get_joint_positions(clone=False) dof_vel = self._robots.get_joint_velocities(clone=False) # force sensors attached to the feet sensor_force_torques = self._robots.get_measured_joint_forces() wp.launch(get_observations, dim=self._num_envs, inputs=[self.obs_buf, torso_position, torso_rotation, self._env_pos, velocities, dof_pos, dof_vel, self.prev_potentials, self.potentials, self.dt, self.target, self.basis_vec0, self.basis_vec1, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, sensor_force_torques, self.contact_force_scale, self.actions, self.angular_velocity_scale, self._robots._num_dof, self._num_sensors, self._sensor_indices], device=self._device ) observations = { self._robots.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: self.reset_idx() actions_wp = wp.from_torch(actions) self.actions = actions_wp wp.launch(compute_forces, dim=(self._num_envs, self._robots._num_dof), inputs=[self.forces, self.actions, self.joint_gears, self.power_scale], device=self._device) # applies joint torques self._robots.set_joint_efforts(self.forces) def reset_idx(self): reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1) num_resets = len(reset_env_ids) indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32) if num_resets > 0: wp.launch(reset_dofs, dim=(num_resets, self._robots._num_dof), inputs=[self.dof_pos, self.dof_vel, self.initial_dof_pos, self.dof_limits_lower, self.dof_limits_upper, indices, self._rand_seed], device=self._device) wp.launch(reset_idx, dim=num_resets, inputs=[self.root_pos, self.root_rot, self.initial_root_pos, self.initial_root_rot, self._env_pos, self.target, self.prev_potentials, self.potentials, self.dt, self.reset_buf, self.progress_buf, indices, self._rand_seed], device=self._device) # apply resets self._robots.set_joint_positions(self.dof_pos[indices], indices=indices) self._robots.set_joint_velocities(self.dof_vel[indices], indices=indices) self._robots.set_world_poses(self.root_pos[indices], self.root_rot[indices], indices=indices) self._robots.set_velocities(self.root_vel[indices], indices=indices) def post_reset(self): self._robots = self.get_robot() self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() self.initial_dof_pos = self._robots.get_joint_positions() # initialize some data used later on self.basis_vec0 = wp.vec3(1, 0, 0) self.basis_vec1 = wp.vec3(0, 0, 1) self.target = wp.vec3(1000, 0, 0) self.dt = 1.0 / 60.0 # initialize potentials self.potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) self.prev_potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) wp.launch(init_potentials, dim=self._num_envs, inputs=[self.potentials, self.prev_potentials, self.dt], device=self._device) self.actions = wp.zeros((self.num_envs, self.num_actions), device=self._device, dtype=wp.float32) self.forces = wp.zeros((self._num_envs, self._robots._num_dof), dtype=wp.float32, device=self._device) self.dof_pos = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32) self.dof_vel = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32) self.root_pos = wp.zeros((self.num_envs, 3), device=self._device, dtype=wp.float32) self.root_rot = wp.zeros((self.num_envs, 4), device=self._device, dtype=wp.float32) self.root_vel = wp.zeros((self.num_envs, 6), device=self._device, dtype=wp.float32) # randomize all env self.reset_idx() def calculate_metrics(self) -> None: dof_at_limit_cost = self.get_dof_at_limit_cost() wp.launch(calculate_metrics, dim=self._num_envs, inputs=[self.rew_buf, self.obs_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.termination_height, self.death_cost, self._robots.num_dof, dof_at_limit_cost, self.alive_reward_scale, self.motor_effort_ratio], device=self._device ) def is_done(self) -> None: wp.launch(is_done, dim=self._num_envs, inputs=[self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length], device=self._device ) ##################################################################### ###==========================warp kernels=========================### ##################################################################### @wp.kernel def init_potentials(potentials: wp.array(dtype=wp.float32), prev_potentials: wp.array(dtype=wp.float32), dt: float): i = wp.tid() potentials[i] = -1000.0 / dt prev_potentials[i] = -1000.0 / dt @wp.kernel def reset_idx(root_pos: wp.array(dtype=wp.float32, ndim=2), root_rot: wp.array(dtype=wp.float32, ndim=2), initial_root_pos: wp.indexedarray(dtype=wp.float32, ndim=2), initial_root_rot: wp.indexedarray(dtype=wp.float32, ndim=2), env_pos: wp.array(dtype=wp.float32, ndim=2), target: wp.vec3, prev_potentials: wp.array(dtype=wp.float32), potentials: wp.array(dtype=wp.float32), dt: float, reset_buf: wp.array(dtype=wp.int32), progress_buf: wp.array(dtype=wp.int32), indices: wp.array(dtype=wp.int32), rand_seed: int): i = wp.tid() idx = indices[i] # reset root states for j in range(3): root_pos[idx, j] = initial_root_pos[idx, j] for j in range(4): root_rot[idx, j] = initial_root_rot[idx, j] # reset potentials to_target = target - wp.vec3(initial_root_pos[idx, 0] - env_pos[idx, 0], initial_root_pos[idx, 1] - env_pos[idx, 1], target[2]) prev_potentials[idx] = -wp.length(to_target) / dt potentials[idx] = -wp.length(to_target) / dt temp = potentials[idx] - prev_potentials[idx] # bookkeeping reset_buf[idx] = 0 progress_buf[idx] = 0 @wp.kernel def reset_dofs(dof_pos: wp.array(dtype=wp.float32, ndim=2), dof_vel: wp.array(dtype=wp.float32, ndim=2), initial_dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2), dof_limits_lower: wp.array(dtype=wp.float32), dof_limits_upper: wp.array(dtype=wp.float32), indices: wp.array(dtype=wp.int32), rand_seed: int): i, j = wp.tid() idx = indices[i] rand_state = wp.rand_init(rand_seed, i * j + j) # randomize DOF positions and velocities dof_pos[idx, j] = wp.clamp(wp.randf(rand_state, -0.2, 0.2) + initial_dof_pos[idx, j], dof_limits_lower[j], dof_limits_upper[j]) dof_vel[idx, j] = wp.randf(rand_state, -0.1, 0.1) @wp.kernel def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2), actions: wp.array(dtype=wp.float32, ndim=2), joint_gears: wp.array(dtype=wp.float32), power_scale: float): i, j = wp.tid() forces[i, j] = actions[i, j] * joint_gears[j] * power_scale @wp.func def get_euler_xyz(q: wp.quat): qx = 0 qy = 1 qz = 2 qw = 3 # roll (x-axis rotation) sinr_cosp = 2.0 * (q[qw] * q[qx] + q[qy] * q[qz]) cosr_cosp = q[qw] * q[qw] - q[qx] * q[qx] - q[qy] * q[qy] + q[qz] * q[qz] roll = wp.atan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2.0 * (q[qw] * q[qy] - q[qz] * q[qx]) if wp.abs(sinp) >= 1: pitch = warp_utils.PI / 2.0 * (wp.abs(sinp)/sinp) else: pitch = wp.asin(sinp) # yaw (z-axis rotation) siny_cosp = 2.0 * (q[qw] * q[qz] + q[qx] * q[qy]) cosy_cosp = q[qw] * q[qw] + q[qx] * q[qx] - q[qy] * q[qy] - q[qz] * q[qz] yaw = wp.atan2(siny_cosp, cosy_cosp) rpy = wp.vec3(roll % (2.0 * warp_utils.PI), pitch % (2.0 * warp_utils.PI), yaw % (2.0 * warp_utils.PI)) return rpy @wp.func def compute_up_vec(torso_rotation: wp.quat, vec1: wp.vec3): up_vec = wp.quat_rotate(torso_rotation, vec1) return up_vec @wp.func def compute_heading_vec(torso_rotation: wp.quat, vec0: wp.vec3): heading_vec = wp.quat_rotate(torso_rotation, vec0) return heading_vec @wp.func def unscale(x:float, lower:float, upper:float): return (2.0 * x - upper - lower) / (upper - lower) @wp.func def normalize_angle(x: float): return wp.atan2(wp.sin(x), wp.cos(x)) @wp.kernel def get_observations( obs_buf: wp.array(dtype=wp.float32, ndim=2), torso_pos: wp.indexedarray(dtype=wp.float32, ndim=2), torso_rot: wp.indexedarray(dtype=wp.float32, ndim=2), env_pos: wp.array(dtype=wp.float32, ndim=2), velocity: wp.indexedarray(dtype=wp.float32, ndim=2), dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2), dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2), prev_potentials: wp.array(dtype=wp.float32), potentials: wp.array(dtype=wp.float32), dt: float, target: wp.vec3, basis_vec0: wp.vec3, basis_vec1: wp.vec3, dof_limits_lower: wp.array(dtype=wp.float32), dof_limits_upper: wp.array(dtype=wp.float32), dof_vel_scale: float, sensor_force_torques: wp.indexedarray(dtype=wp.float32, ndim=3), contact_force_scale: float, actions: wp.array(dtype=wp.float32, ndim=2), angular_velocity_scale: float, num_dofs: int, num_sensors: int, sensor_indices: wp.array(dtype=wp.int32) ): i = wp.tid() torso_position_x = torso_pos[i, 0] - env_pos[i, 0] torso_position_y = torso_pos[i, 1] - env_pos[i, 1] torso_position_z = torso_pos[i, 2] - env_pos[i, 2] to_target = target - wp.vec3(torso_position_x, torso_position_y, target[2]) prev_potentials[i] = potentials[i] potentials[i] = -wp.length(to_target) / dt temp = potentials[i] - prev_potentials[i] torso_quat = wp.quat(torso_rot[i, 1], torso_rot[i, 2], torso_rot[i, 3], torso_rot[i, 0]) up_vec = compute_up_vec(torso_quat, basis_vec1) up_proj = up_vec[2] heading_vec = compute_heading_vec(torso_quat, basis_vec0) target_dir = wp.normalize(to_target) heading_proj = wp.dot(heading_vec, target_dir) lin_velocity = wp.vec3(velocity[i, 0], velocity[i, 1], velocity[i, 2]) ang_velocity = wp.vec3(velocity[i, 3], velocity[i, 4], velocity[i, 5]) rpy = get_euler_xyz(torso_quat) vel_loc = wp.quat_rotate_inv(torso_quat, lin_velocity) angvel_loc = wp.quat_rotate_inv(torso_quat, ang_velocity) walk_target_angle = wp.atan2(target[2] - torso_position_z, target[0] - torso_position_x) angle_to_target = walk_target_angle - rpy[2] # yaw # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs obs_offset = 0 obs_buf[i, 0] = torso_position_z obs_offset = obs_offset + 1 for j in range(3): obs_buf[i, j+obs_offset] = vel_loc[j] obs_offset = obs_offset + 3 for j in range(3): obs_buf[i, j+obs_offset] = angvel_loc[j] * angular_velocity_scale obs_offset = obs_offset + 3 obs_buf[i, obs_offset+0] = normalize_angle(rpy[2]) obs_buf[i, obs_offset+1] = normalize_angle(rpy[0]) obs_buf[i, obs_offset+2] = normalize_angle(angle_to_target) obs_buf[i, obs_offset+3] = up_proj obs_buf[i, obs_offset+4] = heading_proj obs_offset = obs_offset + 5 for j in range(num_dofs): obs_buf[i, obs_offset+j] = unscale(dof_pos[i, j], dof_limits_lower[j], dof_limits_upper[j]) obs_offset = obs_offset + num_dofs for j in range(num_dofs): obs_buf[i, obs_offset+j] = dof_vel[i, j] * dof_vel_scale obs_offset = obs_offset + num_dofs for j in range(num_sensors): sensor_idx = sensor_indices[j] for k in range(6): obs_buf[i, obs_offset+j*6+k] = sensor_force_torques[i, sensor_idx, k] * contact_force_scale obs_offset = obs_offset + (num_sensors * 6) for j in range(num_dofs): obs_buf[i, obs_offset+j] = actions[i, j] @wp.kernel def is_done( obs_buf: wp.array(dtype=wp.float32, ndim=2), termination_height: float, reset_buf: wp.array(dtype=wp.int32), progress_buf: wp.array(dtype=wp.int32), max_episode_length: int ): i = wp.tid() if obs_buf[i, 0] < termination_height or progress_buf[i] >= max_episode_length - 1: reset_buf[i] = 1 else: reset_buf[i] = 0 @wp.kernel def calculate_metrics( rew_buf: wp.array(dtype=wp.float32), obs_buf: wp.array(dtype=wp.float32, ndim=2), actions: wp.array(dtype=wp.float32, ndim=2), up_weight: float, heading_weight: float, potentials: wp.array(dtype=wp.float32), prev_potentials: wp.array(dtype=wp.float32), actions_cost_scale: float, energy_cost_scale: float, termination_height: float, death_cost: float, num_dof: int, dof_at_limit_cost: wp.array(dtype=wp.float32), alive_reward_scale: float, motor_effort_ratio: wp.array(dtype=wp.float32) ): i = wp.tid() # heading reward if obs_buf[i, 11] > 0.8: heading_reward = heading_weight else: heading_reward = heading_weight * obs_buf[i, 11] / 0.8 # aligning up axis of robot and environment up_reward = 0.0 if obs_buf[i, 10] > 0.93: up_reward = up_weight # energy penalty for movement actions_cost = float(0.0) electricity_cost = float(0.0) for j in range(num_dof): actions_cost = actions_cost + (actions[i, j] * actions[i, j]) electricity_cost = electricity_cost + (wp.abs(actions[i, j] * obs_buf[i, 12+num_dof+j]) * motor_effort_ratio[j]) # reward for duration of staying alive progress_reward = potentials[i] - prev_potentials[i] total_reward = ( progress_reward + alive_reward_scale + up_reward + heading_reward - actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost[i] ) # adjust reward for fallen agents if obs_buf[i, 0] < termination_height: total_reward = death_cost rew_buf[i] = total_reward
18,233
Python
39.52
147
0.624198
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/base/rl_task.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import asyncio from abc import abstractmethod import numpy as np import omni.isaac.core.utils.warp.tensor as wp_utils import omni.kit import omni.usd import torch import warp as wp from gym import spaces from omni.isaac.cloner import GridCloner from omni.isaac.core.tasks import BaseTask from omni.isaac.core.utils.prims import define_prim from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.gym.tasks.rl_task import RLTaskInterface from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer from pxr import Gf, UsdGeom, UsdLux class RLTask(RLTaskInterface): """This class provides a PyTorch RL-specific interface for setting up RL tasks. It includes utilities for setting up RL task related parameters, cloning environments, and data collection for RL algorithms. """ def __init__(self, name, env, offset=None) -> None: """Initializes RL parameters, cloner object, and buffers. Args: name (str): name of the task. env (VecEnvBase): an instance of the environment wrapper class to register task. offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None. """ BaseTask.__init__(self, name=name, offset=offset) self._rand_seed = self._cfg["seed"] # optimization flags for pytorch JIT torch._C._jit_set_nvfuser_enabled(False) self.test = self._cfg["test"] self._device = self._cfg["sim_device"] # set up randomizer for DR self._dr_randomizer = Randomizer(self._cfg, self._task_cfg) if self._dr_randomizer.randomize: import omni.replicator.isaac as dr self.dr = dr # set up replicator for camera data collection if self._task_cfg["sim"].get("enable_cameras", False): from omni.replicator.isaac.scripts.writers.pytorch_writer import PytorchWriter from omni.replicator.isaac.scripts.writers.pytorch_listener import PytorchListener import omni.replicator.core as rep self.rep = rep self.PytorchWriter = PytorchWriter self.PytorchListener = PytorchListener print("Task Device:", self._device) self.randomize_actions = False self.randomize_observations = False self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf) self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf) self.rl_device = self._cfg.get("rl_device", "cuda:0") self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1) self.rendering_interval = self._task_cfg.get("renderingInterval", 1) print("RL device: ", self.rl_device) self._env = env if not hasattr(self, "_num_agents"): self._num_agents = 1 # used for multi-agent environments if not hasattr(self, "_num_states"): self._num_states = 0 # initialize data spaces (defaults to gym.Box) if not hasattr(self, "action_space"): self.action_space = spaces.Box( np.ones(self.num_actions, dtype=np.float32) * -1.0, np.ones(self.num_actions, dtype=np.float32) * 1.0 ) if not hasattr(self, "observation_space"): self.observation_space = spaces.Box( np.ones(self.num_observations, dtype=np.float32) * -np.Inf, np.ones(self.num_observations, dtype=np.float32) * np.Inf, ) if not hasattr(self, "state_space"): self.state_space = spaces.Box( np.ones(self.num_states, dtype=np.float32) * -np.Inf, np.ones(self.num_states, dtype=np.float32) * np.Inf, ) self.cleanup() def cleanup(self) -> None: """Prepares torch buffers for RL data collection.""" # prepare tensors self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float) self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float) self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float) self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long) self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long) self.extras = {} def set_up_scene( self, scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True, copy_from_source=False ) -> None: """Clones environments based on value provided in task config and applies collision filters to mask collisions across environments. Args: scene (Scene): Scene to add objects to. replicate_physics (bool): Clone physics using PhysX API for better performance. collision_filter_global_paths (list): Prim paths of global objects that should not have collision masked. filter_collisions (bool): Mask off collision between environments. copy_from_source (bool): Copy from source prim when cloning instead of inheriting. """ super().set_up_scene(scene) self._cloner = GridCloner(spacing=self._env_spacing) self._cloner.define_base_env(self.default_base_env_path) stage = omni.usd.get_context().get_stage() UsdGeom.Xform.Define(stage, self.default_zero_env_path) if self._task_cfg["sim"].get("add_ground_plane", True): self._ground_plane_path = "/World/defaultGroundPlane" collision_filter_global_paths.append(self._ground_plane_path) scene.add_default_ground_plane(prim_path=self._ground_plane_path) prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs) self._env_pos = self._cloner.clone( source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source ) self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float) if filter_collisions: self._cloner.filter_collisions( self._env._world.get_physics_context().prim_path, "/World/collisions", prim_paths, collision_filter_global_paths, ) if self._env._render: self.set_initial_camera_params(camera_position=[10, 10, 3], camera_target=[0, 0, 0]) if self._task_cfg["sim"].get("add_distant_light", True): self._create_distant_light() def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]): from omni.kit.viewport.utility import get_viewport_from_window_name from omni.kit.viewport.utility.camera_state import ViewportCameraState viewport_api_2 = get_viewport_from_window_name("Viewport") viewport_api_2.set_active_camera("/OmniverseKit_Persp") camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2) camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True) camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True) def _create_distant_light(self, prim_path="/World/defaultDistantLight", intensity=5000): stage = get_current_stage() light = UsdLux.DistantLight.Define(stage, prim_path) light.CreateIntensityAttr().Set(intensity) def initialize_views(self, scene): """Optionally implemented by individual task classes to initialize views used in the task. This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage. Args: scene (Scene): Scene to remove existing views and initialize/add new views. """ self._cloner = GridCloner(spacing=self._env_spacing) pos, _ = self._cloner.get_clone_transforms(self._num_envs) self._env_pos = torch.tensor(np.array(pos), device=self._device, dtype=torch.float) @property def default_base_env_path(self): """Retrieves default path to the parent of all env prims. Returns: default_base_env_path(str): Defaults to "/World/envs". """ return "/World/envs" @property def default_zero_env_path(self): """Retrieves default path to the first env prim (index 0). Returns: default_zero_env_path(str): Defaults to "/World/envs/env_0". """ return f"{self.default_base_env_path}/env_0" def reset(self): """Flags all environments for reset.""" self.reset_buf = torch.ones_like(self.reset_buf) def post_physics_step(self): """Processes RL required computations for observations, states, rewards, resets, and extras. Also maintains progress buffer for tracking step count per environment. Returns: obs_buf(torch.Tensor): Tensor of observation data. rew_buf(torch.Tensor): Tensor of rewards data. reset_buf(torch.Tensor): Tensor of resets/dones data. extras(dict): Dictionary of extras data. """ self.progress_buf[:] += 1 if self._env._world.is_playing(): self.get_observations() self.get_states() self.calculate_metrics() self.is_done() self.get_extras() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras class RLTaskWarp(RLTask): def cleanup(self) -> None: """Prepares torch buffers for RL data collection.""" # prepare tensors self.obs_buf = wp.zeros((self._num_envs, self.num_observations), device=self._device, dtype=wp.float32) self.states_buf = wp.zeros((self._num_envs, self.num_states), device=self._device, dtype=wp.float32) self.rew_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.float32) self.reset_buf = wp_utils.ones(self._num_envs, device=self._device, dtype=wp.int32) self.progress_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.int32) self.zero_states_buf_torch = torch.zeros( (self._num_envs, self.num_states), device=self._device, dtype=torch.float32 ) self.extras = {} def reset(self): """Flags all environments for reset.""" wp.launch(reset_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device) def post_physics_step(self): """Processes RL required computations for observations, states, rewards, resets, and extras. Also maintains progress buffer for tracking step count per environment. Returns: obs_buf(torch.Tensor): Tensor of observation data. rew_buf(torch.Tensor): Tensor of rewards data. reset_buf(torch.Tensor): Tensor of resets/dones data. extras(dict): Dictionary of extras data. """ wp.launch(increment_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device) if self._env._world.is_playing(): self.get_observations() self.get_states() self.calculate_metrics() self.is_done() self.get_extras() obs_buf_torch = wp.to_torch(self.obs_buf) rew_buf_torch = wp.to_torch(self.rew_buf) reset_buf_torch = wp.to_torch(self.reset_buf) return obs_buf_torch, rew_buf_torch, reset_buf_torch, self.extras def get_states(self): """API for retrieving states buffer, used for asymmetric AC training. Returns: states_buf(torch.Tensor): States buffer. """ if self.num_states > 0: return wp.to_torch(self.states_buf) else: return self.zero_states_buf_torch def set_up_scene(self, scene) -> None: """Clones environments based on value provided in task config and applies collision filters to mask collisions across environments. Args: scene (Scene): Scene to add objects to. """ super().set_up_scene(scene) self._env_pos = wp.from_torch(self._env_pos) @wp.kernel def increment_progress(progress_buf: wp.array(dtype=wp.int32)): i = wp.tid() progress_buf[i] = progress_buf[i] + 1 @wp.kernel def reset_progress(progress_buf: wp.array(dtype=wp.int32)): i = wp.tid() progress_buf[i] = 1
14,224
Python
41.717718
143
0.653121
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: base class. Inherits Gym's RLTask class and abstract base class. Inherited by environment classes. Not directly executed. Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml. """ import carb import hydra import math import numpy as np import torch from omni.isaac.core.objects import FixedCuboid from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.factory_franka import FactoryFranka from pxr import PhysxSchema, UsdPhysics import omniisaacgymenvs.tasks.factory.factory_control as fc from omniisaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase from omniisaacgymenvs.tasks.factory.factory_schema_config_base import ( FactorySchemaConfigBase, ) class FactoryBase(RLTask, FactoryABCBase): def __init__(self, name, sim_config, env) -> None: """Initialize instance variables. Initialize RLTask superclass.""" # Set instance variables from base YAML self._get_base_yaml_params() self._env_spacing = self.cfg_base.env.env_spacing # Set instance variables from task and train YAMLs self._sim_config = sim_config self._cfg = sim_config.config # CL args, task config, and train config self._task_cfg = sim_config.task_config # just task config self._num_envs = sim_config.task_config["env"]["numEnvs"] self._num_observations = sim_config.task_config["env"]["numObservations"] self._num_actions = sim_config.task_config["env"]["numActions"] super().__init__(name, env) def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase) config_path = ( "task/FactoryBase.yaml" # relative to Gym's Hydra search path (cfg dir) ) self.cfg_base = hydra.compose(config_name=config_path) self.cfg_base = self.cfg_base["task"] # strip superfluous nesting asset_info_path = "../tasks/factory/yaml/factory_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir) self.asset_info_franka_table = hydra.compose(config_name=asset_info_path) self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][ "tasks" ]["factory"][ "yaml" ] # strip superfluous nesting def import_franka_assets(self, add_to_stage=True): """Set Franka and table asset options. Import assets.""" self._stage = get_current_stage() if add_to_stage: franka_translation = np.array([self.cfg_base.env.franka_depth, 0.0, 0.0]) franka_orientation = np.array([0.0, 0.0, 0.0, 1.0]) franka = FactoryFranka( prim_path=self.default_zero_env_path + "/franka", name="franka", translation=franka_translation, orientation=franka_orientation, ) self._sim_config.apply_articulation_settings( "franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka"), ) for link_prim in franka.prim.GetChildren(): if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI): rb = PhysxSchema.PhysxRigidBodyAPI.Get( self._stage, link_prim.GetPrimPath() ) rb.GetDisableGravityAttr().Set(True) rb.GetRetainAccelerationsAttr().Set(False) if self.cfg_base.sim.add_damping: rb.GetLinearDampingAttr().Set( 1.0 ) # default = 0.0; increased to improve stability rb.GetMaxLinearVelocityAttr().Set( 1.0 ) # default = 1000.0; reduced to prevent CUDA errors rb.GetAngularDampingAttr().Set( 5.0 ) # default = 0.5; increased to improve stability rb.GetMaxAngularVelocityAttr().Set( 2 / math.pi * 180 ) # default = 64.0; reduced to prevent CUDA errors else: rb.GetLinearDampingAttr().Set(0.0) rb.GetMaxLinearVelocityAttr().Set(1000.0) rb.GetAngularDampingAttr().Set(0.5) rb.GetMaxAngularVelocityAttr().Set(64 / math.pi * 180) table_translation = np.array( [0.0, 0.0, self.cfg_base.env.table_height * 0.5] ) table_orientation = np.array([1.0, 0.0, 0.0, 0.0]) table = FixedCuboid( prim_path=self.default_zero_env_path + "/table", name="table", translation=table_translation, orientation=table_orientation, scale=np.array( [ self.asset_info_franka_table.table_depth, self.asset_info_franka_table.table_width, self.cfg_base.env.table_height, ] ), size=1.0, color=np.array([0, 0, 0]), ) self.parse_controller_spec(add_to_stage=add_to_stage) def acquire_base_tensors(self): """Acquire tensors.""" self.num_dofs = 9 self.env_pos = self._env_pos self.dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.dof_vel = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.dof_torque = torch.zeros( (self.num_envs, self.num_dofs), device=self.device ) self.fingertip_contact_wrench = torch.zeros( (self.num_envs, 6), device=self.device ) self.ctrl_target_fingertip_midpoint_pos = torch.zeros( (self.num_envs, 3), device=self.device ) self.ctrl_target_fingertip_midpoint_quat = torch.zeros( (self.num_envs, 4), device=self.device ) self.ctrl_target_dof_pos = torch.zeros( (self.num_envs, self.num_dofs), device=self.device ) self.ctrl_target_gripper_dof_pos = torch.zeros( (self.num_envs, 2), device=self.device ) self.ctrl_target_fingertip_contact_wrench = torch.zeros( (self.num_envs, 6), device=self.device ) self.prev_actions = torch.zeros( (self.num_envs, self.num_actions), device=self.device ) def refresh_base_tensors(self): """Refresh tensors.""" if not self._env._world.is_playing(): return self.dof_pos = self.frankas.get_joint_positions(clone=False) self.dof_vel = self.frankas.get_joint_velocities(clone=False) # Jacobian shape: [4, 11, 6, 9] (root has no Jacobian) self.franka_jacobian = self.frankas.get_jacobians() self.franka_mass_matrix = self.frankas.get_mass_matrices(clone=False) self.arm_dof_pos = self.dof_pos[:, 0:7] self.arm_mass_matrix = self.franka_mass_matrix[ :, 0:7, 0:7 ] # for Franka arm (not gripper) self.hand_pos, self.hand_quat = self.frankas._hands.get_world_poses(clone=False) self.hand_pos -= self.env_pos hand_velocities = self.frankas._hands.get_velocities(clone=False) self.hand_linvel = hand_velocities[:, 0:3] self.hand_angvel = hand_velocities[:, 3:6] ( self.left_finger_pos, self.left_finger_quat, ) = self.frankas._lfingers.get_world_poses(clone=False) self.left_finger_pos -= self.env_pos left_finger_velocities = self.frankas._lfingers.get_velocities(clone=False) self.left_finger_linvel = left_finger_velocities[:, 0:3] self.left_finger_angvel = left_finger_velocities[:, 3:6] self.left_finger_jacobian = self.franka_jacobian[:, 8, 0:6, 0:7] left_finger_forces = self.frankas._lfingers.get_net_contact_forces(clone=False) self.left_finger_force = left_finger_forces[:, 0:3] ( self.right_finger_pos, self.right_finger_quat, ) = self.frankas._rfingers.get_world_poses(clone=False) self.right_finger_pos -= self.env_pos right_finger_velocities = self.frankas._rfingers.get_velocities(clone=False) self.right_finger_linvel = right_finger_velocities[:, 0:3] self.right_finger_angvel = right_finger_velocities[:, 3:6] self.right_finger_jacobian = self.franka_jacobian[:, 9, 0:6, 0:7] right_finger_forces = self.frankas._rfingers.get_net_contact_forces(clone=False) self.right_finger_force = right_finger_forces[:, 0:3] self.gripper_dof_pos = self.dof_pos[:, 7:9] ( self.fingertip_centered_pos, self.fingertip_centered_quat, ) = self.frankas._fingertip_centered.get_world_poses(clone=False) self.fingertip_centered_pos -= self.env_pos fingertip_centered_velocities = self.frankas._fingertip_centered.get_velocities( clone=False ) self.fingertip_centered_linvel = fingertip_centered_velocities[:, 0:3] self.fingertip_centered_angvel = fingertip_centered_velocities[:, 3:6] self.fingertip_centered_jacobian = self.franka_jacobian[:, 10, 0:6, 0:7] self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) / 2 self.fingertip_midpoint_pos = fc.translate_along_local_z( pos=self.finger_midpoint_pos, quat=self.hand_quat, offset=self.asset_info_franka_table.franka_finger_length, device=self.device, ) self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal # TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf) self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross( self.fingertip_centered_angvel, (self.fingertip_midpoint_pos - self.fingertip_centered_pos), dim=1, ) # From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity), # angular velocity of midpoint w.r.t. world is equal to sum of # angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world. # Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero. # Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world. self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal self.fingertip_midpoint_jacobian = ( self.left_finger_jacobian + self.right_finger_jacobian ) * 0.5 def parse_controller_spec(self, add_to_stage): """Parse controller specification into lower-level controller configuration.""" cfg_ctrl_keys = { "num_envs", "jacobian_type", "gripper_prop_gains", "gripper_deriv_gains", "motor_ctrl_mode", "gain_space", "ik_method", "joint_prop_gains", "joint_deriv_gains", "do_motion_ctrl", "task_prop_gains", "task_deriv_gains", "do_inertial_comp", "motion_ctrl_axes", "do_force_ctrl", "force_ctrl_method", "wrench_prop_gains", "force_ctrl_axes", } self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys} self.cfg_ctrl["num_envs"] = self.num_envs self.cfg_ctrl["jacobian_type"] = self.cfg_task.ctrl.all.jacobian_type self.cfg_ctrl["gripper_prop_gains"] = torch.tensor( self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device ).repeat((self.num_envs, 1)) ctrl_type = self.cfg_task.ctrl.ctrl_type if ctrl_type == "gym_default": self.cfg_ctrl["motor_ctrl_mode"] = "gym" self.cfg_ctrl["gain_space"] = "joint" self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.gym_default.ik_method self.cfg_ctrl["joint_prop_gains"] = torch.tensor( self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["joint_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["gripper_prop_gains"] = torch.tensor( self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device ).repeat((self.num_envs, 1)) elif ctrl_type == "joint_space_ik": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "joint" self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_ik.ik_method self.cfg_ctrl["joint_prop_gains"] = torch.tensor( self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["joint_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_inertial_comp"] = False elif ctrl_type == "joint_space_id": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "joint" self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_id.ik_method self.cfg_ctrl["joint_prop_gains"] = torch.tensor( self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["joint_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_inertial_comp"] = True elif ctrl_type == "task_space_impedance": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "task" self.cfg_ctrl["do_motion_ctrl"] = True self.cfg_ctrl["task_prop_gains"] = torch.tensor( self.cfg_task.ctrl.task_space_impedance.task_prop_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["task_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.task_space_impedance.task_deriv_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_inertial_comp"] = False self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_force_ctrl"] = False elif ctrl_type == "operational_space_motion": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "task" self.cfg_ctrl["do_motion_ctrl"] = True self.cfg_ctrl["task_prop_gains"] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.task_prop_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["task_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.task_deriv_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_inertial_comp"] = True self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_force_ctrl"] = False elif ctrl_type == "open_loop_force": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "task" self.cfg_ctrl["do_motion_ctrl"] = False self.cfg_ctrl["do_force_ctrl"] = True self.cfg_ctrl["force_ctrl_method"] = "open" self.cfg_ctrl["force_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device ).repeat((self.num_envs, 1)) elif ctrl_type == "closed_loop_force": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "task" self.cfg_ctrl["do_motion_ctrl"] = False self.cfg_ctrl["do_force_ctrl"] = True self.cfg_ctrl["force_ctrl_method"] = "closed" self.cfg_ctrl["wrench_prop_gains"] = torch.tensor( self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["force_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device ).repeat((self.num_envs, 1)) elif ctrl_type == "hybrid_force_motion": self.cfg_ctrl["motor_ctrl_mode"] = "manual" self.cfg_ctrl["gain_space"] = "task" self.cfg_ctrl["do_motion_ctrl"] = True self.cfg_ctrl["task_prop_gains"] = torch.tensor( self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["task_deriv_gains"] = torch.tensor( self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_inertial_comp"] = True self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["do_force_ctrl"] = True self.cfg_ctrl["force_ctrl_method"] = "closed" self.cfg_ctrl["wrench_prop_gains"] = torch.tensor( self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains, device=self.device, ).repeat((self.num_envs, 1)) self.cfg_ctrl["force_ctrl_axes"] = torch.tensor( self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes, device=self.device, ).repeat((self.num_envs, 1)) if add_to_stage: if self.cfg_ctrl["motor_ctrl_mode"] == "gym": for i in range(7): joint_prim = self._stage.GetPrimAtPath( self.default_zero_env_path + f"/franka/panda_link{i}/panda_joint{i+1}" ) drive = UsdPhysics.DriveAPI.Apply(joint_prim, "angular") drive.GetStiffnessAttr().Set( self.cfg_ctrl["joint_prop_gains"][0, i].item() * np.pi / 180 ) drive.GetDampingAttr().Set( self.cfg_ctrl["joint_deriv_gains"][0, i].item() * np.pi / 180 ) for i in range(2): joint_prim = self._stage.GetPrimAtPath( self.default_zero_env_path + f"/franka/panda_hand/panda_finger_joint{i+1}" ) drive = UsdPhysics.DriveAPI.Apply(joint_prim, "linear") drive.GetStiffnessAttr().Set( self.cfg_ctrl["gripper_deriv_gains"][0, i].item() ) drive.GetDampingAttr().Set( self.cfg_ctrl["gripper_deriv_gains"][0, i].item() ) elif self.cfg_ctrl["motor_ctrl_mode"] == "manual": for i in range(7): joint_prim = self._stage.GetPrimAtPath( self.default_zero_env_path + f"/franka/panda_link{i}/panda_joint{i+1}" ) joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "angular") drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None") drive.GetStiffnessAttr().Set(0.0) drive.GetDampingAttr().Set(0.0) for i in range(2): joint_prim = self._stage.GetPrimAtPath( self.default_zero_env_path + f"/franka/panda_hand/panda_finger_joint{i+1}" ) joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "linear") drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None") drive.GetStiffnessAttr().Set(0.0) drive.GetDampingAttr().Set(0.0) def generate_ctrl_signals(self): """Get Jacobian. Set Franka DOF position targets or DOF torques.""" # Get desired Jacobian if self.cfg_ctrl["jacobian_type"] == "geometric": self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian elif self.cfg_ctrl["jacobian_type"] == "analytic": self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian( fingertip_quat=self.fingertip_quat, fingertip_jacobian=self.fingertip_midpoint_jacobian, num_envs=self.num_envs, device=self.device, ) # Set PD joint pos target or joint torque if self.cfg_ctrl["motor_ctrl_mode"] == "gym": self._set_dof_pos_target() elif self.cfg_ctrl["motor_ctrl_mode"] == "manual": self._set_dof_torque() def _set_dof_pos_target(self): """Set Franka DOF position target to move fingertips towards target pose.""" self.ctrl_target_dof_pos = fc.compute_dof_pos_target( cfg_ctrl=self.cfg_ctrl, arm_dof_pos=self.arm_dof_pos, fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, jacobian=self.fingertip_midpoint_jacobian_tf, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, device=self.device, ) self.frankas.set_joint_position_targets(positions=self.ctrl_target_dof_pos) def _set_dof_torque(self): """Set Franka DOF torque to move fingertips towards target pose.""" self.dof_torque = fc.compute_dof_torque( cfg_ctrl=self.cfg_ctrl, dof_pos=self.dof_pos, dof_vel=self.dof_vel, fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, fingertip_midpoint_linvel=self.fingertip_midpoint_linvel, fingertip_midpoint_angvel=self.fingertip_midpoint_angvel, left_finger_force=self.left_finger_force, right_finger_force=self.right_finger_force, jacobian=self.fingertip_midpoint_jacobian_tf, arm_mass_matrix=self.arm_mass_matrix, ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench, device=self.device, ) self.frankas.set_joint_efforts(efforts=self.dof_torque) def enable_gravity(self, gravity_mag): """Enable gravity.""" gravity = [0.0, 0.0, -gravity_mag] self._env._world._physics_sim_view.set_gravity( carb.Float3(gravity[0], gravity[1], gravity[2]) ) def disable_gravity(self): """Disable gravity.""" gravity = [0.0, 0.0, 0.0] self._env._world._physics_sim_view.set_gravity( carb.Float3(gravity[0], gravity[1], gravity[2]) )
26,838
Python
45.921329
148
0.588419
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for task class configurations. Used by Hydra. Defines template for task class YAML files. Not enforced. """ from __future__ import annotations from dataclasses import dataclass @dataclass class Sim: use_gpu_pipeline: bool # use GPU pipeline dt: float # timestep size gravity: list[float] # gravity vector @dataclass class Env: numObservations: int # number of observations per env; camel case required by VecTask numActions: int # number of actions per env; camel case required by VecTask numEnvs: int # number of envs; camel case required by VecTask @dataclass class Randomize: franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7) @dataclass class RL: pos_action_scale: list[ float ] # scale on pos displacement targets (3), to convert [-1, 1] to +- x m rot_action_scale: list[ float ] # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad force_action_scale: list[ float ] # scale on force targets (3), to convert [-1, 1] to +- x N torque_action_scale: list[ float ] # scale on torque targets (3), to convert [-1, 1] to +- x Nm clamp_rot: bool # clamp small values of rotation actions to zero clamp_rot_thresh: float # smallest acceptable value max_episode_length: int # max number of timesteps in each episode @dataclass class All: jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic} gripper_prop_gains: list[ float ] # proportional gains on left and right Franka gripper finger DOF position (2) gripper_deriv_gains: list[ float ] # derivative gains on left and right Franka gripper finger DOF position (2) @dataclass class GymDefault: joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7) joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7) @dataclass class JointSpaceIK: ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd} joint_prop_gains: list[int] joint_deriv_gains: list[int] @dataclass class JointSpaceID: ik_method: str joint_prop_gains: list[int] joint_deriv_gains: list[int] @dataclass class TaskSpaceImpedance: motion_ctrl_axes: list[bool] # axes for which to enable motion control {0, 1} (6) task_prop_gains: list[float] # proportional gains on Franka fingertip pose (6) task_deriv_gains: list[float] # derivative gains on Franka fingertip pose (6) @dataclass class OperationalSpaceMotion: motion_ctrl_axes: list[bool] task_prop_gains: list[float] task_deriv_gains: list[float] @dataclass class OpenLoopForce: force_ctrl_axes: list[bool] # axes for which to enable force control {0, 1} (6) @dataclass class ClosedLoopForce: force_ctrl_axes: list[bool] wrench_prop_gains: list[float] # proportional gains on Franka finger force (6) @dataclass class HybridForceMotion: motion_ctrl_axes: list[bool] task_prop_gains: list[float] task_deriv_gains: list[float] force_ctrl_axes: list[bool] wrench_prop_gains: list[float] @dataclass class Ctrl: ctrl_type: str # {gym_default, # joint_space_ik, # joint_space_id, # task_space_impedance, # operational_space_motion, # open_loop_force, # closed_loop_force, # hybrid_force_motion} gym_default: GymDefault joint_space_ik: JointSpaceIK joint_space_id: JointSpaceID task_space_impedance: TaskSpaceImpedance operational_space_motion: OperationalSpaceMotion open_loop_force: OpenLoopForce closed_loop_force: ClosedLoopForce hybrid_force_motion: HybridForceMotion @dataclass class FactorySchemaConfigTask: name: str physics_engine: str sim: Sim env: Env rl: RL ctrl: Ctrl
5,517
Python
30.895954
130
0.719413
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt place task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPlace """ import asyncio import hydra import math import omegaconf import torch from typing import Tuple import omni.kit from omni.isaac.core.simulation_context import SimulationContext import omni.isaac.core.utils.torch as torch_utils from omni.isaac.core.utils.torch.transformations import tf_combine import omniisaacgymenvs.tasks.factory.factory_control as fc from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from omniisaacgymenvs.tasks.factory.factory_schema_config_task import ( FactorySchemaConfigTask, ) class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, name, sim_config, env, offset=None) -> None: """Initialize environment superclass. Initialize instance variables.""" super().__init__(name, sim_config, env) self._get_task_yaml_params() def _get_task_yaml_params(self) -> None: """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg) self.max_episode_length = ( self.cfg_task.rl.max_episode_length ) # required instance var for VecTask asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][ "factory" ][ "yaml" ] # strip superfluous nesting ppo_path = "train/FactoryTaskNutBoltPlacePPO.yaml" # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting def post_reset(self) -> None: """Reset the world. Called only once, before simulation begins.""" if self.cfg_task.sim.disable_gravity: self.disable_gravity() self.acquire_base_tensors() self._acquire_task_tensors() self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() # Reset all envs indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device) asyncio.ensure_future( self.reset_idx_async(indices, randomize_gripper_pose=False) ) def _acquire_task_tensors(self) -> None: """Acquire tensors.""" # Nut-bolt tensors self.nut_base_pos_local = self.bolt_head_heights * torch.tensor( [0.0, 0.0, 1.0], device=self.device ).repeat((self.num_envs, 1)) bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths self.bolt_tip_pos_local = bolt_heights * torch.tensor( [0.0, 0.0, 1.0], device=self.device ).repeat((self.num_envs, 1)) # Keypoint tensors self.keypoint_offsets = ( self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale ) self.keypoints_nut = torch.zeros( (self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device, ) self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device) self.identity_quat = ( torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) .unsqueeze(0) .repeat(self.num_envs, 1) ) self.actions = torch.zeros( (self.num_envs, self.num_actions), device=self.device ) def pre_physics_step(self, actions) -> None: """Reset environments. Apply actions from policy. Simulation step called after this method.""" if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids, randomize_gripper_pose=True) self.actions = actions.clone().to( self.device ) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets( actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True ) async def pre_physics_step_async(self, actions) -> None: """Reset environments. Apply actions from policy. Simulation step called after this method.""" if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: await self.reset_idx_async(env_ids, randomize_gripper_pose=True) self.actions = actions.clone().to( self.device ) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets( actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True, ) def reset_idx(self, env_ids, randomize_gripper_pose) -> None: """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) # Close gripper onto nut self.disable_gravity() # to prevent nut from falling self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps) self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag) if randomize_gripper_pose: self._randomize_gripper_pose( env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps ) self._reset_buffers(env_ids) async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None: """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) # Close gripper onto nut self.disable_gravity() # to prevent nut from falling await self._close_gripper_async( sim_steps=self.cfg_task.env.num_gripper_close_sim_steps ) self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag) if randomize_gripper_pose: await self._randomize_gripper_pose_async( env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps ) self._reset_buffers(env_ids) def _reset_franka(self, env_ids) -> None: """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat( ( torch.tensor( self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device, ).repeat((len(env_ids), 1)), (self.nut_widths_max * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact (self.nut_widths_max * 0.5) * 1.1, ), # buffer on gripper DOF pos to prevent initial contact dim=-1, ) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) def _reset_object(self, env_ids) -> None: """Reset root states of nut and bolt.""" # Randomize root state of nut within gripper self.nut_pos[env_ids, 0] = 0.0 self.nut_pos[env_ids, 1] = 0.0 fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset nut_base_pos_local = self.bolt_head_heights.squeeze(-1) self.nut_pos[env_ids, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local nut_noise_pos_in_gripper = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag( torch.tensor( self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device ) ) self.nut_pos[env_ids, :] += nut_noise_pos_in_gripper[env_ids] nut_rot_euler = torch.tensor( [0.0, 0.0, math.pi * 0.5], device=self.device ).repeat(len(env_ids), 1) nut_noise_rot_in_gripper = 2 * ( torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper nut_rot_euler[:, 2] += nut_noise_rot_in_gripper nut_rot_quat = torch_utils.quat_from_euler_xyz( nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2] ) self.nut_quat[env_ids, :] = nut_rot_quat self.nut_linvel[env_ids, :] = 0.0 self.nut_angvel[env_ids, :] = 0.0 indices = env_ids.to(dtype=torch.int32) self.nuts.set_world_poses( self.nut_pos[env_ids] + self.env_pos[env_ids], self.nut_quat[env_ids], indices, ) self.nuts.set_velocities( torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1), indices, ) # Randomize root state of bolt bolt_noise_xy = 2 * ( torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor( self.cfg_task.randomize.bolt_pos_xy_noise, dtype=torch.float32, device=self.device, ) ) self.bolt_pos[env_ids, 0] = ( self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0] ) self.bolt_pos[env_ids, 1] = ( self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1] ) self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height self.bolt_quat[env_ids, :] = torch.tensor( [1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device ).repeat(len(env_ids), 1) indices = env_ids.to(dtype=torch.int32) self.bolts.set_world_poses( self.bolt_pos[env_ids] + self.env_pos[env_ids], self.bolt_quat[env_ids], indices, ) def _reset_buffers(self, env_ids) -> None: """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _apply_actions_as_ctrl_targets( self, actions, ctrl_target_gripper_dof_pos, do_scale ) -> None: """Apply actions from policy as position/rotation/force/torque targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag( torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device) ) self.ctrl_target_fingertip_midpoint_pos = ( self.fingertip_midpoint_pos + pos_actions ) # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag( torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device) ) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat( self.num_envs, 1 ), ) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul( rot_actions_quat, self.fingertip_midpoint_quat ) if self.cfg_ctrl["do_force_ctrl"]: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor( self.cfg_task.rl.force_action_scale, device=self.device ) ) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor( self.cfg_task.rl.torque_action_scale, device=self.device ) ) self.ctrl_target_fingertip_contact_wrench = torch.cat( (force_actions, torque_actions), dim=-1 ) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def post_physics_step( self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Step buffers. Refresh tensors. Compute observations and reward. Reset environments.""" self.progress_buf[:] += 1 if self._env._world.is_playing(): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.get_observations() self.calculate_metrics() self.get_extras() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras def _refresh_task_tensors(self) -> None: """Refresh tensors.""" # Compute pos of keypoints on gripper, nut, and bolt in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_nut[:, idx] = tf_combine( self.nut_quat, self.nut_pos, self.identity_quat, (keypoint_offset + self.nut_base_pos_local), )[1] self.keypoints_bolt[:, idx] = tf_combine( self.bolt_quat, self.bolt_pos, self.identity_quat, (keypoint_offset + self.bolt_tip_pos_local), )[1] def get_observations(self) -> dict: """Compute observations.""" # Shallow copies of tensors obs_tensors = [ self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_pos, self.nut_quat, self.bolt_pos, self.bolt_quat, ] if self.cfg_task.rl.add_obs_bolt_tip_pos: obs_tensors += [self.bolt_tip_pos_local] self.obs_buf = torch.cat( obs_tensors, dim=-1 ) # shape = (num_envs, num_observations) observations = {self.frankas.name: {"obs_buf": self.obs_buf}} return observations def calculate_metrics(self) -> None: """Update reset and reward buffers.""" self._update_reset_buf() self._update_rew_buf() def _update_reset_buf(self) -> None: """Assign environments for reset if successful or failed.""" # If max episode length has been reached self.reset_buf[:] = torch.where( self.progress_buf[:] >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf, ) def _update_rew_buf(self) -> None: """Compute reward at current timestep.""" keypoint_reward = -self._get_keypoint_dist() action_penalty = ( torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale ) self.rew_buf[:] = ( keypoint_reward * self.cfg_task.rl.keypoint_reward_scale - action_penalty * self.cfg_task.rl.action_penalty_scale ) # In this policy, episode length is constant across all envs is_last_step = self.progress_buf[0] == self.max_episode_length - 1 if is_last_step: # Check if nut is close enough to bolt is_nut_close_to_bolt = self._check_nut_close_to_bolt() self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus self.extras["successes"] = torch.mean(is_nut_close_to_bolt.float()) def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor: """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device) keypoint_offsets[:, -1] = ( torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5 ) return keypoint_offsets def _get_keypoint_dist(self) -> torch.Tensor: """Get keypoint distance between nut and bolt.""" keypoint_dist = torch.sum( torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1 ) return keypoint_dist def _randomize_gripper_pose(self, env_ids, sim_steps) -> None: """Move gripper to random pose.""" # Step once to update PhysX with new joint positions and velocities from reset_franka() SimulationContext.step(self._env._world, render=True) # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device ) + torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device ) self.ctrl_target_fingertip_midpoint_pos = ( self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1 ) ) fingertip_midpoint_pos_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device ) ) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = ( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device, ) .unsqueeze(0) .repeat(self.num_envs, 1) ) fingertip_midpoint_rot_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device ) ) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2], ) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets( actions=actions, ctrl_target_gripper_dof_pos=0.0, do_scale=False, ) SimulationContext.step(self._env._world, render=True) self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) # Step once to update PhysX with new joint velocities SimulationContext.step(self._env._world, render=True) async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None: """Move gripper to random pose.""" # Step once to update PhysX with new joint positions and velocities from reset_franka() self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device ) + torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device ) self.ctrl_target_fingertip_midpoint_pos = ( self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1 ) ) fingertip_midpoint_pos_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device ) ) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = ( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device, ) .unsqueeze(0) .repeat(self.num_envs, 1) ) fingertip_midpoint_rot_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device ) ) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2], ) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets( actions=actions, ctrl_target_gripper_dof_pos=0.0, do_scale=False, ) self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) # Step once to update PhysX with new joint velocities self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() def _close_gripper(self, sim_steps) -> None: """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps) def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps) -> None: """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros( (self.num_envs, 6), device=self.device ) # No hand motion # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets( delta_hand_pose, gripper_dof_pos, do_scale=False ) SimulationContext.step(self._env._world, render=True) async def _close_gripper_async(self, sim_steps) -> None: """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode).""" await self._move_gripper_to_dof_pos_async( gripper_dof_pos=0.0, sim_steps=sim_steps ) async def _move_gripper_to_dof_pos_async( self, gripper_dof_pos, sim_steps ) -> None: """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros( (self.num_envs, 6), device=self.device ) # No hand motion # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets( delta_hand_pose, gripper_dof_pos, do_scale=False ) self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() def _check_nut_close_to_bolt(self) -> torch.Tensor: """Check if nut is close to bolt.""" keypoint_dist = torch.norm( self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1 ) is_nut_close_to_bolt = torch.where( torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf), ) return is_nut_close_to_bolt
29,034
Python
37.868809
131
0.594303
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_env.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for environment class configurations. Used by Hydra. Defines template for environment class YAML files. """ from dataclasses import dataclass @dataclass class Sim: disable_franka_collisions: bool # disable collisions between Franka and objects @dataclass class Env: env_name: str # name of scene @dataclass class FactorySchemaConfigEnv: sim: Sim env: Env
1,960
Python
36.711538
84
0.776531
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for task classes. Inherits ABC class. Inherited by task classes. Defines template for task classes. """ from abc import ABC, abstractmethod class FactoryABCTask(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize environment superclass.""" pass @abstractmethod def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def _acquire_task_tensors(self): """Acquire tensors.""" pass @abstractmethod def _refresh_task_tensors(self): """Refresh tensors.""" pass @abstractmethod def pre_physics_step(self): """Reset environments. Apply actions from policy as controller targets. Simulation step called after this method.""" pass @abstractmethod def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward.""" pass @abstractmethod def get_observations(self): """Compute observations.""" pass @abstractmethod def calculate_metrics(self): """Detect successes and failures. Update reward and reset buffers.""" pass @abstractmethod def _update_rew_buf(self): """Compute reward at current timestep.""" pass @abstractmethod def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" pass @abstractmethod def reset_idx(self): """Reset specified environments.""" pass @abstractmethod def _reset_franka(self): """Reset DOF states and DOF targets of Franka.""" pass @abstractmethod def _reset_object(self): """Reset root state of object.""" pass @abstractmethod def _reset_buffers(self): """Reset buffers.""" pass
3,492
Python
31.342592
124
0.69559
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_env.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for environment classes. Inherits ABC class. Inherited by environment classes. Defines template for environment classes. """ from abc import ABC, abstractmethod class FactoryABCEnv(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize base superclass. Acquire tensors.""" pass @abstractmethod def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def set_up_scene(self): """Set env options. Import assets. Create actors.""" pass @abstractmethod def _import_env_assets(self): """Set asset options. Import assets.""" pass @abstractmethod def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. pass
2,489
Python
37.906249
95
0.73644
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt screw task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltScrew """ import hydra import math import omegaconf import torch from typing import Tuple import omni.isaac.core.utils.torch as torch_utils import omniisaacgymenvs.tasks.factory.factory_control as fc from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from omniisaacgymenvs.tasks.factory.factory_schema_config_task import ( FactorySchemaConfigTask, ) class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, name, sim_config, env, offset=None) -> None: """Initialize environment superclass. Initialize instance variables.""" super().__init__(name, sim_config, env) self._get_task_yaml_params() def _get_task_yaml_params(self) -> None: """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg) self.max_episode_length = ( self.cfg_task.rl.max_episode_length ) # required instance var for VecTask asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][ "factory" ][ "yaml" ] # strip superfluous nesting ppo_path = "train/FactoryTaskNutBoltScrewPPO.yaml" # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting def post_reset(self) -> None: """Reset the world. Called only once, before simulation begins.""" if self.cfg_task.sim.disable_gravity: self.disable_gravity() self.acquire_base_tensors() self._acquire_task_tensors() self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() # Reset all envs indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device) self.reset_idx(indices) def _acquire_task_tensors(self) -> None: """Acquire tensors.""" target_heights = ( self.cfg_base.env.table_height + self.bolt_head_heights + self.nut_heights * 0.5 ) self.target_pos = target_heights * torch.tensor( [0.0, 0.0, 1.0], device=self.device ).repeat((self.num_envs, 1)) self.identity_quat = ( torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) .unsqueeze(0) .repeat(self.num_envs, 1) ) self.actions = torch.zeros( (self.num_envs, self.num_actions), device=self.device ) def pre_physics_step(self, actions) -> None: """Reset environments. Apply actions from policy. Simulation step called after this method.""" if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to( self.device ) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets( actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True ) def reset_idx(self, env_ids) -> None: """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) self._reset_buffers(env_ids) def _reset_franka(self, env_ids) -> None: """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat( ( torch.tensor( self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device, ).repeat((len(env_ids), 1)), (self.nut_widths_max[env_ids] * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact (self.nut_widths_max[env_ids] * 0.5) * 1.1, ), # buffer on gripper DOF pos to prevent initial contact dim=-1, ) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) def _reset_object(self, env_ids) -> None: """Reset root state of nut.""" nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids] self.nut_pos[env_ids, :] = nut_pos * torch.tensor( [0.0, 0.0, 1.0], device=self.device ).repeat(len(env_ids), 1) nut_rot = ( self.cfg_task.randomize.nut_rot_initial * torch.ones((len(env_ids), 1), device=self.device) * math.pi / 180.0 ) self.nut_quat[env_ids, :] = torch.cat( ( torch.cos(nut_rot * 0.5), torch.zeros((len(env_ids), 1), device=self.device), torch.zeros((len(env_ids), 1), device=self.device), torch.sin(nut_rot * 0.5), ), dim=-1, ) self.nut_linvel[env_ids, :] = 0.0 self.nut_angvel[env_ids, :] = 0.0 indices = env_ids.to(dtype=torch.int32) self.nuts.set_world_poses( self.nut_pos[env_ids] + self.env_pos[env_ids], self.nut_quat[env_ids], indices, ) self.nuts.set_velocities( torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1), indices, ) def _reset_buffers(self, env_ids) -> None: """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _apply_actions_as_ctrl_targets( self, actions, ctrl_target_gripper_dof_pos, do_scale ) -> None: """Apply actions from policy as position/rotation/force/torque targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if self.cfg_task.rl.unidirectional_pos: pos_actions[:, 2] = -(pos_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: pos_actions = pos_actions @ torch.diag( torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device) ) self.ctrl_target_fingertip_midpoint_pos = ( self.fingertip_midpoint_pos + pos_actions ) # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if self.cfg_task.rl.unidirectional_rot: rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: rot_actions = rot_actions @ torch.diag( torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device) ) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat( self.num_envs, 1 ), ) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul( rot_actions_quat, self.fingertip_midpoint_quat ) if self.cfg_ctrl["do_force_ctrl"]: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if self.cfg_task.rl.unidirectional_force: force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor( self.cfg_task.rl.force_action_scale, device=self.device ) ) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor( self.cfg_task.rl.torque_action_scale, device=self.device ) ) self.ctrl_target_fingertip_contact_wrench = torch.cat( (force_actions, torque_actions), dim=-1 ) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def post_physics_step( self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Step buffers. Refresh tensors. Compute observations and reward. Reset environments.""" self.progress_buf[:] += 1 if self._env._world.is_playing(): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.get_observations() self.calculate_metrics() self.get_extras() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras def _refresh_task_tensors(self) -> None: """Refresh tensors.""" self.fingerpad_midpoint_pos = fc.translate_along_local_z( pos=self.finger_midpoint_pos, quat=self.hand_quat, offset=self.asset_info_franka_table.franka_finger_length - self.asset_info_franka_table.franka_fingerpad_length * 0.5, device=self.device, ) self.finger_nut_keypoint_dist = self._get_keypoint_dist(body="finger_nut") self.nut_keypoint_dist = self._get_keypoint_dist(body="nut") self.nut_dist_to_target = torch.norm( self.target_pos - self.nut_com_pos, p=2, dim=-1 ) # distance between nut COM and target self.nut_dist_to_fingerpads = torch.norm( self.fingerpad_midpoint_pos - self.nut_com_pos, p=2, dim=-1 ) # distance between nut COM and midpoint between centers of fingerpads self.was_success = torch.zeros_like(self.progress_buf, dtype=torch.bool) def get_observations(self) -> dict: """Compute observations.""" # Shallow copies of tensors obs_tensors = [ self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_com_pos, self.nut_com_quat, self.nut_com_linvel, self.nut_com_angvel, ] if self.cfg_task.rl.add_obs_finger_force: obs_tensors += [self.left_finger_force, self.right_finger_force] else: obs_tensors += [ torch.zeros_like(self.left_finger_force), torch.zeros_like(self.right_finger_force), ] self.obs_buf = torch.cat( obs_tensors, dim=-1 ) # shape = (num_envs, num_observations) observations = {self.frankas.name: {"obs_buf": self.obs_buf}} return observations def calculate_metrics(self) -> None: """Update reset and reward buffers.""" # Get successful and failed envs at current timestep curr_successes = self._get_curr_successes() curr_failures = self._get_curr_failures(curr_successes) self._update_reset_buf(curr_successes, curr_failures) self._update_rew_buf(curr_successes) if torch.any(self.is_expired): self.extras["successes"] = torch.mean(curr_successes.float()) def _update_reset_buf(self, curr_successes, curr_failures) -> None: """Assign environments for reset if successful or failed.""" self.reset_buf[:] = self.is_expired def _update_rew_buf(self, curr_successes) -> None: """Compute reward at current timestep.""" keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist) action_penalty = torch.norm(self.actions, p=2, dim=-1) self.rew_buf[:] = ( keypoint_reward * self.cfg_task.rl.keypoint_reward_scale - action_penalty * self.cfg_task.rl.action_penalty_scale + curr_successes * self.cfg_task.rl.success_bonus ) def _get_keypoint_dist(self, body) -> torch.Tensor: """Get keypoint distance.""" axis_length = ( self.asset_info_franka_table.franka_hand_length + self.asset_info_franka_table.franka_finger_length ) if body == "finger" or body == "nut": # Keypoint distance between finger/nut and target if body == "finger": self.keypoint1 = self.fingertip_midpoint_pos self.keypoint2 = fc.translate_along_local_z( pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device, ) elif body == "nut": self.keypoint1 = self.nut_com_pos self.keypoint2 = fc.translate_along_local_z( pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device, ) self.keypoint1_targ = self.target_pos self.keypoint2_targ = self.keypoint1_targ + torch.tensor( [0.0, 0.0, axis_length], device=self.device ) elif body == "finger_nut": # Keypoint distance between finger and nut self.keypoint1 = self.fingerpad_midpoint_pos self.keypoint2 = fc.translate_along_local_z( pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device, ) self.keypoint1_targ = self.nut_com_pos self.keypoint2_targ = fc.translate_along_local_z( pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device, ) self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0 self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0 self.keypoint3_targ = ( self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0 ) self.keypoint4_targ = ( self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0 ) keypoint_dist = ( torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1) + torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1) + torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1) + torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1) ) return keypoint_dist def _get_curr_successes(self) -> torch.Tensor: """Get success mask at current timestep.""" curr_successes = torch.zeros( (self.num_envs,), dtype=torch.bool, device=self.device ) # If nut is close enough to target pos is_close = torch.where( self.nut_dist_to_target < self.thread_pitches.squeeze(-1) * 5, torch.ones_like(curr_successes), torch.zeros_like(curr_successes), ) curr_successes = torch.logical_or(curr_successes, is_close) return curr_successes def _get_curr_failures(self, curr_successes) -> torch.Tensor: """Get failure mask at current timestep.""" curr_failures = torch.zeros( (self.num_envs,), dtype=torch.bool, device=self.device ) # If max episode length has been reached self.is_expired = torch.where( self.progress_buf[:] >= self.cfg_task.rl.max_episode_length, torch.ones_like(curr_failures), curr_failures, ) # If nut is too far from target pos self.is_far = torch.where( self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh, torch.ones_like(curr_failures), curr_failures, ) # If nut has slipped (distance-based definition) self.is_slipped = torch.where( self.nut_dist_to_fingerpads > self.asset_info_franka_table.franka_fingerpad_length * 0.5 + self.nut_heights.squeeze(-1) * 0.5, torch.ones_like(curr_failures), curr_failures, ) self.is_slipped = torch.logical_and( self.is_slipped, torch.logical_not(curr_successes) ) # ignore slip if successful # If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt) self.is_fallen = torch.logical_and( torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1) > self.bolt_widths.squeeze(-1) * 0.5, self.nut_com_pos[:, 2] < self.cfg_base.env.table_height + self.bolt_head_heights.squeeze(-1) + self.bolt_shank_lengths.squeeze(-1) + self.nut_heights.squeeze(-1) * 0.5, ) curr_failures = torch.logical_or(curr_failures, self.is_expired) curr_failures = torch.logical_or(curr_failures, self.is_far) curr_failures = torch.logical_or(curr_failures, self.is_slipped) curr_failures = torch.logical_or(curr_failures, self.is_fallen) return curr_failures
20,039
Python
37.390805
131
0.589051
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt pick task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPick """ import asyncio import hydra import omegaconf import torch import omni.kit from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.torch.transformations import tf_combine from typing import Tuple import omni.isaac.core.utils.torch as torch_utils import omniisaacgymenvs.tasks.factory.factory_control as fc from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from omniisaacgymenvs.tasks.factory.factory_schema_config_task import ( FactorySchemaConfigTask, ) class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, name, sim_config, env, offset=None) -> None: """Initialize environment superclass. Initialize instance variables.""" super().__init__(name, sim_config, env) self._get_task_yaml_params() def _get_task_yaml_params(self) -> None: """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg) self.max_episode_length = ( self.cfg_task.rl.max_episode_length ) # required instance var for VecTask asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][ "factory" ][ "yaml" ] # strip superfluous nesting ppo_path = "train/FactoryTaskNutBoltPickPPO.yaml" # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting def post_reset(self) -> None: """Reset the world. Called only once, before simulation begins.""" if self.cfg_task.sim.disable_gravity: self.disable_gravity() self.acquire_base_tensors() self._acquire_task_tensors() self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() # Reset all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) asyncio.ensure_future( self.reset_idx_async(indices, randomize_gripper_pose=False) ) def _acquire_task_tensors(self) -> None: """Acquire tensors.""" # Grasp pose tensors nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor( [0.0, 0.0, 1.0], device=self.device ).repeat((self.num_envs, 1)) self.nut_grasp_quat_local = ( torch.tensor([0.0, 0.0, 1.0, 0.0], device=self.device) .unsqueeze(0) .repeat(self.num_envs, 1) ) # Keypoint tensors self.keypoint_offsets = ( self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale ) self.keypoints_gripper = torch.zeros( (self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device, ) self.keypoints_nut = torch.zeros_like( self.keypoints_gripper, device=self.device ) self.identity_quat = ( torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) .unsqueeze(0) .repeat(self.num_envs, 1) ) self.actions = torch.zeros( (self.num_envs, self.num_actions), device=self.device ) def pre_physics_step(self, actions) -> None: """Reset environments. Apply actions from policy. Simulation step called after this method.""" if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids, randomize_gripper_pose=True) self.actions = actions.clone().to( self.device ) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets( actions=self.actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=True, ) async def pre_physics_step_async(self, actions) -> None: """Reset environments. Apply actions from policy. Simulation step called after this method.""" if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: await self.reset_idx_async(env_ids, randomize_gripper_pose=True) self.actions = actions.clone().to( self.device ) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets( actions=self.actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=True, ) def reset_idx(self, env_ids, randomize_gripper_pose) -> None: """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) if randomize_gripper_pose: self._randomize_gripper_pose( env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps ) self._reset_buffers(env_ids) async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None: """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) if randomize_gripper_pose: await self._randomize_gripper_pose_async( env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps ) self._reset_buffers(env_ids) def _reset_franka(self, env_ids) -> None: """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat( ( torch.tensor( self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device, ), torch.tensor( [self.asset_info_franka_table.franka_gripper_width_max], device=self.device, ), torch.tensor( [self.asset_info_franka_table.franka_gripper_width_max], device=self.device, ), ), dim=-1, ) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) def _reset_object(self, env_ids) -> None: """Reset root states of nut and bolt.""" # Randomize root state of nut nut_noise_xy = 2 * ( torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] nut_noise_xy = nut_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.nut_pos_xy_noise, device=self.device) ) self.nut_pos[env_ids, 0] = ( self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[env_ids, 0] ) self.nut_pos[env_ids, 1] = ( self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[env_ids, 1] ) self.nut_pos[ env_ids, 2 ] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1) self.nut_quat[env_ids, :] = torch.tensor( [1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device ).repeat(len(env_ids), 1) self.nut_linvel[env_ids, :] = 0.0 self.nut_angvel[env_ids, :] = 0.0 indices = env_ids.to(dtype=torch.int32) self.nuts.set_world_poses( self.nut_pos[env_ids] + self.env_pos[env_ids], self.nut_quat[env_ids], indices, ) self.nuts.set_velocities( torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1), indices, ) # Randomize root state of bolt bolt_noise_xy = 2 * ( torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device) ) self.bolt_pos[env_ids, 0] = ( self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0] ) self.bolt_pos[env_ids, 1] = ( self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1] ) self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height self.bolt_quat[env_ids, :] = torch.tensor( [1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device ).repeat(len(env_ids), 1) indices = env_ids.to(dtype=torch.int32) self.bolts.set_world_poses( self.bolt_pos[env_ids] + self.env_pos[env_ids], self.bolt_quat[env_ids], indices, ) def _reset_buffers(self, env_ids) -> None: """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _apply_actions_as_ctrl_targets( self, actions, ctrl_target_gripper_dof_pos, do_scale ) -> None: """Apply actions from policy as position/rotation/force/torque targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag( torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device) ) self.ctrl_target_fingertip_midpoint_pos = ( self.fingertip_midpoint_pos + pos_actions ) # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag( torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device) ) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat( self.num_envs, 1 ), ) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul( rot_actions_quat, self.fingertip_midpoint_quat ) if self.cfg_ctrl["do_force_ctrl"]: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor( self.cfg_task.rl.force_action_scale, device=self.device ) ) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor( self.cfg_task.rl.torque_action_scale, device=self.device ) ) self.ctrl_target_fingertip_contact_wrench = torch.cat( (force_actions, torque_actions), dim=-1 ) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def post_physics_step( self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Step buffers. Refresh tensors. Compute observations and reward. Reset environments.""" self.progress_buf[:] += 1 if self._env._world.is_playing(): # In this policy, episode length is constant is_last_step = self.progress_buf[0] == self.max_episode_length - 1 if is_last_step: # At this point, robot has executed RL policy. Now close gripper and lift (open-loop) if self.cfg_task.env.close_and_lift: self._close_gripper( sim_steps=self.cfg_task.env.num_gripper_close_sim_steps ) self._lift_gripper( franka_gripper_width=0.0, lift_distance=0.3, sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps, ) self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.get_observations() self.get_states() self.calculate_metrics() self.get_extras() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras async def post_physics_step_async(self): """Step buffers. Refresh tensors. Compute observations and reward. Reset environments.""" self.progress_buf[:] += 1 if self._env._world.is_playing(): # In this policy, episode length is constant is_last_step = self.progress_buf[0] == self.max_episode_length - 1 if self.cfg_task.env.close_and_lift: # At this point, robot has executed RL policy. Now close gripper and lift (open-loop) if is_last_step: await self._close_gripper_async( sim_steps=self.cfg_task.env.num_gripper_close_sim_steps ) await self._lift_gripper_async( sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps ) self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.get_observations() self.get_states() self.calculate_metrics() self.get_extras() return self.obs_buf, self.rew_buf, self.reset_buf, self.extras def _refresh_task_tensors(self): """Refresh tensors.""" # Compute pose of nut grasping frame self.nut_grasp_quat, self.nut_grasp_pos = tf_combine( self.nut_quat, self.nut_pos, self.nut_grasp_quat_local, self.nut_grasp_pos_local, ) # Compute pos of keypoints on gripper and nut in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_gripper[:, idx] = tf_combine( self.fingertip_midpoint_quat, self.fingertip_midpoint_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1), )[1] self.keypoints_nut[:, idx] = tf_combine( self.nut_grasp_quat, self.nut_grasp_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1), )[1] def get_observations(self) -> dict: """Compute observations.""" # Shallow copies of tensors obs_tensors = [ self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_grasp_pos, self.nut_grasp_quat, ] self.obs_buf = torch.cat( obs_tensors, dim=-1 ) # shape = (num_envs, num_observations) observations = {self.frankas.name: {"obs_buf": self.obs_buf}} return observations def calculate_metrics(self) -> None: """Update reward and reset buffers.""" self._update_reset_buf() self._update_rew_buf() def _update_reset_buf(self) -> None: """Assign environments for reset if successful or failed.""" # If max episode length has been reached self.reset_buf[:] = torch.where( self.progress_buf[:] >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf, ) def _update_rew_buf(self) -> None: """Compute reward at current timestep.""" keypoint_reward = -self._get_keypoint_dist() action_penalty = ( torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale ) self.rew_buf[:] = ( keypoint_reward * self.cfg_task.rl.keypoint_reward_scale - action_penalty * self.cfg_task.rl.action_penalty_scale ) # In this policy, episode length is constant across all envs is_last_step = self.progress_buf[0] == self.max_episode_length - 1 if is_last_step: # Check if nut is picked up and above table lift_success = self._check_lift_success(height_multiple=3.0) self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus self.extras["successes"] = torch.mean(lift_success.float()) def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor: """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device) keypoint_offsets[:, -1] = ( torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5 ) return keypoint_offsets def _get_keypoint_dist(self) -> torch.Tensor: """Get keypoint distance.""" keypoint_dist = torch.sum( torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1 ) return keypoint_dist def _close_gripper(self, sim_steps=20) -> None: """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps) def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20) -> None: """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros( (self.num_envs, 6), device=self.device ) # No hand motion self._apply_actions_as_ctrl_targets( delta_hand_pose, gripper_dof_pos, do_scale=False ) # Step sim for _ in range(sim_steps): SimulationContext.step(self._env._world, render=True) def _lift_gripper( self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20 ) -> None: """Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode).""" delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device) delta_hand_pose[:, 2] = lift_distance # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets( delta_hand_pose, franka_gripper_width, do_scale=False ) SimulationContext.step(self._env._world, render=True) async def _close_gripper_async(self, sim_steps=20) -> None: """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode).""" await self._move_gripper_to_dof_pos_async( gripper_dof_pos=0.0, sim_steps=sim_steps ) async def _move_gripper_to_dof_pos_async( self, gripper_dof_pos, sim_steps=20 ) -> None: """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) # No hand motion self._apply_actions_as_ctrl_targets( delta_hand_pose, gripper_dof_pos, do_scale=False ) # Step sim for _ in range(sim_steps): self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() async def _lift_gripper_async( self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20 ) -> None: """Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode).""" delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device) delta_hand_pose[:, 2] = lift_distance # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets( delta_hand_pose, franka_gripper_width, do_scale=False ) self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() def _check_lift_success(self, height_multiple) -> torch.Tensor: """Check if nut is above table by more than specified multiple times height of nut.""" lift_success = torch.where( self.nut_pos[:, 2] > self.cfg_base.env.table_height + self.nut_heights.squeeze(-1) * height_multiple, torch.ones((self.num_envs,), device=self.device), torch.zeros((self.num_envs,), device=self.device), ) return lift_success def _randomize_gripper_pose(self, env_ids, sim_steps) -> None: """Move gripper to random pose.""" # step once to update physx with the newly set joint positions from reset_franka() SimulationContext.step(self._env._world, render=True) # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device ) + torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device ) self.ctrl_target_fingertip_midpoint_pos = ( self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1 ) ) fingertip_midpoint_pos_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device ) ) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = ( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device, ) .unsqueeze(0) .repeat(self.num_envs, 1) ) fingertip_midpoint_rot_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device ) ) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2], ) # Step sim and render for _ in range(sim_steps): if not self._env._world.is_playing(): return self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets( actions=actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=False, ) SimulationContext.step(self._env._world, render=True) self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) # step once to update physx with the newly set joint velocities SimulationContext.step(self._env._world, render=True) async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None: """Move gripper to random pose.""" # step once to update physx with the newly set joint positions from reset_franka() await omni.kit.app.get_app().next_update_async() # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self.device ) + torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device ) self.ctrl_target_fingertip_midpoint_pos = ( self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1 ) ) fingertip_midpoint_pos_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device ) ) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = ( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device, ) .unsqueeze(0) .repeat(self.num_envs, 1) ) fingertip_midpoint_rot_noise = 2 * ( torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5 ) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor( self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device ) ) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2], ) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros( (self.num_envs, self.cfg_task.env.numActions), device=self.device ) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets( actions=actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=False, ) self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) indices = env_ids.to(dtype=torch.int32) self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices) # step once to update physx with the newly set joint velocities self._env._world.physics_sim_view.flush() await omni.kit.app.get_app().next_update_async()
31,568
Python
37.926017
131
0.589268
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for base class. Inherits ABC class. Inherited by base class. Defines template for base class. """ from abc import ABC, abstractmethod class FactoryABCBase(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize VecTask superclass.""" pass @abstractmethod def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def import_franka_assets(self): """Set Franka and table asset options. Import assets.""" pass @abstractmethod def refresh_base_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. pass @abstractmethod def parse_controller_spec(self): """Parse controller specification into lower-level controller configuration.""" pass @abstractmethod def generate_ctrl_signals(self): """Get Jacobian. Set Franka DOF position targets or DOF torques.""" pass @abstractmethod def enable_gravity(self): """Enable gravity.""" pass @abstractmethod def disable_gravity(self): """Disable gravity.""" pass
2,843
Python
35
88
0.721069
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for base class configuration. Used by Hydra. Defines template for base class YAML file. """ from dataclasses import dataclass @dataclass class Mode: export_scene: bool # export scene to USD export_states: bool # export states to NPY @dataclass class Sim: dt: float # timestep size (default = 1.0 / 60.0) num_substeps: int # number of substeps (default = 2) num_pos_iters: int # number of position iterations for PhysX TGS solver (default = 4) num_vel_iters: int # number of velocity iterations for PhysX TGS solver (default = 1) gravity_mag: float # magnitude of gravitational acceleration add_damping: bool # add damping to stabilize gripper-object interactions @dataclass class Env: env_spacing: float # lateral offset between envs franka_depth: float # depth offset of Franka base relative to env origin table_height: float # height of table franka_friction: float # coefficient of friction associated with Franka table_friction: float # coefficient of friction associated with table @dataclass class FactorySchemaConfigBase: mode: Mode sim: Sim env: Env
2,724
Python
39.073529
90
0.757342
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_env_nut_bolt.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for nut-bolt env. Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed. Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml. """ import hydra import numpy as np import torch from omni.isaac.core.prims import RigidPrimView, XFormPrim from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import add_reference_to_stage from omniisaacgymenvs.tasks.base.rl_task import RLTask from omni.physx.scripts import physicsUtils, utils from omniisaacgymenvs.robots.articulations.views.factory_franka_view import ( FactoryFrankaView, ) import omniisaacgymenvs.tasks.factory.factory_control as fc from omniisaacgymenvs.tasks.factory.factory_base import FactoryBase from omniisaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from omniisaacgymenvs.tasks.factory.factory_schema_config_env import ( FactorySchemaConfigEnv, ) class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv): def __init__(self, name, sim_config, env) -> None: """Initialize base superclass. Initialize instance variables.""" super().__init__(name, sim_config, env) self._get_env_yaml_params() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv) config_path = ( "task/FactoryEnvNutBolt.yaml" # relative to Hydra search path (cfg dir) ) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env["task"] # strip superfluous nesting asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][ "factory" ][ "yaml" ] # strip superfluous nesting def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._num_observations = self._task_cfg["env"]["numObservations"] self._num_actions = self._task_cfg["env"]["numActions"] self._env_spacing = self.cfg_base["env"]["env_spacing"] self._get_env_yaml_params() def set_up_scene(self, scene) -> None: """Import assets. Add to scene.""" # Increase buffer size to prevent overflow for Place and Screw tasks physxSceneAPI = self._env._world.get_physics_context()._physx_scene_api physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(256 * 1024 * 1024) self.import_franka_assets(add_to_stage=True) self.create_nut_bolt_material() RLTask.set_up_scene(self, scene, replicate_physics=False) self._import_env_assets(add_to_stage=True) self.frankas = FactoryFrankaView( prim_paths_expr="/World/envs/.*/franka", name="frankas_view" ) self.nuts = RigidPrimView( prim_paths_expr="/World/envs/.*/nut/factory_nut.*", name="nuts_view", track_contact_forces=True, ) self.bolts = RigidPrimView( prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*", name="bolts_view", track_contact_forces=True, ) scene.add(self.nuts) scene.add(self.bolts) scene.add(self.frankas) scene.add(self.frankas._hands) scene.add(self.frankas._lfingers) scene.add(self.frankas._rfingers) scene.add(self.frankas._fingertip_centered) return def initialize_views(self, scene) -> None: """Initialize views for extension workflow.""" super().initialize_views(scene) self.import_franka_assets(add_to_stage=False) self._import_env_assets(add_to_stage=False) if scene.object_exists("frankas_view"): scene.remove_object("frankas_view", registry_only=True) if scene.object_exists("nuts_view"): scene.remove_object("nuts_view", registry_only=True) if scene.object_exists("bolts_view"): scene.remove_object("bolts_view", registry_only=True) if scene.object_exists("hands_view"): scene.remove_object("hands_view", registry_only=True) if scene.object_exists("lfingers_view"): scene.remove_object("lfingers_view", registry_only=True) if scene.object_exists("rfingers_view"): scene.remove_object("rfingers_view", registry_only=True) if scene.object_exists("fingertips_view"): scene.remove_object("fingertips_view", registry_only=True) self.frankas = FactoryFrankaView( prim_paths_expr="/World/envs/.*/franka", name="frankas_view" ) self.nuts = RigidPrimView( prim_paths_expr="/World/envs/.*/nut/factory_nut.*", name="nuts_view" ) self.bolts = RigidPrimView( prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*", name="bolts_view" ) scene.add(self.nuts) scene.add(self.bolts) scene.add(self.frankas) scene.add(self.frankas._hands) scene.add(self.frankas._lfingers) scene.add(self.frankas._rfingers) scene.add(self.frankas._fingertip_centered) def create_nut_bolt_material(self): """Define nut and bolt material.""" self.nutboltPhysicsMaterialPath = "/World/Physics_Materials/NutBoltMaterial" utils.addRigidBodyMaterial( self._stage, self.nutboltPhysicsMaterialPath, density=self.cfg_env.env.nut_bolt_density, staticFriction=self.cfg_env.env.nut_bolt_friction, dynamicFriction=self.cfg_env.env.nut_bolt_friction, restitution=0.0, ) def _import_env_assets(self, add_to_stage=True): """Set nut and bolt asset options. Import assets.""" self.nut_heights = [] self.nut_widths_max = [] self.bolt_widths = [] self.bolt_head_heights = [] self.bolt_shank_lengths = [] self.thread_pitches = [] assets_root_path = get_assets_root_path() for i in range(0, self._num_envs): j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies)) subassembly = self.cfg_env.env.desired_subassemblies[j] components = list(self.asset_info_nut_bolt[subassembly]) nut_translation = torch.tensor( [ 0.0, self.cfg_env.env.nut_lateral_offset, self.cfg_base.env.table_height, ], device=self._device, ) nut_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device) nut_height = self.asset_info_nut_bolt[subassembly][components[0]]["height"] nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]][ "width_max" ] self.nut_heights.append(nut_height) self.nut_widths_max.append(nut_width_max) nut_file = ( assets_root_path + self.asset_info_nut_bolt[subassembly][components[0]]["usd_path"] ) if add_to_stage: add_reference_to_stage(nut_file, f"/World/envs/env_{i}" + "/nut") XFormPrim( prim_path=f"/World/envs/env_{i}" + "/nut", translation=nut_translation, orientation=nut_orientation, ) self._stage.GetPrimAtPath( f"/World/envs/env_{i}" + f"/nut/factory_{components[0]}/collisions" ).SetInstanceable( False ) # This is required to be able to edit physics material physicsUtils.add_physics_material_to_prim( self._stage, self._stage.GetPrimAtPath( f"/World/envs/env_{i}" + f"/nut/factory_{components[0]}/collisions/mesh_0" ), self.nutboltPhysicsMaterialPath, ) # applies articulation settings from the task configuration yaml file self._sim_config.apply_articulation_settings( "nut", self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/nut"), self._sim_config.parse_actor_config("nut"), ) bolt_translation = torch.tensor( [0.0, 0.0, self.cfg_base.env.table_height], device=self._device ) bolt_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device) bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]["width"] bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]][ "head_height" ] bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]][ "shank_length" ] self.bolt_widths.append(bolt_width) self.bolt_head_heights.append(bolt_head_height) self.bolt_shank_lengths.append(bolt_shank_length) if add_to_stage: bolt_file = ( assets_root_path + self.asset_info_nut_bolt[subassembly][components[1]]["usd_path"] ) add_reference_to_stage(bolt_file, f"/World/envs/env_{i}" + "/bolt") XFormPrim( prim_path=f"/World/envs/env_{i}" + "/bolt", translation=bolt_translation, orientation=bolt_orientation, ) self._stage.GetPrimAtPath( f"/World/envs/env_{i}" + f"/bolt/factory_{components[1]}/collisions" ).SetInstanceable( False ) # This is required to be able to edit physics material physicsUtils.add_physics_material_to_prim( self._stage, self._stage.GetPrimAtPath( f"/World/envs/env_{i}" + f"/bolt/factory_{components[1]}/collisions/mesh_0" ), self.nutboltPhysicsMaterialPath, ) # applies articulation settings from the task configuration yaml file self._sim_config.apply_articulation_settings( "bolt", self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/bolt"), self._sim_config.parse_actor_config("bolt"), ) thread_pitch = self.asset_info_nut_bolt[subassembly]["thread_pitch"] self.thread_pitches.append(thread_pitch) # For computing body COM pos self.nut_heights = torch.tensor( self.nut_heights, device=self._device ).unsqueeze(-1) self.bolt_head_heights = torch.tensor( self.bolt_head_heights, device=self._device ).unsqueeze(-1) # For setting initial state self.nut_widths_max = torch.tensor( self.nut_widths_max, device=self._device ).unsqueeze(-1) self.bolt_shank_lengths = torch.tensor( self.bolt_shank_lengths, device=self._device ).unsqueeze(-1) # For defining success or failure self.bolt_widths = torch.tensor( self.bolt_widths, device=self._device ).unsqueeze(-1) self.thread_pitches = torch.tensor( self.thread_pitches, device=self._device ).unsqueeze(-1) def refresh_env_tensors(self): """Refresh tensors.""" # Nut tensors self.nut_pos, self.nut_quat = self.nuts.get_world_poses(clone=False) self.nut_pos -= self.env_pos self.nut_com_pos = fc.translate_along_local_z( pos=self.nut_pos, quat=self.nut_quat, offset=self.bolt_head_heights + self.nut_heights * 0.5, device=self.device, ) self.nut_com_quat = self.nut_quat # always equal nut_velocities = self.nuts.get_velocities(clone=False) self.nut_linvel = nut_velocities[:, 0:3] self.nut_angvel = nut_velocities[:, 3:6] self.nut_com_linvel = self.nut_linvel + torch.cross( self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1 ) self.nut_com_angvel = self.nut_angvel # always equal self.nut_force = self.nuts.get_net_contact_forces(clone=False) # Bolt tensors self.bolt_pos, self.bolt_quat = self.bolts.get_world_poses(clone=False) self.bolt_pos -= self.env_pos self.bolt_force = self.bolts.get_net_contact_forces(clone=False)
14,709
Python
39.30137
110
0.603372
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/factory_control.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: control module. Imported by base, environment, and task classes. Not directly executed. """ import math import omni.isaac.core.utils.torch as torch_utils import torch def compute_dof_pos_target( cfg_ctrl, arm_dof_pos, fingertip_midpoint_pos, fingertip_midpoint_quat, jacobian, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, ctrl_target_gripper_dof_pos, device, ): """Compute Franka DOF position target to move fingertips towards target pose.""" ctrl_target_dof_pos = torch.zeros((cfg_ctrl["num_envs"], 9), device=device) pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) delta_arm_dof_pos = _get_delta_dof_pos( delta_pose=delta_fingertip_pose, ik_method=cfg_ctrl["ik_method"], jacobian=jacobian, device=device, ) ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints return ctrl_target_dof_pos def compute_dof_torque( cfg_ctrl, dof_pos, dof_vel, fingertip_midpoint_pos, fingertip_midpoint_quat, fingertip_midpoint_linvel, fingertip_midpoint_angvel, left_finger_force, right_finger_force, jacobian, arm_mass_matrix, ctrl_target_gripper_dof_pos, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, ctrl_target_fingertip_contact_wrench, device, ): """Compute Franka DOF torque to move fingertips towards target pose.""" # References: # 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # 2) Modern Robotics dof_torque = torch.zeros((cfg_ctrl["num_envs"], 9), device=device) if cfg_ctrl["gain_space"] == "joint": pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) # Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72) delta_arm_dof_pos = _get_delta_dof_pos( delta_pose=delta_fingertip_pose, ik_method=cfg_ctrl["ik_method"], jacobian=jacobian, device=device, ) dof_torque[:, 0:7] = cfg_ctrl[ "joint_prop_gains" ] * delta_arm_dof_pos + cfg_ctrl["joint_deriv_gains"] * (0.0 - dof_vel[:, 0:7]) if cfg_ctrl["do_inertial_comp"]: # Set tau = M * tau, where M is the joint-space mass matrix arm_mass_matrix_joint = arm_mass_matrix dof_torque[:, 0:7] = ( arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1) ).squeeze(-1) elif cfg_ctrl["gain_space"] == "task": task_wrench = torch.zeros((cfg_ctrl["num_envs"], 6), device=device) if cfg_ctrl["do_motion_ctrl"]: pos_error, axis_angle_error = get_pose_error( fingertip_midpoint_pos=fingertip_midpoint_pos, fingertip_midpoint_quat=fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat, jacobian_type=cfg_ctrl["jacobian_type"], rot_error_type="axis_angle", ) delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1) # Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98) task_wrench_motion = _apply_task_space_gains( delta_fingertip_pose=delta_fingertip_pose, fingertip_midpoint_linvel=fingertip_midpoint_linvel, fingertip_midpoint_angvel=fingertip_midpoint_angvel, task_prop_gains=cfg_ctrl["task_prop_gains"], task_deriv_gains=cfg_ctrl["task_deriv_gains"], ) if cfg_ctrl["do_inertial_comp"]: # Set tau = Lambda * tau, where Lambda is the task-space mass matrix jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) arm_mass_matrix_task = torch.inverse( jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T ) # ETH eq. 3.86; geometric Jacobian is assumed task_wrench_motion = ( arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1) ).squeeze(-1) task_wrench = ( task_wrench + cfg_ctrl["motion_ctrl_axes"] * task_wrench_motion ) if cfg_ctrl["do_force_ctrl"]: # Set tau = tau + F_t, where F_t is the target contact wrench task_wrench_force = torch.zeros((cfg_ctrl["num_envs"], 6), device=device) task_wrench_force = ( task_wrench_force + ctrl_target_fingertip_contact_wrench ) # open-loop force control (building towards ETH eq. 3.96-3.98) if cfg_ctrl["force_ctrl_method"] == "closed": force_error, torque_error = _get_wrench_error( left_finger_force=left_finger_force, right_finger_force=right_finger_force, ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench, num_envs=cfg_ctrl["num_envs"], device=device, ) # Set tau = tau + k_p * contact_wrench_error task_wrench_force = task_wrench_force + cfg_ctrl[ "wrench_prop_gains" ] * torch.cat( (force_error, torque_error), dim=1 ) # part of Modern Robotics eq. 11.61 task_wrench = ( task_wrench + torch.tensor(cfg_ctrl["force_ctrl_axes"], device=device).unsqueeze(0) * task_wrench_force ) # Set tau = J^T * tau, i.e., map tau into joint space as desired jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1) dof_torque[:, 7:9] = cfg_ctrl["gripper_prop_gains"] * ( ctrl_target_gripper_dof_pos - dof_pos[:, 7:9] ) + cfg_ctrl["gripper_deriv_gains"] * ( 0.0 - dof_vel[:, 7:9] ) # gripper finger joints dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0) return dof_torque def get_pose_error( fingertip_midpoint_pos, fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat, jacobian_type, rot_error_type, ): """Compute task-space error between target Franka fingertip pose and current pose.""" # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # Compute pos error pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos # Compute rot error if ( jacobian_type == "geometric" ): # See example 2.9.8; note use of J_g and transformation between rotation vectors # Compute quat error (i.e., difference quat) # Reference: https://personal.utdallas.edu/~sxb027100/dock/quat.html fingertip_midpoint_quat_norm = torch_utils.quat_mul( fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat) )[ :, 0 ] # scalar component fingertip_midpoint_quat_inv = torch_utils.quat_conjugate( fingertip_midpoint_quat ) / fingertip_midpoint_quat_norm.unsqueeze(-1) quat_error = torch_utils.quat_mul( ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv ) # Convert to axis-angle error axis_angle_error = axis_angle_from_quat(quat_error) elif ( jacobian_type == "analytic" ): # See example 2.9.7; note use of J_a and difference of rotation vectors # Compute axis-angle error axis_angle_error = axis_angle_from_quat( ctrl_target_fingertip_midpoint_quat ) - axis_angle_from_quat(fingertip_midpoint_quat) if rot_error_type == "quat": return pos_error, quat_error elif rot_error_type == "axis_angle": return pos_error, axis_angle_error def _get_wrench_error( left_finger_force, right_finger_force, ctrl_target_fingertip_contact_wrench, num_envs, device, ): """Compute task-space error between target Franka fingertip contact wrench and current wrench.""" fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device) fingertip_contact_wrench[:, 0:3] = ( left_finger_force + right_finger_force ) # net contact force on fingers # Cols 3 to 6 are all zeros, as we do not have enough information force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - ( -fingertip_contact_wrench[:, 0:3] ) torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - ( -fingertip_contact_wrench[:, 3:6] ) return force_error, torque_error def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device): """Get delta Franka DOF position from delta pose using specified IK method.""" # References: # 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf # 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47) if ik_method == "pinv": # Jacobian pseudoinverse k_val = 1.0 jacobian_pinv = torch.linalg.pinv(jacobian) delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == "trans": # Jacobian transpose k_val = 1.0 jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == "dls": # damped least squares (Levenberg-Marquardt) lambda_val = 0.1 jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) lambda_matrix = (lambda_val**2) * torch.eye( n=jacobian.shape[1], device=device ) delta_dof_pos = ( jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1) ) delta_dof_pos = delta_dof_pos.squeeze(-1) elif ik_method == "svd": # adaptive SVD k_val = 1.0 U, S, Vh = torch.linalg.svd(jacobian) S_inv = 1.0 / S min_singular_value = 1.0e-5 S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv)) jacobian_pinv = ( torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6] @ torch.diag_embed(S_inv) @ torch.transpose(U, dim0=1, dim1=2) ) delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_dof_pos = delta_dof_pos.squeeze(-1) return delta_dof_pos def _apply_task_space_gains( delta_fingertip_pose, fingertip_midpoint_linvel, fingertip_midpoint_angvel, task_prop_gains, task_deriv_gains, ): """Interpret PD gains as task-space gains. Apply to task-space error.""" task_wrench = torch.zeros_like(delta_fingertip_pose) # Apply gains to lin error components lin_error = delta_fingertip_pose[:, 0:3] task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + task_deriv_gains[ :, 0:3 ] * (0.0 - fingertip_midpoint_linvel) # Apply gains to rot error components rot_error = delta_fingertip_pose[:, 3:6] task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + task_deriv_gains[ :, 3:6 ] * (0.0 - fingertip_midpoint_angvel) return task_wrench def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device): """Convert geometric Jacobian to analytic Jacobian.""" # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf # NOTE: Gym returns world-space geometric Jacobians by default batch = num_envs # Overview: # x = [x_p; x_r] # From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot # From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv) # Eq. 2.12 gives an expression for E_p_inv # Eq. 2.107 gives an expression for E_r_inv # Compute E_inv_top (i.e., [E_p_inv, 0]) I = torch.eye(3, device=device) E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3) E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2) # Compute E_inv_bottom (i.e., [0, E_r_inv]) fingertip_axis_angle = axis_angle_from_quat(fingertip_quat) fingertip_axis_angle_cross = get_skew_symm_matrix( fingertip_axis_angle, device=device ) fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1) factor_1 = 1 / (fingertip_angle**2) factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / ( 1 - torch.cos(fingertip_angle) ) factor_3 = factor_1 * factor_2 E_r_inv = ( I - 1 * 0.5 * fingertip_axis_angle_cross + (fingertip_axis_angle_cross @ fingertip_axis_angle_cross) * factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3)) ) E_inv_bottom = torch.cat( (torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2 ) E_inv = torch.cat( (E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1 ).reshape((batch, 6, 6)) J_a = E_inv @ fingertip_jacobian return J_a def get_skew_symm_matrix(vec, device): """Convert vector to skew-symmetric matrix.""" # Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication batch = vec.shape[0] I = torch.eye(3, device=device) skew_symm = torch.transpose( torch.cross( vec.repeat((1, 3)).reshape((batch * 3, 3)), I.repeat((batch, 1)) ).reshape(batch, 3, 3), dim0=1, dim1=2, ) return skew_symm def translate_along_local_z(pos, quat, offset, device): """Translate global body position along local Z-axis and express in global coordinates.""" num_vecs = pos.shape[0] offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat( (num_vecs, 1) ) _, translated_pos = torch_utils.tf_combine( q1=quat, t1=pos, q2=torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat((num_vecs, 1)), t2=offset_vec, ) return translated_pos def axis_angle_from_euler(euler): """Convert tensor of Euler angles to tensor of axis-angles.""" quat = torch_utils.quat_from_euler_xyz( roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2] ) quat = quat * torch.sign(quat[:, 0]).unsqueeze(-1) # smaller rotation axis_angle = axis_angle_from_quat(quat) return axis_angle def axis_angle_from_quat(quat, eps=1.0e-6): """Convert tensor of quaternions to tensor of axis-angles.""" # Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544 mag = torch.linalg.norm(quat[:, 1:4], dim=1) half_angle = torch.atan2(mag, quat[:, 0]) angle = 2.0 * half_angle sin_half_angle_over_angle = torch.where( torch.abs(angle) > eps, torch.sin(half_angle) / angle, 1 / 2 - angle**2.0 / 48 ) axis_angle = quat[:, 1:4] / sin_half_angle_over_angle.unsqueeze(-1) return axis_angle def axis_angle_from_quat_naive(quat): """Convert tensor of quaternions to tensor of axis-angles.""" # Reference: https://en.wikipedia.org/wiki/quats_and_spatial_rotation#Recovering_the_axis-angle_representation # NOTE: Susceptible to undesirable behavior due to divide-by-zero mag = torch.linalg.vector_norm(quat[:, 1:4], dim=1) # zero when quat = [1, 0, 0, 0] axis = quat[:, 1:4] / mag.unsqueeze(-1) angle = 2.0 * torch.atan2(mag, quat[:, 0]) axis_angle = axis * angle.unsqueeze(-1) return axis_angle def get_rand_quat(num_quats, device): """Generate tensor of random quaternions.""" # Reference: http://planning.cs.uiuc.edu/node198.html u = torch.rand((num_quats, 3), device=device) quat = torch.zeros((num_quats, 4), device=device) quat[:, 0] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2]) quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1]) quat[:, 2] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1]) quat[:, 3] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2]) return quat def get_nonrand_quat(num_quats, rot_perturbation, device): """Generate tensor of non-random quaternions by composing random Euler rotations.""" quat = torch_utils.quat_from_euler_xyz( torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation, torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation, torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation, ) return quat
19,859
Python
37.864971
163
0.627574
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_nut_bolt.yaml
nut_bolt_m4: nut: usd_path: '/Isaac/Props/Factory/factory_nut_m4_tight/factory_nut_m4_tight.usd' width_min: 0.007 # distance from flat surface to flat surface width_max: 0.0080829 # distance from edge to edge height: 0.0032 # height of nut flat_length: 0.00404145 # length of flat surface bolt: usd_path: '/Isaac/Props/Factory/factory_bolt_m4_tight/factory_bolt_m4_tight.usd' width: 0.004 # major diameter of bolt head_height: 0.004 # height of bolt head shank_length: 0.016 # length of bolt shank thread_pitch: 0.0007 # distance between threads nut_bolt_m8: nut: usd_path: '/Isaac/Props/Factory/factory_nut_m8_tight/factory_nut_m8_tight.usd' width_min: 0.013 width_max: 0.01501111 height: 0.0065 flat_length: 0.00750555 bolt: usd_path: '/Isaac/Props/Factory/factory_bolt_m8_tight/factory_bolt_m8_tight.usd' width: 0.008 head_height: 0.008 shank_length: 0.018 thread_pitch: 0.00125 nut_bolt_m12: nut: usd_path: '/Isaac/Props/Factory/factory_nut_m12_tight/factory_nut_m12_tight.usd' width_min: 0.019 width_max: 0.02193931 height: 0.010 flat_length: 0.01096966 bolt: usd_path: '/Isaac/Props/Factory/factory_bolt_m12_tight/factory_bolt_m12_tight.usd' width: 0.012 head_height: 0.012 shank_length: 0.020 thread_pitch: 0.00175 nut_bolt_m16: nut: usd_path: '/Isaac/Props/Factory/factory_nut_m16_tight/factory_nut_m16_tight.usd' width_min: 0.024 width_max: 0.02771281 height: 0.013 flat_length: 0.01385641 bolt: usd_path: '/Isaac/Props/Factory/factory_bolt_m16_tight/factory_bolt_m16_tight.usd' width: 0.016 head_height: 0.016 shank_length: 0.025 thread_pitch: 0.002 nut_bolt_m20: nut: usd_path: '/Isaac/Props/Factory/factory_nut_m20_tight/factory_nut_m20_tight.usd' width_min: 0.030 width_max: 0.03464102 height: 0.016 flat_length: 0.01732051 bolt: usd_path: '/Isaac/Props/Factory/factory_bolt_m20_tight/factory_bolt_m20_tight.usd' width: 0.020 head_height: 0.020 shank_length: 0.045 thread_pitch: 0.0025
2,331
YAML
32.314285
90
0.617332
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_franka_table.yaml
franka_hand_length: 0.0584 # distance from origin of hand to origin of finger franka_finger_length: 0.053671 # distance from origin of finger to bottom of fingerpad franka_fingerpad_length: 0.017608 # distance from top of inner surface of fingerpad to bottom of inner surface of fingerpad franka_gripper_width_max: 0.080 # maximum opening width of gripper table_depth: 0.6 # depth of table table_width: 1.0 # width of table
431
YAML
52.999993
124
0.772622
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/utils/anymal_terrain_generator.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omniisaacgymenvs.utils.terrain_utils.terrain_utils import * # terrain generator class Terrain: def __init__(self, cfg, num_robots) -> None: self.horizontal_scale = 0.1 self.vertical_scale = 0.005 self.border_size = 20 self.num_per_env = 2 self.env_length = cfg["mapLength"] self.env_width = cfg["mapWidth"] self.proportions = [np.sum(cfg["terrainProportions"][: i + 1]) for i in range(len(cfg["terrainProportions"]))] self.env_rows = cfg["numLevels"] self.env_cols = cfg["numTerrains"] self.num_maps = self.env_rows * self.env_cols self.num_per_env = int(num_robots / self.num_maps) self.env_origins = np.zeros((self.env_rows, self.env_cols, 3)) self.width_per_env_pixels = int(self.env_width / self.horizontal_scale) self.length_per_env_pixels = int(self.env_length / self.horizontal_scale) self.border = int(self.border_size / self.horizontal_scale) self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border self.height_field_raw = np.zeros((self.tot_rows, self.tot_cols), dtype=np.int16) if cfg["curriculum"]: self.curiculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows) else: self.randomized_terrain() self.heightsamples = self.height_field_raw self.vertices, self.triangles = convert_heightfield_to_trimesh( self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"] ) def randomized_terrain(self): for k in range(self.num_maps): # Env coordinates in the world (i, j) = np.unravel_index(k, (self.env_rows, self.env_cols)) # Heightfield coordinate system from now on start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels terrain = SubTerrain( "terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale, ) choice = np.random.uniform(0, 1) if choice < 0.1: if np.random.choice([0, 1]): pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2) else: pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) elif choice < 0.6: # step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18]) step_height = np.random.choice([-0.15, 0.15]) pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0) elif choice < 1.0: discrete_obstacles_terrain(terrain, 0.15, 1.0, 2.0, 40, platform_size=3.0) self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale) x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale) y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale) y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z] def curiculum(self, num_robots, num_terrains, num_levels): num_robots_per_map = int(num_robots / num_terrains) left_over = num_robots % num_terrains idx = 0 for j in range(num_terrains): for i in range(num_levels): terrain = SubTerrain( "terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale, ) difficulty = i / num_levels choice = j / num_terrains slope = difficulty * 0.4 step_height = 0.05 + 0.175 * difficulty discrete_obstacles_height = 0.025 + difficulty * 0.15 stepping_stones_size = 2 - 1.8 * difficulty if choice < self.proportions[0]: if choice < 0.05: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0) elif choice < self.proportions[1]: if choice < 0.15: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0) random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2) elif choice < self.proportions[3]: if choice < self.proportions[2]: step_height *= -1 pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0) elif choice < self.proportions[4]: discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1.0, 2.0, 40, platform_size=3.0) else: stepping_stones_terrain( terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0.0, platform_size=3.0 ) # Heightfield coordinate system start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw robots_in_map = num_robots_per_map if j < left_over: robots_in_map += 1 env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale) x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale) y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale) y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
8,852
Python
50.47093
119
0.591618
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/utils/usd_utils.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from pxr import UsdLux, UsdPhysics def set_drive_type(prim_path, drive_type): joint_prim = get_prim_at_path(prim_path) # set drive type ("angular" or "linear") drive = UsdPhysics.DriveAPI.Apply(joint_prim, drive_type) return drive def set_drive_target_position(drive, target_value): if not drive.GetTargetPositionAttr(): drive.CreateTargetPositionAttr(target_value) else: drive.GetTargetPositionAttr().Set(target_value) def set_drive_target_velocity(drive, target_value): if not drive.GetTargetVelocityAttr(): drive.CreateTargetVelocityAttr(target_value) else: drive.GetTargetVelocityAttr().Set(target_value) def set_drive_stiffness(drive, stiffness): if not drive.GetStiffnessAttr(): drive.CreateStiffnessAttr(stiffness) else: drive.GetStiffnessAttr().Set(stiffness) def set_drive_damping(drive, damping): if not drive.GetDampingAttr(): drive.CreateDampingAttr(damping) else: drive.GetDampingAttr().Set(damping) def set_drive_max_force(drive, max_force): if not drive.GetMaxForceAttr(): drive.CreateMaxForceAttr(max_force) else: drive.GetMaxForceAttr().Set(max_force) def set_drive(prim_path, drive_type, target_type, target_value, stiffness, damping, max_force) -> None: drive = set_drive_type(prim_path, drive_type) # set target type ("position" or "velocity") if target_type == "position": set_drive_target_position(drive, target_value) elif target_type == "velocity": set_drive_target_velocity(drive, target_value) set_drive_stiffness(drive, stiffness) set_drive_damping(drive, damping) set_drive_max_force(drive, max_force)
3,403
Python
36.406593
103
0.740229
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/shared/reacher.py
# Copyright (c) 2018-2022, NVIDIA Corporation # Copyright (c) 2022-2023, Johnson Sun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Ref: /omniisaacgymenvs/tasks/shared/reacher.py import math from abc import abstractmethod import numpy as np import torch from omni.isaac.core.prims import RigidPrimView, XFormPrim from omni.isaac.core.scenes.scene import Scene from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage from omni.isaac.core.utils.torch import * from omniisaacgymenvs.tasks.base.rl_task import RLTask # `scale` maps [-1, 1] to [L, U]; `unscale` maps [L, U] to [-1, 1] from omni.isaac.core.utils.torch import scale, unscale from omni.isaac.gym.vec_env import VecEnvBase class ReacherTask(RLTask): def __init__( self, name: str, env: VecEnvBase, offset=None ) -> None: ReacherTask.update_config(self) RLTask.__init__(self, name, env) self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = torch.tensor(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 def update_config(self): self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.success_tolerance = self._task_cfg["env"]["successTolerance"] self.reach_goal_bonus = self._task_cfg["env"]["reachGoalBonus"] self.rot_eps = self._task_cfg["env"]["rotEps"] self.vel_obs_scale = self._task_cfg["env"]["velObsScale"] self.reset_position_noise = self._task_cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self._task_cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self._task_cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self._task_cfg["env"]["resetDofVelRandomInterval"] self.arm_dof_speed_scale = self._task_cfg["env"]["dofSpeedScale"] self.use_relative_control = self._task_cfg["env"]["useRelativeControl"] self.act_moving_average = self._task_cfg["env"]["actionsMovingAverage"] self.max_episode_length = self._task_cfg["env"]["episodeLength"] self.reset_time = self._task_cfg["env"].get("resetTime", -1.0) self.print_success_stat = self._task_cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self._task_cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self._task_cfg["env"].get("averFactor", 0.1) self.dt = 1.0 / 60 control_freq_inv = self._task_cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) def set_up_scene(self, scene: Scene) -> None: self._stage = get_current_stage() self._assets_root_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0' self.get_arm() self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device) self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.get_object() self.get_goal() super().set_up_scene(scene) self._arms = self.get_arm_view(scene) scene.add(self._arms) self._objects = RigidPrimView( prim_paths_expr="/World/envs/env_.*/object/object", name="object_view", reset_xform_properties=False, ) self._objects._non_root_link = True # hack to ignore kinematics scene.add(self._objects) self._goals = RigidPrimView( prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False ) self._goals._non_root_link = True # hack to ignore kinematics scene.add(self._goals) def initialize_views(self, scene): RLTask.initialize_views(self, scene) if scene.object_exists("dofbot_view"): scene.remove_object("dofbot_view", registry_only=True) if scene.object_exists("ur10_view"): scene.remove_object("ur10_view", registry_only=True) if scene.object_exists("kuka_view"): scene.remove_object("kuka_view", registry_only=True) if scene.object_exists("hiwin_view"): scene.remove_object("hiwin_view", registry_only=True) if scene.object_exists("goal_view"): scene.remove_object("goal_view", registry_only=True) if scene.object_exists("object_view"): scene.remove_object("object_view", registry_only=True) self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device) self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self._arms = self.get_arm_view(scene) scene.add(self._arms) self._objects = RigidPrimView( prim_paths_expr="/World/envs/env_.*/object/object", name="object_view", reset_xform_properties=False, ) self._objects._non_root_link = True # hack to ignore kinematics scene.add(self._objects) self._goals = RigidPrimView( prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False ) self._goals._non_root_link = True # hack to ignore kinematics scene.add(self._goals) @abstractmethod def get_num_dof(self): pass @abstractmethod def get_arm(self): pass @abstractmethod def get_arm_view(self): pass @abstractmethod def get_observations(self): pass @abstractmethod def get_reset_target_new_pos(self, n_reset_envs): pass @abstractmethod def send_joint_pos(self, joint_pos): pass def get_object(self): self.object_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd" add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/object") obj = XFormPrim( prim_path=self.default_zero_env_path + "/object/object", name="object", translation=self.object_start_translation, orientation=self.object_start_orientation, scale=self.object_scale, ) self._sim_config.apply_articulation_settings( "object", get_prim_at_path(obj.prim_path), self._sim_config.parse_actor_config("object") ) def get_goal(self): self.goal_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd" add_reference_to_stage(self.goal_usd_path, self.default_zero_env_path + "/goal") goal = XFormPrim( prim_path=self.default_zero_env_path + "/goal/object", name="goal", translation=self.goal_start_translation, orientation=self.goal_start_orientation, scale=self.goal_scale ) self._sim_config.apply_articulation_settings("goal", get_prim_at_path(goal.prim_path), self._sim_config.parse_actor_config("goal_object")) def post_reset(self): self.num_arm_dofs = self.get_num_dof() self.actuated_dof_indices = torch.arange(self.num_arm_dofs, dtype=torch.long, device=self.device) self.arm_dof_targets = torch.zeros((self.num_envs, self._arms.num_dof), dtype=torch.float, device=self.device) self.prev_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device) dof_limits = self._dof_limits[:, :self.num_arm_dofs] self.arm_dof_lower_limits, self.arm_dof_upper_limits = torch.t(dof_limits[0].to(self.device)) self.arm_dof_default_pos = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device) self.arm_dof_default_vel = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device) self.end_effectors_init_pos, self.end_effectors_init_rot = self._arms._end_effectors.get_world_poses() self.goal_pos, self.goal_rot = self._goals.get_world_poses() self.goal_pos -= self._env_pos # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self): self.fall_dist = 0 self.fall_penalty = 0 ( self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:], ) = compute_arm_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, ) self.extras["consecutive_successes"] = self.consecutive_successes.mean() if self.print_success_stat: self.total_resets = self.total_resets + self.reset_buf.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() # The direct average shows the overall result more quickly, but slightly undershoots long term policy performance. print( "Direct average consecutive successes = {:.1f}".format( direct_average_successes / (self.total_resets + self.num_envs) ) ) if self.total_resets > 0: print( "Post-Reset average consecutive successes = {:.1f}".format(self.total_successes / self.total_resets) ) def pre_physics_step(self, actions): if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) end_effectors_pos, end_effectors_rot = self._arms._end_effectors.get_world_poses() # Reverse the default rotation and rotate the displacement tensor according to the current rotation self.object_pos = end_effectors_pos + quat_rotate(end_effectors_rot, quat_rotate_inverse(self.end_effectors_init_rot, self.get_object_displacement_tensor())) self.object_pos -= self._env_pos # subtract world env pos self.object_rot = end_effectors_rot object_pos = self.object_pos + self._env_pos object_rot = self.object_rot self._objects.set_world_poses(object_pos, object_rot) # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids) == 0: self.reset_target_pose(goal_env_ids) elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # Reacher tasks don't require gripper actions, disable it. self.actions[:, 5] = 0.0 if self.use_relative_control: targets = ( self.prev_targets[:, self.actuated_dof_indices] + self.arm_dof_speed_scale * self.dt * self.actions ) self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp( targets, self.arm_dof_lower_limits[self.actuated_dof_indices], self.arm_dof_upper_limits[self.actuated_dof_indices], ) else: self.cur_targets[:, self.actuated_dof_indices] = scale( self.actions[:, :self.num_arm_dofs], self.arm_dof_lower_limits[self.actuated_dof_indices], self.arm_dof_upper_limits[self.actuated_dof_indices], ) self.cur_targets[:, self.actuated_dof_indices] = ( self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] ) self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp( self.cur_targets[:, self.actuated_dof_indices], self.arm_dof_lower_limits[self.actuated_dof_indices], self.arm_dof_upper_limits[self.actuated_dof_indices], ) self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] self._arms.set_joint_position_targets( self.cur_targets[:, self.actuated_dof_indices], indices=None, joint_indices=self.actuated_dof_indices ) if self._task_cfg['sim2real']['enabled'] and self.test and self.num_envs == 1: # Only retrieve the 0-th joint position even when multiple envs are used cur_joint_pos = self._arms.get_joint_positions(indices=[0], joint_indices=self.actuated_dof_indices) # Send the current joint positions to the real robot joint_pos = cur_joint_pos[0] if torch.any(joint_pos < self.arm_dof_lower_limits) or torch.any(joint_pos > self.arm_dof_upper_limits): print("get_joint_positions out of bound, send_joint_pos skipped") else: self.send_joint_pos(joint_pos) def is_done(self): pass def reset_target_pose(self, env_ids): # reset goal indices = env_ids.to(dtype=torch.int32) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) new_pos = self.get_reset_target_new_pos(len(env_ids)) new_rot = randomize_rotation( rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] ) self.goal_pos[env_ids] = new_pos self.goal_rot[env_ids] = new_rot goal_pos, goal_rot = self.goal_pos.clone(), self.goal_rot.clone() goal_pos[env_ids] = ( self.goal_pos[env_ids] + self._env_pos[env_ids] ) # add world env pos self._goals.set_world_poses(goal_pos[env_ids], goal_rot[env_ids], indices) self.reset_goal_buf[env_ids] = 0 def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_arm_dofs * 2 + 5), device=self.device) self.reset_target_pose(env_ids) # reset arm delta_max = self.arm_dof_upper_limits - self.arm_dof_default_pos delta_min = self.arm_dof_lower_limits - self.arm_dof_default_pos rand_delta = delta_min + (delta_max - delta_min) * (rand_floats[:, 5:5+self.num_arm_dofs] + 1.0) * 0.5 pos = self.arm_dof_default_pos + self.reset_dof_pos_noise * rand_delta dof_pos = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device) dof_pos[env_ids, :self.num_arm_dofs] = pos dof_vel = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device) dof_vel[env_ids, :self.num_arm_dofs] = self.arm_dof_default_vel + \ self.reset_dof_vel_noise * rand_floats[:, 5+self.num_arm_dofs:5+self.num_arm_dofs*2] self.prev_targets[env_ids, :self.num_arm_dofs] = pos self.cur_targets[env_ids, :self.num_arm_dofs] = pos self.arm_dof_targets[env_ids, :self.num_arm_dofs] = pos self._arms.set_joint_position_targets(self.arm_dof_targets[env_ids], indices) # set_joint_positions doesn't seem to apply immediately. self._arms.set_joint_positions(dof_pos[env_ids], indices) self._arms.set_joint_velocities(dof_vel[env_ids], indices) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.successes[env_ids] = 0 ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul( quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor) ) @torch.jit.script def compute_arm_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, success_tolerance: float, reach_goal_bonus: float, fall_dist: float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ): goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin( torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0) ) # changed quat convention dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = torch.sum(actions**2, dim=-1) # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + action_penalty * action_penalty_scale # Find out which envs hit the goal and update successes count goal_resets = torch.where(torch.abs(goal_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward) resets = reset_buf if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where( torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf ) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets) num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where( num_resets > 0, av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes, consecutive_successes, ) return reward, resets, goal_resets, progress_buf, successes, cons_successes
22,312
Python
42.836935
165
0.629482
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/shared/in_hand_manipulation.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math from abc import abstractmethod import numpy as np import torch from omni.isaac.core.prims import RigidPrimView, XFormPrim from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage from omni.isaac.core.utils.torch import * from omniisaacgymenvs.tasks.base.rl_task import RLTask class InHandManipulationTask(RLTask): def __init__(self, name, env, offset=None) -> None: InHandManipulationTask.update_config(self) RLTask.__init__(self, name, env) self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.randomization_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device) self.av_factor = torch.tensor(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 def update_config(self): self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.success_tolerance = self._task_cfg["env"]["successTolerance"] self.reach_goal_bonus = self._task_cfg["env"]["reachGoalBonus"] self.fall_dist = self._task_cfg["env"]["fallDistance"] self.fall_penalty = self._task_cfg["env"]["fallPenalty"] self.rot_eps = self._task_cfg["env"]["rotEps"] self.vel_obs_scale = self._task_cfg["env"]["velObsScale"] self.reset_position_noise = self._task_cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self._task_cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self._task_cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self._task_cfg["env"]["resetDofVelRandomInterval"] self.hand_dof_speed_scale = self._task_cfg["env"]["dofSpeedScale"] self.use_relative_control = self._task_cfg["env"]["useRelativeControl"] self.act_moving_average = self._task_cfg["env"]["actionsMovingAverage"] self.max_episode_length = self._task_cfg["env"]["episodeLength"] self.reset_time = self._task_cfg["env"].get("resetTime", -1.0) self.print_success_stat = self._task_cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self._task_cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self._task_cfg["env"].get("averFactor", 0.1) self.dt = 1.0 / 60 control_freq_inv = self._task_cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) def set_up_scene(self, scene) -> None: self._stage = get_current_stage() self._assets_root_path = get_assets_root_path() self.get_starting_positions() self.get_hand() self.object_start_translation = self.hand_start_translation.clone() self.object_start_translation[1] += self.pose_dy self.object_start_translation[2] += self.pose_dz self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.goal_displacement_tensor = torch.tensor([-0.2, -0.06, 0.12], device=self.device) self.goal_start_translation = self.object_start_translation + self.goal_displacement_tensor self.goal_start_translation[2] -= 0.04 self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.get_object(self.hand_start_translation, self.pose_dy, self.pose_dz) self.get_goal() super().set_up_scene(scene, filter_collisions=False) self._hands = self.get_hand_view(scene) scene.add(self._hands) self._objects = RigidPrimView( prim_paths_expr="/World/envs/env_.*/object/object", name="object_view", reset_xform_properties=False, masses=torch.tensor([0.07087] * self._num_envs, device=self.device), ) scene.add(self._objects) self._goals = RigidPrimView( prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False ) self._goals._non_root_link = True # hack to ignore kinematics scene.add(self._goals) if self._dr_randomizer.randomize: self._dr_randomizer.apply_on_startup_domain_randomization(self) def initialize_views(self, scene): RLTask.initialize_views(self, scene) if scene.object_exists("shadow_hand_view"): scene.remove_object("shadow_hand_view", registry_only=True) if scene.object_exists("finger_view"): scene.remove_object("finger_view", registry_only=True) if scene.object_exists("allegro_hand_view"): scene.remove_object("allegro_hand_view", registry_only=True) if scene.object_exists("goal_view"): scene.remove_object("goal_view", registry_only=True) if scene.object_exists("object_view"): scene.remove_object("object_view", registry_only=True) self.get_starting_positions() self.object_start_translation = self.hand_start_translation.clone() self.object_start_translation[1] += self.pose_dy self.object_start_translation[2] += self.pose_dz self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self.goal_displacement_tensor = torch.tensor([-0.2, -0.06, 0.12], device=self.device) self.goal_start_translation = self.object_start_translation + self.goal_displacement_tensor self.goal_start_translation[2] -= 0.04 self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device) self._hands = self.get_hand_view(scene) scene.add(self._hands) self._objects = RigidPrimView( prim_paths_expr="/World/envs/env_.*/object/object", name="object_view", reset_xform_properties=False, masses=torch.tensor([0.07087] * self._num_envs, device=self.device), ) scene.add(self._objects) self._goals = RigidPrimView( prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False ) self._goals._non_root_link = True # hack to ignore kinematics scene.add(self._goals) if self._dr_randomizer.randomize: self._dr_randomizer.apply_on_startup_domain_randomization(self) @abstractmethod def get_hand(self): pass @abstractmethod def get_hand_view(self): pass @abstractmethod def get_observations(self): pass def get_object(self, hand_start_translation, pose_dy, pose_dz): self.object_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd" add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/object") obj = XFormPrim( prim_path=self.default_zero_env_path + "/object/object", name="object", translation=self.object_start_translation, orientation=self.object_start_orientation, scale=self.object_scale, ) self._sim_config.apply_articulation_settings( "object", get_prim_at_path(obj.prim_path), self._sim_config.parse_actor_config("object") ) def get_goal(self): add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/goal") goal = XFormPrim( prim_path=self.default_zero_env_path + "/goal", name="goal", translation=self.goal_start_translation, orientation=self.goal_start_orientation, scale=self.object_scale, ) self._sim_config.apply_articulation_settings( "goal", get_prim_at_path(goal.prim_path), self._sim_config.parse_actor_config("goal_object") ) def post_reset(self): self.num_hand_dofs = self._hands.num_dof self.actuated_dof_indices = self._hands.actuated_dof_indices self.hand_dof_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) dof_limits = self._hands.get_dof_limits() self.hand_dof_lower_limits, self.hand_dof_upper_limits = torch.t(dof_limits[0].to(self.device)) self.hand_dof_default_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device) self.hand_dof_default_vel = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device) self.object_init_pos, self.object_init_rot = self._objects.get_world_poses() self.object_init_pos -= self._env_pos self.object_init_velocities = torch.zeros_like( self._objects.get_velocities(), dtype=torch.float, device=self.device ) self.goal_pos = self.object_init_pos.clone() self.goal_pos[:, 2] -= 0.04 self.goal_rot = self.object_init_rot.clone() self.goal_init_pos = self.goal_pos.clone() self.goal_init_rot = self.goal_rot.clone() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) if self._dr_randomizer.randomize: self._dr_randomizer.set_up_domain_randomization(self) def get_object_goal_observations(self): self.object_pos, self.object_rot = self._objects.get_world_poses(clone=False) self.object_pos -= self._env_pos self.object_velocities = self._objects.get_velocities(clone=False) self.object_linvel = self.object_velocities[:, 0:3] self.object_angvel = self.object_velocities[:, 3:6] def calculate_metrics(self): ( self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:], ) = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, ) self.extras["consecutive_successes"] = self.consecutive_successes.mean() self.randomization_buf += 1 if self.print_success_stat: self.total_resets = self.total_resets + self.reset_buf.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() # The direct average shows the overall result more quickly, but slightly undershoots long term policy performance. print( "Direct average consecutive successes = {:.1f}".format( direct_average_successes / (self.total_resets + self.num_envs) ) ) if self.total_resets > 0: print( "Post-Reset average consecutive successes = {:.1f}".format(self.total_successes / self.total_resets) ) def pre_physics_step(self, actions): if not self._env._world.is_playing(): return env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) reset_buf = self.reset_buf.clone() # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids) == 0: self.reset_target_pose(goal_env_ids) elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) if self.use_relative_control: targets = ( self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * self.actions ) self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp( targets, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices], ) else: self.cur_targets[:, self.actuated_dof_indices] = scale( self.actions, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices], ) self.cur_targets[:, self.actuated_dof_indices] = ( self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] ) self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp( self.cur_targets[:, self.actuated_dof_indices], self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices], ) self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] self._hands.set_joint_position_targets( self.cur_targets[:, self.actuated_dof_indices], indices=None, joint_indices=self.actuated_dof_indices ) if self._dr_randomizer.randomize: rand_envs = torch.where( self.randomization_buf >= self._dr_randomizer.min_frequency, torch.ones_like(self.randomization_buf), torch.zeros_like(self.randomization_buf), ) rand_env_ids = torch.nonzero(torch.logical_and(rand_envs, reset_buf)) self.dr.physics_view.step_randomization(rand_env_ids) self.randomization_buf[rand_env_ids] = 0 def is_done(self): pass def reset_target_pose(self, env_ids): # reset goal indices = env_ids.to(dtype=torch.int32) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) new_rot = randomize_rotation( rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] ) self.goal_pos[env_ids] = self.goal_init_pos[env_ids, 0:3] self.goal_rot[env_ids] = new_rot goal_pos, goal_rot = self.goal_pos.clone(), self.goal_rot.clone() goal_pos[env_ids] = ( self.goal_pos[env_ids] + self.goal_displacement_tensor + self._env_pos[env_ids] ) # add world env pos self._goals.set_world_poses(goal_pos[env_ids], goal_rot[env_ids], indices) self.reset_goal_buf[env_ids] = 0 def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device) self.reset_target_pose(env_ids) # reset object new_object_pos = ( self.object_init_pos[env_ids] + self.reset_position_noise * rand_floats[:, 0:3] + self._env_pos[env_ids] ) # add world env pos new_object_rot = randomize_rotation( rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] ) object_velocities = torch.zeros_like(self.object_init_velocities, dtype=torch.float, device=self.device) self._objects.set_velocities(object_velocities[env_ids], indices) self._objects.set_world_poses(new_object_pos, new_object_rot, indices) # reset hand delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5 : 5 + self.num_hand_dofs] + 1.0) pos = self.hand_dof_default_pos + self.reset_dof_pos_noise * rand_delta dof_pos = torch.zeros((self.num_envs, self.num_hand_dofs), device=self.device) dof_pos[env_ids, :] = pos dof_vel = torch.zeros((self.num_envs, self.num_hand_dofs), device=self.device) dof_vel[env_ids, :] = ( self.hand_dof_default_vel + self.reset_dof_vel_noise * rand_floats[:, 5 + self.num_hand_dofs : 5 + self.num_hand_dofs * 2] ) self.prev_targets[env_ids, : self.num_hand_dofs] = pos self.cur_targets[env_ids, : self.num_hand_dofs] = pos self.hand_dof_targets[env_ids, :] = pos self._hands.set_joint_position_targets(self.hand_dof_targets[env_ids], indices) self._hands.set_joint_positions(dof_pos[env_ids], indices) self._hands.set_joint_velocities(dof_vel[env_ids], indices) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.successes[env_ids] = 0 ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul( quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor) ) @torch.jit.script def compute_hand_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, success_tolerance: float, reach_goal_bonus: float, fall_dist: float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ): goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin( torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0) ) # changed quat convention dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = torch.sum(actions**2, dim=-1) # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + rot_rew + action_penalty * action_penalty_scale # Find out which envs hit the goal and update successes count goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward) # Fall penalty: distance to the goal is larger than a threashold reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward) # Check env termination conditions, including maximum success number resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf) if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where( torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf ) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets) # Apply penalty for not reaching the goal if max_consecutive_successes > 0: reward = torch.where(progress_buf >= max_episode_length - 1, reward + 0.5 * fall_penalty, reward) num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where( num_resets > 0, av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes, consecutive_successes, ) return reward, resets, goal_resets, progress_buf, successes, cons_successes
23,472
Python
43.12218
126
0.630624
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/tasks/shared/locomotion.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math from abc import abstractmethod import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate from omniisaacgymenvs.tasks.base.rl_task import RLTask class LocomotionTask(RLTask): def __init__(self, name, env, offset=None) -> None: LocomotionTask.update_config(self) RLTask.__init__(self, name, env) return def update_config(self): self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"] self.contact_force_scale = self._task_cfg["env"]["contactForceScale"] self.power_scale = self._task_cfg["env"]["powerScale"] self.heading_weight = self._task_cfg["env"]["headingWeight"] self.up_weight = self._task_cfg["env"]["upWeight"] self.actions_cost_scale = self._task_cfg["env"]["actionsCost"] self.energy_cost_scale = self._task_cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"] self.death_cost = self._task_cfg["env"]["deathCost"] self.termination_height = self._task_cfg["env"]["terminationHeight"] self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"] @abstractmethod def set_up_scene(self, scene) -> None: pass @abstractmethod def get_robot(self): pass def get_observations(self) -> dict: torso_position, torso_rotation = self._robots.get_world_poses(clone=False) velocities = self._robots.get_velocities(clone=False) velocity = velocities[:, 0:3] ang_velocity = velocities[:, 3:6] dof_pos = self._robots.get_joint_positions(clone=False) dof_vel = self._robots.get_joint_velocities(clone=False) # force sensors attached to the feet sensor_force_torques = self._robots.get_measured_joint_forces(joint_indices=self._sensor_indices) ( self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:], ) = get_observations( torso_position, torso_rotation, velocity, ang_velocity, dof_pos, dof_vel, self.targets, self.potentials, self.dt, self.inv_start_rot, self.basis_vec0, self.basis_vec1, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, sensor_force_torques, self._num_envs, self.contact_force_scale, self.actions, self.angular_velocity_scale, ) observations = {self._robots.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) forces = self.actions * self.joint_gears * self.power_scale indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device) # applies joint torques self._robots.set_joint_efforts(forces, indices=indices) def reset_idx(self, env_ids): num_resets = len(env_ids) # randomize DOF positions and velocities dof_pos = torch_rand_float(-0.2, 0.2, (num_resets, self._robots.num_dof), device=self._device) dof_pos[:] = tensor_clamp(self.initial_dof_pos[env_ids] + dof_pos, self.dof_limits_lower, self.dof_limits_upper) dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, self._robots.num_dof), device=self._device) root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids] root_vel = torch.zeros((num_resets, 6), device=self._device) # apply resets self._robots.set_joint_positions(dof_pos, indices=env_ids) self._robots.set_joint_velocities(dof_vel, indices=env_ids) self._robots.set_world_poses(root_pos, root_rot, indices=env_ids) self._robots.set_velocities(root_vel, indices=env_ids) to_target = self.targets[env_ids] - self.initial_root_pos[env_ids] to_target[:, 2] = 0.0 self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt self.potentials[env_ids] = self.prev_potentials[env_ids].clone() # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 num_resets = len(env_ids) def post_reset(self): self._robots = self.get_robot() self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() self.initial_dof_pos = self._robots.get_joint_positions() # initialize some data used later on self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32) self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1)) self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1)) self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1)) self.basis_vec0 = self.heading_vec.clone() self.basis_vec1 = self.up_vec.clone() self.targets = torch.tensor([1000, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1)) self.target_dirs = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1)) self.dt = 1.0 / 60.0 self.potentials = torch.tensor([-1000.0 / self.dt], dtype=torch.float32, device=self._device).repeat( self.num_envs ) self.prev_potentials = self.potentials.clone() self.actions = torch.zeros((self.num_envs, self.num_actions), device=self._device) # randomize all envs indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: self.rew_buf[:] = calculate_metrics( self.obs_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.termination_height, self.death_cost, self._robots.num_dof, self.get_dof_at_limit_cost(), self.alive_reward_scale, self.motor_effort_ratio, ) def is_done(self) -> None: self.reset_buf[:] = is_done( self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length ) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def normalize_angle(x): return torch.atan2(torch.sin(x), torch.cos(x)) @torch.jit.script def get_observations( torso_position, torso_rotation, velocity, ang_velocity, dof_pos, dof_vel, targets, potentials, dt, inv_start_rot, basis_vec0, basis_vec1, dof_limits_lower, dof_limits_upper, dof_vel_scale, sensor_force_torques, num_envs, contact_force_scale, actions, angular_velocity_scale, ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, int, float, Tensor, float) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] to_target = targets - torso_position to_target[:, 2] = 0.0 prev_potentials = potentials.clone() potentials = -torch.norm(to_target, p=2, dim=-1) / dt torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up( torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2 ) vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot( torso_quat, velocity, ang_velocity, targets, torso_position ) dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper) # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs obs = torch.cat( ( torso_position[:, 2].view(-1, 1), vel_loc, angvel_loc * angular_velocity_scale, normalize_angle(yaw).unsqueeze(-1), normalize_angle(roll).unsqueeze(-1), normalize_angle(angle_to_target).unsqueeze(-1), up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled, dof_vel * dof_vel_scale, sensor_force_torques.reshape(num_envs, -1) * contact_force_scale, actions, ), dim=-1, ) return obs, potentials, prev_potentials, up_vec, heading_vec @torch.jit.script def is_done(obs_buf, termination_height, reset_buf, progress_buf, max_episode_length): # type: (Tensor, float, Tensor, Tensor, float) -> Tensor reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return reset @torch.jit.script def calculate_metrics( obs_buf, actions, up_weight, heading_weight, potentials, prev_potentials, actions_cost_scale, energy_cost_scale, termination_height, death_cost, num_dof, dof_at_limit_cost, alive_reward_scale, motor_effort_ratio, ): # type: (Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, int, Tensor, float, Tensor) -> Tensor heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8) # aligning up axis of robot and environment up_reward = torch.zeros_like(heading_reward) up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward) # energy penalty for movement actions_cost = torch.sum(actions**2, dim=-1) electricity_cost = torch.sum( torch.abs(actions * obs_buf[:, 12 + num_dof : 12 + num_dof * 2]) * motor_effort_ratio.unsqueeze(0), dim=-1 ) # reward for duration of staying alive alive_reward = torch.ones_like(potentials) * alive_reward_scale progress_reward = potentials - prev_potentials total_reward = ( progress_reward + alive_reward + up_reward + heading_reward - actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost ) # adjust reward for fallen agents total_reward = torch.where( obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward ) return total_reward
13,249
Python
37.294798
214
0.628802
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/config.yaml
# Task name - used to pick the class to load task_name: ${task.name} # experiment name. defaults to name of training config experiment: '' # if set to positive integer, overrides the default number of environments num_envs: '' # seed - set to -1 to choose random seed seed: 42 # set to True for deterministic performance torch_deterministic: False # set the maximum number of learning iterations to train for. overrides default per-environment setting max_iterations: '' ## Device config physics_engine: 'physx' # whether to use cpu or gpu pipeline pipeline: 'gpu' # whether to use cpu or gpu physx sim_device: 'gpu' # used for gpu simulation only - device id for running sim and task if pipeline=gpu device_id: 0 # device to run RL rl_device: 'cuda:0' # multi-GPU training multi_gpu: False ## PhysX arguments num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only. solver_type: 1 # 0: pgs, 1: tgs # RLGames Arguments # test - if set, run policy in inference mode (requires setting checkpoint to load) test: False # used to set checkpoint path checkpoint: '' # evaluate checkpoint evaluation: False # disables rendering headless: False # enables native livestream enable_livestream: False # timeout for MT script mt_timeout: 90 wandb_activate: False wandb_group: '' wandb_name: ${train.params.config.name} wandb_entity: '' wandb_project: 'omniisaacgymenvs' # path to a kit app file kit_app: '' # Warp warp: False # set default task and default training config based on task defaults: - _self_ - task: Cartpole - train: ${task}PPO - override hydra/job_logging: disabled # set the directory where the output files get saved hydra: output_subdir: null run: dir: . use_urdf: False
1,739
YAML
21.894737
103
0.738355
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/KukaKR120R2500ProReacher.yaml
# Ref: /omniisaacgymenvs/cfg/task/ShadowHand.yaml # used to create the object name: KukaKR120R2500ProReacher physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:2048,${...num_envs}} envSpacing: 6 episodeLength: 600 clipObservations: 5.0 clipActions: 1.0 useRelativeControl: False dofSpeedScale: 20.0 actionsMovingAverage: 1.0 controlFrequencyInv: 2 # 60 Hz startPositionNoise: 0.01 startRotationNoise: 0.0 resetPositionNoise: 0.01 resetRotationNoise: 0.0 resetDofPosRandomInterval: 0.2 resetDofVelRandomInterval: 0.0 # Random forces applied to the object forceScale: 0.0 forceProbRange: [0.001, 0.1] forceDecay: 0.99 forceDecayInterval: 0.08 # reward -> dictionary distRewardScale: -2.0 rotRewardScale: 1.0 rotEps: 0.1 actionPenaltyScale: -0.0002 reachGoalBonus: 250 velObsScale: 0.2 observationType: "full" # can only be "full" successTolerance: 0.1 printNumSuccesses: False maxConsecutiveSuccesses: 0 useURDF: ${resolve_default:True,${...use_urdf}} sim: dt: 0.0083 # 1/120 s add_ground_plane: True add_distant_light: False use_gpu_pipeline: ${eq:${...pipeline},"gpu"} use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: # per-scene use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU worker_thread_count: ${....num_threads} solver_type: ${....solver_type} # 0: PGS, 1: TGS bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True # GPU buffers gpu_max_rigid_contact_count: 1048576 gpu_max_rigid_patch_count: 33554432 gpu_found_lost_pairs_capacity: 20971520 gpu_found_lost_aggregate_pairs_capacity: 20971520 gpu_total_aggregate_pairs_capacity: 20971520 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 33554432 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 kuka: # -1 to use default values override_usd_defaults: False enable_self_collisions: False object: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.000 stabilization_threshold: 0.0025 # per-body density: -1 max_depenetration_velocity: 1000.0 goal_object: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.000 stabilization_threshold: 0.0025 # per-body density: -1 max_depenetration_velocity: 1000.0 sim2real: enabled: False fail_quietely: False verbose: False
3,304
YAML
25.023622
71
0.70339
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/CartpoleCamera.yaml
defaults: - Cartpole - _self_ name: CartpoleCamera env: numEnvs: ${resolve_default:32,${...num_envs}} envSpacing: 20.0 cameraWidth: 240 cameraHeight: 160 exportImages: False sim: rendering_dt: 0.0166 # 1/60 # set to True if you use camera sensors in the environment enable_cameras: True add_ground_plane: False add_distant_light: True
363
YAML
16.333333
60
0.69697
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/FrankaDeformable.yaml
# used to create the object name: FrankaDeformable physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:1024,${...num_envs}} # 2048#4096 envSpacing: 3.0 episodeLength: 100 # 150 #350 #500 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 controlFrequencyInv: 2 # 60 Hz startPositionNoise: 0.0 startRotationNoise: 0.0 numProps: 4 aggregateMode: 3 actionScale: 7.5 dofVelocityScale: 0.1 distRewardScale: 2.0 rotRewardScale: 0.5 aroundHandleRewardScale: 10.0 openRewardScale: 7.5 fingerDistRewardScale: 100.0 actionPenaltyScale: 0.01 fingerCloseRewardScale: 10.0 sim: dt: 0.0083 # 1/120 s use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 8 # 12 solver_velocity_iteration_count: 0 # 1 contact_offset: 0.02 #0.005 rest_offset: 0.001 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 1000.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 33554432 gpu_found_lost_pairs_capacity: 524288 #20965884 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 1048576 gpu_max_soft_body_contacts: 4194304 #2097152 #16777216 #8388608 #2097152 #1048576 gpu_max_particle_contacts: 1048576 #2097152 #1048576 gpu_heap_capacity: 33554432 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 franka: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 beaker: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 cube: # -1 to use default values override_usd_defaults: False make_kinematic: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 # # per-shape # contact_offset: 0.02 # rest_offset: 0.001
3,421
YAML
25.944882
85
0.691903
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/FrankaCabinet.yaml
# used to create the object name: FrankaCabinet physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 3.0 episodeLength: 500 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 controlFrequencyInv: 2 # 60 Hz startPositionNoise: 0.0 startRotationNoise: 0.0 numProps: 4 aggregateMode: 3 actionScale: 7.5 dofVelocityScale: 0.1 distRewardScale: 2.0 rotRewardScale: 0.5 aroundHandleRewardScale: 10.0 openRewardScale: 7.5 fingerDistRewardScale: 100.0 actionPenaltyScale: 0.01 fingerCloseRewardScale: 10.0 sim: dt: 0.0083 # 1/120 s use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 contact_offset: 0.005 rest_offset: 0.0 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 1000.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 33554432 gpu_found_lost_pairs_capacity: 524288 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 1048576 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 33554432 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 franka: # -1 to use default values override_usd_defaults: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 cabinet: # -1 to use default values override_usd_defaults: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.0 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 prop: # -1 to use default values override_usd_defaults: False make_kinematic: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 12 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: 100 max_depenetration_velocity: 1000.0 # per-shape contact_offset: 0.005 rest_offset: 0.0
3,287
YAML
25.304
71
0.695467
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Ant.yaml
# used to create the object name: Ant physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: # numEnvs: ${...num_envs} numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 5 episodeLength: 1000 enableDebugVis: False clipActions: 1.0 powerScale: 0.5 controlFrequencyInv: 2 # 60 Hz # reward parameters headingWeight: 0.5 upWeight: 0.1 # cost parameters actionsCost: 0.005 energyCost: 0.05 dofVelocityScale: 0.2 angularVelocityScale: 1.0 contactForceScale: 0.1 jointsAtLimitCost: 0.1 deathCost: -2.0 terminationHeight: 0.31 alive_reward_scale: 0.5 sim: dt: 0.0083 # 1/120 s use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 10.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 8192 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 8192 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 Ant: # -1 to use default values override_usd_defaults: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 10.0
2,370
YAML
24.771739
71
0.690717
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/AnymalTerrain.yaml
name: AnymalTerrain physics_engine: ${..physics_engine} env: numEnvs: ${resolve_default:2048,${...num_envs}} numObservations: 188 numActions: 12 envSpacing: 3. # [m] terrain: staticFriction: 1.0 # [-] dynamicFriction: 1.0 # [-] restitution: 0. # [-] # rough terrain only: curriculum: true maxInitMapLevel: 0 mapLength: 8. mapWidth: 8. numLevels: 10 numTerrains: 20 # terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete] terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2] # tri mesh only: slopeTreshold: 0.5 baseInitState: pos: [0.0, 0.0, 0.62] # x,y,z [m] rot: [1.0, 0.0, 0.0, 0.0] # w,x,y,z [quat] vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s] vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s] randomCommandVelocityRanges: # train linear_x: [-1., 1.] # min max [m/s] linear_y: [-1., 1.] # min max [m/s] yaw: [-3.14, 3.14] # min max [rad/s] control: # PD Drive parameters: stiffness: 80.0 # [N*m/rad] damping: 2.0 # [N*m*s/rad] # action scale: target angle = actionScale * action + defaultAngle actionScale: 0.5 # decimation: Number of control action updates @ sim DT per policy DT decimation: 4 defaultJointAngles: # = target angles when action = 0.0 LF_HAA: 0.03 # [rad] LH_HAA: 0.03 # [rad] RF_HAA: -0.03 # [rad] RH_HAA: -0.03 # [rad] LF_HFE: 0.4 # [rad] LH_HFE: -0.4 # [rad] RF_HFE: 0.4 # [rad] RH_HFE: -0.4 # [rad] LF_KFE: -0.8 # [rad] LH_KFE: 0.8 # [rad] RF_KFE: -0.8 # [rad] RH_KFE: 0.8 # [rad] learn: # rewards terminalReward: 0.0 linearVelocityXYRewardScale: 1.0 linearVelocityZRewardScale: -4.0 angularVelocityXYRewardScale: -0.05 angularVelocityZRewardScale: 0.5 orientationRewardScale: -0. torqueRewardScale: -0.00002 jointAccRewardScale: -0.0005 baseHeightRewardScale: -0.0 actionRateRewardScale: -0.01 fallenOverRewardScale: -1.0 # cosmetics hipRewardScale: -0. #25 # normalization linearVelocityScale: 2.0 angularVelocityScale: 0.25 dofPositionScale: 1.0 dofVelocityScale: 0.05 heightMeasurementScale: 5.0 # noise addNoise: true noiseLevel: 1.0 # scales other values dofPositionNoise: 0.01 dofVelocityNoise: 1.5 linearVelocityNoise: 0.1 angularVelocityNoise: 0.2 gravityNoise: 0.05 heightMeasurementNoise: 0.06 #randomization pushInterval_s: 15 # episode length in seconds episodeLength_s: 20 sim: dt: 0.005 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: False add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: True # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 100.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 163840 gpu_found_lost_pairs_capacity: 4194304 gpu_found_lost_aggregate_pairs_capacity: 33554432 gpu_total_aggregate_pairs_capacity: 4194304 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 134217728 gpu_temp_buffer_capacity: 33554432 gpu_max_num_partitions: 8 anymal: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: False # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 100.0
4,346
YAML
25.345454
82
0.633916
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/BallBalance.yaml
# used to create the object name: BallBalance physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 2.0 maxEpisodeLength: 600 actionSpeedScale: 20 clipObservations: 5.0 clipActions: 1.0 sim: dt: 0.01 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 1000.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 262144 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 262144 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 table: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 ball: # -1 to use default values override_usd_defaults: False make_kinematic: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: 200 max_depenetration_velocity: 1000.0
2,458
YAML
25.728261
71
0.690806
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/FactoryBase.yaml
# See schema in factory_schema_config_base.py for descriptions of parameters. defaults: - _self_ - /factory_schema_config_base sim: add_damping: True disable_contact_processing: False env: env_spacing: 1.5 franka_depth: 0.5 table_height: 0.4 franka_friction: 1.0 table_friction: 0.3
309
YAML
16.222221
77
0.699029
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Humanoid.yaml
# used to create the object name: Humanoid physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: # numEnvs: ${...num_envs} numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 5 episodeLength: 1000 enableDebugVis: False clipActions: 1.0 powerScale: 1.0 controlFrequencyInv: 2 # 60 Hz # reward parameters headingWeight: 0.5 upWeight: 0.1 # cost parameters actionsCost: 0.01 energyCost: 0.05 dofVelocityScale: 0.1 angularVelocityScale: 0.25 contactForceScale: 0.01 jointsAtLimitCost: 0.25 deathCost: -1.0 terminationHeight: 0.8 alive_reward_scale: 2.0 sim: dt: 0.0083 # 1/120 s use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 10.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 8192 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 8192 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 Humanoid: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 10.0
2,335
YAML
24.670329
71
0.693362
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/AllegroHand.yaml
# used to create the object name: AllegroHand physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:8192,${...num_envs}} envSpacing: 0.75 episodeLength: 600 clipObservations: 5.0 clipActions: 1.0 useRelativeControl: False dofSpeedScale: 20.0 actionsMovingAverage: 1.0 controlFrequencyInv: 4 # 30 Hz startPositionNoise: 0.01 startRotationNoise: 0.0 resetPositionNoise: 0.01 resetRotationNoise: 0.0 resetDofPosRandomInterval: 0.2 resetDofVelRandomInterval: 0.0 # reward -> dictionary distRewardScale: -10.0 rotRewardScale: 1.0 rotEps: 0.1 actionPenaltyScale: -0.0002 reachGoalBonus: 250 fallDistance: 0.24 fallPenalty: 0.0 velObsScale: 0.2 objectType: "block" observationType: "full" # can be "full_no_vel", "full" successTolerance: 0.1 printNumSuccesses: False maxConsecutiveSuccesses: 0 sim: dt: 0.0083 # 1/120 s add_ground_plane: True add_distant_light: False use_gpu_pipeline: ${eq:${...pipeline},"gpu"} use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: # per-scene use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU worker_thread_count: ${....num_threads} solver_type: ${....solver_type} # 0: PGS, 1: TGS bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 33554432 gpu_found_lost_pairs_capacity: 819200 gpu_found_lost_aggregate_pairs_capacity: 819200 gpu_total_aggregate_pairs_capacity: 1048576 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 33554432 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 allegro_hand: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: False # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.0005 # per-body density: -1 max_depenetration_velocity: 1000.0 object: # -1 to use default values override_usd_defaults: False make_kinematic: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.0025 # per-body density: 400.0 max_depenetration_velocity: 1000.0 goal_object: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 8 solver_velocity_iteration_count: 0 sleep_threshold: 0.000 stabilization_threshold: 0.0025 # per-body density: -1 max_depenetration_velocity: 1000.0
3,360
YAML
25.464567
71
0.69881
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/HumanoidSAC.yaml
# used to create the object defaults: - Humanoid - _self_ # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:64,${...num_envs}}
168
YAML
20.124997
52
0.678571
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Ingenuity.yaml
# used to create the object name: Ingenuity physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 2.5 maxEpisodeLength: 2000 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 sim: dt: 0.01 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -3.721] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False # set to True if you use camera sensors in the environment enable_cameras: False disable_contact_processing: False physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 1000.0 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: False # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 4194304 gpu_found_lost_aggregate_pairs_capacity: 33554432 gpu_total_aggregate_pairs_capacity: 4194304 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 ingenuity: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 ball: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0
2,351
YAML
27
71
0.693322
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Quadcopter.yaml
# used to create the object name: Quadcopter physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 1.25 maxEpisodeLength: 500 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 sim: dt: 0.01 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 1000.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 8192 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 8192 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 copter: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 ball: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0
2,452
YAML
25.663043
71
0.690457
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Crazyflie.yaml
# used to create the object name: Crazyflie physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 2.5 maxEpisodeLength: 700 enableDebugVis: False clipObservations: 5.0 clipActions: 1.0 sim: dt: 0.01 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False # set to True if you use camera sensors in the environment enable_cameras: False disable_contact_processing: False physx: num_threads: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 max_depenetration_velocity: 1000.0 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: False # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 4194304 gpu_found_lost_aggregate_pairs_capacity: 33554432 gpu_total_aggregate_pairs_capacity: 4194304 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 crazyflie: # -1 to use default values override_usd_defaults: False enable_self_collisions: True enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0 ball: # -1 to use default values override_usd_defaults: False make_kinematic: True enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 6 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 1000.0
2,350
YAML
26.658823
71
0.692766
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml
# See schema in factory_schema_config_env.py for descriptions of common parameters. defaults: - _self_ - /factory_schema_config_env sim: disable_franka_collisions: False disable_nut_collisions: False disable_bolt_collisions: False disable_contact_processing: False env: env_name: 'FactoryEnvNutBolt' desired_subassemblies: ['nut_bolt_m16', 'nut_bolt_m16'] nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt nut_bolt_density: 7850.0 nut_bolt_friction: 0.3 # Subassembly options: # {nut_bolt_m4, nut_bolt_m8, nut_bolt_m12, nut_bolt_m16, nut_bolt_m20}
643
YAML
28.272726
116
0.73717
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/AntSAC.yaml
# used to create the object defaults: - Ant - _self_ # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:64,${...num_envs}}
163
YAML
19.499998
52
0.668712
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Cartpole.yaml
# used to create the object name: Cartpole physics_engine: ${..physics_engine} # if given, will override the device setting in gym. env: numEnvs: ${resolve_default:512,${...num_envs}} envSpacing: 4.0 resetDist: 3.0 maxEffort: 400.0 clipObservations: 5.0 clipActions: 1.0 controlFrequencyInv: 2 # 60 Hz sim: dt: 0.0083 # 1/120 s use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 contact_offset: 0.02 rest_offset: 0.001 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 100.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 81920 gpu_found_lost_pairs_capacity: 1024 gpu_found_lost_aggregate_pairs_capacity: 262144 gpu_total_aggregate_pairs_capacity: 1024 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 67108864 gpu_temp_buffer_capacity: 16777216 gpu_max_num_partitions: 8 Cartpole: # -1 to use default values override_usd_defaults: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 0 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 100.0 # per-shape contact_offset: 0.02 rest_offset: 0.001
2,124
YAML
26.243589
71
0.686911
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/Anymal.yaml
# used to create the object name: Anymal physics_engine: ${..physics_engine} env: numEnvs: ${resolve_default:4096,${...num_envs}} envSpacing: 4. # [m] clipObservations: 5.0 clipActions: 1.0 controlFrequencyInv: 2 baseInitState: pos: [0.0, 0.0, 0.62] # x,y,z [m] rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat] vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s] vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s] randomCommandVelocityRanges: linear_x: [-2., 2.] # min max [m/s] linear_y: [-1., 1.] # min max [m/s] yaw: [-1., 1.] # min max [rad/s] control: # PD Drive parameters: stiffness: 85.0 # [N*m/rad] damping: 2.0 # [N*m*s/rad] actionScale: 13.5 defaultJointAngles: # = target angles when action = 0.0 LF_HAA: 0.03 # [rad] LH_HAA: 0.03 # [rad] RF_HAA: -0.03 # [rad] RH_HAA: -0.03 # [rad] LF_HFE: 0.4 # [rad] LH_HFE: -0.4 # [rad] RF_HFE: 0.4 # [rad] RH_HFE: -0.4 # [rad] LF_KFE: -0.8 # [rad] LH_KFE: 0.8 # [rad] RF_KFE: -0.8 # [rad] RH_KFE: 0.8 # [rad] learn: # rewards linearVelocityXYRewardScale: 1.0 angularVelocityZRewardScale: 0.5 linearVelocityZRewardScale: -0.03 jointAccRewardScale: -0.0003 actionRateRewardScale: -0.006 cosmeticRewardScale: -0.06 # normalization linearVelocityScale: 2.0 angularVelocityScale: 0.25 dofPositionScale: 1.0 dofVelocityScale: 0.05 # episode length in seconds episodeLength_s: 50 sim: dt: 0.01 use_gpu_pipeline: ${eq:${...pipeline},"gpu"} gravity: [0.0, 0.0, -9.81] add_ground_plane: True add_distant_light: False use_fabric: True enable_scene_query_support: False disable_contact_processing: False # set to True if you use camera sensors in the environment enable_cameras: False default_physics_material: static_friction: 1.0 dynamic_friction: 1.0 restitution: 0.0 physx: worker_thread_count: ${....num_threads} solver_type: ${....solver_type} use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU solver_position_iteration_count: 4 solver_velocity_iteration_count: 1 contact_offset: 0.02 rest_offset: 0.0 bounce_threshold_velocity: 0.2 friction_offset_threshold: 0.04 friction_correlation_distance: 0.025 enable_sleeping: True enable_stabilization: True max_depenetration_velocity: 100.0 # GPU buffers gpu_max_rigid_contact_count: 524288 gpu_max_rigid_patch_count: 163840 gpu_found_lost_pairs_capacity: 4194304 gpu_found_lost_aggregate_pairs_capacity: 33554432 gpu_total_aggregate_pairs_capacity: 4194304 gpu_max_soft_body_contacts: 1048576 gpu_max_particle_contacts: 1048576 gpu_heap_capacity: 134217728 gpu_temp_buffer_capacity: 33554432 gpu_max_num_partitions: 8 Anymal: # -1 to use default values override_usd_defaults: False enable_self_collisions: False enable_gyroscopic_forces: True # also in stage params # per-actor solver_position_iteration_count: 4 solver_velocity_iteration_count: 1 sleep_threshold: 0.005 stabilization_threshold: 0.001 # per-body density: -1 max_depenetration_velocity: 100.0
3,270
YAML
24.960317
71
0.626911
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml
# specifies what the config is when running `ShadowHandOpenAI` in LSTM mode defaults: - ShadowHandOpenAI_FF - _self_ env: numEnvs: ${resolve_default:8192,${...num_envs}}
178
YAML
18.888887
75
0.707865
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [400, 400, 200, 100] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: ${if:${...checkpoint},True,False} load_path: ${...checkpoint} config: name: ${resolve_default:ShadowHandOpenAI_FF,${....experiment}} full_experiment_name: ${.name} device: ${....rl_device} device_name: ${....rl_device} env_name: rlgpu multi_gpu: ${....multi_gpu} ppo: True mixed_precision: False normalize_input: True normalize_value: True num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.998 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive schedule_type: standard kl_threshold: 0.016 score_to_win: 100000 max_epochs: ${resolve_default:10000,${....max_iterations}} save_best_after: 100 save_frequency: 200 print_stats: True grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 16 minibatch_size: 16384 mini_epochs: 4 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0001 central_value_config: minibatch_size: 32864 mini_epochs: 4 learning_rate: 5e-4 lr_schedule: adaptive schedule_type: standard kl_threshold: 0.016 clip_value: True normalize_input: True truncate_grads: True network: name: actor_critic central_value: True mlp: units: [512, 512, 256, 128] activation: elu d2rl: False initializer: name: default regularizer: name: None player: deterministic: True games_num: 100000 print_stats: True
2,215
YAML
20.940594
66
0.577427
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/AnymalTerrainPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: True space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0. # std = 1. fixed_sigma: True mlp: units: [512, 256, 128] activation: elu d2rl: False initializer: name: default regularizer: name: None # rnn: # name: lstm # units: 128 # layers: 1 # before_mlp: True # concat_input: True # layer_norm: False load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint load_path: ${...checkpoint} # path to the checkpoint to load config: name: ${resolve_default:AnymalTerrain,${....experiment}} full_experiment_name: ${.name} device: ${....rl_device} device_name: ${....rl_device} env_name: rlgpu multi_gpu: ${....multi_gpu} ppo: True mixed_precision: False # True normalize_input: True normalize_value: True normalize_advantage: True value_bootstrap: True clip_actions: False num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 1.0 gamma: 0.99 tau: 0.95 e_clip: 0.2 entropy_coef: 0.001 learning_rate: 3.e-4 # overwritten by adaptive lr_schedule lr_schedule: adaptive kl_threshold: 0.008 # target kl for adaptive lr truncate_grads: True grad_norm: 1. horizon_length: 48 minibatch_size: 16384 mini_epochs: 5 critic_coef: 2 clip_value: True seq_length: 4 # only for rnn bounds_loss_coef: 0. max_epochs: ${resolve_default:2000,${....max_iterations}} save_best_after: 100 score_to_win: 20000 save_frequency: 50 print_stats: True
1,928
YAML
21.694117
101
0.592842
j3soon/OmniIsaacGymEnvs-KukaReacher/omniisaacgymenvs/cfg/train/HumanoidPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [400, 200, 100] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint load_path: ${...checkpoint} # path to the checkpoint to load config: name: ${resolve_default:Humanoid,${....experiment}} full_experiment_name: ${.name} env_name: rlgpu device: ${....rl_device} device_name: ${....rl_device} multi_gpu: ${....multi_gpu} ppo: True mixed_precision: True normalize_input: True normalize_value: True value_bootstrap: True num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 20000 max_epochs: ${resolve_default:1000,${....max_iterations}} save_best_after: 100 save_frequency: 100 grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 32 minibatch_size: 32768 mini_epochs: 5 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0001
1,639
YAML
21.465753
101
0.594875