file_path (string, lengths 20–207) | content (string, lengths 5–3.85M) | size (int64, 5–3.85M) | lang (string, 9 classes) | avg_line_length (float64, 1.33–100) | max_line_length (int64, 4–993) | alphanum_fraction (float64, 0.26–0.93)
---|---|---|---|---|---|---
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_nut_bolt.yaml | nut_bolt_m4:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m4_tight/factory_nut_m4_tight.usd'
width_min: 0.007 # distance from flat surface to flat surface
width_max: 0.0080829 # distance from edge to edge
height: 0.0032 # height of nut
flat_length: 0.00404145 # length of flat surface
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m4_tight/factory_bolt_m4_tight.usd'
width: 0.004 # major diameter of bolt
head_height: 0.004 # height of bolt head
shank_length: 0.016 # length of bolt shank
thread_pitch: 0.0007 # distance between threads
nut_bolt_m8:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m8_tight/factory_nut_m8_tight.usd'
width_min: 0.013
width_max: 0.01501111
height: 0.0065
flat_length: 0.00750555
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m8_tight/factory_bolt_m8_tight.usd'
width: 0.008
head_height: 0.008
shank_length: 0.018
thread_pitch: 0.00125
nut_bolt_m12:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m12_tight/factory_nut_m12_tight.usd'
width_min: 0.019
width_max: 0.02193931
height: 0.010
flat_length: 0.01096966
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m12_tight/factory_bolt_m12_tight.usd'
width: 0.012
head_height: 0.012
shank_length: 0.020
thread_pitch: 0.00175
nut_bolt_m16:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m16_tight/factory_nut_m16_tight.usd'
width_min: 0.024
width_max: 0.02771281
height: 0.013
flat_length: 0.01385641
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m16_tight/factory_bolt_m16_tight.usd'
width: 0.016
head_height: 0.016
shank_length: 0.025
thread_pitch: 0.002
nut_bolt_m20:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m20_tight/factory_nut_m20_tight.usd'
width_min: 0.030
width_max: 0.03464102
height: 0.016
flat_length: 0.01732051
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m20_tight/factory_bolt_m20_tight.usd'
width: 0.020
head_height: 0.020
shank_length: 0.045
thread_pitch: 0.0025
| 2,331 | YAML | 32.314285 | 90 | 0.617332 |
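The asset-info file above is plain YAML keyed by subassembly name; a minimal sketch of how such a file could be loaded and queried with PyYAML (the local filename and the m16 lookup are illustrative assumptions, not code from the repository):

import yaml

# Hypothetical local copy of the asset-info file shown above.
with open("factory_asset_info_nut_bolt.yaml") as f:
    asset_info = yaml.safe_load(f)

# Each top-level key (e.g. nut_bolt_m16) holds a 'nut' and a 'bolt' entry.
nut_m16 = asset_info["nut_bolt_m16"]["nut"]
bolt_m16 = asset_info["nut_bolt_m16"]["bolt"]

# Flat-to-flat width and thread pitch are the quantities a grasping or
# screwing task would typically read from this file.
print(nut_m16["width_min"], nut_m16["height"])      # 0.024 0.013
print(bolt_m16["width"], bolt_m16["thread_pitch"])  # 0.016 0.002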
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_franka_table.yaml | franka_hand_length: 0.0584 # distance from origin of hand to origin of finger
franka_finger_length: 0.053671 # distance from origin of finger to bottom of fingerpad
franka_fingerpad_length: 0.017608 # distance from top of inner surface of fingerpad to bottom of inner surface of fingerpad
franka_gripper_width_max: 0.080 # maximum opening width of gripper
table_depth: 0.6 # depth of table
table_width: 1.0 # width of table
| 431 | YAML | 52.999993 | 124 | 0.772622 |
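These constants describe the Franka hand/finger geometry and the table footprint used by the Factory tasks; a small illustrative computation chaining the offsets (the derived variable names are assumptions for this sketch, not identifiers from the task code):

franka_hand_length = 0.0584         # hand origin -> finger origin
franka_finger_length = 0.053671     # finger origin -> bottom of fingerpad
franka_fingerpad_length = 0.017608  # inner fingerpad top -> bottom

# Distance from the hand origin to the bottom of the fingerpad.
hand_to_fingerpad_bottom = franka_hand_length + franka_finger_length  # ~0.112

# Midpoint of the inner fingerpad surface, measured from the hand origin,
# a natural reference point for placing a grasp target.
hand_to_fingerpad_center = hand_to_fingerpad_bottom - 0.5 * franka_fingerpad_length  # ~0.103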
elharirymatteo/RANS/omniisaacgymenvs/tasks/utils/anymal_terrain_generator.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
# terrain generator
class Terrain:
def __init__(self, cfg, num_robots) -> None:
self.horizontal_scale = 0.1
self.vertical_scale = 0.005
self.border_size = 20
self.num_per_env = 2
self.env_length = cfg["mapLength"]
self.env_width = cfg["mapWidth"]
self.proportions = [np.sum(cfg["terrainProportions"][: i + 1]) for i in range(len(cfg["terrainProportions"]))]
self.env_rows = cfg["numLevels"]
self.env_cols = cfg["numTerrains"]
self.num_maps = self.env_rows * self.env_cols
self.num_per_env = int(num_robots / self.num_maps)
self.env_origins = np.zeros((self.env_rows, self.env_cols, 3))
self.width_per_env_pixels = int(self.env_width / self.horizontal_scale)
self.length_per_env_pixels = int(self.env_length / self.horizontal_scale)
self.border = int(self.border_size / self.horizontal_scale)
self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border
self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border
self.height_field_raw = np.zeros((self.tot_rows, self.tot_cols), dtype=np.int16)
if cfg["curriculum"]:
self.curiculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows)
else:
self.randomized_terrain()
self.heightsamples = self.height_field_raw
self.vertices, self.triangles = convert_heightfield_to_trimesh(
self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"]
)
def randomized_terrain(self):
for k in range(self.num_maps):
# Env coordinates in the world
(i, j) = np.unravel_index(k, (self.env_rows, self.env_cols))
# Heightfield coordinate system from now on
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
choice = np.random.uniform(0, 1)
if choice < 0.1:
if np.random.choice([0, 1]):
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2)
else:
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
elif choice < 0.6:
# step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18])
step_height = np.random.choice([-0.15, 0.15])
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < 1.0:
discrete_obstacles_terrain(terrain, 0.15, 1.0, 2.0, 40, platform_size=3.0)
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
def curiculum(self, num_robots, num_terrains, num_levels):
num_robots_per_map = int(num_robots / num_terrains)
left_over = num_robots % num_terrains
idx = 0
for j in range(num_terrains):
for i in range(num_levels):
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
difficulty = i / num_levels
choice = j / num_terrains
slope = difficulty * 0.4
step_height = 0.05 + 0.175 * difficulty
discrete_obstacles_height = 0.025 + difficulty * 0.15
stepping_stones_size = 2 - 1.8 * difficulty
if choice < self.proportions[0]:
if choice < 0.05:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
elif choice < self.proportions[1]:
if choice < 0.15:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2)
elif choice < self.proportions[3]:
if choice < self.proportions[2]:
step_height *= -1
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < self.proportions[4]:
discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1.0, 2.0, 40, platform_size=3.0)
else:
stepping_stones_terrain(
terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0.0, platform_size=3.0
)
# Heightfield coordinate system
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
robots_in_map = num_robots_per_map
if j < left_over:
robots_in_map += 1
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
| 8,852 | Python | 50.47093 | 119 | 0.591618 |
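The Terrain class above takes a plain config dictionary plus a robot count; a hedged construction sketch (the dictionary values are illustrative, and the import assumes the Isaac Sim Python environment where omniisaacgymenvs and its terrain utilities are available):

from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import Terrain

# Illustrative config; the keys match those read in Terrain.__init__ above.
terrain_cfg = {
    "mapLength": 8.0,
    "mapWidth": 8.0,
    "numLevels": 10,
    "numTerrains": 20,
    "terrainProportions": [0.1, 0.1, 0.35, 0.25, 0.2],
    "curriculum": True,
    "slopeTreshold": 0.5,  # key spelled as in the code above
}

# num_robots should be a multiple of numLevels * numTerrains so that
# num_per_env comes out as a whole number of robots per sub-terrain.
terrain = Terrain(terrain_cfg, num_robots=2000)
print(terrain.vertices.shape, terrain.triangles.shape, terrain.env_origins.shape)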
elharirymatteo/RANS/omniisaacgymenvs/tasks/utils/usd_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from pxr import UsdLux, UsdPhysics
def set_drive_type(prim_path, drive_type):
joint_prim = get_prim_at_path(prim_path)
# set drive type ("angular" or "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, drive_type)
return drive
def set_drive_target_position(drive, target_value):
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
def set_drive_target_velocity(drive, target_value):
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
def set_drive_stiffness(drive, stiffness):
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
def set_drive_damping(drive, damping):
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
def set_drive_max_force(drive, max_force):
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
def set_drive(
prim_path, drive_type, target_type, target_value, stiffness, damping, max_force
) -> None:
drive = set_drive_type(prim_path, drive_type)
# set target type ("position" or "velocity")
if target_type == "position":
set_drive_target_position(drive, target_value)
elif target_type == "velocity":
set_drive_target_velocity(drive, target_value)
set_drive_stiffness(drive, stiffness)
set_drive_damping(drive, damping)
set_drive_max_force(drive, max_force)
def create_distant_light(prim_path="/World/defaultDistantLight", intensity=5000):
stage = get_current_stage()
light = UsdLux.DistantLight.Define(stage, prim_path)
light.GetPrim().GetAttribute("intensity").Set(intensity)
| 3,642 | Python | 36.173469 | 83 | 0.739154 |
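set_drive above bundles the individual UsdPhysics drive setters into one call; a hedged usage sketch against an already-loaded stage (the joint prim path and the gain values are assumptions for illustration, not values from the repository):

from omniisaacgymenvs.tasks.utils.usd_utils import set_drive

# Hypothetical revolute joint on a stage that is already open in Isaac Sim.
set_drive(
    prim_path="/World/envs/env_0/robot/joints/joint1",  # assumed path
    drive_type="angular",    # "angular" or "linear", as accepted by set_drive_type
    target_type="position",  # "position" or "velocity"
    target_value=0.0,
    stiffness=400.0,
    damping=40.0,
    max_force=100.0,
)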
elharirymatteo/RANS/omniisaacgymenvs/tasks/utils/fp_utils.py | import torch
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
from typing import Callable
def quantize_tensor_values(tensor, n_values):
"""
Quantizes the values of a tensor onto 2*n_values + 1 discrete levels in the range [-1,1] using PyTorch's quantization functions.
Args:
- tensor: a PyTorch tensor of shape (batch_size, num_features)
- n_values: an integer indicating the number of discrete values to use
Returns:
- a new tensor of the same shape as the input tensor, with each value quantized to a discrete value in the range [-1,1]
"""
assert n_values >= 1, "n_values must be greater than or equal to 1"
assert tensor.min() >= -1 and tensor.max() <= 1, "tensor values must be in the range [-1,1]"
scale = 1.0 / n_values
quantized_tensor = torch.quantize_per_tensor(tensor, scale=scale, zero_point=0,
dtype=torch.qint8)
quantized_tensor = quantized_tensor.dequantize()
return quantized_tensor
def quaternion_to_rotation_matrix(Q):
"""
Convert a quaternion into a full three-dimensional rotation matrix.
Input
:param Q: A 4 element array representing the quaternion (q0,q1,q2,q3)
Output
:return: A 3x3 element matrix representing the full 3D rotation matrix.
This rotation matrix converts a point in the local reference
frame to a point in the global reference frame.
"""
# Extract the values from Q
q0 = Q[0]
q1 = Q[1]
q2 = Q[2]
q3 = Q[3]
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
| 3,896 | Python | 38.765306 | 125 | 0.669148 |
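Both helpers above are self-contained; a short usage sketch with made-up inputs (it assumes the module and its torch/rl_games dependencies are installed so the import succeeds):

import numpy as np
import torch
from omniisaacgymenvs.tasks.utils.fp_utils import (
    quantize_tensor_values,
    quaternion_to_rotation_matrix,
)

# Snap a batch of actions in [-1, 1] onto 2*4 + 1 = 9 discrete levels.
actions = torch.tensor([[-0.93, -0.20, 0.31, 0.77]])
discrete_actions = quantize_tensor_values(actions, n_values=4)
print(discrete_actions)  # values become multiples of 0.25 within [-1, 1]

# The identity quaternion (q0, q1, q2, q3) = (1, 0, 0, 0) maps to the identity matrix.
R = quaternion_to_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0]))
print(np.allclose(R, np.eye(3)))  # True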
elharirymatteo/RANS/omniisaacgymenvs/tasks/shared/in_hand_manipulation.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
class InHandManipulationTask(RLTask):
def __init__(self, name, env, offset=None) -> None:
InHandManipulationTask.update_config(self)
RLTask.__init__(self, name, env)
self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.randomization_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device)
self.av_factor = torch.tensor(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.success_tolerance = self._task_cfg["env"]["successTolerance"]
self.reach_goal_bonus = self._task_cfg["env"]["reachGoalBonus"]
self.fall_dist = self._task_cfg["env"]["fallDistance"]
self.fall_penalty = self._task_cfg["env"]["fallPenalty"]
self.rot_eps = self._task_cfg["env"]["rotEps"]
self.vel_obs_scale = self._task_cfg["env"]["velObsScale"]
self.reset_position_noise = self._task_cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self._task_cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self._task_cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self._task_cfg["env"]["resetDofVelRandomInterval"]
self.hand_dof_speed_scale = self._task_cfg["env"]["dofSpeedScale"]
self.use_relative_control = self._task_cfg["env"]["useRelativeControl"]
self.act_moving_average = self._task_cfg["env"]["actionsMovingAverage"]
self.max_episode_length = self._task_cfg["env"]["episodeLength"]
self.reset_time = self._task_cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self._task_cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self._task_cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self._task_cfg["env"].get("averFactor", 0.1)
self.dt = 1.0 / 60
control_freq_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
self._assets_root_path = get_assets_root_path()
self.get_starting_positions()
self.get_hand()
self.object_start_translation = self.hand_start_translation.clone()
self.object_start_translation[1] += self.pose_dy
self.object_start_translation[2] += self.pose_dz
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([-0.2, -0.06, 0.12], device=self.device)
self.goal_start_translation = self.object_start_translation + self.goal_displacement_tensor
self.goal_start_translation[2] -= 0.04
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.get_object(self.hand_start_translation, self.pose_dy, self.pose_dz)
self.get_goal()
super().set_up_scene(scene, filter_collisions=False)
self._hands = self.get_hand_view(scene)
scene.add(self._hands)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
masses=torch.tensor([0.07087] * self._num_envs, device=self.device),
)
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
if self._dr_randomizer.randomize:
self._dr_randomizer.apply_on_startup_domain_randomization(self)
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("shadow_hand_view"):
scene.remove_object("shadow_hand_view", registry_only=True)
if scene.object_exists("finger_view"):
scene.remove_object("finger_view", registry_only=True)
if scene.object_exists("allegro_hand_view"):
scene.remove_object("allegro_hand_view", registry_only=True)
if scene.object_exists("goal_view"):
scene.remove_object("goal_view", registry_only=True)
if scene.object_exists("object_view"):
scene.remove_object("object_view", registry_only=True)
self.get_starting_positions()
self.object_start_translation = self.hand_start_translation.clone()
self.object_start_translation[1] += self.pose_dy
self.object_start_translation[2] += self.pose_dz
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([-0.2, -0.06, 0.12], device=self.device)
self.goal_start_translation = self.object_start_translation + self.goal_displacement_tensor
self.goal_start_translation[2] -= 0.04
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self._hands = self.get_hand_view(scene)
scene.add(self._hands)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
masses=torch.tensor([0.07087] * self._num_envs, device=self.device),
)
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
if self._dr_randomizer.randomize:
self._dr_randomizer.apply_on_startup_domain_randomization(self)
@abstractmethod
def get_hand(self):
pass
@abstractmethod
def get_hand_view(self):
pass
@abstractmethod
def get_observations(self):
pass
def get_object(self, hand_start_translation, pose_dy, pose_dz):
self.object_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd"
add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/object")
obj = XFormPrim(
prim_path=self.default_zero_env_path + "/object/object",
name="object",
translation=self.object_start_translation,
orientation=self.object_start_orientation,
scale=self.object_scale,
)
self._sim_config.apply_articulation_settings(
"object", get_prim_at_path(obj.prim_path), self._sim_config.parse_actor_config("object")
)
def get_goal(self):
add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/goal")
goal = XFormPrim(
prim_path=self.default_zero_env_path + "/goal",
name="goal",
translation=self.goal_start_translation,
orientation=self.goal_start_orientation,
scale=self.object_scale,
)
self._sim_config.apply_articulation_settings(
"goal", get_prim_at_path(goal.prim_path), self._sim_config.parse_actor_config("goal_object")
)
def post_reset(self):
self.num_hand_dofs = self._hands.num_dof
self.actuated_dof_indices = self._hands.actuated_dof_indices
self.hand_dof_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
dof_limits = self._hands.get_dof_limits()
self.hand_dof_lower_limits, self.hand_dof_upper_limits = torch.t(dof_limits[0].to(self.device))
self.hand_dof_default_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device)
self.hand_dof_default_vel = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device)
self.object_init_pos, self.object_init_rot = self._objects.get_world_poses()
self.object_init_pos -= self._env_pos
self.object_init_velocities = torch.zeros_like(
self._objects.get_velocities(), dtype=torch.float, device=self.device
)
self.goal_pos = self.object_init_pos.clone()
self.goal_pos[:, 2] -= 0.04
self.goal_rot = self.object_init_rot.clone()
self.goal_init_pos = self.goal_pos.clone()
self.goal_init_rot = self.goal_rot.clone()
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
if self._dr_randomizer.randomize:
self._dr_randomizer.set_up_domain_randomization(self)
def get_object_goal_observations(self):
self.object_pos, self.object_rot = self._objects.get_world_poses(clone=False)
self.object_pos -= self._env_pos
self.object_velocities = self._objects.get_velocities(clone=False)
self.object_linvel = self.object_velocities[:, 0:3]
self.object_angvel = self.object_velocities[:, 3:6]
def calculate_metrics(self):
(
self.rew_buf[:],
self.reset_buf[:],
self.reset_goal_buf[:],
self.progress_buf[:],
self.successes[:],
self.consecutive_successes[:],
) = compute_hand_reward(
self.rew_buf,
self.reset_buf,
self.reset_goal_buf,
self.progress_buf,
self.successes,
self.consecutive_successes,
self.max_episode_length,
self.object_pos,
self.object_rot,
self.goal_pos,
self.goal_rot,
self.dist_reward_scale,
self.rot_reward_scale,
self.rot_eps,
self.actions,
self.action_penalty_scale,
self.success_tolerance,
self.reach_goal_bonus,
self.fall_dist,
self.fall_penalty,
self.max_consecutive_successes,
self.av_factor,
)
self.extras["consecutive_successes"] = self.consecutive_successes.mean()
self.randomization_buf += 1
if self.print_success_stat:
self.total_resets = self.total_resets + self.reset_buf.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
# The direct average shows the overall result more quickly, but slightly undershoots long term policy performance.
print(
"Direct average consecutive successes = {:.1f}".format(
direct_average_successes / (self.total_resets + self.num_envs)
)
)
if self.total_resets > 0:
print(
"Post-Reset average consecutive successes = {:.1f}".format(self.total_successes / self.total_resets)
)
def pre_physics_step(self, actions):
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
reset_buf = self.reset_buf.clone()
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids) == 0:
self.reset_target_pose(goal_env_ids)
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device)
if self.use_relative_control:
targets = (
self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * self.actions
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
targets,
self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices],
)
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(
self.actions,
self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices],
)
self.cur_targets[:, self.actuated_dof_indices] = (
self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices]
+ (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
self.cur_targets[:, self.actuated_dof_indices],
self.hand_dof_lower_limits[self.actuated_dof_indices],
self.hand_dof_upper_limits[self.actuated_dof_indices],
)
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self._hands.set_joint_position_targets(
self.cur_targets[:, self.actuated_dof_indices], indices=None, joint_indices=self.actuated_dof_indices
)
if self._dr_randomizer.randomize:
rand_envs = torch.where(
self.randomization_buf >= self._dr_randomizer.min_frequency,
torch.ones_like(self.randomization_buf),
torch.zeros_like(self.randomization_buf),
)
rand_env_ids = torch.nonzero(torch.logical_and(rand_envs, reset_buf))
self.dr.physics_view.step_randomization(rand_env_ids)
self.randomization_buf[rand_env_ids] = 0
def is_done(self):
pass
def reset_target_pose(self, env_ids):
# reset goal
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
new_rot = randomize_rotation(
rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
self.goal_pos[env_ids] = self.goal_init_pos[env_ids, 0:3]
self.goal_rot[env_ids] = new_rot
goal_pos, goal_rot = self.goal_pos.clone(), self.goal_rot.clone()
goal_pos[env_ids] = (
self.goal_pos[env_ids] + self.goal_displacement_tensor + self._env_pos[env_ids]
) # add world env pos
self._goals.set_world_poses(goal_pos[env_ids], goal_rot[env_ids], indices)
self.reset_goal_buf[env_ids] = 0
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device)
self.reset_target_pose(env_ids)
# reset object
new_object_pos = (
self.object_init_pos[env_ids] + self.reset_position_noise * rand_floats[:, 0:3] + self._env_pos[env_ids]
) # add world env pos
new_object_rot = randomize_rotation(
rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
object_velocities = torch.zeros_like(self.object_init_velocities, dtype=torch.float, device=self.device)
self._objects.set_velocities(object_velocities[env_ids], indices)
self._objects.set_world_poses(new_object_pos, new_object_rot, indices)
# reset hand
delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos
delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos
rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5 : 5 + self.num_hand_dofs] + 1.0)
pos = self.hand_dof_default_pos + self.reset_dof_pos_noise * rand_delta
dof_pos = torch.zeros((self.num_envs, self.num_hand_dofs), device=self.device)
dof_pos[env_ids, :] = pos
dof_vel = torch.zeros((self.num_envs, self.num_hand_dofs), device=self.device)
dof_vel[env_ids, :] = (
self.hand_dof_default_vel
+ self.reset_dof_vel_noise * rand_floats[:, 5 + self.num_hand_dofs : 5 + self.num_hand_dofs * 2]
)
self.prev_targets[env_ids, : self.num_hand_dofs] = pos
self.cur_targets[env_ids, : self.num_hand_dofs] = pos
self.hand_dof_targets[env_ids, :] = pos
self._hands.set_joint_position_targets(self.hand_dof_targets[env_ids], indices)
self._hands.set_joint_positions(dof_pos[env_ids], indices)
self._hands.set_joint_velocities(dof_vel[env_ids], indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.successes[env_ids] = 0
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(
quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
)
@torch.jit.script
def compute_hand_reward(
rew_buf,
reset_buf,
reset_goal_buf,
progress_buf,
successes,
consecutive_successes,
max_episode_length: float,
object_pos,
object_rot,
target_pos,
target_rot,
dist_reward_scale: float,
rot_reward_scale: float,
rot_eps: float,
actions,
action_penalty_scale: float,
success_tolerance: float,
reach_goal_bonus: float,
fall_dist: float,
fall_penalty: float,
max_consecutive_successes: int,
av_factor: float,
):
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
rot_dist = 2.0 * torch.asin(
torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)
) # changed quat convention
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions**2, dim=-1)
# Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
reward = dist_rew + rot_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
# Success bonus: orientation is within `success_tolerance` of goal orientation
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
# Fall penalty: distance to the goal is larger than a threshold
reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)
# Check env termination conditions, including maximum success number
resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(
torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf
)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets)
# Apply penalty for not reaching the goal
if max_consecutive_successes > 0:
reward = torch.where(progress_buf >= max_episode_length - 1, reward + 0.5 * fall_penalty, reward)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
cons_successes = torch.where(
num_resets > 0,
av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes,
consecutive_successes,
)
return reward, resets, goal_resets, progress_buf, successes, cons_successes
| 23,466 | Python | 43.110902 | 126 | 0.630657 |
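The orientation term in compute_hand_reward converts the quaternion difference between object and goal into a geodesic angle. A standalone numeric check of that metric is sketched below; quat_conj_wxyz and quat_mul_wxyz are local stand-ins for the Isaac utilities quat_conjugate and quat_mul, written here under the assumption of the (w, x, y, z) quaternion convention used by the task:

import torch

def quat_conj_wxyz(q):
    # Conjugate in (w, x, y, z) convention: negate the vector part.
    return torch.cat([q[..., :1], -q[..., 1:]], dim=-1)

def quat_mul_wxyz(a, b):
    # Hamilton product in (w, x, y, z) convention.
    w1, x1, y1, z1 = a.unbind(-1)
    w2, x2, y2, z2 = b.unbind(-1)
    return torch.stack([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ], dim=-1)

# A 90-degree rotation about z versus the identity should give rot_dist ~ pi/2.
object_rot = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])
target_rot = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
quat_diff = quat_mul_wxyz(object_rot, quat_conj_wxyz(target_rot))
rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0))
print(rot_dist)  # ~1.5708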
elharirymatteo/RANS/omniisaacgymenvs/tasks/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
class LocomotionTask(RLTask):
def __init__(self, name, env, offset=None) -> None:
LocomotionTask.update_config(self)
RLTask.__init__(self, name, env)
return
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
velocity = velocities[:, 0:3]
ang_velocity = velocities[:, 3:6]
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces(joint_indices=self._sensor_indices)
(
self.obs_buf[:],
self.potentials[:],
self.prev_potentials[:],
self.up_vec[:],
self.heading_vec[:],
) = get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
self.targets,
self.potentials,
self.dt,
self.inv_start_rot,
self.basis_vec0,
self.basis_vec1,
self.dof_limits_lower,
self.dof_limits_upper,
self.dof_vel_scale,
sensor_force_torques,
self._num_envs,
self.contact_force_scale,
self.actions,
self.angular_velocity_scale,
)
observations = {self._robots.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
forces = self.actions * self.joint_gears * self.power_scale
indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
# applies joint torques
self._robots.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions and velocities
dof_pos = torch_rand_float(-0.2, 0.2, (num_resets, self._robots.num_dof), device=self._device)
dof_pos[:] = tensor_clamp(self.initial_dof_pos[env_ids] + dof_pos, self.dof_limits_lower, self.dof_limits_upper)
dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, self._robots.num_dof), device=self._device)
root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
self._robots.set_joint_positions(dof_pos, indices=env_ids)
self._robots.set_joint_velocities(dof_vel, indices=env_ids)
self._robots.set_world_poses(root_pos, root_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
to_target = self.targets[env_ids] - self.initial_root_pos[env_ids]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
num_resets = len(env_ids)
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = torch.tensor([1000, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.target_dirs = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.dt = 1.0 / 60.0
self.potentials = torch.tensor([-1000.0 / self.dt], dtype=torch.float32, device=self._device).repeat(
self.num_envs
)
self.prev_potentials = self.potentials.clone()
self.actions = torch.zeros((self.num_envs, self.num_actions), device=self._device)
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = calculate_metrics(
self.obs_buf,
self.actions,
self.up_weight,
self.heading_weight,
self.potentials,
self.prev_potentials,
self.actions_cost_scale,
self.energy_cost_scale,
self.termination_height,
self.death_cost,
self._robots.num_dof,
self.get_dof_at_limit_cost(),
self.alive_reward_scale,
self.motor_effort_ratio,
)
def is_done(self) -> None:
self.reset_buf[:] = is_done(
self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
targets,
potentials,
dt,
inv_start_rot,
basis_vec0,
basis_vec1,
dof_limits_lower,
dof_limits_upper,
dof_vel_scale,
sensor_force_torques,
num_envs,
contact_force_scale,
actions,
angular_velocity_scale,
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, int, float, Tensor, float) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
to_target = targets - torso_position
to_target[:, 2] = 0.0
prev_potentials = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2
)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position
)
dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
obs = torch.cat(
(
torso_position[:, 2].view(-1, 1),
vel_loc,
angvel_loc * angular_velocity_scale,
normalize_angle(yaw).unsqueeze(-1),
normalize_angle(roll).unsqueeze(-1),
normalize_angle(angle_to_target).unsqueeze(-1),
up_proj.unsqueeze(-1),
heading_proj.unsqueeze(-1),
dof_pos_scaled,
dof_vel * dof_vel_scale,
sensor_force_torques.reshape(num_envs, -1) * contact_force_scale,
actions,
),
dim=-1,
)
return obs, potentials, prev_potentials, up_vec, heading_vec
@torch.jit.script
def is_done(obs_buf, termination_height, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, float, Tensor, Tensor, float) -> Tensor
reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return reset
@torch.jit.script
def calculate_metrics(
obs_buf,
actions,
up_weight,
heading_weight,
potentials,
prev_potentials,
actions_cost_scale,
energy_cost_scale,
termination_height,
death_cost,
num_dof,
dof_at_limit_cost,
alive_reward_scale,
motor_effort_ratio,
):
# type: (Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, int, Tensor, float, Tensor) -> Tensor
heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)
# aligning up axis of robot and environment
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)
# energy penalty for movement
actions_cost = torch.sum(actions**2, dim=-1)
electricity_cost = torch.sum(
torch.abs(actions * obs_buf[:, 12 + num_dof : 12 + num_dof * 2]) * motor_effort_ratio.unsqueeze(0), dim=-1
)
# reward for duration of staying alive
alive_reward = torch.ones_like(potentials) * alive_reward_scale
progress_reward = potentials - prev_potentials
total_reward = (
progress_reward
+ alive_reward
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost
)
# adjust reward for fallen agents
total_reward = torch.where(
obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward
)
return total_reward
| 13,243 | Python | 37.277457 | 214 | 0.628861 |
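The comment inside get_observations lists the width of each block packed into obs_buf; a quick arithmetic helper that reproduces that layout (the DOF and sensor counts below are assumptions chosen for illustration):

# Blocks, per the comment above: height(1), local lin vel(3), local ang vel(3),
# yaw(1), roll(1), angle_to_target(1), up_proj(1), heading_proj(1),
# dof_pos(num_dofs), dof_vel(num_dofs), foot forces(num_sensors * 6), actions(num_dofs).
def locomotion_obs_size(num_dofs: int, num_sensors: int) -> int:
    return 12 + 3 * num_dofs + 6 * num_sensors

# e.g. an Ant-like robot with 8 DOFs and 4 foot sensors gives 60 observations.
print(locomotion_obs_size(num_dofs=8, num_sensors=4))  # 60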
elharirymatteo/RANS/omniisaacgymenvs/cfg/config_mujoco.yaml | # Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ${resolve_default:${task.name},${task.experiment}}
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# used to set checkpoint path
checkpoint: ''
wandb_activate: False
use_rl: True
# set default task and default training config based on task
defaults:
- task: virtual_floating_platform/MFP2D_Virtual_GoToXY
- train: virtual_floating_platform/MFP2D_PPOmulti_dict_MLP
- controller: Optimal_LQR_DC
- hl_task: GoToXY_Square
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 1,447 | YAML | 25.814814 | 103 | 0.742225 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ${resolve_default:${task.name},${task.experiment}}
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# evaluate checkpoint
evaluation: False
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 300
# enables viewport recording
enable_recording: False
# interval between video recordings (in steps)
recording_interval: 2000
# length of the recorded video (in steps)
recording_length: 100
# fps for writing recorded video
recording_fps: 30
# directory to save recordings in
recording_dir: ''
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'omniisaacgymenvs'
# path to a kit app file
kit_app: ''
# Warp
warp: False
# set default task and default training config based on task
defaults:
- _self_
- task: Cartpole
- train: ${task}PPO
- override hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 2,055 | YAML | 22.906976 | 103 | 0.745012 |
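The experiment key above relies on a resolve_default resolver that the repository registers with OmegaConf; a minimal stand-in illustrating the intended behaviour (this lambda is an assumption inferred from how the resolver is used, not the repository's actual implementation):

from omegaconf import OmegaConf

# Assumed semantics: return the default unless a non-empty override is given.
OmegaConf.register_new_resolver(
    "resolve_default", lambda default, arg: default if arg in ("", None) else arg
)

cfg = OmegaConf.create({
    "task": {"name": "Cartpole", "experiment": ""},
    "experiment": "${resolve_default:${task.name},${task.experiment}}",
})
print(cfg.experiment)  # -> Cartpole, since no experiment name was supplied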
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/CartpoleCamera.yaml | defaults:
- Cartpole
- _self_
name: CartpoleCamera
env:
numEnvs: ${resolve_default:32,${...num_envs}}
envSpacing: 20.0
cameraWidth: 240
cameraHeight: 160
exportImages: False
sim:
rendering_dt: 0.0166 # 1/60
# set to True if you use camera sensors in the environment
enable_cameras: True
add_ground_plane: False
add_distant_light: True
| 363 | YAML | 16.333333 | 60 | 0.69697 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/FrankaDeformable.yaml | # used to create the object
name: FrankaDeformable
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:1024,${...num_envs}} # 2048#4096
envSpacing: 3.0
episodeLength: 100 # 150 #350 #500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.016 # 1/60s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 8 # 12
solver_velocity_iteration_count: 0 # 1
contact_offset: 0.02 #0.005
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288 #20965884
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 4194304 #2097152 #16777216 #8388608 #2097152 #1048576
gpu_max_particle_contacts: 1048576 #2097152 #1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
franka:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
beaker:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cube:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
# # per-shape
# contact_offset: 0.02
# rest_offset: 0.001
| 3,418 | YAML | 25.92126 | 85 | 0.691925 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/FrankaCabinet.yaml | # used to create the object
name: FrankaCabinet
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 3.0
episodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
franka:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cabinet:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.0
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
| 3,287 | YAML | 25.304 | 71 | 0.695467 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Ant.yaml | # used to create the object
name: Ant
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
# numEnvs: ${...num_envs}
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 0.5
controlFrequencyInv: 2 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.005
energyCost: 0.05
dofVelocityScale: 0.2
angularVelocityScale: 1.0
contactForceScale: 0.1
jointsAtLimitCost: 0.1
deathCost: -2.0
terminationHeight: 0.31
alive_reward_scale: 0.5
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 10.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Ant:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0 | 2,370 | YAML | 24.771739 | 71 | 0.690717 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/AnymalTerrain.yaml | name: AnymalTerrain
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:2048,${...num_envs}}
numObservations: 188
numActions: 12
envSpacing: 3. # [m]
terrain:
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
# rough terrain only:
curriculum: true
maxInitMapLevel: 0
mapLength: 8.
mapWidth: 8.
numLevels: 10
numTerrains: 20
# terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete]
terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2]
# tri mesh only:
slopeTreshold: 0.5
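    # rough size check (an estimate, assuming one mapLength x mapWidth tile per level/terrain pair
    # as in the usual terrain generator): numLevels * mapLength by numTerrains * mapWidth gives
    # about 80 m x 160 m of generated terrain, plus whatever border the generator adds.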
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [1.0, 0.0, 0.0, 0.0] # w,x,y,z [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
# train
linear_x: [-1., 1.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-3.14, 3.14] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 80.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
# action scale: target angle = actionScale * action + defaultAngle
actionScale: 0.5
    # decimation: number of simulation steps (at sim dt) per policy action, i.e. policy dt = decimation * sim dt
decimation: 4
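    # worked example using the values above and sim.dt = 0.005 further down: the policy runs at
    # dt_policy = decimation * dt_sim = 4 * 0.005 = 0.02 s (50 Hz), so episodeLength_s: 20 is about
    # 1000 policy steps; an action of 0.2 on LF_HFE targets 0.5 * 0.2 + 0.4 = 0.5 rad.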
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
learn:
# rewards
terminalReward: 0.0
linearVelocityXYRewardScale: 1.0
linearVelocityZRewardScale: -4.0
angularVelocityXYRewardScale: -0.05
angularVelocityZRewardScale: 0.5
orientationRewardScale: -0.
torqueRewardScale: -0.00002
jointAccRewardScale: -0.0005
baseHeightRewardScale: -0.0
actionRateRewardScale: -0.01
fallenOverRewardScale: -1.0
# cosmetics
hipRewardScale: -0. #25
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
heightMeasurementScale: 5.0
# noise
addNoise: true
noiseLevel: 1.0 # scales other values
dofPositionNoise: 0.01
dofVelocityNoise: 1.5
linearVelocityNoise: 0.1
angularVelocityNoise: 0.2
gravityNoise: 0.05
heightMeasurementNoise: 0.06
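    # example: with noiseLevel 1.0 the per-channel noise magnitudes above are applied as-is;
    # halving noiseLevel to 0.5 would scale every one of them (dof, velocity, gravity, height) by 0.5.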
    # randomization
pushInterval_s: 15
# episode length in seconds
episodeLength_s: 20
sim:
dt: 0.005
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: False
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: True
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 163840
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 134217728
gpu_temp_buffer_capacity: 33554432
gpu_max_num_partitions: 8
anymal:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: False
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
| 4,346 | YAML | 25.345454 | 82 | 0.633916 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/BallBalance.yaml | # used to create the object
name: BallBalance
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.0
maxEpisodeLength: 600
actionSpeedScale: 20
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 262144
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 262144
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
table:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 200
max_depenetration_velocity: 1000.0
| 2,458 | YAML | 25.728261 | 71 | 0.690806 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/FactoryBase.yaml | # See schema in factory_schema_config_base.py for descriptions of parameters.
defaults:
- _self_
- /factory_schema_config_base
sim:
add_damping: True
disable_contact_processing: False
env:
env_spacing: 1.5
franka_depth: 0.5
table_height: 0.4
franka_friction: 1.0
table_friction: 0.3
| 309 | YAML | 16.222221 | 77 | 0.699029 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Humanoid.yaml | # used to create the object
name: Humanoid
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
# numEnvs: ${...num_envs}
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 1.0
controlFrequencyInv: 2 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.01
energyCost: 0.05
dofVelocityScale: 0.1
angularVelocityScale: 0.25
contactForceScale: 0.01
jointsAtLimitCost: 0.25
deathCost: -1.0
terminationHeight: 0.8
alive_reward_scale: 2.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 10.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Humanoid:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0
| 2,335 | YAML | 24.670329 | 71 | 0.693362 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/AllegroHand.yaml | # used to create the object
name: AllegroHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 0.75
episodeLength: 600
clipObservations: 5.0
clipActions: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 1.0
controlFrequencyInv: 4 # 30 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
velObsScale: 0.2
objectType: "block"
observationType: "full" # can be "full_no_vel", "full"
successTolerance: 0.1
printNumSuccesses: False
maxConsecutiveSuccesses: 0
sim:
dt: 0.0083 # 1/120 s
add_ground_plane: True
add_distant_light: False
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
# per-scene
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type} # 0: PGS, 1: TGS
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 819200
gpu_found_lost_aggregate_pairs_capacity: 819200
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
allegro_hand:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: False
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.0005
# per-body
density: -1
max_depenetration_velocity: 1000.0
object:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.0025
# per-body
density: 400.0
max_depenetration_velocity: 1000.0
goal_object:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.000
stabilization_threshold: 0.0025
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 3,360 | YAML | 25.464567 | 71 | 0.69881 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/HumanoidSAC.yaml | # used to create the object
defaults:
- Humanoid
- _self_
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:64,${...num_envs}} | 168 | YAML | 20.124997 | 52 | 0.678571 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Ingenuity.yaml | # used to create the object
name: Ingenuity
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.5
maxEpisodeLength: 2000
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -3.721]
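  # -3.721 m/s^2 is Mars surface gravity, consistent with the Ingenuity Mars helicopter setting.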
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
# set to True if you use camera sensors in the environment
enable_cameras: False
disable_contact_processing: False
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: False
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
ingenuity:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0 | 2,351 | YAML | 27 | 71 | 0.693322 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Quadcopter.yaml | # used to create the object
name: Quadcopter
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 1.25
maxEpisodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
copter:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 2,452 | YAML | 25.663043 | 71 | 0.690457 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Crazyflie.yaml | # used to create the object
name: Crazyflie
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:128,${...num_envs}}
envSpacing: 2.5
maxEpisodeLength: 700
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
# set to True if you use camera sensors in the environment
enable_cameras: False
disable_contact_processing: False
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: False
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
crazyflie:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 2,349 | YAML | 26.647059 | 71 | 0.692635 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
disable_nut_collisions: False
disable_bolt_collisions: False
disable_contact_processing: False
env:
env_name: 'FactoryEnvNutBolt'
desired_subassemblies: ['nut_bolt_m16', 'nut_bolt_m16']
nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt
nut_bolt_density: 7850.0
nut_bolt_friction: 0.3
# Subassembly options:
# {nut_bolt_m4, nut_bolt_m8, nut_bolt_m12, nut_bolt_m16, nut_bolt_m20}
| 643 | YAML | 28.272726 | 116 | 0.73717 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/AntSAC.yaml | # used to create the object
defaults:
- Ant
- _self_
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:64,${...num_envs}} | 163 | YAML | 19.499998 | 52 | 0.668712 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Cartpole.yaml | # used to create the object
name: Cartpole
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:512,${...num_envs}}
envSpacing: 4.0
resetDist: 3.0
maxEffort: 400.0
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 1024
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1024
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Cartpole:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
# per-shape
contact_offset: 0.02
rest_offset: 0.001 | 2,124 | YAML | 26.243589 | 71 | 0.686911 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/Anymal.yaml | # used to create the object
name: Anymal
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 4. # [m]
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
linear_x: [-2., 2.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-1., 1.] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 85.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
actionScale: 13.5
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
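    # sketch of the implicit PD drive these gains configure (an assumption about how the joint
    # drives consume them): torque ~= stiffness * (actionScale * action + defaultAngle - q) - damping * q_dot,
    # with stiffness 85 N*m/rad and damping 2 N*m*s/rad as set above.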
learn:
# rewards
linearVelocityXYRewardScale: 1.0
angularVelocityZRewardScale: 0.5
linearVelocityZRewardScale: -0.03
jointAccRewardScale: -0.0003
actionRateRewardScale: -0.006
cosmeticRewardScale: -0.06
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
# episode length in seconds
episodeLength_s: 50
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 1
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 163840
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 134217728
gpu_temp_buffer_capacity: 33554432
gpu_max_num_partitions: 8
Anymal:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
| 3,270 | YAML | 24.960317 | 71 | 0.626911 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml | # specifies what the config is when running `ShadowHandOpenAI` in LSTM mode
defaults:
- ShadowHandOpenAI_FF
- _self_
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
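# note: with _self_ listed after ShadowHandOpenAI_FF in the defaults list, the values in this file
# (such as numEnvs above) override whatever was inherited from the FF config.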
| 178 | YAML | 18.888887 | 75 | 0.707865 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHandOpenAI_FF,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
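    # central_value_config below enables rl_games' asymmetric actor-critic: a separate value
    # network (the MLP defined under it) is trained with its own schedule, typically on the
    # full/privileged state, while the policy above uses the standard observations.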
central_value_config:
      minibatch_size: 32768
mini_epochs: 4
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
player:
deterministic: True
games_num: 100000
print_stats: True
| 2,215 | YAML | 20.940594 | 66 | 0.577427 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/AnymalTerrainPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: True
# concat_input: True
# layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AnymalTerrain,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False # True
normalize_input: True
normalize_value: True
normalize_advantage: True
value_bootstrap: True
clip_actions: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.001
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
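    # for reference, rl_games' adaptive schedule roughly multiplies the lr by 1.5 when the observed
    # KL drops below 0.5 * kl_threshold and divides it by 1.5 when KL exceeds 2 * kl_threshold
    # (behaviour of the library's AdaptiveScheduler as we understand it).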
truncate_grads: True
grad_norm: 1.
horizon_length: 48
minibatch_size: 16384
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_length: 4 # only for rnn
bounds_loss_coef: 0.
max_epochs: ${resolve_default:2000,${....max_iterations}}
save_best_after: 100
score_to_win: 20000
save_frequency: 50
print_stats: True
| 1,928 | YAML | 21.694117 | 101 | 0.592842 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/HumanoidPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Humanoid,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 100
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,639 | YAML | 21.465753 | 101 | 0.594875 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/CrazyfliePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Crazyflie,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,614 | YAML | 21.430555 | 101 | 0.593556 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHand,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,703 | YAML | 20.56962 | 62 | 0.589548 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/HumanoidSAC.yaml | params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:HumanoidSAC,${....experiment}}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: ${resolve_default:50000,${....max_iterations}}
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
| 1,165 | YAML | 21.423077 | 101 | 0.603433 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHandOpenAI_LSTM,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
zero_rnn_on_done: False
player:
deterministic: True
games_num: 100000
print_stats: True
| 2,402 | YAML | 20.265487 | 68 | 0.562448 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/IngenuityPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ingenuity,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,612 | YAML | 21.402777 | 101 | 0.593052 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/QuadcopterPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Quadcopter,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,613 | YAML | 21.416666 | 101 | 0.593304 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltScrew,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 512
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/BallBalancePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:BallBalance,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:250,${....max_iterations}}
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,593 | YAML | 21.450704 | 101 | 0.593848 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/FrankaDeformablePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaDeformable,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: ${resolve_default:6000,${....max_iterations}}
save_best_after: 500
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384 #2048 #4096 #8192 #16384
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,665 | YAML | 22.138889 | 101 | 0.600601 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPlace,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/CartpoleCameraPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: None
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
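        # the three conv layers above mirror the classic Nature-DQN feature extractor
        # (32x8/4, 64x4/2, 64x3/1); for a hypothetical 84x84 input this would yield 7x7x64
        # features feeding the 512-unit MLP below.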
mlp:
units: [512]
activation: elu
initializer:
name: default
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: False
# concat_input: True
# layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:CartpoleCamera,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: False
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0 #0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 50
save_frequency: 10
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 512 #1024
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001 | 2,124 | YAML | 21.135416 | 101 | 0.556026 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/AntPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ant,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
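    # sanity check, assuming the default 4096 envs from the Ant task config: the rollout batch is
    # horizon_length * num_actors = 16 * 4096 = 65536 samples, so minibatch_size 32768 gives two
    # minibatches per mini-epoch (rl_games expects the minibatch size to divide the batch).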
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,657 | YAML | 21.405405 | 101 | 0.594448 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/FrankaCabinetPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaCabinet,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: ${resolve_default:1500,${....max_iterations}}
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,636 | YAML | 21.736111 | 101 | 0.598411 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/AntSAC.yaml | params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AntSAC,${....experiment}}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: ${resolve_default:20000,${....max_iterations}}
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
| 1,160 | YAML | 21.326923 | 101 | 0.601724 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/AllegroHandPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:AllegroHand,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.02
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,694 | YAML | 20.455696 | 62 | 0.590909 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/AnymalPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Anymal,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.0
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 24
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_length: 4 # only for rnn
bounds_loss_coef: 0.001
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 200
score_to_win: 20000
save_frequency: 50
print_stats: True
| 1,744 | YAML | 21.960526 | 101 | 0.600917 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/CartpolePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Cartpole,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:100,${....max_iterations}}
save_best_after: 50
save_frequency: 25
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001 | 1,583 | YAML | 21.628571 | 101 | 0.593178 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPick,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:200,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,596 | YAML | 20.581081 | 69 | 0.594612 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_RNN.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_rnn_dict
separate: False
mlp:
first_units: [128]
second_units: [128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
units: 128
layers: 1
name: "gru"
layer_norm: False
concat_input: False
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
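    # normalize only the listed entries of the dict observation (here, the proprioceptive "state" vector)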
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 48
minibatch_size: 8192
mini_epochs: 4
critic_coef: 2.0
clip_value: True
seq_len: 16
bounds_loss_coef: 0.0001
| 1,845 | YAML | 20.97619 | 101 | 0.588076 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_MLP_dock.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_mlp_dict
separate: True
mlp:
units: [128, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 1024
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001 | 1,699 | YAML | 21.972973 | 101 | 0.595644 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_MLP.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_mlp_dict
separate: True
mlp:
units: [128, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,700 | YAML | 21.68 | 101 | 0.595294 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_MLP_SE.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_mlp_dict
separate: True
mlp:
units: [128, 128]
activation: tanh
d2rl: False
use_state_encoding: True
pe_nfreq: 6
pe_log: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,769 | YAML | 21.692307 | 101 | 0.593556 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_CNN.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_cnn_dict
separate: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: None
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
normalization: batch_norm
mlp:
units: [128, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 64
mini_epochs: 4
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 2,147 | YAML | 21.14433 | 101 | 0.564974 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_Transformer.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_transformer_a2c
network:
name: actor_critic_transformer
separate: False
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
transformer:
input_sequence_length: 8
pos_embedding: learnt
num_head: 2
num_layer: 2
dim_feedforward: 1024
transforms_mlp_dim: [128, 128]
state_mlp_dim: [128, 128]
critic_mlp_dim: [512, 512, 256]
fuse_mlp_dim: [128, 128]
d_model: 128
dropout: 0.0
decoder_mlp_dim: [256, 256]
embed_init: 0.1
decoder_init: 0.01
activation: tanh
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: transformer
min_lr: 0
base_lr: 3e-4
warmup_steps: 15
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 2,067 | YAML | 22.5 | 101 | 0.595065 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP3D_PPOmulti_dict_MLP.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_mlp_dict
separate: False
mlp:
units: [256, 256, 256]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,706 | YAML | 21.76 | 101 | 0.595545 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/train/MFP/MFP2D_PPOmulti_dict_MLP_thruster.yaml | params:
seed: ${...seed}
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
network:
name: actor_critic_mlp_dict_thruster
separate: True
mlp:
units: [128, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
space:
multi_discrete:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FloatingPlatform,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
ppo: True
mixed_precision: False
normalize_input: True
normalize_input_keys: ["state"]
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
min_lr: 0
base_lr: 3e-4
warmup_steps: 10
warmup_factor: 0.1
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 0.5
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,709 | YAML | 21.8 | 101 | 0.59684 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_zero_convergence.yaml | name: "position"
run_batch: 1
goals_x: []
goals_y: []
position_distance_threshold: 0.03
save_dir: "mj_runs/position_zero_convergence" | 133 | YAML | 21.33333 | 45 | 0.729323 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_Square.yaml | name: "position"
run_batch: 0
goals_x: [ 2, 2, 2, 2, 2, 1, 0,-1,-2,-2,-2,-2,-2,-1, 0, 1, 2]
goals_y: [-2,-1, 0, 1, 2, 2, 2, 2, 2, 1, 0,-1,-2,-2,-2,-2,-2]
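# the 17 waypoints trace the perimeter of a 4 m x 4 m square centred at the origin, spaced 1 m apart (closed: first point repeated at the end)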
position_distance_threshold: 0.03
save_dir: "mj_runs/position_square" | 223 | YAML | 36.333327 | 61 | 0.547085 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Circle.yaml | name: "linear_velocity"
run_batch: 0
trajectory_type: "circle"
x_offset: 0
y_offset: 0
radius: 1.5
closed: True
lookahead_dist: 0.10
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_circle" | 194 | YAML | 18.499998 | 30 | 0.742268 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Spiral.yaml | name: "linear_velocity"
run_batch: 0
trajectory_type: "spiral"
x_offset: 0
y_offset: 0
start_radius: 0.5
end_radius: 1.5
num_loops: 4
closed: True
lookahead_dist: 0.15
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_spiral" | 229 | YAML | 18.166665 | 30 | 0.737991 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Circle.yaml | name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "circle"
x_offset: 0
y_offset: 0
radius: 1.5
closed: True
lookahead_dist: 0.10
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_heading_circle" | 210 | YAML | 20.099998 | 38 | 0.752381 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Circle_lab.yaml | name: "linear_velocity"
run_batch: 0
trajectory_type: "circle"
x_offset: 2.5
y_offset: -1.5
radius: 0.75
closed: True
lookahead_dist: 0.15
target_tracking_velocity: 0.3
save_dir: "ros_runs/trackXYVel/vel_circle_run_0" | 217 | YAML | 20.799998 | 48 | 0.741935 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_convergence.yaml | name: "pose"
run_batch: 1024
goals_x: [0]
goals_y: [0]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_zero_convergence" | 184 | YAML | 22.124997 | 41 | 0.73913 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Infinite.yaml | name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "infinite"
x_offset: 0
y_offset: 0
a: 1.5 # 'a' controls the size of the lemniscate, analogous to 'radius' for the circle
closed: True
lookahead_dist: 0.1
target_tracking_velocity: 0.2
save_dir: "mj_runs/vel_heading_infinite" | 290 | YAML | 28.099997 | 87 | 0.741379 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Infinite.yaml | name: "linear_velocity"
run_batch: 0
trajectory_type: "infinite"
x_offset: 0
y_offset: 0
a: 1.5 # 'a' controls the size of the lemniscate, analogous to 'radius' for the circle
closed: True
lookahead_dist: 0.1
target_tracking_velocity: 0.2
save_dir: "mj_runs/vel_infinite" | 274 | YAML | 26.499997 | 87 | 0.733577 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Square.yaml | name: "linear_velocity"
run_batch: 0
trajectory_type: "square"
x_offset: 0
y_offset: 0
height: 3.0
closed: True
lookahead_dist: 0.3
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_square" | 193 | YAML | 18.399998 | 30 | 0.740933 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Spiral.yaml | name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "spiral"
x_offset: 0
y_offset: 0
start_radius: 0.5
end_radius: 1.5
num_loops: 4
closed: True
lookahead_dist: 0.15
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_heading_spiral" | 245 | YAML | 19.499998 | 38 | 0.746939 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_Circle.yaml | name: "position"
run_batch: 0
goals_x: [1.5 , 1.37031819, 1.00369591, 0.46352549, -0.15679269, -0.75, -1.21352549, -1.4672214 , -1.4672214 , -1.21352549, -0.75, -0.15679269, 0.46352549, 1.00369591, 1.37031819, 1.5 ]
goals_y: [ 0.00000000, 0.610104965, 1.11471724, 1.42658477, 1.49178284, 1.29903811, 0.881677878, 0.311867536, -0.311867536, -0.881677878, -1.29903811, -1.49178284, -1.42658477, -1.11471724, -0.610104965, 0.0]
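# 16 waypoints evenly spaced every 24 degrees on a circle of radius 1.5 m centred at the origin (closed: first point repeated at the end)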
position_distance_threshold: 0.03
save_dir: "mj_runs/position_square" | 509 | YAML | 83.999986 | 214 | 0.681729 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_lab.yaml | name: "pose"
run_batch: 1024
goals_x: [2.5]
goals_y: [-1.5]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "ros_runs/pose_disturbance/run_1" | 191 | YAML | 22.999997 | 43 | 0.727749 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_convergence_lab.yaml | name: "pose"
run_batch: 256
goals_x: [2.5]
goals_y: [-1.5]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_zero_convergence" | 188 | YAML | 22.624997 | 41 | 0.728723 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_Square.yaml | name: "pose"
run_batch: 0
goals_x: [ 2, 2, 2, 2, 2, 1, 0, -1, -2, -2, -2, -2,-2,-1, 0, 1, 2]
goals_y: [ -2, -1, 0, 1, 2, 2, 2, 2, 2, 1, 0, -1,-2,-2,-2,-2,-2]
goals_theta: [1.57,1.57,1.57,1.57,3.14,3.14,3.14,3.14,-1.57,-1.57,-1.57,-1.57, 0, 0, 0, 0, 0]
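# same square waypoints as GoToXY_Square; the target heading (radians) rotates by 90 degrees on each side of the square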
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_square" | 410 | YAML | 50.374994 | 93 | 0.478049 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Square.yaml | name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "square"
x_offset: 0
y_offset: 0
height: 3.0
closed: True
lookahead_dist: 0.3
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_heading_square" | 209 | YAML | 19.999998 | 38 | 0.751196 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/controller/Optimal_LQR_DAC.yaml | name: Discrete_Adaptive_LQR_Controller
# State cost matrix
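# state ordering: pos_x, pos_y, vel_x, vel_y, qw, qz, wz (same convention as the Discrete_LQR_Controller config)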
Q: [1,1,5,5,1,1,1]
# Control cost matrix
R: [0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# Disturbance weight matrix
W: [0.1,0.1,0.1,0.1,0.1,0.1,0.1]
make_planar_compatible: True
control_type: LQR
update_matrices_every_n_steps: 100 | 288 | YAML | 25.272725 | 44 | 0.701389 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/controller/Optimal_LQR_DC.yaml | name: Discrete_LQR_Controller
# State cost matrix
# pos_x, pos_y, vel_x, vel_y, qw, qz, wz
Q: [0.0001,0.00001,100,100,0.000001,0.000001,1]
# Control cost matrix
R: [0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# Disturbance weight matrix
W: [0.1,0.1,0.1,0.1,0.1,0.1,0.1]
make_planar_compatible: True
control_type: LQR | 314 | YAML | 27.636361 | 47 | 0.678344 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/generate_data.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import os
import datetime
import numpy as np
import torch
import hydra
import carb
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames_mfp import VecEnvRLGames
def run_sdg(cfg, horizon, num_ep=1):
"""
Generate synthetic data using the trained agent.
TODO: Discard terminated agents
"""
root_dir = "./sdg/" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(root_dir, exist_ok=True)
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
agent.restore(cfg.checkpoint)
agent.has_batch_dimension = True
agent.batch_size = 1024
# agent.init_rnn()
env = agent.env
obs = env.reset()
    # if the kill_thrusters randomization is enabled, keep the per-episode thruster kill masks so the logged actions can be masked later
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
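    # roll out `num_ep` episodes of `horizon` steps each; for every step the actions,
    # proprioceptive state (position concatenated with the first five observation entries),
    # task observations, RGB-D frames and rewards of all parallel environments are logged and saved per episode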
for n in range(num_ep):
evaluation_dir = os.path.join(root_dir, str(n))
os.makedirs(evaluation_dir, exist_ok=True)
env._task.reset_idx(env._task.all_indices.long())
obs = env.reset()
ep_data = {
"act": [], "state": [], "task": [],
"rgb": [], "depth": [], "rews": []
}
for _ in range(horizon):
actions = agent.get_action(obs["obs"], is_deterministic=True)
position = env._task.current_state["position"]
obs, reward, _, _ = env.step(actions)
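            # observation layout (as used elsewhere in this repo): indices 0-4 hold the
            # proprioceptive terms, index 5 the task flag (dropped here), 6+ the task data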
state = obs["obs"]["state"][:, :5]
task_data = obs["obs"]["state"][:, 6:]
state = torch.cat([position, state], dim=-1)
rgb, depth = env._task.get_rgbd_data()
ep_data["act"].append(actions.cpu())
ep_data["state"].append(state.cpu())
ep_data["task"].append(task_data.cpu())
ep_data["rgb"].append(rgb.cpu())
ep_data["depth"].append(depth.cpu())
ep_data["rews"].append(reward.cpu())
ep_data["act"] = torch.stack(ep_data["act"]).transpose(0, 1)
ep_data["state"] = torch.stack(ep_data["state"]).transpose(0, 1)
ep_data["task"] = torch.stack(ep_data["task"]).transpose(0, 1)
ep_data["rews"] = torch.stack(ep_data["rews"]).transpose(0, 1)
ep_data["rgb"] = torch.stack(ep_data["rgb"]).transpose(0, 1)
ep_data["depth"] = torch.stack(ep_data["depth"]).transpose(0, 1)
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data["act"] = ep_data["act"] * (1 - killed_thrusters_idxs.cpu().numpy())
# save the episode data
torch.save(ep_data["act"], os.path.join(evaluation_dir, "act.pt"))
torch.save(ep_data["state"], os.path.join(evaluation_dir, "state.pt"))
torch.save(ep_data["task"], os.path.join(evaluation_dir, "task.pt"))
torch.save(ep_data["rews"], os.path.join(evaluation_dir, "rews.pt"))
torch.save(ep_data["rgb"], os.path.join(evaluation_dir, "rgb.pt"))
torch.save(ep_data["depth"], os.path.join(evaluation_dir, "depth.pt"))
carb.log_info("Data generation complete")
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
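    # requires a trained policy checkpoint passed as a Hydra override, e.g. (hypothetical path):
    #   checkpoint=./runs/<experiment>/nn/<experiment>.pth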
if cfg.checkpoint is None:
print("No checkpoint specified. Exiting...")
return
    horizon = 250  # 5 s at 50 fps
num_ep = 300
cfg.task.env.maxEpisodeLength = horizon + 2
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
run_sdg(cfg, horizon, num_ep)
env.close()
if __name__ == "__main__":
parse_hydra_configs() | 5,146 | Python | 35.764285 | 126 | 0.629615 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/rlgames_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import gym
import hydra
import torch
from omegaconf import DictConfig
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.scripts.rlgames_train import RLGTrainer
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.demo_util import initialize_demo
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
class RLGDemo(RLGTrainer):
def __init__(self, cfg, cfg_dict):
RLGTrainer.__init__(self, cfg, cfg_dict)
self.cfg.test = True
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream)
# parse experiment directory
module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == '':
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {"render_modes": ["rgb_array"], "render_fps": cfg.recording_fps}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env, video_folder=videos_dir, step_trigger=video_interval, video_length=video_length
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_demo(cfg_dict, env)
if cfg.wandb_activate:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
id=run_name,
resume="allow",
monitor_gym=True,
)
rlg_trainer = RLGDemo(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run(module_path, experiment_dir)
env.close()
if cfg.wandb_activate:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
| 4,814 | Python | 37.52 | 109 | 0.701703 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/rlgames_play.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
import hydra
from omegaconf import DictConfig
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
import datetime
import os
import torch
import numpy as np
class RLGTrainer():
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register('RLGPU',
lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {
'vecenv_type': 'RLGPU',
'env_creator': lambda **kwargs: env
})
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
agent = runner.create_player()
agent.restore(self.cfg.checkpoint)
is_done = False
env = agent.env
obs = env.reset()
print(obs)
#input()
#prev_screen = env.render(mode='rgb_array')
#plt.imshow(prev_screen)
total_reward = 0
num_steps = 0
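        # step the policy deterministically until the environment reports done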
while not is_done:
action = agent.get_action(obs['obs'], is_deterministic=True)
obs, reward, done, info = env.step(action)
print(f'Step {num_steps}: obs={obs["obs"]}, rews={reward}, dones={done}, info={info} \n')
total_reward += reward
num_steps += 1
            is_done = done.any()
print(total_reward, num_steps)
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
cfg.checkpoint = "./runs/MFP2DGoToPose/nn/MFP2DGoToPose.pth"
headless = cfg.headless
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
task = initialize_task(cfg_dict, env)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if __name__ == '__main__':
parse_hydra_configs() | 4,935 | Python | 36.393939 | 116 | 0.694428 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/multi_model_eval.py | import numpy as np
import hydra
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from utils.plot_experiment import plot_episode_data_virtual
from utils.eval_metrics import get_GoToPose_success_rate_new
import os
import glob
import pandas as pd
from tqdm import tqdm
# filter out invalid experiments and retrieve valid models
def get_valid_models(load_dir, experiments):
valid_models = []
invalid_experiments = []
for experiment in experiments:
try:
file_pattern = os.path.join(load_dir, experiment, "nn", "last_*ep_2000_rew__*.pth")
model = glob.glob(file_pattern)
if model:
valid_models.append(model[0])
        except Exception:
invalid_experiments.append(experiment)
if invalid_experiments:
print(f'Invalid experiments: {invalid_experiments}')
else:
print('All experiments are valid')
return valid_models
def eval_multi_agents(cfg, agent, models, horizon, plot_intermediate=False):
evaluation_dir = "./evaluations/" + models[0].split("/")[1] + "/"
os.makedirs(evaluation_dir, exist_ok=True)
    store_all_agents = True  # store rollout data from all agents; if False, only the first agent's data is kept
is_done = False
all_success_rate_df = pd.DataFrame()
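    # for every checkpoint: restore it, roll out `horizon` steps in all parallel
    # environments, then compute the pose success-rate metrics for that run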
for i, model in enumerate(tqdm(models)):
agent.restore(model)
env = agent.env
obs = env.reset()
ep_data = {'act': [], 'obs': [], 'rews': []}
        # if the kill_thrusters randomization is enabled, keep the per-episode thruster kill masks so the logged actions can be masked later
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
for _ in range(horizon):
actions = agent.get_action(obs['obs'], is_deterministic=True)
obs, reward, done, info = env.step(actions)
if store_all_agents:
ep_data['act'].append(actions.cpu().numpy())
ep_data['obs'].append(obs['obs']['state'].cpu().numpy())
ep_data['rews'].append(reward.cpu().numpy())
else:
ep_data['act'].append(actions[0].cpu().numpy())
ep_data['obs'].append(obs['obs']['state'][0].cpu().numpy())
ep_data['rews'].append(reward[0].cpu().numpy())
is_done = done.any()
ep_data['obs'] = np.array(ep_data['obs'])
ep_data['rews'] = np.array(ep_data['rews'])
ep_data['act'] = np.array(ep_data['act'])
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data['act'] = ep_data['act'] * (1 - killed_thrusters_idxs.cpu().numpy())
        # find episodes whose actions sum to zero over the whole horizon (i.e. no thruster ever fired)
        broken_episodes = [i for i in range(ep_data['act'].shape[1]) if ep_data['act'][:, i, :].sum() == 0]
# Remove episodes that are broken by the environment (IsaacGym bug)
if broken_episodes:
print(f'Broken episodes: {broken_episodes}')
print(f'Ep data shape before: {ep_data["act"].shape}')
for key in ep_data.keys():
ep_data[key] = np.delete(ep_data[key], broken_episodes, axis=1)
print(f'Ep data shape after: {ep_data["act"].shape}')
task_flag = ep_data['obs'][0, 0, 5].astype(int)
# if task_flag == 0: # GoToXY
# success_rate = get_GoToXY_success_rate(ep_data, print_intermediate=True)
# success_rate_df = success_rate['position']
# elif task_flag == 1: # GoToPose
# success_rate = get_GoToPose_success_rate(ep_data, print_intermediate=True)
# success_rate_df = pd.concat([success_rate['position'], success_rate['heading']], axis=1)
# elif task_flag == 2: # TrackXYVelocity
# success_rate = get_TrackXYVelocity_success_rate(ep_data, print_intermediate=True)
# success_rate_df = success_rate['xy_velocity']
# elif task_flag == 3: # TrackXYOVelocity
# success_rate = get_TrackXYOVelocity_success_rate(ep_data, print_intermediate=True)
# success_rate_df = pd.concat([success_rate['xy_velocity'], success_rate['omega_velocity']], axis=1)
success_rate = get_GoToPose_success_rate_new(ep_data, print_intermediate=True)
success_rate_df = success_rate['pose']
# Collect the data for the success rate table
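        # ALV / AAV: average linear / absolute angular velocity over the run;
        # AAC: mean of all logged actions (for on/off thrusters, the average firing rate)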
#success_rate_df['avg_rew'] = [np.mean(ep_data['rews'])]
        # ep_data['obs'] has shape (horizon, num_envs, obs_dim); linear velocities sit at indices 2-3, angular velocity at 4
        lin_vel_x = ep_data['obs'][:, :, 2]
        lin_vel_y = ep_data['obs'][:, :, 3]
        lin_vel = np.linalg.norm(np.array([lin_vel_x, lin_vel_y]), axis=0)
        success_rate_df['ALV'] = [np.mean(lin_vel.mean(axis=1))]
        ang_vel_z = np.absolute(ep_data['obs'][:, :, 4])
        success_rate_df['AAV'] = [np.mean(ang_vel_z.mean(axis=1))]
success_rate_df['AAC'] = np.mean(ep_data['act'])
all_success_rate_df = pd.concat([all_success_rate_df, success_rate_df], ignore_index=True)
# If want to print the latex code for the table use the following line
if plot_intermediate:
save_dir = evaluation_dir + model.split("/")[2] + "/"
plot_episode_data_virtual(ep_data, save_dir, store_all_agents)
# create index for the dataframe and save it
model_names = [model.split("/")[2] for model in models]
all_success_rate_df.insert(loc=0, column="model", value=model_names)
all_success_rate_df.to_csv(evaluation_dir + "/new_xy.csv")
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
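    # example invocation (hypothetical paths, Hydra overrides):
    #   checkpoint=./models/<experiment_batch>/ headless=True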
# specify the experiment load directory
load_dir = cfg.checkpoint #"./models/icra24_Pose_new/" #+ "expR_SE/"
print(f'Loading models from: {load_dir} ...')
experiments = os.listdir(load_dir)
print(f'Experiments found in {load_dir} folder: {len(experiments)}')
models = get_valid_models(load_dir, experiments)
#models = [m for m in models if "BB" in m.split("/")[2]]
print(f'Final models: {models}')
if not models:
print('No valid models found')
exit()
# _____Create task_____
# customize environment parameters based on model
if "BB" in models[0]:
print("Using BB model ...")
cfg.train.params.network.mlp.units = [256, 256]
# if "BBB" in models[0]:
# print("Using BBB model ...")
# cfg.train.params.network.mlp.units = [256, 256, 256]
# if "AN" in models[0]:
# print("Adding noise on act ...")
# cfg.task.env.add_noise_on_act = True
# if "AVN" in models[0]:
# print("Adding noise on act and vel ...")
#cfg.task.env.add_noise_on_act = True
#cfg.task.env.add_noise_on_vel = True
# if "UF" in models[0]:
# print("Setting uneven floor in the environment ...")
# cfg.task.env.use_uneven_floor = True
# cfg.task.env.max_floor_force = 0.25
horizon = 250
cfg.task.env.maxEpisodeLength = horizon + 2
cfg.task.env.platform.core.mass = 5.32
cfg.task.env.split_thrust = True
cfg.task.env.clipObservations['state'] = 20.0
cfg.task.env.task_parameters['max_spawn_dist'] = 4.0
cfg.task.env.task_parameters['min_spawn_dist'] = 3.0
cfg.task.env.task_parameters['kill_dist'] = 6.0
cfg.task.env.task_parameters['kill_after_n_steps_in_tolerance'] = 250
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream, enable_viewport=enable_viewport)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict['seed'] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
# _____Create players (model)_____
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
plot_intermediate = False
eval_multi_agents(cfg, agent, models, horizon, plot_intermediate)
env.close()
if __name__ == '__main__':
parse_hydra_configs()
| 8,938 | Python | 42.604878 | 142 | 0.628664 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/rlgames_train.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import gym
import hydra
import torch
from omegaconf import DictConfig
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import (
retrieve_checkpoint_path,
get_experience,
)
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register(
"RLGPU",
lambda config_name, num_actors, **kwargs: RLGPUEnv(
config_name, num_actors, **kwargs
),
)
env_configurations.register(
"rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env}
)
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self, module_path, experiment_dir):
self.rlg_config_dict["params"]["config"]["train_dir"] = os.path.join(
module_path, "runs"
)
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
print(f" Experiment name: {self.cfg.train.params.config.name}")
# dump config dict
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run(
{
"train": not self.cfg.test,
"play": self.cfg.test,
"checkpoint": self.cfg.checkpoint,
"sigma": None,
}
)
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
# local rank (GPU id) in a current multi-gpu mode
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank (GPU id) in multi-gpu multi-node mode
global_rank = int(os.getenv("RANK", "0"))
if cfg.multi_gpu:
cfg.device_id = local_rank
cfg.rl_device = f"cuda:{local_rank}"
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(
headless,
cfg.enable_livestream,
enable_viewport,
cfg.enable_recording,
cfg.kit_app,
)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport or cfg.enable_recording,
experience=experience,
)
# parse experiment directory
module_path = os.path.abspath(
os.path.join(os.path.dirname(omniisaacgymenvs.__file__))
)
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == "":
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {
"render_modes": ["rgb_array"],
"render_fps": cfg.recording_fps,
}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env,
video_folder=videos_dir,
step_trigger=video_interval,
video_length=video_length,
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
if cfg.wandb_activate and global_rank == 0:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
name=run_name,
resume="allow",
)
torch.cuda.set_device(local_rank)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run(module_path, experiment_dir)
env.close()
if cfg.wandb_activate and global_rank == 0:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
| 7,426 | Python | 34.879227 | 116 | 0.655131 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/rlgames_train_mfp.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.utils.config_utils.path_utils import (
retrieve_checkpoint_path,
get_experience,
)
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.envs.vec_env_rlgames_mfp import VecEnvRLGames
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
from omegaconf import DictConfig
import omniisaacgymenvs
import datetime
import hydra
import torch
import gym
import os
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register(
"RLGPU",
lambda config_name, num_actors, **kwargs: RLGPUEnv(
config_name, num_actors, **kwargs
),
)
env_configurations.register(
"rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env}
)
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self, module_path, experiment_dir):
self.rlg_config_dict["params"]["config"]["train_dir"] = os.path.join(
module_path, "runs"
)
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
print(f" Experiment name: {self.cfg.train.params.config.name}")
# dump config dict
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run(
{
"train": not self.cfg.test,
"play": self.cfg.test,
"checkpoint": self.cfg.checkpoint,
"sigma": None,
}
)
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
# local rank (GPU id) in a current multi-gpu mode
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank (GPU id) in multi-gpu multi-node mode
global_rank = int(os.getenv("RANK", "0"))
if cfg.multi_gpu:
cfg.device_id = local_rank
cfg.rl_device = f"cuda:{local_rank}"
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(
headless,
cfg.enable_livestream,
enable_viewport,
cfg.enable_recording,
cfg.kit_app,
)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport or cfg.enable_recording,
experience=experience,
)
# parse experiment directory
module_path = os.path.abspath(
os.path.join(os.path.dirname(omniisaacgymenvs.__file__))
)
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == "":
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {
"render_modes": ["rgb_array"],
"render_fps": cfg.recording_fps,
}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env,
video_folder=videos_dir,
step_trigger=video_interval,
video_length=video_length,
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
if cfg.wandb_activate and global_rank == 0:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
name=run_name,
resume="allow",
)
torch.cuda.set_device(local_rank)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run(module_path, experiment_dir)
env.close()
if cfg.wandb_activate and global_rank == 0:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
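# Example invocation (hypothetical values; the override names below are Hydra
# config fields used in this script, e.g. headless, test, checkpoint, seed):
#   python <this_script>.py headless=True seed=42
#   python <this_script>.py test=True checkpoint=<path/to/model.pth>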
| 7,431 | Python | 34.730769 | 116 | 0.655094 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/random_policy.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gym
import hydra
from omegaconf import DictConfig
import os
import time
import numpy as np
import torch
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
headless = cfg.headless
render = not headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(
headless,
cfg.enable_livestream,
enable_viewport,
cfg.enable_recording,
cfg.kit_app,
)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport or cfg.enable_recording,
experience=experience,
)
# parse experiment directory
module_path = os.path.abspath(
os.path.join(os.path.dirname(omniisaacgymenvs.__file__))
)
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == "":
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {
"render_modes": ["rgb_array"],
"render_fps": cfg.recording_fps,
}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env,
video_folder=videos_dir,
step_trigger=video_interval,
video_length=video_length,
)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
num_frames = 0
first_frame = True
prev_time = time.time()
while env.simulation_app.is_running():
if env.world.is_playing():
if first_frame:
env.reset()
prev_time = time.time()
first_frame = False
# get upper and lower bounds of action space, sample actions randomly on this interval
action_high = env.action_space.high[0]
action_low = env.action_space.low[0]
actions = (action_high - action_low) * torch.rand(
env.num_envs, env.action_space.shape[0], device=task.rl_device
) - action_high
if time.time() - prev_time >= 1:
print("FPS:", num_frames, "FPS * num_envs:", env.num_envs * num_frames)
num_frames = 0
prev_time = time.time()
else:
num_frames += 1
env.step(actions)
else:
env.world.step(render=render)
env.simulation_app.close()
if __name__ == "__main__":
parse_hydra_configs()
| 5,245 | Python | 35.685314 | 98 | 0.665205 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/run_experiments.py | import subprocess
import argparse
import json
import sys
import os
parser = argparse.ArgumentParser("Processes one or more experiments.")
parser.add_argument(
"--exps",
type=str,
nargs="+",
default=None,
help="List of path to the experiments' config to be ran.",
)
parser.add_argument(
"--isaac_path", type=str, default=None, help="Path to the python exec of isaac."
)
args, unknown_args = parser.parse_known_args()
WORKINGDIR = os.getcwd()
s = WORKINGDIR.split("/")[:3]
s = "/".join(s)
if args.isaac_path is None:
ov_path = os.path.join(s, ".local/share/ov/pkg/isaac_sim-2022.2.1/python.sh")
else:
ov_path = args.isaac_path
for exp in args.exps:
# Load the configuration file
with open(exp, "r") as f:
experiments = json.load(f)
# Loop through each experiment and execute it
for experiment_name, arguments in experiments.items():
# Construct the command to execute the experiment
cmd = [ov_path, "scripts/rlgames_train_mfp.py"]
for arg, value in arguments.items():
cmd.extend(["{}".format(arg) + "=" + str(value)])
print(f'Running command: {" ".join(cmd)}')
# Execute the command
subprocess.run(cmd)
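# Example layout of an experiment configuration file passed via --exps
# (hypothetical experiment names and values; any Hydra override accepted by
# scripts/rlgames_train_mfp.py can be used as a key):
#
# {
#     "gotoxy_baseline": {"headless": true, "seed": 42},
#     "gotoxy_seed_43": {"headless": true, "seed": 43, "wandb_activate": false}
# }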
| 1,222 | Python | 27.44186 | 84 | 0.648118 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/evaluate_policy.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
import hydra
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames_mfp import VecEnvRLGames
from omniisaacgymenvs.utils.plot_experiment import plot_episode_data_virtual
from omniisaacgymenvs.utils.eval_metrics import (
get_GoToXY_success_rate,
get_GoToPose_results,
get_TrackXYVelocity_success_rate,
get_TrackXYOVelocity_success_rate,
)
import pandas as pd
import os
def eval_multi_agents(cfg, horizon):
"""
Evaluate a trained agent for a given number of steps"""
base_dir = "./evaluations/" + cfg.checkpoint.split("/")[1] + "/"
experiment_name = cfg.checkpoint.split("/")[2]
print(f"Experiment name: {experiment_name}")
evaluation_dir = base_dir + experiment_name + "/"
os.makedirs(evaluation_dir, exist_ok=True)
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
agent.restore(cfg.checkpoint)
agent.has_batch_dimension = True
agent.batch_size = 4096
agent.init_rnn()
store_all_agents = (
True # store all agents generated data, if false only the first agent is stored
)
is_done = False
env = agent.env
obs = env.reset()
# if conf parameter kill_thrusters is true, print the thrusters that are killed for each episode
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
ep_data = {"act": [], "obs": [], "rews": []}
total_reward = 0
num_steps = 0
for _ in range(horizon):
actions = agent.get_action(obs["obs"], is_deterministic=True)
obs, reward, done, info = env.step(actions)
if store_all_agents:
ep_data["act"].append(actions.cpu().numpy())
ep_data["obs"].append(obs["obs"]["state"].cpu().numpy())
ep_data["rews"].append(reward.cpu().numpy())
else:
ep_data["act"].append(actions[0].cpu().numpy())
ep_data["obs"].append(obs["obs"]["state"][0].cpu().numpy())
ep_data["rews"].append(reward[0].cpu().numpy())
total_reward += reward[0]
num_steps += 1
is_done = done.any()
ep_data["obs"] = np.array(ep_data["obs"])
ep_data["rews"] = np.array(ep_data["rews"])
ep_data["act"] = np.array(ep_data["act"])
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data["act"] = ep_data["act"] * (1 - killed_thrusters_idxs.cpu().numpy())
# Find the episode where the sum of actions has only zeros (no action) for all the time steps
broken_episodes = [
i
for i in range(0, ep_data["act"].shape[1])
if ep_data["act"][:, i, :].sum() == 0
]
print(broken_episodes)
    broken_episodes = []  # NOTE: this overrides the episodes detected above and disables the filtering below
# Remove episodes that are broken by the environment (IsaacGym bug)
if broken_episodes:
print(f"Broken episodes: {broken_episodes}")
# save in csv the broken episodes
broken_episodes_df = pd.DataFrame(
ep_data[:, broken_episodes, :], index=broken_episodes
)
broken_episodes_df.to_csv(evaluation_dir + "broken_episodes.csv", index=False)
print(f'Ep data shape before: {ep_data["act"].shape}')
for key in ep_data.keys():
ep_data[key] = np.delete(ep_data[key], broken_episodes, axis=1)
print(f'Ep data shape after: {ep_data["act"].shape}')
print(f"\n Episode: rew_sum={total_reward:.2f}, tot_steps={num_steps} \n")
print(f'Episode data obs shape: {ep_data["obs"].shape} \n')
task_flag = ep_data["obs"][0, 0, 5].astype(int)
if task_flag == 0: # GoToXY
success_rate = get_GoToXY_success_rate(ep_data, print_intermediate=True)
elif task_flag == 1: # GoToPose
success_rate = get_GoToPose_results(ep_data)
elif task_flag == 2: # TrackXYVelocity
success_rate = get_TrackXYVelocity_success_rate(
ep_data, print_intermediate=True
)
elif task_flag == 3: # TrackXYOVelocity
success_rate = get_TrackXYOVelocity_success_rate(
ep_data, print_intermediate=True
)
if cfg.headless:
plot_episode_data_virtual(ep_data, evaluation_dir, store_all_agents)
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.checkpoint is None:
print("No checkpoint specified. Exiting...")
return
# customize environment parameters based on model
if "BB" in cfg.checkpoint:
print("Using BB model ...")
cfg.train.params.network.mlp.units = [256, 256]
if "BBB" in cfg.checkpoint:
print("Using BBB model ...")
cfg.train.params.network.mlp.units = [256, 256, 256]
if "AN" in cfg.checkpoint:
print("Adding noise on act ...")
cfg.task.env.add_noise_on_act = True
if "AVN" in cfg.checkpoint:
print("Adding noise on act and vel ...")
cfg.task.env.add_noise_on_act = True
cfg.task.env.add_noise_on_vel = True
if "UF" in cfg.checkpoint:
print("Setting uneven floor in the environment ...")
cfg.task.env.use_uneven_floor = True
cfg.task.env.max_floor_force = 0.25
horizon = 500
cfg.task.env.maxEpisodeLength = horizon + 2
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
# _____Create players (model)_____
# eval_single_agent(cfg_dict, cfg, env)
eval_multi_agents(cfg, horizon)
env.close()
if __name__ == "__main__":
parse_hydra_configs()
| 6,987 | Python | 34.835897 | 122 | 0.643767 |
elharirymatteo/RANS/omniisaacgymenvs/demos/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask, wrap_to_pi
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import tf_combine
import numpy as np
import torch
import math
import omni
import carb
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from omni.kit.viewport.utility import get_viewport_from_window_name
from pxr import Gf, Sdf
class AnymalTerrainDemo(AnymalTerrainTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
max_num_envs = 128
if sim_config.task_config["env"]["numEnvs"] >= max_num_envs:
print(f"num_envs reduced to {max_num_envs} for this demo.")
sim_config.task_config["env"]["numEnvs"] = max_num_envs
sim_config.task_config["env"]["learn"]["episodeLength_s"] = 120
AnymalTerrainTask.__init__(self, name, sim_config, env)
self.add_noise = False
self.knee_threshold = 0.05
self.create_camera()
self._current_command = [0.0, 0.0, 0.0, 0.0]
self.set_up_keyboard()
self._prim_selection = omni.usd.get_context().get_selection()
self._selected_id = None
self._previous_selected_id = None
return
def create_camera(self):
stage = omni.usd.get_context().get_stage()
self.view_port = get_viewport_from_window_name("Viewport")
# Create camera
self.camera_path = "/World/Camera"
self.perspective_path = "/OmniverseKit_Persp"
camera_prim = stage.DefinePrim(self.camera_path, "Camera")
camera_prim.GetAttribute("focalLength").Set(8.5)
coi_prop = camera_prim.GetProperty("omni:kit:centerOfInterest")
if not coi_prop or not coi_prop.IsValid():
camera_prim.CreateAttribute(
"omni:kit:centerOfInterest", Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform
).Set(Gf.Vec3d(0, 0, -10))
self.view_port.set_active_camera(self.perspective_path)
def set_up_keyboard(self):
self._input = carb.input.acquire_input_interface()
self._keyboard = omni.appwindow.get_default_app_window().get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event)
T = 1
R = 1
self._key_to_control = {
"UP": [T, 0.0, 0.0, 0.0],
"DOWN": [-T, 0.0, 0.0, 0.0],
"LEFT": [0.0, T, 0.0, 0.0],
"RIGHT": [0.0, -T, 0.0, 0.0],
"Z": [0.0, 0.0, R, 0.0],
"X": [0.0, 0.0, -R, 0.0],
}
def _on_keyboard_event(self, event, *args, **kwargs):
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._key_to_control:
self._current_command = self._key_to_control[event.input.name]
elif event.input.name == "ESCAPE":
self._prim_selection.clear_selected_prim_paths()
elif event.input.name == "C":
if self._selected_id is not None:
if self.view_port.get_active_camera() == self.camera_path:
self.view_port.set_active_camera(self.perspective_path)
else:
self.view_port.set_active_camera(self.camera_path)
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
self._current_command = [0.0, 0.0, 0.0, 0.0]
def update_selected_object(self):
self._previous_selected_id = self._selected_id
selected_prim_paths = self._prim_selection.get_selected_prim_paths()
if len(selected_prim_paths) == 0:
self._selected_id = None
self.view_port.set_active_camera(self.perspective_path)
elif len(selected_prim_paths) > 1:
print("Multiple prims are selected. Please only select one!")
else:
prim_splitted_path = selected_prim_paths[0].split("/")
if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_":
self._selected_id = int(prim_splitted_path[3][4:])
if self._previous_selected_id != self._selected_id:
self.view_port.set_active_camera(self.camera_path)
self._update_camera()
else:
print("The selected prim was not an Anymal")
if self._previous_selected_id is not None and self._previous_selected_id != self._selected_id:
self.commands[self._previous_selected_id, 0] = np.random.uniform(self.command_x_range[0], self.command_x_range[1])
self.commands[self._previous_selected_id, 1] = np.random.uniform(self.command_y_range[0], self.command_y_range[1])
self.commands[self._previous_selected_id, 2] = 0.0
def _update_camera(self):
base_pos = self.base_pos[self._selected_id, :].clone()
base_quat = self.base_quat[self._selected_id, :].clone()
camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device)
camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos
camera_state = ViewportCameraState(self.camera_path, self.view_port)
eye = Gf.Vec3d(camera_pos[0].item(), camera_pos[1].item(), camera_pos[2].item())
target = Gf.Vec3d(base_pos[0].item(), base_pos[1].item(), base_pos[2].item()+0.6)
camera_state.set_position_world(eye, True)
camera_state.set_target_world(target, True)
def post_physics_step(self):
self.progress_buf[:] += 1
self.refresh_dof_state_tensors()
self.refresh_body_state_tensors()
self.update_selected_object()
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.)
self.check_termination()
if self._selected_id is not None:
self.commands[self._selected_id, :] = torch.tensor(self._current_command, device=self.device)
self.timeout_buf[self._selected_id] = 0
self.reset_buf[self._selected_id] = 0
self.get_states()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.get_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras | 8,841 | Python | 44.577319 | 126 | 0.636127 |
elharirymatteo/RANS/omniisaacgymenvs/doc/domain_randomization.md | # Domain Randomization
Unlike the regular version of OmniIsaacGymEnv, this modified version applies domain randomization
directly inside the task. This is done so that different parameters can receive different levels of noise.
For instance, the state is composed of unnormalized angular values and linear velocity values, which
have very different scales. Furthermore, the domain randomization we apply here is not limited to noise
on actions or observations: we also offer the possibility to randomize the mass of the system,
or to apply forces and torques directly onto the system.
All the parameters used to add domain randomization to the system must be added under the `task.env.disturbances`
flag inside the configuration file. As of today, we support the following disturbances:
- `force_disturbance`: applies a random force at the system origin.
- `torque_disturbance`: applies a random torque at the system origin.
- `mass_disturbance`: changes the mass and center of mass of the system.
- `observations_disturbance`: adds noise onto the observations.
- `actions_disturbance`: adds noise onto the actions.
## Applying disturbances
In the following, we go over the different parameters available for the disturbances and how to set them.
All the disturbances build on top of a scheduler and a sampler.
The scheduler regulates how quickly the disturbances take effect during training.
The sampler randomly picks the amount of disturbance applied to each environment.
A detailed explanation of the schedulers and samplers can be found in the curriculum documentation [LINK].
### Force disturbance
This disturbance applies a force on the system. By default, the force is applied at the root/origin of the body.
This behavior can be adjusted by modifying the body on which the force is applied. When setting the parameters
for the disturbance, the user selects the magnitude of the force, which is then randomly applied in a plane
or on a sphere. Practically, this is done by sampling a radius value (the magnitude) using the scheduler
and sampler. Then a theta value (for a 2D problem), or theta and phi values (for a 3D problem), are sampled
uniformly, projecting the force accordingly.
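To make the sampling scheme concrete, here is a minimal sketch of the 2D case (illustrative only;
the function name and tensor shapes are assumptions, not the repository implementation):
```python
import math

import torch


def sample_planar_forces(magnitudes: torch.Tensor) -> torch.Tensor:
    """Projects curriculum-sampled magnitudes onto random directions in the XY plane.

    Args:
        magnitudes: (num_envs,) tensor of force magnitudes from the curriculum sampler.

    Returns:
        (num_envs, 3) tensor of forces, with a zero Z component for the 2D case.
    """
    theta = torch.rand_like(magnitudes) * 2.0 * math.pi  # uniform direction in the plane
    forces = torch.zeros((magnitudes.shape[0], 3), device=magnitudes.device)
    forces[:, 0] = magnitudes * torch.cos(theta)
    forces[:, 1] = magnitudes * torch.sin(theta)
    return forces
```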
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, the sampler
follows a `truncated_normal` distribution (a normal distribution with hard extrema) and the
scheduler uses a sigmoid growth. At the beginning almost no force is applied;
at the end it is sampled almost uniformly over the \[0, 0.5\] range.
```yaml
force_disturbance:
enable: False # Setting this to True will enable this disturbance
use_sinusoidal_patterns: False # Setting this to True will create none-constant forces.
min_freq: 0.25
max_freq: 3
min_offset: -6
max_offset: 6
# Scheduling and sampling of the disturbance
force_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.5
end_std: 0.5
min_value: 0.0
max_value: 0.5
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
Setting the `use_sinusoidal_patterns` flag to `False` means that each environment has a constant force applied to it.
If this flag is set to `True`, the force magnitude will be modified depending on the position of the system.
This is meant to recreate attraction and repulsion points. The non-constant force means that recurrent networks will struggle more
to reliably estimate the disturbance.
Sinusoidal pattern, freq = 0.25 | Sinusoidal pattern, freq = 3.0
:------------------------------:|:------------------------------:
 | 
Please note that the values for the sinusoidal patterns and the magnitude of the force are updated on an environment reset only.
This means that the magnitude of the force will not evolve through an episode.
### Torque disturbance
This disturbance applies a torque on the system. By default, the torque is applied at the root/origin of the body.
This behavior can be adjusted by modifying the body on which the torque is applied. When setting the parameters
for the disturbance, the user selects the magnitude of the torque, which is then randomly applied in a plane
or on a sphere. Practically, this is done by sampling a radius value (the magnitude) using the scheduler
and sampler. For a 2D problem, this is the only thing needed, as there is only one rotational DoF. For a 3D problem,
theta and phi values are additionally sampled uniformly, projecting the torque accordingly.
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, the sampler
follows a `truncated_normal` distribution (a normal distribution with hard extrema) and the
scheduler uses a sigmoid growth. At the beginning almost no torque is applied;
at the end it is sampled almost uniformly over the [-0.1, 0.1] range.
```yaml
torque_disturbance:
enable: False # Setting this to True will enable this disturbance
use_sinusoidal_patterns: False # Setting this to True will create none-constant forces.
min_freq: 0.25
max_freq: 3
min_offset: -6
max_offset: 6
# Scheduling and sampling of the disturbance
torque_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
min_value: -0.1
max_value: 0.1
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
Setting the `use_sinusoidal_patterns` flag to `False` means that each environment has a constant torque applied to it.
If this flag is set to `True`, the torque magnitude will be modified depending on the position of the system.
This is meant to recreate attraction and repulsion points. The non-constant torque means that recurrent networks will struggle more
to reliably estimate the disturbance.
Sinusoidal pattern, freq = 0.25 | Sinusoidal pattern, freq = 3.0
:------------------------------:|:------------------------------:
 | 
Please note that the values for the sinusoidal patterns and the magnitude of the torque are updated on an environment reset only.
This means that the magnitude of the torque will not evolve through an episode.
### Mass disturbance
The mass disturbance allows randomizing the mass and the CoM of a rigid body. While it is not currently
possible to randomize the CoM of a rigid body inside Omniverse, we work around this by adding two prismatic
joints to the system, at the end of which lies a fixed mass. All of the other elements of
the system are changed to have almost no mass, such that the only meaningful contribution to the total system
mass and CoM comes from this movable body.
To randomize the mass value, a scheduler and sampler are used and the mass is sampled from them directly.
For the CoM, another scheduler and sampler pair is used: a radius is sampled from it, which can then
be used to move the CoM in a 2D plane by uniformly sampling a theta value, or in 3D by uniformly
sampling theta and phi values.
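A minimal sketch of how the sampled radius can be turned into a planar CoM displacement
(illustrative only; the function name is an assumption, not the repository implementation):
```python
import math

import torch


def sample_com_offsets(radii: torch.Tensor) -> torch.Tensor:
    """Converts curriculum-sampled CoM radii into (x, y) displacements
    for the two prismatic joints described above."""
    theta = torch.rand_like(radii) * 2.0 * math.pi  # uniform direction in the plane
    return torch.stack((radii * torch.cos(theta), radii * torch.sin(theta)), dim=-1)
```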
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, we can see
that the mass and the CoM have independent samplers and rates.
```yaml
mass_disturbance:
enable: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the mass disturbance
mass_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 5.32 # Initial mass
      start_std: 0.0001 # Low std ensures the mass will remain constant during warmup
end_mean: 5.32
end_std: 3.0
min_value: 3.32
max_value: 7.32
# Scheduling and sampling of the CoM disturbance
com_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0 # displacement about the resting position of the CoM joints
      start_std: 0.0001 # Low std ensures the CoM will remain constant during warmup
end_mean: 0.25
end_std: 0.25
min_value: 0.0
max_value: 0.25
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
### Observations disturbance
The observations disturbance adds noise onto the different constituent elements of the
observation tensor. The noise can be independently controlled and enabled for 3 types of variables:
- positions (meters)
- velocities (meters/s or radians/s)
- orientation (radians)
For each of them, a scheduler and sampler can be set up, enabling fine control over how the system is exposed
to observation noise during its training.
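As a rough sketch of how such noise can be applied (illustrative only; the slicing of the
observation tensor and the function name are assumptions, not the repository implementation):
```python
import torch


def add_observation_noise(
    obs: torch.Tensor,  # (num_envs, obs_dim) observation buffer
    position_slice: slice,  # columns holding the position entries (assumed layout)
    velocity_slice: slice,  # columns holding the velocity entries (assumed layout)
    position_noise: torch.Tensor,  # (num_envs,) samples from the position curriculum
    velocity_noise: torch.Tensor,  # (num_envs,) samples from the velocity curriculum
) -> torch.Tensor:
    """Adds independently sampled noise to the position and velocity entries.

    For brevity the same per-environment noise is added to every component of a slice;
    per-component noise would simply sample larger tensors.
    """
    obs = obs.clone()
    obs[:, position_slice] += position_noise.unsqueeze(-1)
    obs[:, velocity_slice] += velocity_noise.unsqueeze(-1)
    return obs
```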
Below is an example configuration; note that all the parameters have default values,
so you do not need to set them unless you want to modify them.
```yaml
observations_disturbance:
enable_position_noise: False # Setting this to True will enable this disturbance
enable_velocity_noise: False # Setting this to True will enable this disturbance
enable_orientation_noise: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the position disturbance
position_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.03
min_value: -0.015
max_value: 0.015
# Scheduling and sampling of the velocity disturbance
velocity_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.03
min_value: -0.015
max_value: 0.015
# Scheduling and sampling of the orientation disturbance
orientation_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.05
min_value: -0.025
max_value: 0.025
```
Setting the corresponding `enable_*` flags to `True` is required for the noise to be applied. If a flag is left to `False`,
that noise will not be applied to the platform.
### Actions disturbance
The actions disturbance adds noise onto the actions sent by the agent. In our case this is done by
adding (or removing) some force to the output of the thrusters. This should be scaled according to the maximum
thrust that your system is capable of. Like all previous disturbances, it comes with a scheduler and
a sampler.
Below is an example configuration; note that all the parameters have default values,
so you do not need to set them unless you want to modify them.
```yaml
actions_disturbance:
enable: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the disturbance
action_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.1
min_value: -0.05
max_value: 0.05
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
## Adding new disturbances
To add new disturbances, we recommend adding them inside `tasks/virtual_floating_platform/MFP2D_disturbances.py`,
with their default parameters given inside `tasks/virtual_floating_platform/MFP2D_disturbances_parameters.py`, or their 3D counterparts.
### Creating a new set of parameters
To create a new set of parameters for a disturbance, you should create a dataclass that comes with a scheduler and a sampler.
An example of such a class is given below:
```python
@dataclass
class NoisyActionsParameters:
"""
This class provides an interface to adjust the hyperparameters of the action noise.
"""
# Note how the default factory is an empty dict. This is leveraged to
# enable automatic building of the CurriculumParameters.
action_curriculum: CurriculumParameters = field(default_factory=dict)
# Here you could add any other parameters as long as they have a
# default type and value.
enable: bool = False
def __post_init__(self):
# We transform the dictionary into a CurriculumParameter object.
        # This action automatically converts the dictionary.
        # Note though that this does not support unknown keys.
        # I.e. if the user adds a key that does not exist, the
# dataclass will complain.
self.action_curriculum = CurriculumParameters(**self.action_curriculum)
```
This very simple class creates the required parameters for the scheduler.
You can of course add your own set of parameters. Do not forget to specify their type and
assign a default value. This is important, as the disturbance class should be instantiable
even if no parameters are given.
To wrap up the parameters part, you will need to add this disturbance to the list of
allowed disturbances. This can be done by adding it to the `DisturbancesParameters` class
at the end of the `tasks/virtual_floating_platform/MFP2D_disturbances_parameters.py`.
```python
@dataclass
class DisturbancesParameters:
[...] # Some more disturbances
actions_disturbance: NoisyActionsParameters = field(default_factory=dict)
def __post_init__(self):
[...] # Some more initializations
# Here we build the object, note that this will automatically retrieve everything under
# `task.env.disturbances.actions_disturbance` inside the configuration file.
self.actions_disturbance = NoisyActionsParameters(**self.actions_disturbance)
```
### Creating a new disturbance
With the parameters created, you can now create the class that will implement your new
disturbance. Below, we provide an example of how this could be done.
```python
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
parameters: NoisyActionsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyActionsParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
        # Here the parameters for the sampler are loaded and transformed
        # into an object that can be used to sample different noises.
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
# This is used in case you'd want to access other parameters.
self.parameters = parameters
        # These are used to know how many environments are used, and the device
        # on which the tensors must be stored.
self._num_envs = num_envs
self._device = device
def add_noise_on_act(self, act: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the actions of the robot.
Args:
act (torch.Tensor): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The actions of the robot with noise.
"""
if self.parameters.enable:
# Note that this depends on the current step / epoch of the training
# this is particularly important for the scheduler as it uses this
# information to adjust its rate
act += self.action_sampler.sample(self._num_envs, step, device=self._device)
return act
```
With this done, we now need to instantiate our disturbance. This can be done by adding it
to the `Disturbances` class at the end of `MFP2D_disturbances.py` (or its 3D counterpart),
as shown below:
```python
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
# Loads all the parameters after `task.env.disturbances` inside the configuration file.
self.parameters = DisturbancesParameters(**parameters)
[...] # Some more initialization
# This builds the object
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
```
With all this done, your disturbance can be parametrized, and instantiated!
All that's left to do is to apply it!
### Applying domain randomization
First we need to instantiate the `Disturbances` class.
This can be done as shown below:
```python
# Get all the parameters inside the task config as a dict
self._task_cfg = sim_config.task_config
# Get the dict related to the disturbances
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Initializes the disturbances
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
```
This should be done in the `__init__` method of your task.
With this done you can apply it as you see fit.
In this example this would be done like so:
```python
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts)
``` | 19,160 | Markdown | 41.019737 | 143 | 0.719729 |
elharirymatteo/RANS/omniisaacgymenvs/doc/curriculum.md | # Curriculum
To prevent penalties, disturbances, or tasks from being too hard from the beginning,
we use simple fixed curriculum strategies. Here, fixed denotes that the rate at which
the task becomes harder does not dynamically adapt to the agent's current capacities.
Instead, it relies on the current step to set the difficulty accordingly.
## Parametrizing the curriculum
In the following we present how to set up the different components of our curriculum objects.
A curriculum object is always composed of a scheduler and a sampler:
```yaml
curriculum_parameters:
rate_parameters:
[...]
sampler_parameters:
[...]
```
The objects come with default parameters which result in the rate/scheduler always outputting 1.0.
### Setting up the scheduling/rate of the curriculum
To set the schedule, or rate, of the curriculum we provide the following functions:
- a `sigmoid` style growth.
- a `power` style growth.
- a `linear` style growth.
- `none`, the scheduler always returns 1.0.
Below, we provide a sample configuration for each of these functions.
```yaml
rate_parameters: # Sigmoid
function: sigmoid
start: 0
end: 1000
extent: 4.5 # Must be larger than 0.
```
```yaml
rate_parameters: # Power
function: power
start: 0
end: 1000
alpha: 2.0 # Can be smaller than 1! Must be larger than 0.
```
```yaml
rate_parameters: # Linear
function: linear
start: 0
end: 1000
```
```yaml
rate_parameters: # None
function: none
```
How the different parameters impact the scheduling of the curriculum is shown in the figure below.
Note that once the scheduler reaches 1.0, the highest difficulty has been reached.
The value output by the scheduler always lies within \[0, 1\].

We can see that for the `sigmoid`, a large extent, for instance 12, generates a sigmoid with a steeper slope,
while smaller extents get closer to the `linear` function. Similarly, a `power` function with parameter `alpha`
set to 1.0 generates exactly the same curve as the `linear` function. An `alpha` larger than 1.0
has a small slope at the beginning and a steep slope at the end, while an `alpha` smaller than 1.0 behaves
the opposite way.
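As a rough illustration of these shapes (a sketch, not the exact implementation found in
`curriculum_helpers.py`), the rates can be thought of as follows, with the progress normalized
between `start` and `end`:
```python
import math


def growth_rate(
    step: float,
    start: float,
    end: float,
    function: str = "linear",
    alpha: float = 2.0,
    extent: float = 4.5,
) -> float:
    """Illustrative growth rates; every branch returns a value in [0, 1]."""
    # Normalized progress, clamped to [0, 1] (assumes start < end).
    t = min(max((step - start) / (end - start), 0.0), 1.0)
    if function == "linear":
        return t
    if function == "power":
        return t**alpha
    if function == "sigmoid":
        # Logistic curve rescaled so that it spans exactly [0, 1] on [start, end].
        raw = 1.0 / (1.0 + math.exp(-extent * (2.0 * t - 1.0)))
        low = 1.0 / (1.0 + math.exp(extent))
        high = 1.0 / (1.0 + math.exp(-extent))
        return (raw - low) / (high - low)
    return 1.0  # "none"
```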
### Setting up the sampler of the curriculum
As of now, we provide 3 basic distributions to sample from:
- `uniform`, a uniform distribution between a max and a min.
- `normal`, a normal distribution around a mean with a given sigma.
- `truncated_normal`, a normal distribution with hard boundaries.
Below, we provide 3 sample configurations:
```yaml
sampling_parameters: # Uniform
distribution: uniform
start_min_value: -0.1
start_max_value: 0.1
end_min_value: -0.3
end_max_value: 0.3
```
```yaml
sampling_parameters: # Normal
distribution: normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
```
```yaml
sampling_parameters: # Truncated normal
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
min_value: -0.1
max_value: 0.1
```
In the above example, we can see that there are always start and end parameters, be it for the mean, the std,
or the max and min values of the uniform distribution. Start denotes the distribution as it will be when the
scheduler/rate output is 0. End denotes the distribution as it will be when the scheduler/rate output is 1.
In between, the distribution transitions from one to the other following the function given to the scheduler.
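As a minimal sketch of what this interpolation looks like for the `uniform` distribution
(illustrative; in the repository this logic lives inside the `CurriculumSampler` class):
```python
import torch


def sample_uniform(
    n: int,
    rate: float,  # scheduler output in [0, 1]
    start_min: float,
    start_max: float,
    end_min: float,
    end_max: float,
    device: str = "cpu",
) -> torch.Tensor:
    """Interpolates the bounds with the scheduler rate, then samples uniformly."""
    min_value = start_min + (end_min - start_min) * rate
    max_value = start_max + (end_max - start_max) * rate
    return torch.rand((n), device=device) * (max_value - min_value) + min_value
```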
## Modifying the curriculum
In the following we explain how to add new samplers and schedulers to the current set
of curricula. In the future we plan on expanding the curriculum to support non-fixed steps.
### Modifying the scheduler
Adding a new scheduler is relatively straightforward.
Create a new function inside `tasks.virtual_floating_platform.curriculum_helpers.py`.
Make sure this function has the following header:
```python
def your_new_function(step: int = 0, start: int = 0, end: int = 1000, **kwargs) -> float
```
Note that in practice step can be a float.
Below is our linear function:
```python
def curriculum_linear_growth(step: int = 0, start: int = 0, end: int = 1000, **kwargs) -> float:
"""
Generates a curriculum with a linear growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = current / (relative_end)
return rate
```
Then add this function to the `RateFunctionDict`; here is an example:
```python
RateFunctionDict = {
"none": lambda step, start, end, **kwargs: 1.0,
"linear": curriculum_linear_growth,
"sigmoid": curriculum_sigmoid_growth,
"pow": curriculum_pow_growth,
}
```
Finally, to call your own function, use the key you set inside the dictionary as
the `function` parameter in the rate/scheduler config.
But what if you wanted to add more parameters? In theory, there is an automatic parameter collector.
That means that as long as you create functions with named arguments, and these names
match the names of the parameters given to the dataclass, everything should be seamless, with the
notable exception of functions. Below is the automatic parameter collector:
```python
self.kwargs = {
key: value for key, value in self.__dict__.items() if not isfunction(value)
}
```
This is then processed inside the following:
```python
def get(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.function(
step=step,
**self.kwargs,
)
```
### Modifying the sampler
A new sampler can be added in a similar fashion.
Create a new function inside `tasks.virtual_floating_platform.curriculum_helpers.py`.
This function must follow this header style:
```python
def your_new_function(n: int = 1, device: str = "cpu", **kwargs) -> torch.Tensor:
```
You can add arguments as you see fit.
Below is our implementation of the uniform sampling:
```python
def uniform(
n: int = 1,
min_value: float = 0.0,
max_value: float = 1.1,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a uniform distribution.
Args:
n (int, optional): Number of samples to generate.
min_value (float, optional): Minimum value of the uniform distribution.
max_value (float, optional): Maximum value of the uniform distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a uniform distribution.
"""
return torch.rand((n), device=device) * (max_value - min_value) + min_value
```
Proceed to add this function inside the `SampleFunctionDict`:
```python
SampleFunctionDict = {
"uniform": uniform,
"normal": normal,
"truncated_normal": truncated_normal,
}
```
With this done, all that's left is to define the routine that updates the different parameters
given the rate. While this operation could be automated, this would likely make the overall
code less flexible. Thus, we require updating the `CurriculumSampler` class.
Inside the `sample` function, you will need to add an if statement that matches your distribution's
name. An example is given below:
```python
elif self.sp.distribution == "normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(n=n, mean=mean, std=std, device=device)
``` | 7,926 | Markdown | 30.835341 | 113 | 0.713348 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/__init__.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
| 290 | Python | 28.099997 | 79 | 0.648276 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/run_mujoco.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omegaconf import DictConfig, OmegaConf
import hydra
import os
from omniisaacgymenvs.mujoco_envs.controllers.discrete_LQR_controller import (
DiscreteController,
parseControllerConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import (
RLGamesModel,
)
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import (
MuJoCoFloatingPlatform,
parseEnvironmentConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.hl_controllers import hlControllerFactory
@hydra.main(config_name="config_mujoco", config_path="../cfg")
def run(cfg: DictConfig):
    """
Run the simulation.
Args:
cfg (DictConfig): A dictionary containing the configuration of the simulation.
"""
# print_dict(cfg)
cfg_dict = omegaconf_to_dict(cfg)
# Create the environment
env = MuJoCoFloatingPlatform(**parseEnvironmentConfig(cfg_dict))
# Get the low-level controller
if cfg_dict["use_rl"]:
assert os.path.exists(
cfg_dict["checkpoint"]
), "A correct path to a neural network must be provided to infer an RL agent."
ll_controller = RLGamesModel(
config=cfg_dict["train"], model_path=cfg_dict["checkpoint"]
)
else:
ll_controller = DiscreteController(**parseControllerConfig(cfg_dict, env))
dt = cfg_dict["task"]["sim"]["dt"]
# Get the high-level controller
hl_controller = hlControllerFactory(cfg_dict, ll_controller, dt)
env.runLoop(hl_controller)
if __name__ == "__main__":
run()
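# Example invocation (hypothetical values; overrides come from cfg/config_mujoco.yaml,
# e.g. use_rl, checkpoint, and the controller.* fields parsed above):
#   python run_mujoco.py use_rl=True checkpoint=<path/to/model.pth>
#   python run_mujoco.py use_rl=False controller.control_type=LQR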
| 1,987 | Python | 29.121212 | 87 | 0.698037 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/discrete_LQR_controller.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Callable, NamedTuple, Optional, Union, List, Dict
from scipy.linalg import solve_discrete_are
import numpy as np
import mujoco
import cvxpy as cp
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import (
MuJoCoFloatingPlatform,
)
def parseControllerConfig(
cfg_dict: Dict, env: MuJoCoFloatingPlatform
) -> Dict[str, Union[List[float], int, float, str, MuJoCoFloatingPlatform]]:
"""
Parse the controller configuration.
Args:
cfg_dict (Dict): A dictionary containing the configuration.
env (MuJoCoFloatingPlatform): A MuJoCoFloatingPlatform object.
Returns:
Dict[str, Union[List[float], int, float, str, MuJoCoFloatingPlatform]]: A dictionary containing the parsed configuration.
"""
config = {}
config["target_position"] = [0, 0, 0]
config["target_orientation"] = [1, 0, 0, 0]
config["target_linear_velocity"] = [0, 0, 0]
config["target_angular_velocity"] = [0, 0, 0]
config["thruster_count"] = (
cfg_dict["task"]["env"]["platform"]["configuration"]["num_anchors"] * 2
)
config["dt"] = cfg_dict["task"]["sim"]["dt"]
config["Mod"] = env
config["control_type"] = cfg_dict["controller"]["control_type"]
config["Q"] = cfg_dict["controller"]["Q"]
config["R"] = cfg_dict["controller"]["R"]
config["W"] = cfg_dict["controller"]["W"]
return config
class DiscreteController:
"""
Discrete pose controller for the Floating Platform."""
def __init__(
self,
target_position: List[float] = [0, 0, 0],
target_orientation: List[float] = [1, 0, 0, 0],
target_linear_velocity: List[float] = [0, 0, 0],
target_angular_velocity: List[float] = [0, 0, 0],
thruster_count: int = 8,
dt: float = 0.02,
Mod: MuJoCoFloatingPlatform = None,
control_type: str = "LQR",
Q: List[float] = [1, 1, 5, 5, 1, 1, 1],
W: List[float] = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
R: List[float] = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
**kwargs
) -> None:
"""
Initialize the discrete controller.
Args:
target_position (List[float], optional): A list containing the target position. Defaults to [0,0,0].
target_orientation (List[float], optional): A list containing the target orientation. Defaults to [1,0,0,0].
target_linear_velocity (List[float], optional): A list containing the target linear velocity. Defaults to [0,0,0].
target_angular_velocity (List[float], optional): A list containing the target angular velocity. Defaults to [0,0,0].
thruster_count (int, optional): An integer containing the number of thrusters. Defaults to 8.
dt (float, optional): A float containing the time step. Defaults to 0.02.
Mod (MuJoCoFloatingPlatform, optional): A MuJoCoFloatingPlatform object. Used to compute the linearized system matrices. Defaults to None.
control_type (str, optional): A string containing the type of control. Either 'H-inf' or 'LQR'. Defaults to 'LQR'.
Q (List[float], optional): A list containing the state cost matrix. Defaults to [1,1,5,5,1,1,1].
W (List[float], optional): A list containing the disturbance weight matrix. Defaults to [0.01,0.01,0.01,0.01,0.01,0.01,0.01].
R (List[float], optional): A list containing the control cost matrix. Defaults to [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1].
**kwargs: Additional arguments."""
self.thruster_count = thruster_count
self.thrusters = np.zeros(thruster_count) # Initialize all thrusters to off
self.dt = dt
self.FP = Mod
self.control_type = control_type
self.opti_states = None
# Instantiate goals to be null
self.target_position = target_position
self.target_orientation = target_orientation
self.target_linear_velocity = target_linear_velocity
self.target_angular_velocity = target_angular_velocity
# Control parameters
# State cost matrix
self.Q = np.diag(Q)
# Control cost matrix
self.R = np.diag(R)
# Disturbance weight matrix
self.W = np.diag(W)
self.findGains()
def findGains(self, r0=None) -> None:
"""
Find the gains for the controller.
Args:
r0 (np.ndarray, optional): An array containing the initial state. Defaults to None.
"""
# Compute linearized system matrices A and B based on your system dynamics
self.A, self.B = self.computeLinearizedSystem(
r0
) # Compute linearized system matrices
self.makePlanarCompatible()
if self.control_type == "H-inf":
self.computeHInfinityGains()
elif self.control_type == "LQR":
self.computeLQRGains()
else:
raise ValueError("Invalid control type specified.")
def computeLQRGains(self) -> None:
"""
Compute the LQR gains."""
self.P = solve_discrete_are(self.A, self.B, self.Q, self.R)
self.L = (
np.linalg.inv(self.R + self.B.T @ self.P @ self.B)
@ self.B.T
@ self.P
@ self.A
)
def computeHInfinityGains(self) -> None:
"""
Compute the H-infinity gains."""
X = cp.Variable((self.A.shape[0], self.A.shape[0]), symmetric=True)
gamma = cp.Parameter(nonneg=True) # Define gamma as a parameter
regularization_param = 1e-6
# Regularize matrix using the pseudo-inverse
A_regularized = self.A @ np.linalg.inv(
self.A.T @ self.A + regularization_param * np.eye(self.A.shape[1])
)
B_regularized = self.B @ np.linalg.inv(
self.B.T @ self.B + regularization_param * np.eye(self.B.shape[1])
)
# Define the constraints using regularized matrices
constraints = [X >> np.eye(A_regularized.shape[1])] # X >= 0
# Define a relaxation factor
relaxation_factor = 1 # Adjust this value based on your experimentation
# Linear matrix inequality constraint with relaxation
constraints += [
cp.bmat(
[
[
A_regularized.T @ X @ A_regularized - X + self.Q,
A_regularized.T @ X @ B_regularized,
],
[
B_regularized.T @ X @ A_regularized,
B_regularized.T @ X @ B_regularized
- (gamma**2)
* relaxation_factor
* np.eye(B_regularized.shape[1]),
],
]
)
<< 0
]
objective = cp.Minimize(gamma)
prob = cp.Problem(objective, constraints)
# Set the value of the parameter gamma
gamma.value = 1.0 # You can set the initial value based on your problem
prob.solve()
if prob.status == cp.OPTIMAL:
self.L = (
np.linalg.inv(
self.B.T @ X.value @ self.B
+ gamma.value**2 * np.eye(self.B.shape[1])
)
@ self.B.T
@ X.value
@ self.A
)
            # breakpoint()  # debugging leftover: left commented out so this branch does not halt execution
else:
raise Exception("H-infinity control design failed.")
def setTarget(
self,
target_position: List[float] = None,
target_heading: List[float] = None,
target_linear_velocity: List[float] = None,
target_angular_velocity: List[float] = None,
) -> None:
"""
Sets the target position, orientation, and velocities.
Args:
target_position (List[float], optional): A list containing the target position. Defaults to None.
target_heading (List[float], optional): A list containing the target heading. Defaults to None.
target_linear_velocity (List[float], optional): A list containing the target linear velocity. Defaults to None.
target_angular_velocity (List[float], optional): A list containing the target angular velocity. Defaults to None.
"""
if target_position is not None:
self.target_position = np.array(target_position)
if target_heading is not None:
self.target_orientation = np.array(target_heading)
if target_linear_velocity is not None:
self.target_linear_velocity = np.array(target_linear_velocity)
if target_angular_velocity is not None:
self.target_angular_velocity = np.array(target_angular_velocity)
def computeLinearizedSystem(self, r0: np.ndarray = None) -> None:
"""
Compute linearized system matrices A and B.
With A the state transition matrix.
With B the control input matrix.
Args:
r0 (np.ndarray, optional): An array containing the initial state. Defaults to None.
"""
if r0 is None:
r0 = np.concatenate(
(
self.FP.data.qpos[:3],
self.FP.data.qvel[:3],
self.FP.data.qpos[3:],
self.FP.data.qvel[3:],
),
axis=None,
)
t_int = 0.2 # time-interval at 5Hz
A = self.f_STM(r0, t_int, self.FP.model, self.FP.data, self.FP.body_id)
B = self.f_B(
r0, t_int, self.FP.model, self.FP.data, self.FP.body_id, self.thruster_count
)
return A, B
def makePlanarCompatible(self) -> None:
"""
Remove elements of the STM to make it planar compatible.
Required states #[x,y,vx,vy,qw,qz,wz]."""
a = self.A
b = self.B
a = np.delete(a, 11, axis=0) # Remove row: wy
a = np.delete(a, 10, axis=0) # Remove row: wx
a = np.delete(a, 8, axis=0) # Remove row: qy
a = np.delete(a, 7, axis=0) # Remove row: qz
a = np.delete(a, 5, axis=0) # Remove row: vz
a = np.delete(a, 2, axis=0) # Remove row: z
a = np.delete(a, 11, axis=1) # Remove col: wy
a = np.delete(a, 10, axis=1) # Remove col: wx
a = np.delete(a, 8, axis=1) # Remove col: qy
a = np.delete(a, 7, axis=1) # Remove col: qz
a = np.delete(a, 5, axis=1) # Remove col: vz
a = np.delete(a, 2, axis=1) # Remove col: z
b = np.delete(b, 11, axis=0) # Remove row: wy
b = np.delete(b, 10, axis=0) # Remove row: wx
b = np.delete(b, 8, axis=0) # Remove row: qy
b = np.delete(b, 7, axis=0) # Remove row: qz
b = np.delete(b, 5, axis=0) # Remove row: vz
b = np.delete(b, 2, axis=0) # Remove row: z
b[b == 0] = 1e-4
self.A = a
self.B = b
return None
def f_STM(self, r0: np.ndarray, t_int: float, model, data, body_id) -> None:
"""
Identify A matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id."""
IC_temp0 = r0
force = [0.0, 0.0, 0.0]
torque = [0.0, 0.0, 0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
current_time = data.time
for k in range(np.size(r0)):
            delta = max(1e-3, abs(IC_temp0[k]) / 100)  # step scaled to the state magnitude
delta_vec = np.zeros(np.size(r0))
delta_vec[k] = delta
IC_temp_pos = np.add(IC_temp0, delta_vec)
IC_temp_neg = np.subtract(IC_temp0, delta_vec)
# Positive direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_pos[0:3]
data.qvel[:3] = IC_temp_pos[3:6]
data.qpos[3:] = IC_temp_pos[6:10]
data.qvel[3:] = IC_temp_pos[10:13]
mujoco.mj_applyFT(
model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied
)
mujoco.mj_step(model, data)
ans_pos = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# print('final_time', data.time)
# Negative direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_neg[0:3]
data.qvel[:3] = IC_temp_neg[3:6]
data.qpos[3:] = IC_temp_neg[6:10]
data.qvel[3:] = IC_temp_neg[10:13]
mujoco.mj_applyFT(
model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied
)
mujoco.mj_step(model, data)
ans_neg = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# print('final_time', data.time)
if k == 0:
STM = np.subtract(ans_pos, ans_neg) / (2 * delta)
else:
temp = np.subtract(ans_pos, ans_neg) / (2 * delta)
STM = np.vstack((STM, temp))
STM = STM.transpose()
STM[6, 6] = 1.0
data.time = current_time
model.opt.timestep = default_tstep
return STM
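    # Note on f_STM: column k of the returned matrix is a central-difference
    # estimate of the sensitivity of the propagated state to state component k,
    #   STM[:, k] ~= (step(x0 + d * e_k) - step(x0 - d * e_k)) / (2 * d),
    # where step() advances MuJoCo by a single integration step of length t_int.
    # STM[6, 6] is pinned to 1.0 by hand, presumably because perturbing the
    # quaternion scalar part alone does not yield a meaningful normalized
    # quaternion variation.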
    def f_STM_analytical(
        self, r0: np.ndarray, t_int: float, model, data, body_id
    ) -> np.ndarray:
"""
Identify A matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id."""
IC_temp0 = r0
STM = np.eye(np.size(r0))
w1 = IC_temp0[10]
w2 = IC_temp0[11]
w3 = IC_temp0[12]
qw = IC_temp0[6]
qx = IC_temp0[7]
qy = IC_temp0[8]
qz = IC_temp0[9]
STM[0, 3] = t_int
STM[1, 4] = t_int
STM[2, 5] = t_int
STM[6, 6] = 1
STM[6, 7] = -0.5 * w1 * t_int
STM[6, 8] = -0.5 * w2 * t_int
STM[6, 9] = -0.5 * w3 * t_int
STM[6, 10] = -0.5 * qx * t_int
STM[6, 11] = -0.5 * qy * t_int
STM[6, 12] = -0.5 * qz * t_int
STM[7, 6] = 0.5 * w1 * t_int
STM[7, 7] = 1
STM[7, 8] = 0.5 * w3 * t_int
STM[7, 9] = -0.5 * w2 * t_int
STM[7, 10] = 0.5 * qw * t_int
STM[7, 11] = -0.5 * qz * t_int
STM[7, 12] = 0.5 * qy * t_int
STM[8, 6] = 0.5 * w2 * t_int
STM[8, 7] = -0.5 * w3 * t_int
STM[8, 8] = 1
STM[8, 9] = 0.5 * w1 * t_int
STM[8, 10] = 0.5 * qz * t_int
STM[8, 11] = 0.5 * qw * t_int
STM[8, 12] = -0.5 * qx * t_int
STM[9, 6] = 0.5 * w3 * t_int
STM[9, 7] = -0.5 * w2 * t_int
STM[9, 8] = -0.5 * w1 * t_int
STM[9, 9] = 1
STM[9, 10] = -0.5 * qy * t_int
STM[9, 11] = 0.5 * qx * t_int
STM[9, 12] = 0.5 * qw * t_int
return STM
    def f_B(
        self, r0: np.ndarray, t_int: float, model, data, body_id, number_thrust: int
    ) -> np.ndarray:
"""
Identify B matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id.
number_thrust (int): An integer containing the number of thrusters."""
IC_temp0 = r0
force = [0.0, 0.0, 0.0]
torque = [0.0, 0.0, 0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
u = np.zeros(number_thrust)
current_time = data.time
for k in range(np.size(u)):
delta = 0.01
delta_vec = np.zeros(np.size(u))
delta_vec[k] = delta
# Positive direction
u_plus = np.add(u, delta_vec)
force_plus = u_plus[k] * self.FP.forces[k] # * np.sqrt(0.5)
rmat = data.xmat[body_id].reshape(3, 3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_plus = np.matmul(
rmat, force_plus
) # Rotate the force to the body frame.
p2 = (
np.matmul(rmat, self.FP.positions[k]) + p
) # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(
model, data, force_plus, torque, p2, body_id, data.qfrc_applied
) # Apply the force.
mujoco.mj_step(model, data)
ans_pos = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# Negative direction
u_minus = np.subtract(u, delta_vec)
            force_minus = u_minus[k] * self.FP.forces[k]  # * np.sqrt(0.5), kept consistent with force_plus above
rmat = data.xmat[body_id].reshape(3, 3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_minus = np.matmul(
rmat, force_minus
) # Rotate the force to the body frame.
p2 = (
np.matmul(rmat, self.FP.positions[k]) + p
) # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(
model, data, force_minus, torque, p2, body_id, data.qfrc_applied
) # Apply the force.
mujoco.mj_step(model, data)
ans_neg = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
if k == 0:
B = np.subtract(ans_pos, ans_neg) / (2 * delta)
else:
temp = np.subtract(ans_pos, ans_neg) / (2 * delta)
B = np.vstack((B, temp))
B = B.transpose()
model.opt.timestep = default_tstep
data.time = current_time
return B
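    # Note on f_B: each column k is the central-difference sensitivity of the
    # propagated state with respect to thruster command k, obtained by applying
    # a +/- delta command on that thruster for one integration step of length t_int.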
    def controlCost(self) -> np.ndarray:
        """
        Compute the control input from the current feedback gain.
        Returns:
            np.ndarray: An array containing the commanded control input."""
        # Despite its name, this method evaluates the feedback law (plus the
        # stored disturbance for H-inf) rather than a scalar cost.
if self.control_type == "H-inf":
control_input = np.array(self.L @ self.state) + self.disturbance
elif self.control_type == "LQR":
self.findGains(r0=self.opti_states)
control_input = np.array(self.L @ self.state)
else:
raise ValueError("Invalid control type specified.")
return control_input
def makeState4Controller(self, state: Dict[str, np.ndarray]) -> List[np.ndarray]:
"""
Make the state compatible with the controller.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state.
Returns:
List[np.ndarray]: A list containing the current position, current orientation, current linear velocity, and current angular velocity.
"""
current_position = state["position"]
current_position[-1] = 0
current_orientation = state["quaternion"]
current_linear_velocity = state["linear_velocity"]
current_angular_velocity = state["angular_velocity"]
return (
current_position,
current_orientation,
current_linear_velocity,
current_angular_velocity,
)
def getAction(
self,
obs_state: Dict[str, np.ndarray],
is_deterministic: bool = True,
        mute: bool = True,
        **kwargs,
) -> np.ndarray:
"""
Get the action.
Args:
obs_state (Dict[str, np.ndarray]): A dictionary containing the state.
is_deterministic (bool, optional): A boolean containing whether the action is deterministic. Defaults to True.
Returns:
np.ndarray: An array containing the action.
"""
return self.update(*self.makeState4Controller(obs_state))
def update(
self,
current_position: np.ndarray,
current_orientation: np.ndarray,
current_velocity: np.ndarray,
current_angular_velocity: np.ndarray,
disturbance: np.ndarray = None,
) -> None:
"""
Update the controller.
Args:
current_position (np.ndarray): An array containing the current position.
current_orientation (np.ndarray): An array containing the current orientation.
current_velocity (np.ndarray): An array containing the current linear velocity.
current_angular_velocity (np.ndarray): An array containing the current angular velocity.
disturbance (np.ndarray, optional): An array containing the disturbance. Defaults to None.
"""
# Calculate errors
position_error = self.target_position - current_position
orientation_error = self.target_orientation - current_orientation
velocity_error = self.target_linear_velocity - current_velocity
angvel_error = self.target_angular_velocity - current_angular_velocity
self.opti_states = np.concatenate(
(
current_position,
current_velocity,
current_orientation,
current_angular_velocity,
),
axis=None,
)
        if disturbance is None:
            disturbance = np.zeros(8)  # nominally zero; scale a random vector here to inject disturbances
self.disturbance = disturbance
# Combine errors into the state vector (planar)
self.state = np.array(
[
position_error[0],
position_error[1],
velocity_error[0],
velocity_error[1],
orientation_error[0],
orientation_error[3],
angvel_error[2],
]
)
# Optimal U
original_u = self.controlCost()
# filter to zero values of u that are less than 0.5
intermediate_u = np.where(np.abs(original_u) < 0.25, 0.0, original_u)
if np.max(intermediate_u) == 0.0:
normalized_array = np.zeros(self.thruster_count)
else:
normalized_array = (intermediate_u - np.min(intermediate_u)) / (
np.max(intermediate_u) - np.min(intermediate_u)
)
        # Round the normalized values to a binary command: after subtracting the
        # 0.25 bias, only entries above ~0.75 switch their thruster on
        final_U = np.round(normalized_array - 0.25).astype(int)
        self.thrusters = final_U
return self.thrusters
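# Minimal usage sketch for the controller class defined above (hedged: the
# constructor arguments are documented earlier in this file and omitted here;
# `env_state` stands for any dict with "position", "quaternion",
# "linear_velocity" and "angular_velocity" entries):
#
#   controller.setTarget(target_position=[1.0, 0.5, 0.0],
#                        target_heading=[1.0, 0.0, 0.0, 0.0])
#   thrusters = controller.getAction(env_state)  # binary thruster commands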
| 23,820 | Python | 35.760802 | 150 | 0.540638 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/__init__.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
| 290 | Python | 28.099997 | 79 | 0.648276 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/RL_games_model_4_mujoco.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict
from gym import spaces
import numpy as np
import torch
import yaml
from rl_games.algos_torch.players import (
BasicPpoPlayerContinuous,
BasicPpoPlayerDiscrete,
)
class RLGamesModel:
"""
This class implements a wrapper for the RLGames model.
It is used to interface the RLGames model with the MuJoCo environment.
It currently only supports PPO agents."""
def __init__(
self,
config: Dict = None,
config_path: str = None,
model_path: str = None,
**kwargs
):
"""
Initialize the RLGames model.
Args:
config (Dict, optional): A dictionary containing the configuration of the RLGames model. Defaults to None.
config_path (str, optional): A string containing the path to the configuration file of the RLGames model. Defaults to None.
model_path (str, optional): A string containing the path to the model of the RLGames model. Defaults to None.
**kwargs: Additional arguments."""
self.obs = dict(
{
"state": torch.zeros((1, 10), dtype=torch.float32, device="cuda"),
"transforms": torch.zeros(5, 8, device="cuda"),
"masks": torch.zeros(8, dtype=torch.float32, device="cuda"),
"masses": torch.zeros(3, dtype=torch.float32, device="cuda"),
}
)
# Build model using the configuration files
if config is None:
self.loadConfig(config_path)
else:
self.cfg = config
self.buildModel()
self.restore(model_path)
# Default target and task values
self.mode = 0
self.position_target = [0, 0, 0]
self.orientation_target = [1, 0, 0, 0]
self.linear_velocity_target = [0, 0, 0]
self.angular_velocity_target = [0, 0, 0]
self.obs_state = torch.zeros((1, 10), dtype=torch.float32, device="cuda")
def buildModel(self) -> None:
"""
Build the RLGames model."""
act_space = spaces.Tuple([spaces.Discrete(2)] * 8)
obs_space = spaces.Dict(
{
"state": spaces.Box(np.ones(10) * -np.Inf, np.ones(10) * np.Inf),
"transforms": spaces.Box(low=-1, high=1, shape=(8, 5)),
"masks": spaces.Box(low=0, high=1, shape=(8,)),
"masses": spaces.Box(low=-np.Inf, high=np.Inf, shape=(3,)),
}
)
self.player = BasicPpoPlayerDiscrete(
self.cfg, obs_space, act_space, clip_actions=False, deterministic=True
)
def loadConfig(self, config_name: str) -> None:
"""
Load the configuration file of the RLGames model.
Args:
config_name (str): A string containing the path to the configuration file of the RLGames model.
"""
with open(config_name, "r") as stream:
self.cfg = yaml.safe_load(stream)
def restore(self, model_name: str) -> None:
"""
Restore the weights of the RLGames model.
Args:
model_name (str): A string containing the path to the checkpoint of an RLGames model matching the configuation file.
"""
self.player.restore(model_name)
def setTarget(
self,
target_position=None,
target_heading=None,
target_linear_velocity=None,
target_angular_velocity=None,
mode=None,
) -> None:
"""
Set the targets of the agent. mode is task flag.
Args:
target_position (list, optional): A list containing the target position. Defaults to None.
target_heading (list, optional): A list containing the target heading. Defaults to None.
target_linear_velocity (list, optional): A list containing the target linear velocity. Defaults to None.
target_angular_velocity (list, optional): A list containing the target angular velocity. Defaults to None.
mode (int, optional): An integer indicating the agent's task. Defaults to None.
"""
if mode == 0:
self.position_target = target_position
self.mode = mode
elif mode == 1:
self.position_target = target_position
self.orientation_target = target_heading
self.mode = mode
elif mode == 2:
self.linear_velocity_target = target_linear_velocity
self.mode = mode
elif mode == 3:
self.linear_velocity_target = target_linear_velocity
self.angular_velocity_target = target_angular_velocity
self.mode = mode
elif mode == 4:
self.linear_velocity_target = target_linear_velocity
self.orientation_target = target_heading
self.mode = mode
elif mode == 6:
            # TODO: remove hardcoding
            fp_footprint_radius = 0.31 + 0.01
siny_cosp = 2 * target_heading[0] * target_heading[3]
cosy_cosp = 1 - 2 * (target_heading[3] * target_heading[3])
target_heading_angle = np.arctan2(siny_cosp, cosy_cosp)
target_position_clone = target_position.copy()
target_position_clone[0] += fp_footprint_radius * np.cos(target_heading_angle)
target_position_clone[1] += fp_footprint_radius * np.sin(target_heading_angle)
self.position_target = target_position_clone
self.orientation_target = target_heading
self.mode = mode
else:
raise ValueError("Please specify a task flag.")
def generate_task_data(self, state: Dict[str, np.ndarray]) -> None:
"""
Generate the task data used by the agent.
The task flag is used to determine the format of the task data.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
"""
if self.mode == 0:
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
0,
0,
]
elif self.mode == 1:
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.arctan2(
np.sin(heading_target - heading_system),
np.cos(heading_target - heading_system),
)
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
np.cos(heading_error),
np.sin(heading_error),
]
elif self.mode == 2:
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
0,
0,
]
elif self.mode == 3:
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
self.angular_velocity_target[2] - state["angular_velocity"][2],
0,
]
elif self.mode == 4:
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.arctan2(
np.sin(heading_target - heading_system),
np.cos(heading_target - heading_system),
)
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
np.cos(heading_error),
np.sin(heading_error),
]
elif self.mode == 6:
            # TODO: remove hardcoding
            target_to_cone_dist = -2.0
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
anchor_positions = self.position_target.copy()
anchor_positions[0] += target_to_cone_dist * np.cos(heading_target)
anchor_positions[1] += target_to_cone_dist * np.sin(heading_target)
goal_headings = np.arctan2(
anchor_positions[1] - state["position"][1],
anchor_positions[0] - state["position"][0]
)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.abs(
np.arctan2(
np.sin(goal_headings - heading_system),
np.cos(goal_headings - heading_system),
)
)
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
np.cos(heading_error),
np.sin(heading_error),
]
def makeObservationBuffer(self, state: Dict[str, np.ndarray]) -> None:
"""
Make the observation buffer used by the agent.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
"""
self.generate_task_data(state)
siny_cosp = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
self.obs_state[0, :2] = torch.tensor(
[cosy_cosp, siny_cosp], dtype=torch.float32, device="cuda"
)
self.obs_state[0, 2:4] = torch.tensor(
state["linear_velocity"][:2], dtype=torch.float32, device="cuda"
)
self.obs_state[0, 4] = state["angular_velocity"][2]
self.obs_state[0, 5] = self.mode
self.obs_state[0, 6:] = torch.tensor(
self.target, dtype=torch.float32, device="cuda"
)
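    # Observation layout produced above (10 values):
    #   [cos(heading), sin(heading), vx, vy, omega_z, task flag, 4-dim task data]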
def getAction(self, state, is_deterministic=True, **kwargs) -> np.ndarray:
"""
Get the action of the agent.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
is_deterministic (bool): A boolean indicating whether the action should be deterministic or not.
**kwargs: Additional arguments.
Returns:
np.ndarray: The action of the agent."""
self.makeObservationBuffer(state)
self.obs["state"] = self.obs_state
actions = (
self.player.get_action(self.obs.copy(), is_deterministic=is_deterministic)
.cpu()
.numpy()
)
return actions | 13,598 | Python | 38.64723 | 135 | 0.542874 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/hl_controllers.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import List, Tuple, Dict, Union
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
import datetime
from mujoco_envs.controllers.discrete_LQR_controller import (
DiscreteController,
)
from mujoco_envs.controllers.RL_games_model_4_mujoco import (
RLGamesModel,
)
class BaseController:
"""
Base class for high-level controllers."""
def __init__(self, dt: float, save_dir: str = "mujoco_experiment") -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
"""
self.save_dir = os.path.join(save_dir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
self.dt = dt
self.time = 0
self.initializeLoggers()
self.csv_datas = []
def initializeLoggers(self) -> None:
"""
Initializes the loggers for the simulation.
Allowing for the simulation to be replayed/plotted."""
self.logs = {}
self.logs["timevals"] = []
self.logs["angular_velocity"] = []
self.logs["linear_velocity"] = []
self.logs["position"] = []
self.logs["quaternion"] = []
self.logs["actions"] = []
def updateLoggers(
self, state: Dict[str, np.ndarray], action: np.ndarray, time: float = None
) -> None:
"""
Updates the loggers for the simulation.
Args:
state (Dict[str, np.ndarray]): State of the system.
action (np.ndarray): Action taken by the controller."""
self.logs["timevals"].append(self.time)
self.logs["position"].append(state["position"])
self.logs["quaternion"].append(state["quaternion"])
self.logs["angular_velocity"].append(state["angular_velocity"])
self.logs["linear_velocity"].append(state["linear_velocity"])
self.logs["actions"].append(action)
if time is not None:
self.time = time
else:
self.time += self.dt
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return False
def getGoal(self) -> None:
"""
Returns the current goal of the controller."""
raise NotImplementedError
def setGoal(self) -> None:
"""
Sets the goal of the controller."""
raise NotImplementedError
def getAction(self, **kwargs) -> np.ndarray:
"""
Gets the action from the controller."""
raise NotImplementedError
def plotSimulation(
self, dpi: int = 120, width: int = 600, height: int = 800
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 120.
width (int, optional): Width of the figure. Defaults to 600.
height (int, optional): Height of the figure. Defaults to 800."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"])
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], np.abs(self.logs["position"]))
ax[0].set_xlabel("time (seconds)")
ax[0].set_ylabel("meters")
_ = ax[0].set_title("position")
ax[0].set_yscale("log")
ax[1].plot(
np.array(self.logs["position"])[:, 0], np.array(self.logs["position"])[:, 1]
)
ax[1].set_xlabel("meters")
ax[1].set_ylabel("meters")
_ = ax[1].set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
def saveSimulationData(self, suffix: str = "") -> None:
"""
Saves the simulation data.
Args:
suffix (str, optional): Suffix to add to the file name. Defaults to ""."""
xyz = ["x", "y", "z"]
wxyz = ["w", "x", "y", "z"]
try:
os.makedirs(self.save_dir, exist_ok=True)
csv_data = pd.DataFrame()
for key in self.logs.keys():
if len(self.logs[key]) != 0:
if key == "actions":
data = np.array(self.logs[key])
for i in range(data.shape[1]):
csv_data["t_" + str(i)] = data[:, i]
else:
data = np.array(self.logs[key])
if len(data.shape) > 1:
if data.shape[1] == 4:
var_name = wxyz
else:
var_name = xyz
for i in range(data.shape[1]):
csv_data[var_name[i] + "_" + key] = data[:, i]
else:
csv_data[key] = data
csv_data.to_csv(os.path.join(self.save_dir, "exp_logs" + suffix + ".csv"))
self.csv_datas.append(csv_data)
except Exception as e:
print("Saving failed: ", e)
def plotBatch(self, dpi: int = 120, width: int = 600, height: int = 800) -> None:
"""
Plots a batch of simulations.
Args:
dpi (int, optional): Dots per inch. Defaults to 120.
width (int, optional): Width of the figure. Defaults to 600.
height (int, optional): Height of the figure. Defaults to 800."""
figsize = (width / dpi, height / dpi)
fig = plt.figure(figsize=figsize)
for csv_data in self.csv_datas:
plt.plot(csv_data["x_position"], csv_data["y_position"])
plt.axis("equal")
plt.xlabel("meters")
plt.ylabel("meters")
plt.tight_layout()
fig.savefig(os.path.join(self.save_dir, "positions.png"))
class PositionController(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
goals_x: List[float],
goals_y: List[float],
position_distance_threshold: float = 0.03,
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
goals_x (List[float]): List of x coordinates of the goals.
goals_y (List[float]): List of y coordinates of the goals.
position_distance_threshold (float, optional): Distance threshold for the position. Defaults to 0.03.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.model = model
self.goals = np.array([goals_x, goals_y, [0] * len(goals_x)]).T
self.current_goal = self.goals[0]
self.distance_threshold = position_distance_threshold
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["position_target"] = []
    def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["position_target"].append(self.current_goal[:2])
def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
"""
Checks if the goal is reached.
Args:
state (Dict[str, np.ndarray]): State of the system.
Returns:
bool: True if the goal is reached, False otherwise."""
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.distance_threshold
def getGoal(self) -> np.ndarray:
"""
Returns the current goal."""
return self.current_goal
def setGoal(self, goal) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return len(self.goals) == 0
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
self.model.setTarget(target_position=self.current_goal, mode=0)
def getAction(
self,
state,
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
Returns:
np.ndarray: Action taken by the controller."""
if self.isGoalReached(state):
if not mute:
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.setTarget()
actions = self.model.getAction(state, is_deterministic=is_deterministic)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class PoseController(BaseController):
"""
Controller for the pose of the robot."""
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
goals_x: List[float],
goals_y: List[float],
goals_theta: List[float],
position_distance_threshold: float = 0.03,
orientation_distance_threshold: float = 0.03,
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
goals_x (List[float]): List of x coordinates of the goals.
goals_y (List[float]): List of y coordinates of the goals.
goals_theta (List[float]): List of theta coordinates of the goals.
position_distance_threshold (float, optional): Distance threshold for the position. Defaults to 0.03.
orientation_distance_threshold (float, optional): Distance threshold for the orientation. Defaults to 0.03.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
# Discrete controller
self.model = model
# Creates an array goals
if goals_theta is None:
goals_theta = np.zeros_like(goals_x)
self.goals = np.array([goals_x, goals_y, goals_theta]).T
self.current_goal = self.goals[0]
        self.current_goal_controller = self.current_goal
self.position_distance_threshold = position_distance_threshold
self.orientation_distance_threshold = orientation_distance_threshold
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["position_target"].append(self.current_goal[:2])
self.logs["heading_target"].append(self.current_goal[-1])
def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
"""
Checks if the goal is reached.
Args:
state (Dict[str, np.ndarray]): State of the system.
Returns:
bool: True if the goal is reached, False otherwise."""
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.position_distance_threshold
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.current_goal
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return len(self.goals) == 0
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
position_goal = self.current_goal
yaw = self.current_goal[2]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_position=position_goal, target_heading=orientation_goal, mode=1
)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
Returns:
np.ndarray: Action taken by the controller."""
if self.isGoalReached(state):
if not mute:
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class DockController(PoseController):
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
position_goal = self.current_goal
yaw = self.current_goal[2]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_position=position_goal, target_heading=orientation_goal, mode=6
)
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class TrajectoryTracker:
"""
A class to generate and track trajectories."""
def __init__(
self, lookahead: float = 0.25, closed: bool = False, offset=(0, 0), **kwargs
):
"""
Initializes the trajectory tracker.
Args:
lookahead (float, optional): Lookahead distance. Defaults to 0.25.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to False.
offset (tuple, optional): Offset of the trajectory. Defaults to (0,0).
**kwargs: Additional arguments."""
self.current_point = -1
self.lookhead = lookahead
self.closed = closed
self.is_done = False
self.offset = np.array(offset)
def generateCircle(self, radius: float = 2, num_points: int = 360 * 10):
"""
Generates a circle trajectory.
Args:
radius (float, optional): Radius of the circle. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10."""
theta = np.linspace(0, 2 * np.pi, num_points, endpoint=(not self.closed))
self.positions = (
np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
)
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
self.angles = np.arctan2(self.angles[:, 1], self.angles[:, 0])
def generateSquare(self, h: float = 2, num_points: int = 360 * 10) -> None:
"""
Generates a square trajectory.
Args:
h (float, optional): Height of the square. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10."""
points_per_side = num_points // 4
        s1y = np.linspace(-h / 2, h / 2, points_per_side, endpoint=False)
s1x = np.ones_like(s1y) * h / 2
u1 = np.ones_like(s1y)
v1 = np.zeros_like(s1y)
        s2x = np.linspace(h / 2, -h / 2, points_per_side, endpoint=False)
s2y = np.ones_like(s2x) * h / 2
u2 = np.zeros_like(s2x)
v2 = -np.ones_like(s2x)
        s3y = np.linspace(h / 2, -h / 2, points_per_side, endpoint=False)
s3x = np.ones_like(s3y) * (-h / 2)
u3 = -np.ones_like(s3y)
v3 = np.zeros_like(s3y)
        s4x = np.linspace(-h / 2, h / 2, points_per_side, endpoint=False)
s4y = np.ones_like(s4x) * (-h / 2)
u4 = np.zeros_like(s4x)
v4 = np.ones_like(s4x)
self.positions = (
np.vstack(
[np.hstack([s1x, s2x, s3x, s4x]), np.hstack([s1y, s2y, s3y, s4y])]
).T
+ self.offset
)
self.u = np.hstack([u1, u2, u3, u4]).T
self.v = np.hstack([v1, v2, v3, v4]).T
self.angles = np.arctan2(self.u, self.v)
def generateSpiral(
self,
start_radius: float = 0.5,
end_radius: float = 2,
num_loop: float = 5,
num_points: int = 360 * 20,
) -> None:
"""
Generates a spiral trajectory.
Args:
start_radius (float, optional): Start radius of the spiral. Defaults to 0.5.
end_radius (float, optional): End radius of the spiral. Defaults to 2.
num_loop (float, optional): Number of loops. Defaults to 5.
num_points (int, optional): Number of points. Defaults to 360*20."""
radius = np.linspace(
start_radius, end_radius, num_points, endpoint=(not self.closed)
)
theta = np.linspace(
0, 2 * np.pi * num_loop, num_points, endpoint=(not self.closed)
)
self.positions = (
np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
)
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
self.angles = np.arctan2(self.angles[:, 1], self.angles[:, 0])
def generateInfinite(self, a: float = 2, num_points: int = 360 * 10) -> None:
"""
Generates an infinite (lemniscate of Bernoulli) trajectory.
Args:
a (float, optional): Controls the size of the lemniscate. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10.
"""
t = np.linspace(0, 2 * np.pi, num_points, endpoint=(not self.closed))
x = (a * np.cos(t)) / (1 + np.sin(t) ** 2)
y = (a * np.sin(t) * np.cos(t)) / (1 + np.sin(t) ** 2)
self.positions = np.array([x, y]).T + self.offset
# Derive angles based on the direction of movement across points for consistency with other functions
directions = np.diff(self.positions, axis=0, append=self.positions[0:1])
self.angles = (
np.array([directions[:, 1], -directions[:, 0]]).T
/ np.linalg.norm(directions, axis=1)[:, None]
)
self.angles = np.arctan2(self.angles[:, 0], self.angles[:, 1])
def getTrackingPointIdx(self, position: np.ndarray) -> None:
"""
Gets the tracking point index.
The tracking point is the point the robot is currently locked on.
Args:
position (np.ndarray): Current position of the robot."""
distances = np.linalg.norm(self.positions - position, axis=1)
if self.current_point == -1:
self.current_point = 0
else:
indices = np.where(distances < self.lookhead)[0]
if len(indices) > 0:
indices = indices[indices < 60]
if len(indices) > 0:
self.current_point = np.max(indices)
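    # Note on getTrackingPointIdx: because rollTrajectory keeps the currently
    # tracked point at index 0, the `indices < 60` filter restricts the search to
    # the next 60 samples of the path, presumably to avoid locking onto a later,
    # spatially close portion of a self-intersecting trajectory.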
def rollTrajectory(self) -> None:
"""
Rolls the trajectory, so that the current point is the first point."""
if self.closed:
self.positions = np.roll(self.positions, -self.current_point, axis=0)
self.angles = np.roll(self.angles, -self.current_point, axis=0)
self.current_point = 0
else:
self.positions = self.positions[self.current_point :]
self.angles = self.angles[self.current_point :]
self.current_point = 0
if self.positions.shape[0] <= 1:
self.is_done = True
def isDone(self):
"""
Checks if the trajectory is done."""
return self.is_done
def getPointForTracking(self) -> List[np.ndarray]:
"""
Gets the position the tracker is currently locked on.
Returns:
List[np.ndarray]: Position being tracked."""
position = self.positions[self.current_point]
angle = self.angles[self.current_point]
self.rollTrajectory()
return position, angle
def get_target_position(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.target_position
def computeVelocityVector(
self, target_position: np.ndarray, position: np.ndarray
) -> np.ndarray:
"""
Computes the velocity vector.
That is the vector that will enable the robot to reach the position being tracked.
Args:
target_position (np.ndarray): Position being tracked.
position (np.ndarray): Current position of the robot."""
diff = target_position - position
return diff / np.linalg.norm(diff)
def getVelocityVector(self, position: np.ndarray) -> np.ndarray:
"""
Gets the velocity vector.
Args:
position (np.ndarray): Current position of the robot.
Returns:
np.ndarray: Velocity vector."""
self.getTrackingPointIdx(position)
self.target_position, target_angle = self.getPointForTracking()
velocity_vector = self.computeVelocityVector(self.target_position, position)
return velocity_vector, target_angle
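# Example (hedged sketch): generating a closed circular reference and querying
# the velocity direction and heading for a robot currently at the origin:
#
#   tracker = TrajectoryTracker(lookahead=0.25, closed=True)
#   tracker.generateCircle(radius=2.0)
#   velocity_vector, target_angle = tracker.getVelocityVector(np.array([0.0, 0.0]))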
class VelocityTracker(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
target_tracking_velocity: float = 0.25,
lookahead_dist: float = 0.15,
closed: bool = True,
x_offset: float = 0,
y_offset: float = 0,
radius: float = 1.5,
height: float = 1.5,
start_radius: float = 0.5,
end_radius: float = 2.0,
num_loops: int = 4,
trajectory_type: str = "circle",
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
target_tracking_velocity (float, optional): Target tracking velocity. Defaults to 0.25.
lookahead_dist (float, optional): Lookahead distance. Defaults to 0.15.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to True.
x_offset (float, optional): x offset of the trajectory. Defaults to 0.
y_offset (float, optional): y offset of the trajectory. Defaults to 0.
radius (float, optional): Radius of the trajectory. Defaults to 1.5.
height (float, optional): Height of the trajectory. Defaults to 1.5.
start_radius (float, optional): Start radius of the trajectory. Defaults to 0.5.
end_radius (float, optional): End radius of the trajectory. Defaults to 2.0.
num_loops (int, optional): Number of loops. Defaults to 4.
trajectory_type (str, optional): Type of trajectory. Defaults to "circle".
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.tracker = TrajectoryTracker(
lookahead=lookahead_dist, closed=closed, offset=(x_offset, y_offset)
)
if trajectory_type.lower() == "square":
self.tracker.generateSquare(h=height)
elif trajectory_type.lower() == "circle":
self.tracker.generateCircle(radius=radius)
elif trajectory_type.lower() == "spiral":
self.tracker.generateSpiral(
start_radius=start_radius, end_radius=end_radius, num_loop=num_loops
)
elif trajectory_type.lower() == "infinite":
self.tracker.generateInfinite(a=radius)
else:
raise ValueError(
"Unknown trajectory type. Must be square, circle or spiral."
)
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.velocity_goal = [0, 0, 0]
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["position_target"] = []
def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["velocity_goal"].append(self.velocity_goal[:2])
self.logs["position_target"].append(self.getTargetPosition())
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.velocity_goal
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.target_tracking_velocity = goal
def getTargetPosition(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.tracker.get_target_position()
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return self.tracker.is_done
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
self.model.setTarget(target_linear_velocity=self.velocity_goal, mode=2)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
"""
self.velocity_vector, _ = self.tracker.getVelocityVector(state["position"][:2])
self.velocity_goal[0] = self.velocity_vector[0] * self.target_tracking_velocity
self.velocity_goal[1] = self.velocity_vector[1] * self.target_tracking_velocity
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 135, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 135.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
linear_velocity = np.array(self.logs["linear_velocity"])
target_velocity = np.array(self.logs["velocity_goal"])
ax.plot(
self.logs["timevals"],
linear_velocity[:, 0],
label="x linear velocity",
)
ax.plot(
self.logs["timevals"],
linear_velocity[:, 1],
label="y linear velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 0],
label="x target velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 1],
label="y target velocity",
)
ax.legend()
ax.set_xlabel("time (seconds)")
ax.set_ylabel("Linear velocities (m/s)")
_ = ax.set_title("Linear velocity tracking")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="trajectory",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class VelocityHeadingTracker(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
target_tracking_velocity: float = 0.25,
lookahead_dist: float = 0.15,
closed: bool = True,
x_offset: float = 0,
y_offset: float = 0,
radius: float = 1.5,
height: float = 1.5,
start_radius: float = 0.5,
end_radius: float = 2.0,
num_loops: int = 4,
trajectory_type: str = "circle",
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
target_tracking_velocity (float, optional): Target tracking velocity. Defaults to 0.25.
lookahead_dist (float, optional): Lookahead distance. Defaults to 0.15.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to True.
x_offset (float, optional): x offset of the trajectory. Defaults to 0.
y_offset (float, optional): y offset of the trajectory. Defaults to 0.
radius (float, optional): Radius of the trajectory. Defaults to 1.5.
height (float, optional): Height of the trajectory. Defaults to 1.5.
start_radius (float, optional): Start radius of the trajectory. Defaults to 0.5.
end_radius (float, optional): End radius of the trajectory. Defaults to 2.0.
num_loops (int, optional): Number of loops. Defaults to 4.
trajectory_type (str, optional): Type of trajectory. Defaults to "circle".
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.tracker = TrajectoryTracker(
lookahead=lookahead_dist, closed=closed, offset=(x_offset, y_offset)
)
if trajectory_type.lower() == "square":
self.tracker.generateSquare(h=height)
elif trajectory_type.lower() == "circle":
self.tracker.generateCircle(radius=radius)
elif trajectory_type.lower() == "spiral":
self.tracker.generateSpiral(
start_radius=start_radius, end_radius=end_radius, num_loop=num_loops
)
elif trajectory_type.lower() == "infinite":
self.tracker.generateInfinite(a=radius)
else:
raise ValueError(
"Unknown trajectory type. Must be square, circle or spiral."
)
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.velocity_goal = [0, 0, 0]
self.target_heading = [0]
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["heading_target"] = []
self.logs["position_target"] = []
def updateLoggers(self, state, actions, time=None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["velocity_goal"].append(self.velocity_goal[:2])
self.logs["heading_target"].append(self.target_heading[0])
self.logs["position_target"].append(self.getTargetPosition())
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.velocity_goal + self.target_heading
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
        self.target_tracking_velocity = goal[:3]
        self.target_heading = [goal[3]]  # keep a one-element list, matching __init__ and setTarget
def getTargetPosition(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.tracker.get_target_position()
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return self.tracker.is_done
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
yaw = self.target_heading[0]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_linear_velocity=self.velocity_goal, target_heading=orientation_goal, mode=4
)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
"""
self.velocity_vector, target_heading = self.tracker.getVelocityVector(
state["position"][:2]
)
self.velocity_goal[0] = self.velocity_vector[0] * self.target_tracking_velocity
self.velocity_goal[1] = self.velocity_vector[1] * self.target_tracking_velocity
self.target_heading[0] = target_heading
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 135, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 135.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
linear_velocity = np.array(self.logs["linear_velocity"])
target_velocity = np.array(self.logs["velocity_goal"])
ax.plot(
self.logs["timevals"],
linear_velocity[:, 0],
label="x linear velocity",
)
ax.plot(
self.logs["timevals"],
linear_velocity[:, 1],
label="y linear velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 0],
label="x target velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 1],
label="y target velocity",
)
ax.legend()
ax.set_xlabel("time (seconds)")
ax.set_ylabel("Linear velocities (m/s)")
_ = ax.set_title("Linear velocity tracking")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="trajectory",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
orientations = np.array(self.logs["quaternion"])
positions = np.array(self.logs["position"])
target_positions = np.array(self.logs["position_target"])
v = 2 * (
orientations[:, 0] * orientations[:, 3]
+ orientations[:, 1] * orientations[:, 2]
)
u = 1 - 2 * (
orientations[:, 2] * orientations[:, 2]
+ orientations[:, 3] * orientations[:, 3]
)
target_headings = np.array(self.logs["heading_target"])
u_target = np.cos(target_headings)
v_target = np.sin(target_headings)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.quiver(
target_positions[:, 0],
target_positions[:, 1],
u_target,
v_target,
label="reference_trajectory",
color="r",
)
ax.quiver(
positions[:, 0],
positions[:, 1],
u,
v,
label="system trajectory",
color="b",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
            fig.savefig(os.path.join(self.save_dir, "headings.png"))
except Exception as e:
print("Saving failed: ", e)
class HLControllerFactory:
"""
Factory for high-level controllers."""
def __init__(self):
self.registered_controllers = {}
def registerController(
self,
name: str,
controller: Union[PositionController, PoseController, VelocityTracker],
):
"""
Registers a controller.
Args:
name (str): Name of the controller.
controller (Union[PositionController, PoseController, VelocityTracker]): Controller class.
"""
self.registered_controllers[name] = controller
def parseControllerConfiguration(self, cfg: Dict):
"""
Parses the controller configuration.
Args:
cfg (Dict): Configuration dictionary."""
return cfg["hl_task"], cfg["hl_task"]["name"]
def __call__(
self, cfg: Dict, model: Union[RLGamesModel, DiscreteController], dt: float
):
"""
Creates a controller.
Args:
cfg (Dict): Configuration dictionary.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
dt (float): Simulation time step."""
new_cfg, mode = self.parseControllerConfiguration(cfg)
assert mode in list(self.registered_controllers.keys()), "Unknown hl_task mode."
return self.registered_controllers[mode](dt, model, **new_cfg)
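# Example usage of the factory (a sketch; the controller-specific keys found in
# cfg["hl_task"] are forwarded as keyword arguments to the selected controller class):
#
#   controller = hlControllerFactory(cfg, model, dt=0.02)  # instance registered below
#   actions = controller.getAction(state)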
"""
Register the controllers."""
hlControllerFactory = HLControllerFactory()
hlControllerFactory.registerController("position", PositionController)
hlControllerFactory.registerController("pose", PoseController)
hlControllerFactory.registerController("dock", DockController)
hlControllerFactory.registerController("linear_velocity", VelocityTracker)
hlControllerFactory.registerController(
"linear_velocity_heading", VelocityHeadingTracker
) | 51,795 | Python | 33.692565 | 119 | 0.561 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/environments/disturbances.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict, Tuple
import numpy as np
import math
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
class RandomSpawn:
"""
Randomly spawns the robot in the environment."""
def __init__(self, cfg: Dict[str, float]) -> None:
"""
Initialize the random spawn strategy.
Args:
cfg (dict): A dictionary containing the configuration of the random spawn disturbance.
"""
self._rng = np.random.default_rng(seed=cfg["seed"])
self._max_spawn_dist = cfg["max_spawn_dist"]
self._min_spawn_dist = cfg["min_spawn_dist"]
self._kill_dist = cfg["kill_dist"]
def getInitialCondition(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Generates a random initial condition for the robot.
Returns:
Tuple[np.ndarray, np.ndarray]: A tuple containing the initial position and orientation of the robot.
"""
theta = self._rng.uniform(-np.pi, np.pi, 1)
r = self._rng.uniform(self._min_spawn_dist, self._max_spawn_dist)
initial_position = [np.cos(theta) * r, np.sin(theta) * r]
heading = self._rng.uniform(-np.pi, np.pi, 1)
initial_orientation = [np.cos(heading * 0.5), 0, 0, np.sin(heading * 0.5)]
return initial_position, initial_orientation
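# Example (sketch, assuming the config keys read above):
#
#   rs = RandomSpawn({"seed": 42, "max_spawn_dist": 4.0, "min_spawn_dist": 0.5, "kill_dist": 6.0})
#   position, quaternion = rs.getInitialCondition()  # planar position, (w, x, y, z) quaternion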
class RandomKillThrusters:
"""
Randomly kills thrusters."""
def __init__(self, cfg: Dict[str, float]) -> None:
"""
Initialize the random kill thrusters strategy.
Args:
cfg (dict): A dictionary containing the configuration of the random kill thrusters disturbance.
"""
        self._rng = np.random.default_rng(seed=42)  # NOTE: seed is hard-coded; cfg["seed"] is not used here
self._num_thrusters_to_kill = cfg["num_thrusters_to_kill"]
self.killed_thrusters_id = []
self.killed_mask = np.ones([8])
def generate_thruster_kills(self) -> None:
"""
Generates the thrusters to kill."""
self.killed_thrusters_id = self._rng.choice(
8, self._num_thrusters_to_kill, replace=False
)
class MassDistributionDisturbances:
"""
Creates disturbances on the platform by simulating a mass distribution on the
platform.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: MassDistributionDisturbanceParameters,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.mass_sampler = CurriculumSampler(parameters.mass_curriculum)
self.CoM_sampler = CurriculumSampler(parameters.com_curriculum)
self.parameters = parameters
self.platforms_mass = 5.32
self.platforms_CoM = np.zeros((2), dtype=np.float32)
def randomize_masses(self, step: int = 100000) -> None:
"""
Randomizes the masses of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
step (int): The current step of the learning process.
"""
self.platforms_mass = self.mass_sampler.sample(1, step).numpy()[0]
r = self.CoM_sampler.sample(1, step).numpy()[0]
        theta = self.rng.uniform(0, 1, 1) * math.pi * 2
self.platforms_CoM[0] = np.cos(theta) * r
self.platforms_CoM[1] = np.sin(theta) * r
def get_masses(self) -> Tuple[float, np.ndarray]:
"""
Returns the masses and CoM of the platforms.
Returns:
Tuple(float, np.ndarray): The masses and CoM of the platforms.
"""
return (self.platforms_mass, self.platforms_CoM)
class ForceDisturbance:
"""
Creates disturbances by applying random forces.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: ForceDisturbanceParameters,
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.parameters = parameters
self.force_sampler = CurriculumSampler(self.parameters.force_curriculum)
self.forces = np.zeros(3, dtype=np.float32)
        self._max_forces = 0
self._floor_x_freq = 0
self._floor_y_freq = 0
self._floor_x_offset = 0
self._floor_y_offset = 0
def generate_forces(self, step: int = 100000) -> None:
"""
Generates the forces using a sinusoidal pattern or not.
Args:
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = self.rng.uniform(
self.parameters.min_freq, self.parameters.max_freq, 1
)
self._floor_y_freq = self.rng.uniform(
self.parameters.min_freq, self.parameters.max_freq, 1
)
self._floor_x_offset = self.rng.uniform(
self.parameters.min_offset, self.parameters.max_offset, 1
)
self._floor_y_offset = self.rng.uniform(
self.parameters.min_offset, self.parameters.max_offset, 1
)
self._max_forces = self.force_sampler.sample(1, step).numpy()[0]
else:
r = self.force_sampler.sample(1, step).numpy()[0]
theta = self.rng.uniform(0, 1, 1) * math.pi * 2
self.forces[0] = np.cos(theta) * r
self.forces[1] = np.sin(theta) * r
def get_floor_forces(self, root_pos: np.ndarray) -> np.ndarray:
"""
Computes the forces given the current state of the robot.
Args:
root_pos (np.ndarray): The position of the root of the robot.
Returns:
np.ndarray: The floor forces.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[0] = (
np.sin(root_pos[0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[1] = (
np.sin(root_pos[1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
return self.forces
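# With sinusoidal patterns enabled, get_floor_forces implements a position-dependent force
# field (a sketch of the formula used above):
#
#   F_x(p) = sin(p_x * f_x + phi_x) * F_max
#   F_y(p) = sin(p_y * f_y + phi_y) * F_max
#
# where the frequencies f and offsets phi are resampled in generate_forces and F_max comes
# from the force curriculum sampler.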
class TorqueDisturbance:
"""
Creates disturbances by applying a torque to its center.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: TorqueDisturbanceParameters,
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.parameters = parameters
self.torque_sampler = CurriculumSampler(self.parameters.torque_curriculum)
self.torques = np.zeros(3, dtype=np.float32)
        self._max_torques = 0
self._freq = 0
self._offset = 0
def generate_torques(self, step: int = 100000) -> None:
"""
Generates the torques using a sinusoidal pattern or not.
Args:
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
                self._freq = self.rng.uniform(
                    self.parameters.min_freq, self.parameters.max_freq, 1
                )
                self._offset = self.rng.uniform(
                    self.parameters.min_offset, self.parameters.max_offset, 1
                )
self._max_torques = self.torque_sampler.sample(1, step).numpy()[0]
else:
r = self.torque_sampler.sample(1, step).numpy()[0]
self.torques[2] = r
def get_torque_disturbance(self, root_pos: np.ndarray) -> np.ndarray:
"""
Computes the torques given the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The torque disturbance."""
if self.parameters.use_sinusoidal_patterns:
            self.torques[2] = (
                np.sin(root_pos[0] * self._freq + self._offset) * self._max_torques
            )
return self.torques
class NoisyObservations:
"""
Adds noise to the observations of the robot.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: NoisyObservationsParameters,
) -> None:
"""
Args:
            rng (np.random.default_rng): The random number generator.
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
"""
self.rng = rng
self.position_sampler = CurriculumSampler(parameters.position_curriculum)
self.velocity_sampler = CurriculumSampler(parameters.velocity_curriculum)
self.orientation_sampler = CurriculumSampler(parameters.orientation_curriculum)
self.parameters = parameters
def add_noise_on_pos(self, pos: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the position of the robot.
Args:
pos (np.ndarray): The position of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The position of the robot with noise.
"""
if self.parameters.enable_position_noise:
pos += self.position_sampler.sample(1, step).numpy()[0]
return pos
def add_noise_on_vel(self, vel: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the velocity of the robot.
Args:
vel (np.ndarray): The velocity of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The velocity of the robot with noise.
"""
if self.parameters.enable_velocity_noise:
vel += self.velocity_sampler.sample(1, step).numpy()[0]
return vel
def add_noise_on_heading(self, heading: np.ndarray, step: int = 0) -> np.ndarray:
"""
Adds noise to the heading of the robot.
Args:
heading (np.ndarray): The heading of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The heading of the robot with noise.
"""
if self.parameters.enable_orientation_noise:
heading += self.orientation_sampler.sample(1, step).numpy()[0]
return heading
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
rng: np.random.default_rng,
parameters: NoisyActionsParameters,
) -> None:
"""
Args:
parameters (NoisyActionParameters): The task configuration.
"""
self.rng = rng
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
self.parameters = parameters
def add_noise_on_act(self, act: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the actions of the robot.
Args:
act (np.ndarray): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The actions of the robot with noise.
"""
if self.parameters.enable:
act += self.action_sampler.sample(1, step).numpy()[0]
return act
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(self, parameters: dict, seed: int = 42) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
"""
self.rng = np.random.default_rng(seed=seed)
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.rng,
self.parameters.mass_disturbance,
)
self.force_disturbances = ForceDisturbance(
self.rng,
self.parameters.force_disturbance,
)
self.torque_disturbances = TorqueDisturbance(
self.rng,
self.parameters.torque_disturbance,
)
self.noisy_observations = NoisyObservations(
self.rng,
self.parameters.observations_disturbance,
)
self.noisy_actions = NoisyActions(
self.rng,
self.parameters.actions_disturbance,
)
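# Example (sketch): building the aggregate disturbance model from a parameter dict whose
# keys match DisturbancesParameters, then querying it once per control step.
#
#   disturbances = Disturbances(parameters, seed=42)
#   disturbances.force_disturbances.generate_forces(step)
#   floor_forces = disturbances.force_disturbances.get_floor_forces(root_pos)
#   noisy_pos = disturbances.noisy_observations.add_noise_on_pos(pos, step)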
| 13,434 | Python | 30.912114 | 112 | 0.585901 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/environments/__init__.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
| 290 | Python | 28.099997 | 79 | 0.648276 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/environments/mujoco_base_env.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict, Union, List, Tuple
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import mujoco
import math
import os
from omniisaacgymenvs.mujoco_envs.environments.disturbances import (
Disturbances,
RandomKillThrusters,
RandomSpawn,
)
def parseEnvironmentConfig(
cfg: Dict[str, Union[float, int, Dict]]
) -> Dict[str, Union[float, int, Dict]]:
"""
Parses the environment configuration from the config file.
Args:
cfg (Dict[str, Union[float, int, Dict]]): The configuration dictionary.
Returns:
Dict[str, Union[float, int, Dict]]: The parsed configuration dictionary."""
new_cfg = {}
new_cfg["disturbances"] = cfg["task"]["env"]["disturbances"]
new_cfg["spawn_parameters"] = {}
new_cfg["spawn_parameters"]["seed"] = cfg["seed"]
try:
new_cfg["spawn_parameters"]["max_spawn_dist"] = cfg["task"]["env"][
"task_parameters"
]["max_spawn_dist"]
except:
new_cfg["spawn_parameters"]["max_spawn_dist"] = 5.0
try:
new_cfg["spawn_parameters"]["min_spawn_dist"] = cfg["task"]["env"][
"task_parameters"
]["min_spawn_dist"]
except:
new_cfg["spawn_parameters"]["min_spawn_dist"] = 5.0
new_cfg["spawn_parameters"]["kill_dist"] = cfg["task"]["env"]["task_parameters"][
"kill_dist"
]
new_cfg["step_time"] = cfg["task"]["sim"]["dt"]
new_cfg["duration"] = (
cfg["task"]["env"]["maxEpisodeLength"] * cfg["task"]["sim"]["dt"] * cfg["task"]["env"]["controlFrequencyInv"]
)
new_cfg["inv_play_rate"] = cfg["task"]["env"]["controlFrequencyInv"]
new_cfg["platform"] = cfg["task"]["env"]["platform"]
new_cfg["platform"]["seed"] = cfg["seed"]
new_cfg["run_batch"] = cfg["hl_task"]["run_batch"]
new_cfg["max_episode_length"] = cfg["task"]["env"]["maxEpisodeLength"]
return new_cfg
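# Sketch of the configuration keys consumed above (Hydra-style nesting); the function
# flattens them into the keyword arguments expected by MuJoCoFloatingPlatform below:
#
#   cfg["seed"], cfg["task"]["sim"]["dt"],
#   cfg["task"]["env"]["maxEpisodeLength"], cfg["task"]["env"]["controlFrequencyInv"],
#   cfg["task"]["env"]["task_parameters"]["max_spawn_dist" / "min_spawn_dist" / "kill_dist"],
#   cfg["task"]["env"]["platform"], cfg["task"]["env"]["disturbances"], cfg["hl_task"]["run_batch"]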
class MuJoCoFloatingPlatform:
"""
A class for the MuJoCo Floating Platform environment."""
def __init__(
self,
step_time: float = 0.02,
duration: float = 60.0,
inv_play_rate: int = 10,
spawn_parameters: Dict[str, float] = None,
platform: Dict[str, Union[bool, dict, float, str, int]] = None,
disturbances: Dict[str, Union[bool, float]] = None,
run_batch: int = 1,
max_episode_length: int = 500,
**kwargs
) -> None:
"""
Initializes the MuJoCo Floating Platform environment.
Args:
step_time (float, optional): The time between steps in the simulation (seconds). Defaults to 0.02.
duration (float, optional): The duration of the simulation (seconds). Defaults to 60.0.
inv_play_rate (int, optional): The inverse of the play rate. Defaults to 10.
spawn_parameters (Dict[str, float], optional): A dictionary containing the spawn parameters. Defaults to None.
platform (Dict[str, Union[bool,dict,float,str,int]], optional): A dictionary containing the platform parameters. Defaults to None.
disturbances (Dict[str, Union[bool, float]], optional): A dictionary containing the disturbances parameters. Defaults to None.
**kwargs: Additional arguments."""
self.inv_play_rate = inv_play_rate
self.platform = platform
self.run_batch = run_batch
self.max_episode_length = max_episode_length
self.DR = Disturbances(disturbances, platform["seed"])
self.TK = RandomKillThrusters(
{
"num_thrusters_to_kill": platform["randomization"]["max_thruster_kill"]
* platform["randomization"]["kill_thrusters"],
"seed": platform["seed"],
}
)
self.RS = RandomSpawn(spawn_parameters)
self.createModel()
self.initializeModel()
self.setupPhysics(step_time, duration)
self.initForceAnchors()
self.reset()
self.csv_datas = []
def reset(
self,
initial_position: List[float] = [0, 0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
) -> None:
"""
Resets the simulation.
Args:
initial_position (list, optional): The initial position of the body. Defaults to [0,0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
self.initializeModel()
self.resetPosition(
initial_position=initial_position, initial_orientation=initial_orientation
)
self.DR.force_disturbances.generate_forces()
self.DR.torque_disturbances.generate_torques()
self.TK.generate_thruster_kills()
def initializeModel(self) -> None:
"""
Initializes the mujoco model for the simulation."""
self.data = mujoco.MjData(self.model)
mujoco.mj_forward(self.model, self.data)
self.body_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "top")
def setupPhysics(self, step_time: float, duration: float) -> None:
"""
Sets up the physics parameters for the simulation.
Args:
step_time (float): The time between steps in the simulation (seconds).
duration (float): The duration of the simulation (seconds)."""
self.model.opt.timestep = step_time
self.model.opt.gravity = [0, 0, 0]
self.duration = duration
def createModel(self) -> None:
"""
        Builds the MJCF (XML) string that defines the MuJoCo model for the simulation.
        The mass and radius of the platform are read from the configuration.
The initial position is set to (3, 3, 0.4) m."""
self.radius = self.platform["core"]["radius"]
self.mass = self.platform["core"]["mass"]
sphere_p1 = """
<mujoco model="tippe top">
<option integrator="RK4"/>
<asset>
<texture name="grid" type="2d" builtin="checker" rgb1=".1 .2 .3"
rgb2=".2 .3 .4" width="300" height="300"/>
<material name="grid" texture="grid" texrepeat="8 8" reflectance=".2"/>
</asset>
<worldbody>
<geom size="10.0 10.0 .01" type="plane" material="grid"/>
<light pos="0 0 10.0"/>
<camera name="closeup" pos="0 -3 2" xyaxes="1 0 0 0 1 2"/>
<body name="top" pos="0 0 .4">
<freejoint/>
"""
sphere_p2 = (
'<geom name="ball" type="sphere" size="'
+ str(self.radius)
+ '" mass="'
+ str(self.mass)
+ '"/>'
)
sphere_p3 = """
</body>
</worldbody>
<keyframe>
<key name="idle" qpos="3 3 0.4 1 0 0 0" qvel="0 0 0 0 0 0" />
</keyframe>
</mujoco>
"""
sphere = "\n".join([sphere_p1, sphere_p2, sphere_p3])
self.model = mujoco.MjModel.from_xml_string(sphere)
def initForceAnchors(self) -> None:
""" "
Defines where the forces are applied relatively to the center of mass of the body.
self.forces: 8x3 array of forces, indicating the direction of the force.
self.positions: 8x3 array of positions, indicating the position of the force."""
self.max_thrust = self.platform["configuration"]["thrust_force"]
self.forces = np.array(
[
[1, -1, 0],
[-1, 1, 0],
[1, 1, 0],
[-1, -1, 0],
[-1, 1, 0],
[1, -1, 0],
[-1, -1, 0],
[1, 1, 0],
]
)
# Normalize the forces.
self.forces = self.forces / np.linalg.norm(self.forces, axis=1).reshape(-1, 1)
# Multiply by the max thrust.
self.forces = self.forces * self.max_thrust
self.positions = (
np.array(
[
[1, 1, 0],
[1, 1, 0],
[-1, 1, 0],
[-1, 1, 0],
[-1, -1, 0],
[-1, -1, 0],
[1, -1, 0],
[1, -1, 0],
]
)
* 0.2192
)
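    # Geometry note (derived from the arrays above): thrusters are paired at the four
    # corners of a square of half-size 0.2192 m, the two thrusters of a pair firing in
    # opposite directions. For example thruster 0 sits at (0.2192, 0.2192, 0) and pushes
    # along (1, -1, 0)/sqrt(2) scaled by the max thrust F, so its torque about z is
    #   tau_z = x*F_y - y*F_x = 0.2192*(-F/sqrt(2)) - 0.2192*(F/sqrt(2)) = -sqrt(2)*0.2192*F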
def resetPosition(
self,
initial_position: List[float] = [0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
) -> None:
"""
Resets the position of the body and sets its velocity to 0.
Resets the timer as well.
Args:
initial_position (list, optional): The initial position of the body. Defaults to [0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
mujoco.mj_resetDataKeyframe(self.model, self.data, 0)
self.data.qpos[:2] = initial_position[:2]
self.data.qpos[3:7] = initial_orientation
self.data.qvel = 0
def applyForces(self, action: np.ndarray) -> None:
"""
Applies the forces to the body.
Args:
action (np.ndarray): The actions to apply to the body."""
self.data.qfrc_applied[...] = 0 # Clear applied forces.
rmat = self.data.xmat[self.body_id].reshape(3, 3) # Rotation matrix.
p = self.data.xpos[self.body_id] # Position of the body.
# Compute the number of thrusters fired, split the pressure between the nozzles.
factor = max(np.sum(action), 1)
# For each thruster, apply a force if needed.
for i in range(8):
if (
self.TK.killed_thrusters_id is not None
and i in self.TK.killed_thrusters_id
):
continue
            # The force applied is the action value (1 or 0), divided by the number of thrusters fired (factor), times the thruster force vector.
force = self.DR.noisy_actions.add_noise_on_act(action[i])
force = force * (1.0 / factor) * self.forces[i]
# If the force is not zero, apply the force.
if np.sum(np.abs(force)) > 0:
force = np.matmul(rmat, force) # Rotate the force to the global frame.
p2 = (
np.matmul(rmat, self.positions[i]) + p
) # Compute the position of the force.
mujoco.mj_applyFT(
self.model,
self.data,
force,
[0, 0, 0],
p2,
self.body_id,
self.data.qfrc_applied,
) # Apply the force.
uf_forces = self.DR.force_disturbances.get_floor_forces(self.data.qpos[:2])
td_forces = self.DR.torque_disturbances.get_torque_disturbance(
self.data.qpos[:2]
)
mujoco.mj_applyFT(
self.model,
self.data,
uf_forces,
td_forces,
self.data.qpos[:3],
self.body_id,
self.data.qfrc_applied,
) # Apply the force.
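    # Note on thrust splitting (describing the loop above): the thrust of each fired
    # thruster is divided by the number of active thrusters, so the binary action
    # [1, 1, 0, 0, 0, 0, 0, 0] applies max_thrust / 2 along each of the first two thruster
    # directions, keeping the total applied thrust roughly constant.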
def getObs(self) -> Dict[str, np.ndarray]:
"""
        Returns an up-to-date observation buffer.
Returns:
Dict[str, np.ndarray]: A dictionary containing the state of the simulation.
"""
state = {}
state["angular_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.data.qvel[3:6].copy()
)
state["linear_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.data.qvel[0:3].copy()
)
state["position"] = self.DR.noisy_observations.add_noise_on_pos(
self.data.qpos[0:3].copy()
)
state["quaternion"] = self.data.qpos[3:].copy()
return state
def runLoop(
self,
model,
initial_position: List[float] = [0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
):
"""
Runs the simulation loop.
Args:
model (object): The model of the controller.
initial_position (list, optional): The initial position of the body. Defaults to [0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
if self.run_batch > 1:
self.run_batch_evaluation(model)
else:
self.run_single_evaluation(model, initial_position, initial_orientation)
def run_single_evaluation(self, model, initial_position, initial_orientation):
self.reset(
initial_position=initial_position, initial_orientation=initial_orientation
)
done = False
while (self.duration > self.data.time) and (not done):
state = self.getObs() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
done = model.isDone()
model.saveSimulationData()
model.plotSimulation()
def run_batch_evaluation(self, model):
"""
Runs the simulation loop.
Args:
model (object): The model of the controller.
"""
print("Running the simulations.")
for i in range(self.run_batch):
# Runs the simulation
print("Running simulation " + str(i) + " of " + str(self.run_batch) + ".")
initial_position, initial_orientation = self.RS.getInitialCondition()
self.reset(
initial_position=initial_position,
initial_orientation=initial_orientation,
)
model.initializeLoggers()
step = 0
while self.max_episode_length > step:
state = self.getObs() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state, mute=True)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
step += 1
# Saves the simulation data
model.saveSimulationData(suffix=str(i))
model.plotBatch()
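# Example (sketch): building the environment from a parsed configuration and running it
# with a controller object exposing getAction / isDone / plotSimulation.
#
#   env_cfg = parseEnvironmentConfig(cfg)
#   env = MuJoCoFloatingPlatform(**env_cfg)
#   env.runLoop(controller, initial_position=[0.0, 0.0])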
| 14,827 | Python | 34.389021 | 142 | 0.547177 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/position_controller_RL.py | from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoPositionControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target)
def applyFriction(self, fdyn=0.1, fstat=0.1, tdyn=0.05, tstat=0.0):
lin_vel = self.data.qvel[:3]
lin_vel_norm = np.linalg.norm(lin_vel)
ang_vel = self.data.qvel[-1]
forces = self.data.qfrc_applied[:3]
forces_norm = np.linalg.norm(forces)
torques = self.data.qfrc_applied[3:]
torques_norm = np.linalg.norm(torques)
#if (forces_norm > fstat) or (torques_norm > tstat):
if lin_vel_norm > 0.001:
lin_vel_normed = np.array(lin_vel) / lin_vel_norm
force = -lin_vel_normed * fdyn
force[-1] = 0
mujoco.mj_applyFT(self.model, self.data, list(force), [0,0,0], self.data.qpos[:3], self.body_id, self.data.qfrc_applied)
if ang_vel > 0.001:
torque = - np.sign(ang_vel) * tdyn
mujoco.mj_applyFT(self.model, self.data, [0,0,0], [0,0,torque], self.data.qpos[:3], self.body_id, self.data.qfrc_applied)
#else:
# self.data.qfrc_applied[:3] = 0
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class PositionController:
def __init__(self, model: RLGamesModel, goal_x: List[float], goal_y: List[float], distance_threshold: float = 0.03) -> None:
self.model = model
self.goals = np.array([goal_x, goal_y]).T
self.current_goal = self.goals[0]
self.distance_threshold = distance_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
def isGoalReached(self, state):
dist = np.linalg.norm(self.current_goal - state["position"])
if dist < self.distance_threshold:
return True
def getGoal(self):
return self.current_goal
def setGoal(self, goal):
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self):
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
def makeObservationBuffer(self, state):
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 0
self.obs_state[0,6:8] = torch.tensor(self.current_goal - state["position"], dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic: bool = True):
if self.isGoalReached(state):
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.makeObservationBuffer(state)
return self.model.getAction(self.obs_state, is_deterministic=is_deterministic)
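# Layout of the 10-dimensional observation buffer filled by makeObservationBuffer above
# (an interpretation of the assignments in the method, not an official spec):
#   [0:2]  orientation as provided by the environment (cos/sin of heading)
#   [2:4]  linear velocity
#   [4]    angular velocity (z)
#   [5]    task flag (0 = go-to-position)
#   [6:8]  goal position error (goal - position)
#   [8:10] unused for this task (left at zero)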
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform. In world frame, meters.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform. In world frame, meters.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
assert not args.goal_x is None, "The x coordinates of the goals must be specified."
assert not args.goal_y is None, "The y coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
except:
raise ValueError("Could not create the save directory.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
position_controller = PositionController(model, args.goal_x, args.goal_y)
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir) | 10,188 | Python | 50.459596 | 318 | 0.642226 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/pose_controller_DC.py | from typing import Callable, NamedTuple, Optional, Union, List, Dict
from scipy.linalg import solve_discrete_are
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import numpy as np
import argparse
import scipy.io
import mujoco
import torch
import os
import cvxpy as cp
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
class MuJoCoPoseControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target[:2])
self.logs["heading_target"].append(target[-1])
def updateState(self) -> Dict[str, np.ndarray]:
"""
Updates the loggers with the current state of the simulation."""
state = {}
state["angular_velocity"] = self.ON.add_noise_on_vel(self.data.qvel[3:6].copy())
state["linear_velocity"] = self.ON.add_noise_on_vel(self.data.qvel[0:3].copy())
state["position"] = self.ON.add_noise_on_pos(self.data.qpos[0:3].copy())
state["quaternion"] = self.data.qpos[3:].copy()
return state
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(self.logs["timevals"], np.array(self.logs["actions"]), label="system action")
plt.tight_layout()
if save:
            try:
                os.makedirs(save_dir, exist_ok=True)
                fig.savefig(os.path.join(save_dir, "actions.png"))
            except Exception as e:
                print("Saving failed: ", e)
class DiscreteController:
"""
Discrete pose controller for the Floating Platform."""
def __init__(self, target_position: List[float], target_orientation: List[float], thruster_count:int=8, dt:float=0.02, Mod:MuJoCoFloatingPlatform=None, control_type = 'LQR') -> None:
self.target_position = np.array(target_position)
self.target_orientation = np.array(target_orientation)
self.thruster_count = thruster_count
self.thrusters = np.zeros(thruster_count) # Initialize all thrusters to off
self.dt = dt
self.FP = Mod
self.control_type = control_type
self.opti_states = None
# control parameters
self.Q = np.diag([1,1,5,5,1,1,1]) # State cost matrix
self.R = np.diag([0.01] * self.thruster_count) # Control cost matrix
self.W = np.diag([0.1] * 7) # Disturbance weight matrix
self.find_gains()
def find_gains(self,r0=None):
# Compute linearized system matrices A and B based on your system dynamics
self.A, self.B = self.compute_linearized_system(r0) # Compute linearized system matrices
self.make_planar_compatible()
if self.control_type == 'H-inf':
self.compute_hinfinity_gains()
elif self.control_type == 'LQR':
self.compute_lqr_gains()
else:
raise ValueError("Invalid control type specified.")
def compute_lqr_gains(self):
self.P = solve_discrete_are(self.A, self.B, self.Q, self.R)
self.L = np.linalg.inv(self.R + self.B.T @ self.P @ self.B) @ self.B.T @ self.P @ self.A
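    # Reference sketch of the discrete-time LQR used above: P solves the discrete
    # algebraic Riccati equation
    #   P = A^T P A - A^T P B (R + B^T P B)^{-1} B^T P A + Q
    # and the gain is
    #   L = (R + B^T P B)^{-1} B^T P A,
    # with the control law u = L @ state applied in control_cost(), where `state` is the
    # error vector assembled in update().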
def compute_hinfinity_gains(self):
X = cp.Variable((self.A.shape[0], self.A.shape[0]), symmetric=True)
gamma = cp.Parameter(nonneg=True) # Define gamma as a parameter
regularization_param = 1e-6
# Regularize matrix using the pseudo-inverse
A_regularized = self.A @ np.linalg.inv(self.A.T @ self.A + regularization_param * np.eye(self.A.shape[1]))
B_regularized = self.B @ np.linalg.inv(self.B.T @ self.B + regularization_param * np.eye(self.B.shape[1]))
# Define the constraints using regularized matrices
constraints = [X >> np.eye(A_regularized.shape[1])] # X >= 0
# Define a relaxation factor
relaxation_factor = 1 # Adjust this value based on your experimentation
# Linear matrix inequality constraint with relaxation
constraints += [cp.bmat([[A_regularized.T @ X @ A_regularized - X + self.Q, A_regularized.T @ X @ B_regularized],
[B_regularized.T @ X @ A_regularized, B_regularized.T @ X @ B_regularized - (gamma**2) * relaxation_factor * np.eye(B_regularized.shape[1])]]) << 0]
objective = cp.Minimize(gamma)
prob = cp.Problem(objective, constraints)
# Set the value of the parameter gamma
gamma.value = 1.0 # You can set the initial value based on your problem
prob.solve()
if prob.status == cp.OPTIMAL:
self.L = np.linalg.inv(self.B.T @ X.value @ self.B + gamma.value**2 * np.eye(self.B.shape[1])) @ self.B.T @ X.value @ self.A
else:
raise Exception("H-infinity control design failed.")
def set_target(self, target_position: List[float], target_orientation: List[float]) -> None:
"""
Sets the target position and orientation."""
self.target_position = np.array(target_position)
self.target_orientation = np.array(target_orientation)
def compute_linearized_system(self, r0=None) -> None:
"""
Compute linearized system matrices A and B.
With A the state transition matrix.
With B the control input matrix."""
if r0 is None:
r0 = np.concatenate((self.FP.data.qpos[:3],self.FP.data.qvel[:3], self.FP.data.qpos[3:], self.FP.data.qvel[3:]),axis =None)
t_int = 0.2 # time-interval at 5Hz
A = self.f_STM(r0,t_int,self.FP.model,self.FP.data,self.FP.body_id)
#Aan = self.f_STM(r0,t_int,self.FP.model,self.FP.data,self.FP.body_id)
B = self.f_B(r0,t_int,self.FP.model,self.FP.data,self.FP.body_id,self.thruster_count)
return A, B
def make_planar_compatible(self) -> None:
"""
Remove elements of the STM to make it planar compatible.
Required states #[x,y,vx,vy,qw,qz,wz]."""
a = self.A
b = self.B
a = np.delete(a, 11, axis=0) # Remove row: wy
a = np.delete(a, 10, axis=0) # Remove row: wx
a = np.delete(a, 8, axis=0) # Remove row: qy
        a = np.delete(a, 7, axis=0)  # Remove row: qx
a = np.delete(a, 5, axis=0) # Remove row: vz
a = np.delete(a, 2, axis=0) # Remove row: z
a = np.delete(a, 11, axis=1) # Remove col: wy
a = np.delete(a, 10, axis=1) # Remove col: wx
a = np.delete(a, 8, axis=1) # Remove col: qy
        a = np.delete(a, 7, axis=1)  # Remove col: qx
a = np.delete(a, 5, axis=1) # Remove col: vz
a = np.delete(a, 2, axis=1) # Remove col: z
b = np.delete(b, 11, axis=0) # Remove row: wy
b = np.delete(b, 10, axis=0) # Remove row: wx
b = np.delete(b, 8, axis=0) # Remove row: qy
        b = np.delete(b, 7, axis=0)  # Remove row: qx
b = np.delete(b, 5, axis=0) # Remove row: vz
b = np.delete(b, 2, axis=0) # Remove row: z
b[b == 0] = 1e-4
self.A = a
self.B = b
return None
def f_STM(self, r0:np.ndarray, t_int: float, model, data, body_id) -> None:
"""
Identify A matrix of linearized system through finite differencing."""
IC_temp0 = r0
force = [0.0,0.0,0.0]
torque = [0.0,0.0,0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
current_time = data.time
for k in range(np.size(r0)):
delta = max(1e-3,IC_temp0[k]/100)
delta_vec = np.zeros(np.size(r0))
delta_vec[k] = delta
IC_temp_pos = np.add(IC_temp0,delta_vec)
IC_temp_neg = np.subtract(IC_temp0,delta_vec)
# Positive direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_pos[0:3]
data.qvel[:3] = IC_temp_pos[3:6]
data.qpos[3:] = IC_temp_pos[6:10]
data.qvel[3:] = IC_temp_pos[10:13]
mujoco.mj_applyFT(model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied)
mujoco.mj_step(model, data)
ans_pos = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
#print('final_time', data.time)
# Negative direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_neg[0:3]
data.qvel[:3] = IC_temp_neg[3:6]
data.qpos[3:] = IC_temp_neg[6:10]
data.qvel[3:] = IC_temp_neg[10:13]
mujoco.mj_applyFT(model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied)
mujoco.mj_step(model, data)
ans_neg = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
#print('final_time', data.time)
if k==0:
STM = np.subtract(ans_pos,ans_neg)/(2*delta)
else :
temp = np.subtract(ans_pos,ans_neg)/(2*delta)
STM = np.vstack((STM,temp))
STM = STM.transpose()
STM[6,6] = 1.0
data.time = current_time
model.opt.timestep = default_tstep
return STM
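    # Central-difference sketch of what the loop above computes, one column per state:
    #   A[:, k] ≈ (phi(x0 + delta*e_k, t_int) - phi(x0 - delta*e_k, t_int)) / (2*delta)
    # where phi(x, t_int) is the MuJoCo state after one integration step of length t_int
    # started from state x with no applied forces.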
def f_STM_analytical(self, r0:np.ndarray, t_int:float, model, data, body_id) -> None:
"""
Identify A matrix of linearized system through finite differencing."""
IC_temp0 = r0
STM = np.eye(np.size(r0))
w1 = IC_temp0[10]
w2 = IC_temp0[11]
w3 = IC_temp0[12]
qw = IC_temp0[6]
qx = IC_temp0[7]
qy = IC_temp0[8]
qz = IC_temp0[9]
STM[0,3] = t_int
STM[1,4] = t_int
STM[2,5] = t_int
STM[6,6] = 1
STM[6,7] = -0.5*w1*t_int
STM[6,8] = -0.5*w2*t_int
STM[6,9] = -0.5*w3*t_int
STM[6,10] = -0.5*qx*t_int
STM[6,11] = -0.5*qy*t_int
STM[6,12] = -0.5*qz*t_int
STM[7,6] = 0.5*w1*t_int
STM[7,7] = 1
STM[7,8] = 0.5*w3*t_int
STM[7,9] = -0.5*w2*t_int
STM[7,10] = 0.5*qw*t_int
STM[7,11] = -0.5*qz*t_int
STM[7,12] = 0.5*qy*t_int
STM[8,6] = 0.5*w2*t_int
STM[8,7] = -0.5*w3*t_int
STM[8,8] = 1
STM[8,9] = 0.5*w1*t_int
STM[8,10] = 0.5*qz*t_int
STM[8,11] = 0.5*qw*t_int
STM[8,12] = -0.5*qx*t_int
STM[9,6] = 0.5*w3*t_int
STM[9,7] = -0.5*w2*t_int
STM[9,8] = -0.5*w1*t_int
STM[9,9] = 1
STM[9,10] = -0.5*qy*t_int
STM[9,11] = 0.5*qx*t_int
STM[9,12] = 0.5*qw*t_int
return STM
def f_B(self, r0: np.ndarray, t_int: float, model, data, body_id, number_thrust: int) -> None:
"""
Identify B matrix of linearized system through finite differencing."""
IC_temp0 = r0
force = [0.0,0.0,0.0]
torque = [0.0,0.0,0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
u = np.zeros(number_thrust)
current_time = data.time
for k in range(np.size(u)):
delta = 0.01
delta_vec = np.zeros(np.size(u))
delta_vec[k] = delta
# Positive direction
u_plus = np.add(u,delta_vec)
force_plus = u_plus[k] * self.FP.forces[k]# * np.sqrt(0.5)
rmat = data.xmat[body_id].reshape(3,3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_plus = np.matmul(rmat, force_plus) # Rotate the force to the body frame.
p2 = np.matmul(rmat, self.FP.positions[k]) + p # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(model, data, force_plus, torque, p2, body_id, data.qfrc_applied) # Apply the force.
mujoco.mj_step(model, data)
ans_pos = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
# Negative direction
u_minus = np.subtract(u,delta_vec)
            force_minus = u_minus[k] * self.FP.forces[k]  # * np.sqrt(0.5)
rmat = data.xmat[body_id].reshape(3,3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_minus = np.matmul(rmat, force_minus) # Rotate the force to the body frame.
p2 = np.matmul(rmat, self.FP.positions[k]) + p # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(model, data, force_minus, torque, p2, body_id, data.qfrc_applied) # Apply the force.
mujoco.mj_step(model, data)
ans_neg = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
if k==0:
B = np.subtract(ans_pos,ans_neg)/(2*delta)
else :
temp = np.subtract(ans_pos,ans_neg)/(2*delta)
B = np.vstack((B,temp))
B = B.transpose()
model.opt.timestep = default_tstep
data.time = current_time
return B
def control_cost(self) -> np.ndarray:
# Cost function to be minimized for control input optimization
if self.control_type == 'H-inf':
control_input = np.array(self.L @ self.state) + self.disturbance
elif self.control_type == 'LQR':
self.find_gains(r0=self.opti_states)
control_input = np.array(self.L @ self.state)
else:
raise ValueError("Invalid control type specified.")
return control_input
def update(self, current_position: np.ndarray, current_orientation: np.ndarray, current_velocity: np.ndarray, current_angular_velocity:np.ndarray, disturbance:np.ndarray = None):
# Calculate errors
position_error = self.target_position - current_position
orientation_error = self.target_orientation - current_orientation
velocity_error = np.array([0.0, 0.0, 0.0]) - current_velocity
angvel_error = np.array([0.0, 0.0, 0.0]) - current_angular_velocity
self.opti_states = np.concatenate((current_position, current_velocity, current_orientation, current_angular_velocity), axis=None)
        if disturbance is None:
disturbance = np.random.rand(8) * 0.000 # Example disturbance
self.disturbance = disturbance
# Combine errors into the state vector
self.state = np.array([position_error[0], position_error[1], velocity_error[0], velocity_error[1], orientation_error[0], orientation_error[3], angvel_error[2]])
# Optimal U
original_u = self.control_cost()
        # Zero out control inputs whose magnitude is below 0.25
intermediate_u = np.where(np.abs(original_u) < .25, 0.0, original_u)
if np.max(intermediate_u) == 0.0:
normalized_array = np.zeros(self.thruster_count)
else:
normalized_array = (intermediate_u - np.min(intermediate_u)) / (np.max(intermediate_u) - np.min(intermediate_u))
        # Round the normalized array to the nearest integer, biasing the threshold by 0.25
        final_U = np.round(normalized_array - 0.25).astype(int)
self.thrusters = final_U
return self.thrusters
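# Example (sketch): closing the loop with the discrete controller alone, outside the
# PoseController wrapper defined below.
#
#   dc = DiscreteController([2.5, -1.5, 0.0], [1, 0, 0, 0], Mod=env, control_type="LQR")
#   thrusters = dc.update(position, quaternion, linear_velocity, angular_velocity)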
class PoseController:
"""
Controller for the pose of the robot."""
def __init__(self, model: DiscreteController, goal_x: List[float], goal_y: List[float], goal_theta: List[float], distance_threshold: float = 0.03) -> None:
# Discrete controller
self.model = model
# Creates an array goals
if goal_theta is None:
goal_theta = np.zeros_like(goal_x)
self.goals = np.array([goal_x, goal_y, goal_theta]).T
self.current_goal = self.goals[0]
self.current_goal_controller = np.zeros((3), dtype=np.float32)
self.current_goal_controller[:2] = self.current_goal[:2]
self.distance_threshold = distance_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.distance_threshold
def getGoal(self) -> np.ndarray:
return self.current_goal
def setGoal(self, goal:np.ndarray) -> None:
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
def makeObservationBuffer(self, state):
q = state["quaternion"]
siny_cosp = 2 * (q[0] * q[3] + q[1] * q[2])
cosy_cosp = 1 - 2 * (q[2] * q[2] + q[3] * q[3])
self.obs_state[0,:2] = torch.tensor([cosy_cosp, siny_cosp], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"][:2], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"][-1]
self.obs_state[0,5] = 1
self.obs_state[0,6:8] = torch.tensor(self.current_goal[:2] - state["position"][:2], dtype=torch.float32, device="cuda")
heading = np.arctan2(siny_cosp, cosy_cosp)
heading_error = np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading))
self.obs_state[0,8] = torch.tensor(np.cos(heading_error), dtype=torch.float32, device="cuda")
self.obs_state[0,9] = torch.tensor(np.sin(heading_error), dtype=torch.float32, device="cuda")
def makeState4Controller(self, state: Dict[str, np.ndarray]) -> List[np.ndarray]:
self.makeObservationBuffer(state)
current_position = state["position"]
current_position[-1] = 0
current_orientation = state["quaternion"]
current_linear_velocity = state["linear_velocity"]
current_angular_velocity = state["angular_velocity"]
return current_position, current_orientation, current_linear_velocity, current_angular_velocity
def getAction(self, state: Dict[str, np.ndarray], **kwargs) -> np.ndarray:
if self.isGoalReached(state):
print("Goal reached!")
if len(self.goals) > 1:
                self.current_goal = self.goals[1]
                self.current_goal_controller[:2] = self.current_goal[:2]
                self.goals = self.goals[1:]
                self.model.find_gains(r0=self.model.opti_states)
else:
self.goals = []
current_position, current_orientation, current_linear_velocity, current_angular_velocity = self.makeState4Controller(state)
self.model.set_target(self.current_goal_controller, [1,0,0,0])
return self.model.update(current_position, current_orientation, current_linear_velocity, current_angular_velocity)
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_theta", type=float, nargs="+", default=None, help="List of headings for the goals to be reached by the platform. In world frame, radiants.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert not args.goal_x is None, "The x coordinates of the goals must be specified."
assert not args.goal_y is None, "The y coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
except:
raise ValueError("Could not create the save directory.")
# Creates the environment
env = MuJoCoPoseControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Instantiates the Discrete Controller (DC)
model = DiscreteController([2.5,-1.5,0.],[1,0,0,0], Mod=env, control_type='LQR') # control type: 'H-inf' or 'LQR' | H-inf not stable at many locations
# Creates the velocity tracker
position_controller = PoseController(model, args.goal_x, args.goal_y, args.goal_theta)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir)
| 26,942 | Python | 45.373494 | 318 | 0.575644 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/linear_velocity_tracker_RL.py | from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoVelTracking(MuJoCoFloatingPlatform):
"""
The environment for the velocity tracking task inside Mujoco."""
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["position_target"] = []
def updateLoggers(self, goal: np.ndarray, target: np.ndarray) -> None:
"""
Updates the loggers."""
super().updateLoggers()
self.logs["velocity_goal"].append(goal)
self.logs["position_target"].append(target)
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal(), model.getTargetPosition())
def plotSimulation(self, dpi:int = 135, width:int = 1000, height:int = 1000, save:bool = False, save_dir:str = "velocity_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax.plot(self.logs["timevals"], self.logs["velocity_goal"], label="target velocities")
ax.legend()
ax.set_xlabel('time (seconds)')
ax.set_ylabel('Linear velocities (m/s)')
_ = ax.set_title('Linear velocity tracking')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir,"velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="trajectory")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('x (meters)')
ax.set_ylabel('y (meters)')
ax.axis("equal")
_ = ax.set_title('Trajectory in xy plane')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class TrajectoryTracker:
"""
A class to generate and track trajectories."""
def __init__(self, lookahead:float = 0.25, closed:bool = False, offset = (0,0)):
self.current_point = -1
self.lookhead = lookahead
self.closed = closed
self.is_done = False
self.offset = np.array(offset)
def generateCircle(self, radius:float = 2, num_points:int = 360*10):
theta = np.linspace(0, 2*np.pi, num_points, endpoint=(not self.closed))
self.positions = np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
    def generateSquare(self, h:float = 2, num_points:int = 360*10) -> None:
        points_per_side = num_points // 4
        s1y = np.linspace(-h/2, h/2, points_per_side, endpoint=False)
        s1x = np.ones_like(s1y) * h/2
        s2x = np.linspace(h/2, -h/2, points_per_side, endpoint=False)
        s2y = np.ones_like(s2x) * h/2
        s3y = np.linspace(h/2, -h/2, points_per_side, endpoint=False)
        s3x = np.ones_like(s3y) * (-h/2)
        s4x = np.linspace(-h/2, h/2, points_per_side, endpoint=False)
        s4y = np.ones_like(s4x) * (-h/2)
        self.positions = np.vstack([np.hstack([s1x, s2x, s3x, s4x]), np.hstack([s1y, s2y, s3y, s4y])]).T + self.offset
        # A square has no smooth tangent at its corners, so the angles are kept as placeholders.
        self.angles = np.ones_like(self.positions)
def generateSpiral(self, start_radius:float = 0.5, end_radius:float = 2, num_loop:float = 5, num_points: int = 360*20) -> None:
radius = np.linspace(start_radius, end_radius, num_points, endpoint=(not self.closed))
theta = np.linspace(0, 2*np.pi*num_loop, num_points, endpoint=(not self.closed))
self.positions = np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
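    # Lookahead-based tracking: the methods below pick the farthest path sample that is
    # both within the lookahead radius of the current position and within a small forward
    # window (the first 60 samples of the rolled path), then re-root the path at that
    # sample. The window size is an implementation detail inferred from the code below.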
def getTrackingPointIdx(self, position:np.ndarray) -> None:
distances = np.linalg.norm(self.positions - position, axis=1)
if self.current_point == -1:
self.current_point = 0
else:
indices = np.where(distances < self.lookhead)[0]
if len(indices) > 0:
indices = indices[indices < 60]
if len(indices) > 0:
self.current_point = np.max(indices)
def rollTrajectory(self) -> None:
if self.closed:
self.positions = np.roll(self.positions, -self.current_point, axis=0)
self.angles = np.roll(self.angles, -self.current_point, axis=0)
self.current_point = 0
else:
self.positions = self.positions[self.current_point:]
self.angles = self.angles[self.current_point:]
self.current_point = 0
if self.positions.shape[0] <= 1:
self.is_done = True
def getPointForTracking(self) -> List[np.ndarray]:
position = self.positions[self.current_point]
angle = self.angles[self.current_point]
self.rollTrajectory()
return position, angle
def get_target_position(self) -> np.ndarray:
return self.target_position
def computeVelocityVector(self, target_position:np.ndarray, position:np.ndarray) -> np.ndarray:
diff = target_position - position
return diff / np.linalg.norm(diff)
def getVelocityVector(self, position:np.ndarray) -> np.ndarray:
self.getTrackingPointIdx(position)
self.target_position, target_angle = self.getPointForTracking()
velocity_vector = self.computeVelocityVector(self.target_position, position)
return velocity_vector
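# Minimal usage sketch for TrajectoryTracker (illustrative only; the numeric values are
# assumptions): generate a path once, then repeatedly query the unit velocity vector that
# points from the platform's current position toward the lookahead point.
#
#   tracker = TrajectoryTracker(lookahead=0.15, closed=True)
#   tracker.generateCircle(radius=1.5)
#   direction = tracker.getVelocityVector(np.array([0.0, 0.0]))  # unit vector toward the path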
class VelocityTracker:
def __init__(self, trajectory_tracker: TrajectoryTracker, model: RLGamesModel, target_tracking_velocity:float = 0.25):
self.trajectory_tracker = trajectory_tracker
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
def getGoal(self):
return self.velocity_vector*self.target_tracking_velocity
def setGoal(self, goal):
self.target_tracking_velocity = goal
def getObs(self):
return self.obs_state.cpu().numpy()
def getTargetPosition(self):
return self.trajectory_tracker.get_target_position()
def isDone(self):
return self.trajectory_tracker.is_done
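    # Observation layout consumed by the velocity-tracking policy (inferred from the
    # assignments in makeObservationBuffer; the task flag value 2 appears to select the
    # velocity-tracking task):
    #   [0:2]  orientation as (cos(theta), sin(theta))
    #   [2:4]  linear velocity (m/s)
    #   [4]    angular velocity (rad/s)
    #   [5]    task flag
    #   [6:8]  velocity error between the goal velocity and the current velocity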
def makeObservationBuffer(self, state, velocity_vector):
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 2
self.obs_state[0,6:8] = torch.tensor(velocity_vector, dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic=True):
self.velocity_vector = self.trajectory_tracker.getVelocityVector(state["position"])
velocity_goal = self.velocity_vector*self.target_tracking_velocity - state["linear_velocity"]
self.makeObservationBuffer(state, velocity_goal)
action = self.model.getAction(self.obs_state, is_deterministic=is_deterministic)
return action
def parseArgs():
    parser = argparse.ArgumentParser("Runs a MuJoCo velocity-tracking simulation of the floating platform using an RL agent.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--trajectory_type", type=str, default="Circle", help="The type of trajectory to be generated. Options are: Circle, Square, Spiral.")
parser.add_argument("--trajectory_x_offset", type=float, default=0, help="The offset of the trajectory along the x axis. In meters.")
parser.add_argument("--trajectory_y_offset", type=float, default=0, help="The offset of the trajectory along the y axis. In meters.")
parser.add_argument("--radius", type=float, default=1.5, help="The radius of the circle trajectory. In meters.")
parser.add_argument("--height", type=float, default=3.0, help="The height of the square trajectory. In meters.")
parser.add_argument("--start_radius", type=float, default=0.5, help="The starting radius for the spiral for the spiral trajectory. In meters.")
parser.add_argument("--end_radius", type=float, default=2.0, help="The final radius for the spiral trajectory. In meters.")
parser.add_argument("--num_loop", type=float, default=5.0, help="The number of loops the spiral trajectory should make. Must be greater than 0.")
parser.add_argument("--closed", type=bool, default=True, help="Whether the trajectory is closed (it forms a loop) or not.")
parser.add_argument("--lookahead_dist", type=float, default=0.15, help="How far the velocity tracker looks to generate the velocity vector that will track the trajectory. In meters.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--tracking_velocity", type=float, default=0.25, help="The tracking velocity. In meters per second.")
parser.add_argument("--save_dir", type=str, default="velocity_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
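# Example invocation (illustrative only; the model and config paths are placeholders):
#
#   python linear_velocity_tracker_RL.py --model_path <path/to/model.pth> \
#       --config_path <path/to/config.yaml> --trajectory_type Circle --radius 1.5 \
#       --tracking_velocity 0.25 --save_dir velocity_exp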
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.num_loop > 0, "The number of loops must be greater than 0."
assert args.lookahead_dist > 0, "The lookahead distance must be greater than 0."
assert args.radius > 0, "The radius must be greater than 0."
assert args.start_radius > 0, "The start radius must be greater than 0."
assert args.end_radius > 0, "The end radius must be greater than 0."
assert args.height > 0, "The height must be greater than 0."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.tracking_velocity > 0, "The tracking velocity must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
    except OSError as e:
        raise ValueError("Could not create the save directory.") from e
# Creates the trajectory tracker
tracker = TrajectoryTracker(lookahead=args.lookahead_dist, closed=args.closed, offset=(args.trajectory_x_offset, args.trajectory_y_offset))
if args.trajectory_type.lower() == "square":
tracker.generateSquare(h=args.height)
elif args.trajectory_type.lower() == "circle":
tracker.generateCircle(radius=args.radius)
elif args.trajectory_type.lower() == "spiral":
tracker.generateSpiral(start_radius=args.start_radius, end_radius=args.end_radius, num_loop=args.num_loop)
else:
raise ValueError("Unknown trajectory type. Must be square, circle or spiral.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
velocity_tracker = VelocityTracker(tracker, model)
# Creates the environment
env = MuJoCoVelTracking(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(velocity_tracker, [0,0])
# Plots the simulation
env.plotSimulation(save=True, save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(args.save_dir) | 14,937 | Python | 53.123188 | 318 | 0.659035 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/pose_controller_RL.py | from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoPositionControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target[:2])
self.logs["heading_target"].append(target[-1])
def runLoop(self, model, initial_position=[0,0], initial_orientation=[1,0,0,0]) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.reset(initial_position=initial_position, initial_orientation=initial_orientation)
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def runLoopForNSteps(self, model, initial_position=[0,0], initial_orientation=[1,0,0,0], max_steps=502) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.reset(initial_position=initial_position, initial_orientation=initial_orientation)
i = 0
while i < max_steps:
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
i += 1
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
        print(f"Saving plots to: {save_dir}")
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class PoseController:
def __init__(self, model: RLGamesModel, goal_x: List[float], goal_y: List[float], goal_theta: List[float], distance_threshold: float = 0.03, heading_threshold: float = 0.03) -> None:
        self.model = model
        # The default heading goals must be built before assembling the goal array.
        if goal_theta is None:
            goal_theta = np.zeros_like(goal_x)
        self.goals = np.array([goal_x, goal_y, goal_theta]).T
        self.current_goal = self.goals[0]
self.distance_threshold = distance_threshold
self.heading_threshold = heading_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
    def isGoalReached(self, state):
        dist = np.linalg.norm(self.current_goal[:2] - state["position"])
        heading = np.arctan2(state["orientation"][1], state["orientation"][0])
        # Wrap the heading error to [-pi, pi] and compare its magnitude to the threshold.
        ang = np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading))
        return (dist < self.distance_threshold) and (abs(ang) < self.heading_threshold)
def getGoal(self):
return self.current_goal
def setGoal(self, goal):
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self):
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
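    # Observation layout consumed by the pose-control policy (inferred from the
    # assignments in makeObservationBuffer; the task flag value 1 appears to select the
    # pose-control task):
    #   [0:2]  orientation as (cos(theta), sin(theta))
    #   [2:4]  linear velocity (m/s)
    #   [4]    angular velocity (rad/s)
    #   [5]    task flag
    #   [6:8]  position error between the goal and the current position
    #   [8:10] heading error as (cos(error), sin(error))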
def makeObservationBuffer(self, state):
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 1
self.obs_state[0,6:8] = torch.tensor(self.current_goal[:2] - state["position"], dtype=torch.float32, device="cuda")
heading = np.arctan2(state["orientation"][1], state["orientation"][0])
heading_error = np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading))
self.obs_state[0,8] = torch.tensor(np.cos(heading_error), dtype=torch.float32, device="cuda")
self.obs_state[0,9] = torch.tensor(np.sin(heading_error), dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic: bool = True):
if self.isGoalReached(state):
#print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.makeObservationBuffer(state)
return self.model.getAction(self.obs_state,is_deterministic=is_deterministic)
def runBatchEvaluation(args, cfg=None):
    # `default_cfg` is not defined in this file, so fall back to an empty dict
    # and populate only the keys that are used below.
    if cfg is None:
        cfg = {}
horizon = 500
#cfg["maxEpisodeLength"] = horizon + 2
#cfg["platform_mass"] = 5.32
#cfg["clipObservations"]["state"] = 20.0
cfg["max_spawn_dist"] = 4.0
cfg["min_spawn_dist"] = 3.0
#cfg["kill_dist"] = 6.0
cfg["num_envs"] = 256
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
    except OSError as e:
        raise ValueError("Could not create the save directory.") from e
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
position_controller = PoseController(model, [0], [0], [0])
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
for i in range(cfg["num_envs"]):
# Runs the simulation
initial_position, initial_orientation = env.RS.getInitialCondition()
env.runLoopForNSteps(position_controller, initial_position=initial_position, initial_orientation=initial_orientation)
        # Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir, suffix=str(i))
env.plotBatch(save_dir = args.save_dir)
def runSingleEvaluation(args):
try:
os.makedirs(args.save_dir, exist_ok=True)
    except OSError as e:
        raise ValueError("Could not create the save directory.") from e
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
position_controller = PoseController(model, args.goal_x, args.goal_y, args.goal_theta)
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir)
def parseArgs():
    parser = argparse.ArgumentParser("Runs a MuJoCo pose-control simulation of the floating platform using an RL agent.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_theta", type=float, nargs="+", default=None, help="List of headings for the goals to be reached by the platform. In world frame, radiants.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
parser.add_argument("--run_batch", type=bool, default=False, help="If mujoco should be run in batch mode, it's useful to evaluate models. True will enable batch mode.")
parser.add_argument("--num_evals", type=int, default=256, help="The number of experiments that should be ran when in batch mode.")
parser.add_argument("--num_steps", type=int, default=502, help="The number of steps the simulation should run for in batch mode.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
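# Example invocations (illustrative only; the model and config paths are placeholders):
#
#   # Single evaluation over a sequence of pose goals:
#   python pose_controller_RL.py --model_path <path/to/model.pth> --config_path <path/to/config.yaml> \
#       --goal_x 1.0 2.0 --goal_y 0.5 1.5 --goal_theta 0.0 1.57
#
#   # Batch evaluation from randomized initial conditions:
#   python pose_controller_RL.py --model_path <path/to/model.pth> --config_path <path/to/config.yaml> \
#       --goal_x 0 --goal_y 0 --goal_theta 0 --run_batch True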
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
    assert args.goal_x is not None, "The x coordinates of the goals must be specified."
    assert args.goal_y is not None, "The y coordinates of the goals must be specified."
    assert args.goal_theta is not None, "The theta coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
assert len(args.goal_x) == len(args.goal_theta), "The number of x coordinates must be equal to the number of headings."
    # Runs either the batch evaluation or a single evaluation
if args.run_batch:
runBatchEvaluation(args)
else:
runSingleEvaluation(args) | 13,511 | Python | 50.376426 | 318 | 0.650877 |
elharirymatteo/RANS/omniisaacgymenvs/tests/__init__.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .runner import * | 1,580 | Python | 53.51724 | 80 | 0.783544 |