file_path (string) | content (string) | size (int64) | lang (string, 9 classes) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64)
---|---|---|---|---|---|---
DimensionLab/fmmr-water-tank/water_tank/trainer.py | from modulus.hydra import to_yaml
from modulus.hydra.utils import compose
from modulus.solver import Solver
from modulus.domain import Domain
from modulus.domain.inferencer import PointwiseInferencer
from src.geometry import WaterTank
from src.water_tank import network, constraints, inlet_vel
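# compose the Hydra config once at module load; it is shared by the network and constraint builders in run()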
cfg = compose(config_path="conf", config_name="config", job_name="water_tank_training")
print(to_yaml(cfg))
def run():
geo = WaterTank()
domain = Domain()
nodes = network(cfg, scale=geo.scale)
constraints(cfg, geo=geo, nodes=nodes, domain=domain)
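# fixed inlet velocity (m/s) at which the parameterized network is evaluated during inference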
inlet_vel_inference = 1.5
# add inferencer
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(1000000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=4096,
)
domain.add_inferencer(inferencer, "inf_data")
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(5000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=256,
)
domain.add_inferencer(inferencer, "inf_data_small")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 1,325 | Python | 26.624999 | 108 | 0.672453 |
DimensionLab/fmmr-water-tank/water_tank/src/geometry.py | import numpy as np
from sympy import sqrt, Max
from modulus.hydra import to_absolute_path
from modulus.geometry.tessellation import Tessellation
class WaterTank:
"""Water tank geometry"""
inlet_area = None
def __init__(self):
# read stl files to make geometry
point_path = to_absolute_path("./data/stl_files")
inlet_mesh = Tessellation.from_stl(
point_path + "/inlet.stl", airtight=False
)
outlet_left_mesh = Tessellation.from_stl(
point_path + "/outlet_left.stl", airtight=False
)
outlet_right_mesh = Tessellation.from_stl(
point_path + "/outlet_right.stl", airtight=False
)
noslip_mesh = Tessellation.from_stl(
point_path + "/water_tank_noslip.stl", airtight=False
)
interior_mesh = Tessellation.from_stl(
point_path + "/water_tank_closed.stl", airtight=True
)
# scale and normalize mesh and openfoam data
self.center = (0, 0, 0)
self.scale = 1.0
self.inlet_mesh = self.normalize_mesh(inlet_mesh, self.center, self.scale)
self.outlet_left_mesh = self.normalize_mesh(outlet_left_mesh, self.center, self.scale)
self.outlet_right_mesh = self.normalize_mesh(outlet_right_mesh, self.center, self.scale)
self.noslip_mesh = self.normalize_mesh(noslip_mesh, self.center, self.scale)
self.interior_mesh = self.normalize_mesh(interior_mesh, self.center, self.scale)
# geom params
self.inlet_normal = (0.0, 0.0, -2.0)
self.inlet_center = (0.0, 0.0, 3.0)
s = inlet_mesh.sample_boundary(nr_points=10000)
self.inlet_area = np.sum(s["area"])
print("Surface Area: {:.3f}".format(self.inlet_area))
self.inlet_radius = np.sqrt(self.inlet_area / np.pi)
s = self.interior_mesh.sample_interior(nr_points=10000, compute_sdf_derivatives=True)
print("Volume: {:.3f}".format(np.sum(s["area"])))
# inlet velocity profile
def circular_parabola(self, x, y, z, center, normal, radius, max_vel):
centered_x = x - center[0]
centered_y = y - center[1]
centered_z = z - center[2]
distance = sqrt(centered_x ** 2 + centered_y ** 2 + centered_z ** 2)
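# parabolic velocity magnitude: max_vel at the inlet center, clamped to zero at and beyond the radius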
parabola = max_vel * Max((1 - (distance / radius) ** 2), 0)
return normal[0] * parabola, normal[1] * parabola, normal[2] * parabola
# normalize meshes
def normalize_mesh(self, mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
# normalize invars
def normalize_invar(self, invar, center, scale, dims=2):
invar["x"] -= center[0]
invar["y"] -= center[1]
invar["z"] -= center[2]
invar["x"] *= scale
invar["y"] *= scale
invar["z"] *= scale
if "area" in invar.keys():
invar["area"] *= scale ** dims
return invar
| 2,962 | Python | 37.480519 | 96 | 0.597232 |
DimensionLab/fmmr-water-tank/water_tank/src/water_tank.py | from sympy import Symbol
from modulus.hydra import instantiate_arch, ModulusConfig
from modulus.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
# params
# Water at 20°C (https://wiki.anton-paar.com/en/water/)
# https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
nu = 1.787e-06 # m2 * s-1
inlet_vel = Symbol("inlet_velocity")
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# parameterization
inlet_vel_range = (0.05, 10.0)
inlet_vel_params = {inlet_vel: inlet_vel_range}
def network(cfg: ModulusConfig, scale):
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu * scale, rho=1.0, dim=3, time=False)
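# NormalDotVec provides the "normal_dot_vel" output used by the integral continuity constraints below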
normal_dot_vel = NormalDotVec(["u", "v", "w"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z"), Key("inlet_velocity")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
cfg=cfg.arch.fully_connected,
)
return (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network", jit=cfg.jit)]
)
def constraints(cfg: ModulusConfig, geo, nodes, domain):
# add constraints to solver
# inlet
u, v, w = geo.circular_parabola(
x,
y,
z,
center=geo.inlet_center,
normal=geo.inlet_normal,
radius=geo.inlet_radius,
max_vel=inlet_vel,
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.inlet_mesh,
outvar={"u": u, "v": v, "w": w},
batch_size=cfg.batch_size.inlet,
parameterization=inlet_vel_params
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet_left = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_left_mesh,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
parameterization=inlet_vel_params
)
domain.add_constraint(outlet_left, "outlet_left")
outlet_right = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_right_mesh,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
parameterization=inlet_vel_params
)
domain.add_constraint(outlet_right, "outlet_right")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.noslip_mesh,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
parameterization=inlet_vel_params
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo.interior_mesh,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.interior,
parameterization=inlet_vel_params
)
domain.add_constraint(interior, "interior")
# Integral Continuity 1
# TODO: add integral plane somewhere into the geometry
# Integral Continuity 2
integral_continuity_outlet_left = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_left_mesh,
outvar={"normal_dot_vel": 2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
parameterization=inlet_vel_params
)
domain.add_constraint(integral_continuity_outlet_left, "integral_continuity_2")
# Integral Continuity 3
integral_continuity_outlet_right = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_right_mesh,
outvar={"normal_dot_vel": 2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
parameterization=inlet_vel_params
)
domain.add_constraint(integral_continuity_outlet_right, "integral_continuity_3")
| 4,051 | Python | 30.905512 | 84 | 0.643051 |
DimensionLab/fmmr-water-tank/water_tank/src/plotter.py | import numpy as np
import scipy.interpolate
import matplotlib.pyplot as plt
from modulus.hydra import to_absolute_path
from modulus.utils.io.vtk import var_to_polyvtk
from modulus.utils.io import InferencerPlotter
def generate_velocity_profile_3d():
data = np.load(to_absolute_path("outputs/water_tank/inferencers/simulation.npz"), allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
# velocity in 3D
pos = np.dstack((data["x"], data["y"], data["z"]))
V = np.dstack((data["u"], data["v"], data["w"]))
save_var = {
"x": data["x"],
"y": data["y"],
"z": data["z"],
"p": data["p"],
"pos": pos,
"V": V,
}
var_to_polyvtk(save_var, to_absolute_path("outputs/water_tank/inferencers/velocity_profile"))
class InferencerSlicePlotter2D(InferencerPlotter):
"Default plotter class for inferencer"
def __call__(self, invar, outvar):
"Default function for plotting inferencer data"
# get input variables
x, y = invar["x"][:, 0], invar["y"][:, 0]
bounds = (x.min(), x.max(), y.min(), y.max())
extent, outvar = self.interpolate_output(100, x, y, bounds, outvar)
# make plots
fs = []
for k in outvar:
f = plt.figure(figsize=(5, 4), dpi=144)
plt.imshow(outvar[k].T, origin="lower", extent=extent, cmap="jet")
plt.xlabel("x")
plt.ylabel("y")
plt.colorbar(location="bottom")
plt.title(k)
plt.tight_layout()
fs.append((f, k))
return fs
@staticmethod
def interpolate_output(size, x, y, extent, *outvars):
"Interpolates irregular points onto a mesh"
# define mesh to interpolate onto
xyi = np.meshgrid(
np.linspace(extent[0], extent[1], size),
np.linspace(extent[2], extent[3], size),
indexing="ij",
)
# interpolate outvars onto mesh
outvars_interp = []
for outvar in outvars:
outvar_interp = {}
for k in outvar:
outvar_interp[k] = scipy.interpolate.griddata(
(x, y), outvar[k][:, 0], tuple(xyi)
)
outvars_interp.append(outvar_interp)
return [extent] + outvars_interp
| 2,316 | Python | 29.486842 | 104 | 0.560881 |
DimensionLab/fmmr-water-tank/water_tank/outputs/water_tank/wandb/run-20221010_114502-21ugz615/files/config.yaml | wandb_version: 1
_wandb:
desc: null
value:
cli_version: 0.11.2
framework: torch
is_jupyter_run: false
is_kaggle_kernel: false
python_version: 3.8.10
t:
1:
- 1
2:
- 1
- 3
4: 3.8.10
5: 0.11.2
8:
- 5
| 280 | YAML | 12.380952 | 27 | 0.464286 |
DimensionLab/fmmr-water-tank/water_tank/outputs/water_tank/wandb/latest-run/files/config.yaml | wandb_version: 1
_wandb:
desc: null
value:
cli_version: 0.11.2
framework: torch
is_jupyter_run: false
is_kaggle_kernel: false
python_version: 3.8.10
t:
1:
- 1
2:
- 1
- 3
4: 3.8.10
5: 0.11.2
8:
- 5
| 280 | YAML | 12.380952 | 27 | 0.464286 |
DimensionLab/fmmr-water-tank/water_tank/conf/config.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
save_filetypes: "vtk,np"
custom:
external_monitor_platform:
name: wandb
entity: "michaltakac"
project: "water-tank"
api_key: "e67b70a695e41d3d00689deba4e87c6b6d4a7cdc" # get your API key from wandb.ai
scheduler:
decay_rate: 0.95
decay_steps: 15000
training:
rec_results_freq : 200
rec_constraint_freq: 50000
max_steps : 1500000
batch_size:
inlet: 650
outlet: 650
no_slip: 5200
interior: 6000
integral_continuity: 310
| 604 | YAML | 17.333333 | 90 | 0.688742 |
DimensionLab/fmmr-water-tank/water_tank/conf/config_eval.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
run_mode: "eval"
save_filetypes: "vtk,np"
custom:
network_dir: 'outputs/water_tank/checkpoints'
initialization_network_dir: 'outputs/water_tank/checkpoints'
scheduler:
decay_rate: 0.95
decay_steps: 15000
training:
rec_results_freq : 200
rec_constraint_freq: 50000
max_steps : 1500000
batch_size:
inlet: 650
outlet: 650
no_slip: 5200
interior: 6000
integral_continuity: 310
| 541 | YAML | 15.9375 | 60 | 0.698706 |
DimensionLab/fmmr-water-tank/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Water tank example"
description="Project commisioned for Faculty of materials, metallurgy and recyclation (FMMR) at Technical university of Košice."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/water-tank.png"
icon = "data/icon.png"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Simulation"
# Keywords for the extension
keywords = ["modulus", "water-tank", "STL geometry", "scenario"]
# Use omni.ui to build simple UI
[dependencies]
"modulus_ext.core" = {version="22.9.0"}
"modulus_ext.ui" = {version="2.0.0"}
"hpcvis.vtkm_bridge.core" = {version="1.0.2-alpha-03"}
# Main python module this extension provides.
[[python.module]]
name = "water-tank"
| 987 | TOML | 28.058823 | 128 | 0.727457 |
DimensionLab/fmmr-water-tank/config/extension.gen.toml | [package]
[package.target]
python = ["cp37"]
[package.publish]
date = 1662766157
kitVersion = "103.5+release.6600.0a006a6d.tc"
| 127 | TOML | 17.285712 | 45 | 0.732283 |
DimensionLab/fmmr-water-tank/docs/CHANGELOG.md | # Water tank example
## [1.0.0] - 2022-10-14
Initial version, working with Modulus 22.09
| 90 | Markdown | 17.199997 | 43 | 0.7 |
iMAPSRoboticsTeam/Issac_Sim_Template/SimulationModule.py | import omni.isaac.core.utils.stage as stage
from omni.isaac.core.robots import Robot
import numpy as np
import os
class SimulationModule(): # Rename to unique module name
def setup_scene(self, sim):
usd_path = "Path_to_file"
world = sim.get_world()
world.scene.add_default_ground_plane()
stage.add_reference_to_stage(usd_path=usd_path,
prim_path="/World/Robot")
world.scene.add(Robot(
prim_path="/World/Robot",
name="Robot",
scale=np.array([0.01,0.01,0.01]),
orientation=np.array([0.0, 0.0, 0.0, 1.0]),
position=np.array([0.0, 0.0, 0.3]),)
)
return
def setup_post_load(self, sim, func):
sim._world = sim.get_world()
sim._robot = sim._world.scene.get_object("Robot")
sim._world.add_physics_callback("sending_actions", callback_fn=func)
return
def simulation(self):
pass
| 980 | Python | 29.656249 | 76 | 0.570408 |
iMAPSRoboticsTeam/Issac_Sim_Template/SimulationBase.py | from omni.isaac.core import World
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.stage import create_new_stage_async, update_stage_async
import gc
from abc import abstractmethod
class SimulationBase(object):
def __init__(self) -> None:
self._world = None
self._current_tasks = None
self._world_settings = {"physics_dt": 1.0 / 60.0, "stage_units_in_meters": 1.0, "rendering_dt": 1.0 / 60.0}
# self._logging_info = ""
return
def get_world(self):
return self._world
def set_world_settings(self, physics_dt=None, stage_units_in_meters=None, rendering_dt=None):
if physics_dt is not None:
self._world_settings["physics_dt"] = physics_dt
if stage_units_in_meters is not None:
self._world_settings["stage_units_in_meters"] = stage_units_in_meters
if rendering_dt is not None:
self._world_settings["rendering_dt"] = rendering_dt
return
async def load_world_async(self):
"""Function called when clicking load buttton
"""
if World.instance() is None:
await create_new_stage_async()
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene(self._loadWorld)
else:
self._world = World.instance()
self._current_tasks = self._world.get_current_tasks()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load(self._loadWorld)
if len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
async def reset_async(self):
"""Function called when clicking reset buttton
"""
if self._world.is_tasks_scene_built() and len(self._current_tasks) > 0:
self._world.remove_physics_callback("tasks_step")
await self._world.play_async()
await update_stage_async()
await self.setup_pre_reset()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_reset()
if self._world.is_tasks_scene_built() and len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
@abstractmethod
def setup_scene(self, scene: Scene) -> None:
"""used to setup anything in the world, adding tasks happen here for instance.
Args:
scene (Scene): [description]
"""
return
@abstractmethod
async def setup_post_load(self):
"""called after first reset of the world when pressing load,
intializing provate variables happen here.
"""
return
@abstractmethod
async def setup_pre_reset(self):
""" called in reset button before resetting the world
to remove a physics callback for instance or a controller reset
"""
return
@abstractmethod
async def setup_post_reset(self):
""" called in reset button after resetting the world which includes one step with rendering
"""
return
@abstractmethod
async def setup_post_clear(self):
"""called after clicking clear button
or after creating a new stage and clearing the instance of the world with its callbacks
"""
return
# def log_info(self, info):
# self._logging_info += str(info) + "\n"
# return
def _world_cleanup(self):
self._world.stop()
self._world.clear_all_callbacks()
self._current_tasks = None
self.world_cleanup()
return
def world_cleanup(self):
"""Function called when extension shutdowns and starts again, (hot reloading feature)
"""
return
async def clear_async(self):
"""Function called when clicking clear buttton
"""
await create_new_stage_async()
if self._world is not None:
self._world_cleanup()
self._world.clear_instance()
self._world = None
gc.collect()
await self.setup_post_clear()
return
| 4,267 | Python | 33.144 | 115 | 0.606281 |
iMAPSRoboticsTeam/Issac_Sim_Template/Extension.py | from abc import abstractmethod
import omni.ext
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder, state_btn_builder, cb_builder
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from omni.isaac.core import World
from .Simulation import Simulation
import weakref
import os
import asyncio
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Extension(omni.ext.IExt, Simulation):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id: str):
self._menuItems = None
self._buttons = None
self._resetButtons = None
self._GolbalReset = None
self._ext_id = ext_id
self.name = "Robot"
self.robot = Simulation()
self.extra_frames = []
self.start_Extension()
return
def start_Extension(
self,
menu_name = None,
submenu_name = None,
name = "Simulation Suite",
title = "Simulation",
doc_link = None,
overview = "This is a complete simulation package",
file_path = os.path.abspath(__file__),
number_of_extra_frames = 1,
):
menu_items = [MenuItemDescription(name=name, onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
if menu_name == "" or menu_name is None:
self._menu_items = menu_items
elif submenu_name == "" or submenu_name is None:
self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)]
else:
self._menu_items = [
MenuItemDescription(
name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)]
)
]
add_menu_items(self._menu_items, name)
self._buttons = dict()
self._resetButtons = dict()
self._GolbalReset = dict()
self._build_ui(
name=name,
title=title,
doc_link=doc_link,
overview=overview,
file_path=file_path,
number_of_extra_frames=number_of_extra_frames,
ext_id=self._ext_id,
)
"""
USER INTERFACE
"""
def _build_ui(self,
name,
title,
doc_link,
overview,
file_path,
number_of_extra_frames,
ext_id,
):
self._window = ui.Window(name, width=360, height=0, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM)
with self._window.frame:
with ui.VStack(spacing=5, height=10):
#title = ("StakeBot Simulation")
#doc_link = ("https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.ui/docs/index.html")
# overview = (
# "This is a complete control pannel for simulating and testing the StakeBot\n"
# )
setup_ui_headers(ext_id, file_path, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Simulation Name",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
dict = {
"label": "Load Simulation", ## Give Simulation a Name
"type": "button",
"text": "Load",
"tooltip": "Edit ToolTip",
"on_clicked_fn": self._load_new_world,
}
self._buttons["Load New Simulator"] = btn_builder(**dict) # Give button a unique name
self._buttons["Load New Simulator"].enabled = True
dict = {
"label": "Sim Reset", # Give reset button a name
"type": "button",
"text": "Reset",
"tooltip": "Reset Simulation",
"on_clicked_fn": self._on_reset,
}
self._resetButtons["Sim Reset"] = btn_builder(**dict) # Create a unique name for button
self._resetButtons["Sim Reset"].enabled = False
dict = {
"label": "Reset Simulation",
"type": "button",
"text": "Reset Simulation",
"tooltip": "Reset EtherBot Simulation",
"on_clicked_fn": self._reset_all,
}
self._GolbalReset["Reset"] = btn_builder(**dict)
"""
"""
def _load_new_world(self): # rename function to unique world name
self.robot._loadWorld = "Simulation" # Rename this to unique world name
async def _on_load_world_async():
await self.robot.load_world_async()
await omni.kit.app.get_app().next_update_async()
self.robot._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(self._buttons, False)
self._resetButtons["Sim Reset"].enabled = True
self.post_load_button_event()
self.robot._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
asyncio.ensure_future(_on_load_world_async())
return
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def _enable_all_buttons(self, type, flag):
for btn_name, btn in type.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = flag
return
def on_stage_event(self, event):
if event.type == int(omni.usd.StageEventType.CLOSED):
if World.instance() is not None:
self.robot._world_cleanup()
self.robot._world.clear_instance()
if hasattr(self, "_buttons"):
if self._buttons is not None:
self._enable_all_buttons(self._buttons, False)
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
try:
self.post_clear_button_event()
except:
pass
return
def _on_reset(self):
async def _on_reset_async():
await self.robot.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
self.robot._world.clear_instance()
asyncio.ensure_future(_on_reset_async())
return
@abstractmethod
def post_reset_button_event(self):
return
@abstractmethod
def post_load_button_event(self):
return
@abstractmethod
def post_clear_button_event(self):
return
def _sample_window_cleanup(self):
remove_menu_items(self._menu_items, self.name)
self._window = None
self._menu_items = None
self._buttons = None
self._resetButtons = None
self._GolbalReset = None
return
def shutdown_cleanup(self):
return
def on_shutdown(self):
if self.robot._world is not None:
self.robot._world_cleanup()
if self._menu_items is not None:
self._sample_window_cleanup()
if self._buttons is not None:
self._enable_all_buttons(self._buttons, False)
if self._resetButtons is not None:
self._enable_all_buttons(self._resetButtons, False)
if self._GolbalReset is not None:
self._GolbalReset["Reset"].enabled = False
self.shutdown_cleanup()
if self.robot._world is not None:
self.robot._world.clear_instance()
self.robot._world.clear()
return
def _reset_all(self):
async def _on_reset_all_async():
try:
if self.robot._world is not None:
await self.robot._world.stop_async()
await omni.kit.app.get_app().next_update_async()
self.robot._world.clear_instance()
self.robot._world.clear()
self._enable_all_buttons(self._buttons, True)
self._enable_all_buttons(self._resetButtons, False)
except:
pass
asyncio.ensure_future(_on_reset_all_async())
return
| 9,508 | Python | 36.58498 | 125 | 0.523138 |
iMAPSRoboticsTeam/Issac_Sim_Template/__init__.py | from .Extension import *
from .SimulationBase import SimulationBase
from .Simulation import Simulation
| 102 | Python | 33.333322 | 42 | 0.852941 |
iMAPSRoboticsTeam/Issac_Sim_Template/Simulation.py | from .SimulationBase import SimulationBase
from .SimulationModule import SimulationModule
from omni.isaac.core.utils.types import ArticulationAction
import numpy as np
class Simulation(SimulationBase):
def __init__(self) -> None:
super().__init__()
self._loadWorld = None
self.simMod = SimulationModule()
## Add new Simulation Modules
return
# Setup Scene
def setup_scene(self, loadWorld):
if loadWorld == "Simulation":
self.simMod.setup_scene(self)
# elif loadWorld == "NewSim":
# load new sim module
return
# Setup Post Load
async def setup_post_load(self, loadWorld):
if loadWorld == "Simulation":
self.simMod.setup_post_load(self, self.send_actions)
# elif loadWorld == "NewSim":
# load new sim module
return
def send_actions(self, step_size):
self.simMod.simulation()
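# drive all four joints at a constant reverse velocity on every physics step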
self._robot.apply_action(ArticulationAction(joint_positions=None,
joint_efforts=None,
joint_velocities=np.array([5,5,5,5])*-1))
return
| 1,257 | Python | 28.95238 | 73 | 0.567224 |
iMAPSRoboticsTeam/Issac_Sim_Template/README.md | # Issac_Sim_Template
This template is used to help you get started developing simulations with Isaac Sim.
# How To Use
## Create An Extension
First, create a new extension by opening the extension window: Window >> Extensions.
Next, click the green plus sign at the top left of the window and select "New Extension Template Project".
Save this to any location, give it a project name (no spaces), and use iMAPS for the ext.id name.
VS Code should now open.
## Clone The Issac_Sim_Template
Next, open a Git Bash terminal.
Click on the three dots in the upper left corner of VS Code and click "New Terminal"; a terminal will open at the bottom of the VS Code window. In this terminal panel, click on the down arrow next to the white plus sign and select Git Bash.
> This assumes you have git installed on your Windows machine; if not, click Source Control in VS Code to download it.
In this Git Bash terminal we can clone the repo into the extension by typing:
```
git clone https://github.com/iMAPSRoboticsTeam/Issac_Sim_Template.git
```
## Run the Setup Script
After the repo has been cloned, open File Explorer, then locate and double-click the setup.bat file inside the repo (it should be inside your extension folder).
After you run this script, the code template is ready to use and Isaac Sim should now show a new extension window.
For Linux, run the following command:
```
./Issac_Sim_Template/setup.sh
```
# Getting Started
This template sets up the developer with all the necessary functions to create and build simulations through a GUI.
Inside the Extension.py file is the code for the GUI. You can copy the code below while changing the names of the assets that are stored in their respective dictionaries.
```
dict = {
"label": "Load Simulation", ## Give Simulation a Name
"type": "button",
"text": "Load",
"tooltip": "Edit ToolTip",
"on_clicked_fn": self._load_new_world,
}
self._buttons["Load New Simulator"] = btn_builder(**dict) # Give button a unique name
self._buttons["Load New Simulator"].enabled = True
dict = {
"label": "Sim Reset", # Give reset button a name
"type": "button",
"text": "Reset",
"tooltip": "Reset Simulation",
"on_clicked_fn": self._on_reset,
}
self._resetButtons["Sim Reset"] = btn_builder(**dict) # Create a unique name for button
self._resetButtons["Sim Reset"].enabled = False
```
The simulations are created within their own respective Python files. SimulationModule.py is an example of how to set up a simulation module for a specific purpose: it is where the robot and world are loaded into Isaac Sim and where the simulation function (the function used to control the simulation) is placed.
This file stores a class that is called from the Simulation.py file, which is where the simulation runs and where the simulation function of the respective simulation module is invoked. The ArticulationAction class is used there to drive the joints of the loaded robot, as illustrated in the sketch below.
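For illustration, a purpose-specific module might look like the hedged sketch below. It mirrors the callback wiring of SimulationModule.py; the class name, the joint count, and the velocity values are made up for this example and are not part of the template.
```
# Hypothetical example module (not shipped with the template): spins every joint
# of the loaded robot at a constant velocity once the world is playing.
import numpy as np
from omni.isaac.core.utils.types import ArticulationAction

class SpinInPlaceModule():
    def setup_post_load(self, sim, func):
        # cache the world and robot handles and register the per-physics-step callback
        sim._world = sim.get_world()
        sim._robot = sim._world.scene.get_object("Robot")
        sim._world.add_physics_callback("sending_actions", callback_fn=func)
        return

    def simulation(self, robot):
        # command all four joints to the same velocity; positions and efforts are left untouched
        robot.apply_action(ArticulationAction(joint_positions=None,
                                              joint_efforts=None,
                                              joint_velocities=np.array([2.0, 2.0, 2.0, 2.0])))
        return
```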
# Good Luck
If you get stuck or need help understanding the code better, please email me
```
[email protected]
```
or text me. Isaac Sim can be very complicated, but hopefully this template will help you better understand the process of robotic simulation development.
Good Luck!
| 3,305 | Markdown | 33.8 | 335 | 0.750983 |
elharirymatteo/RANS/setup.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Installation script for the 'isaacgymenvs' python package."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from setuptools import setup, find_packages
import os
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
"numpy==1.23.5",
"protobuf==3.20.2",
"omegaconf==2.3.0",
"hydra-core==1.3.2",
"urllib3==1.26.16",
"moviepy==1.0.3",
"wandb>=0.13",
"cvxpy==1.3.2",
"mujoco==2.3.6",
]
# Installation operation
setup(
name="omniisaacgymenvs",
author="NVIDIA",
version="2023.1.1a",
description="RL environments for robot learning in NVIDIA Isaac Sim.",
keywords=["robotics", "rl"],
include_package_data=True,
install_requires=INSTALL_REQUIRES,
packages=find_packages("."),
classifiers=[
"Natural Language :: English",
"Programming Language :: Python :: 3.7, 3.8",
],
zip_safe=False,
)
# EOF
| 2,535 | Python | 34.718309 | 80 | 0.728994 |
elharirymatteo/RANS/README.md | 
## About this repository
This repo is an extension of the Isaac Gym Envs library present at https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs. There, you can find further details and instructions regarding the default tasks (`AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `Crazyflie`, `FrankaCabinet`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM`).
The main additions to the Reinforcement Learning examples provided by Omniverse Isaac Gym are environments related to **Space Robotics**.
First, we provide a 2D environment, which serves as a simpler version of a realistic spacecraft. The modelled 2D system can be tested with a real rigid structure floating on top of an extremely flat and smooth surface using air bearings. This system is a common solution to emulate free-floating and free-flying satellite motion. This intermediate step is especially important for demonstrating the sim-to-real transfer of the DRL policies trained within Omniverse.
Second, we provide a full 3D environment to simulate free-flying spacecraft in space.
| 3DoF go to XY | 3DoF go to Pose | 6DoF go to XYZ |
| :-: | :-: | :-: |
|  |  |  |
---
## Task Description
Currently we provide two primary environments, each tailored to simulate distinct robotic systems:
1. **3 Degrees of Freedom (3DoF) Robot Simulation:**
The simulator replicates the behavior of the 3DoF robot situated in the ZeroG Lab of the University of Luxembourg (SpaceR group). The system is equipped with 8 thrusters.
In this environment, the following tasks are defined:
- **GoToXY:** Task for position control.
- **GoToPose-2D:** Task for position-attitude control.
- **TrackXYVelocity:** Agent learns to track linear velocities in the xy plane.
- **TrackXYOVelocity:** Agent learns to track both linear and angular velocities.
2. **6 Degrees of Freedom (6DoF) Robot Simulation:**
The simulator emulates spacecraft maneuvers in space, featuring a 6DoF robot configuration with 16 thrusters.
The tasks defined for this environment are:
- **GoToXYZ:** Task for precise spatial positioning.
- **GoToPose-3D:** Task for accurate spatial positioning and orientation.
#### Thrusters Configuration
The default thrusters configuration for both 3DoF and 6DoF scenarios is depicted in the following images, showing the direction of forces applied by the thrusters mounted on the systems.
| 3DoF Thrusters Configuration | 6DoF Thrusters Configuration |
| :-: | :-: |
| <img src="omniisaacgymenvs/images/config3Dof.png" width="200"/> | <img src="omniisaacgymenvs/images/config6Dof.png" width="200"/> |
---
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release.
*Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2022.2.0, to ensure examples work as expected.*
### OmniverseIsaacGymEnvs
Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim.
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/elharirymatteo/RANS.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path.
```
For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh
For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $*
For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh
```
Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`:
```bash
PYTHON_PATH -m pip install -e .
```
### RL Games
We use the [rl-games](https://pypi.org/project/rl-games/1.0.2/) library as a starting point to rework the PPO implementation for the agents we train.
To install the appropriate version of rl-games, clone this repository **INSIDE** RANS:
```bash
git clone https://github.com/AntoineRichard/rl_games
```
Make sure to install the rl_games library into the same Isaac Sim Python environment used by OmniIsaacGymEnvs:
```
cd rl_games
PYTHON_PATH -m pip install --upgrade pip
PYTHON_PATH -m pip install -e .
```
## Running the examples
*Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.*
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Training new agents</span></summary>
To train your first policy, run:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP
```
You should see an Isaac Sim window pop up. Once Isaac Sim initialization completes, the FloatingPlatform scene will be constructed and simulation will start running automatically. The process will terminate once training finishes.
Here's another example - GoToPose - using the multi-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP
```
Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar.
To achieve maximum performance, launch training in `headless` mode as follows:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP headless=True
```
#### A Note on the Startup Time of the Simulation
Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually
be optimized in future releases.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Loading trained models (or checkpoints)</span></summary>
Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME`
defaults to the task name, but can also be overridden via the `experiment` argument.
To load a trained checkpoint and continue training, use the `checkpoint` argument:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP checkpoint=runs/MFP2D_Virtual_GoToPose/nn/MFP2D_Virtual_GoToPose.pth
```
To load a trained checkpoint and only perform inference (no training), pass `test=True`
as an argument, along with the checkpoint name. To avoid rendering overhead, you may
also want to run with fewer environments using `num_envs=64`:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP checkpoint=runs/MFP2D_Virtual_GoToPose/nn/MFP2D_Virtual_GoToPose.pth test=True num_envs=64
```
Note that if there are special characters such as `[` or `=` in the checkpoint names,
you will need to escape them and put quotes around the string. For example,
`checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`
</details>
## Training Scripts
All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`.
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Random policy</span></summary>
To test out a task without RL in the loop, run the random policy script with:
```bash
PYTHON_PATH scripts/random_policy.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Train on single GPU</span></summary>
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations have been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Train on multiple GPUs</span></summary>
Lastly, we provide a multi-threaded training script that executes the RL policy on a separate thread than the main thread used for simulation and rendering:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script uses the same RL Games PPO policy as the above, but runs the RL loop on a new thread. Communication between the RL thread and the main thread happens on threaded Queues. Simulation will start automatically, but the script will **not** exit when training terminates, except when running in headless mode. Simulation will stop when training completes or can be stopped by clicking on the Stop button in the UI. Training can be launched again by clicking on the Play button. Similarly, if running inference with `test=True checkpoint=<path/to/checkpoint>`, simulation will run until the Stop button is clicked, or the script will run indefinitely until the process is terminated.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Configuration and command line arguments</span></summary>
We use [Hydra](https://hydra.cc/docs/intro/) to manage the config.
Common arguments for the training scripts are:
* `task=TASK` - Selects which task to use. Any of `MFP2D_Virtual_GoToXY`, `MFP2D_Virtual_GoToPose`, `MFP2D_Virtual_TrackXYVelocity`, `MFP2D_Virtual_TrackXYOVelocity`, `MFP3D_Virtual_GoToXYZ`, `MFP3D_Virtual_GoToPose`, (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task/virtual_floating_platform`)
* `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU.
* `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax.
* `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
* `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation).
* `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used.
Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`.
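The same overrides can also be applied programmatically through Hydra's compose API, which is how the extension workflow builds its config. A minimal sketch, assuming it is run from `omniisaacgymenvs/` next to the `cfg/` folder; the override values are only illustrative:
```python
# Illustrative sketch: build the config the training scripts consume, with overrides applied.
from hydra import compose, initialize
from omegaconf import OmegaConf

with initialize(version_base=None, config_path="cfg"):
    cfg = compose(
        config_name="config",
        overrides=[
            "task=virtual_floating_platform/MFP2D_Virtual_GoToXY",  # which task config to load
            "num_envs=64",                                           # override the environment count
            "train.params.config.minibatch_size=64",                 # nested rl_games override
        ],
    )

print(OmegaConf.to_yaml(cfg.task.env))  # inspect the resolved task settings
```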
#### Hydra Notes
Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file.
The way that the `task` and `train` portions of the config works are through the use of config groups.
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/)
The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`.
In some places in the config you will find other variables referenced (for example,
`num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy.
This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).
</details>
### Tensorboard
Tensorboard can be launched during training via the following command:
```bash
PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries
```
## WandB support
You can run [WandB](https://wandb.ai/) with OmniIsaacGymEnvs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating.
## Citation
If you use the current repository in your work, we suggest citing the following papers:
```bibtex
@article{el2023drift,
title={DRIFT: Deep Reinforcement Learning for Intelligent Floating Platforms Trajectories},
author={El-Hariry, Matteo and Richard, Antoine and Muralidharan, Vivek and Yalcin, Baris Can and Geist, Matthieu and Olivares-Mendez, Miguel},
journal={arXiv preprint arXiv:2310.04266},
year={2023}
}
@article{el2023rans,
title={RANS: Highly-Parallelised Simulator for Reinforcement Learning based Autonomous Navigating Spacecrafts},
author={El-Hariry, Matteo and Richard, Antoine and Olivares-Mendez, Miguel},
journal={arXiv preprint arXiv:2310.07393},
year={2023}
}
```
## Directory Structure
```bash
.
├── cfg
│ ├── controller # Optimal Controllers configurations
│ ├── hl_task # High-level task configurations
│ ├── task # Task configurations
│ │ └── virtual_floating_platform # Virtual floating platform task configurations
│ └── train # Training configurations
│ └── virtual_floating_platform # Virtual floating platform training configurations
├── checkpoints # Checkpoints for saved models
├── conf_runs # Configuration runs for training
├── demos # Demonstration files (gifs)
├── envs
│ └── BuoyancyPhysics # Environment related to buoyancy physics
├── images # Image files
├── mujoco_envs
│ ├── controllers # Controllers for Mujoco environments
│ ├── environments # Mujoco environments
│ └── legacy # Legacy Mujoco environment files
├── notebooks # Jupyter notebooks
├── robots
│ ├── articulations # Articulation-related files
│ │ ├── utils # Utilities for articulations
│ │ └── views # Articulation views
│ └── usd # USD-related files
├── ros # ROS-related files
├── scripts # Utility scripts
├── skrl # Reinforcement learning utilities
├── tasks
│ ├── base # Base task implementations
│ ├── buoyancy # Buoyancy-related tasks
│ ├── factory # Factory task configurations
│ │ └── yaml # YAML configurations for factory tasks
│ ├── shared # Shared task implementations
│ ├── utils # Task utility functions
│ └── virtual_floating_platform # Task implementations for virtual floating platform
├── utils
│ ├── config_utils # Configuration utilities
│ ├── domain_randomization # Domain randomization utilities
│ ├── hydra_cfg # Hydra configuration utilities
│ ├── rlgames # Utilities for rlgames
│ ├── terrain_utils # Terrain-related utilities
│ └── usd_utils # USD-related utilities
└── videos # Video files
```
| 17,693 | Markdown | 56.635179 | 688 | 0.730911 |
elharirymatteo/RANS/config/extension.toml | [gym]
reloadable = true
[package]
version = "0.0.0"
category = "Simulation"
title = "Isaac Gym Envs"
description = "RL environments"
authors = ["Isaac Sim Team"]
repository = "https://gitlab-master.nvidia.com/carbon-gym/omniisaacgymenvs"
keywords = ["isaac"]
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
icon = "data/icon.png"
writeTarget.kit = true
[dependencies]
"omni.isaac.gym" = {}
"omni.isaac.core" = {}
"omni.isaac.cloner" = {}
"omni.isaac.ml_archive" = {} # torch
[[python.module]]
name = "omniisaacgymenvs"
| 532 | TOML | 20.319999 | 75 | 0.693609 |
elharirymatteo/RANS/omniisaacgymenvs/extension.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import inspect
import os
import traceback
import weakref
from abc import abstractmethod
import gym
import hydra
import omni.ext
import omni.timeline
import omni.ui as ui
import omni.usd
from hydra import compose, initialize
from omegaconf import OmegaConf
from omni.isaac.cloner import GridCloner
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
from omni.isaac.core.utils.torch.maths import set_seed
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_train_mt import RLGTrainer, Trainer
from omniisaacgymenvs.utils.task_util import import_tasks, initialize_task
from omni.isaac.ui.callbacks import on_open_folder_clicked, on_open_IDE_clicked
from omni.isaac.ui.menu import make_menu_item_description
from omni.isaac.ui.ui_utils import (
btn_builder,
dropdown_builder,
get_style,
int_builder,
multi_btn_builder,
multi_cb_builder,
scrolling_frame_builder,
setup_ui_headers,
str_builder,
)
from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items
from omni.kit.viewport.utility import get_active_viewport, get_viewport_from_window_name
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from pxr import Gf
ext_instance = None
class RLExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
self._render_modes = ["Full render", "UI only", "None"]
self._env = None
self._task = None
self._ext_id = ext_id
ext_manager = omni.kit.app.get_app().get_extension_manager()
extension_path = ext_manager.get_extension_path(ext_id)
self._ext_path = os.path.dirname(extension_path) if os.path.isfile(extension_path) else extension_path
self._ext_file_path = os.path.abspath(__file__)
self._initialize_task_list()
self.start_extension(
"",
"",
"RL Examples",
"RL Examples",
"",
"A set of reinforcement learning examples.",
self._ext_file_path,
)
self._task_initialized = False
self._task_changed = False
self._is_training = False
self._render = True
self._resume = False
self._test = False
self._evaluate = False
self._checkpoint_path = ""
self._timeline = omni.timeline.get_timeline_interface()
self._viewport = get_active_viewport()
self._viewport.updates_enabled = True
global ext_instance
ext_instance = self
def _initialize_task_list(self):
self._task_map, _ = import_tasks()
self._task_list = list(self._task_map.keys())
self._task_list.sort()
self._task_list.remove("CartpoleCamera") # we cannot run camera-based training from extension workflow for now. it requires a specialized app file.
self._task_name = self._task_list[0]
self._parse_config(self._task_name)
self._update_task_file_paths(self._task_name)
def _update_task_file_paths(self, task):
self._task_file_path = os.path.abspath(inspect.getfile(self._task_map[task]))
self._task_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/task/{task}.yaml")
self._train_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/train/{task}PPO.yaml")
def _parse_config(self, task, num_envs=None, overrides=None):
hydra.core.global_hydra.GlobalHydra.instance().clear()
initialize(version_base=None, config_path="cfg")
overrides_list = [f"task={task}"]
if overrides is not None:
overrides_list += overrides
if num_envs is None:
self._cfg = compose(config_name="config", overrides=overrides_list)
else:
self._cfg = compose(config_name="config", overrides=overrides_list + [f"num_envs={num_envs}"])
self._cfg_dict = omegaconf_to_dict(self._cfg)
self._sim_config = SimConfig(self._cfg_dict)
def start_extension(
self,
menu_name: str,
submenu_name: str,
name: str,
title: str,
doc_link: str,
overview: str,
file_path: str,
number_of_extra_frames=1,
window_width=550,
keep_window_open=False,
):
window = ui.Workspace.get_window("Property")
if window:
window.visible = False
window = ui.Workspace.get_window("Render Settings")
if window:
window.visible = False
menu_items = [make_menu_item_description(self._ext_id, name, lambda a=weakref.proxy(self): a._menu_callback())]
if menu_name == "" or menu_name is None:
self._menu_items = menu_items
elif submenu_name == "" or submenu_name is None:
self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)]
else:
self._menu_items = [
MenuItemDescription(
name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)]
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._task_dropdown = None
self._cbs = None
self._build_ui(
name=name,
title=title,
doc_link=doc_link,
overview=overview,
file_path=file_path,
number_of_extra_frames=number_of_extra_frames,
window_width=window_width,
keep_window_open=keep_window_open,
)
return
def _build_ui(
self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width, keep_window_open
):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=keep_window_open, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
self._main_stack = ui.VStack(spacing=5, height=0)
with self._main_stack:
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="World Controls",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
with ui.HStack(style=get_style()):
with ui.VStack(style=get_style(), width=ui.Fraction(20)):
dict = {
"label": "Select Task",
"type": "dropdown",
"default_val": 0,
"items": self._task_list,
"tooltip": "Select a task",
"on_clicked_fn": self._on_task_select,
}
self._task_dropdown = dropdown_builder(**dict)
with ui.Frame(tooltip="Open Source Code"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
with ui.Frame(tooltip="Open Task Config"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_cfg_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
with ui.Frame(tooltip="Open Training Config"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._train_cfg_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
dict = {
"label": "Number of environments",
"tooltip": "Enter the number of environments to construct",
"min": 0,
"max": 8192,
"default_val": self._cfg.task.env.numEnvs,
}
self._num_envs_int = int_builder(**dict)
dict = {
"label": "Load Environment",
"type": "button",
"text": "Load",
"tooltip": "Load Environment and Task",
"on_clicked_fn": self._on_load_world,
}
self._load_env_button = btn_builder(**dict)
dict = {
"label": "Rendering Mode",
"type": "dropdown",
"default_val": 0,
"items": self._render_modes,
"tooltip": "Select a rendering mode",
"on_clicked_fn": self._on_render_mode_select,
}
self._render_dropdown = dropdown_builder(**dict)
dict = {
"label": "Configure Training",
"count": 3,
"text": ["Resume from Checkpoint", "Test", "Evaluate"],
"default_val": [False, False, False],
"tooltip": [
"",
"Resume training from checkpoint",
"Play a trained policy",
"Evaluate a policy during training",
],
"on_clicked_fn": [
self._on_resume_cb_update,
self._on_test_cb_update,
self._on_evaluate_cb_update,
],
}
self._cbs = multi_cb_builder(**dict)
dict = {
"label": "Load Checkpoint",
"tooltip": "Enter path to checkpoint file",
"on_clicked_fn": self._on_checkpoint_update,
}
self._checkpoint_str = str_builder(**dict)
dict = {
"label": "Train/Test",
"count": 2,
"text": ["Start", "Stop"],
"tooltip": [
"",
"Launch new training/inference run",
"Terminate current training/inference run",
],
"on_clicked_fn": [self._on_train, self._on_train_stop],
}
self._buttons = multi_btn_builder(**dict)
return
def create_task(self):
headless = self._cfg.headless
enable_viewport = "enable_cameras" in self._cfg.task.sim and self._cfg.task.sim.enable_cameras
self._env = VecEnvRLGamesMT(
headless=headless,
sim_device=self._cfg.device_id,
enable_livestream=self._cfg.enable_livestream,
enable_viewport=enable_viewport or self._cfg.enable_recording,
launch_simulation_app=False,
)
# parse experiment directory
module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
experiment_dir = os.path.join(module_path, "runs", self._cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if self._cfg.enable_recording:
if self._cfg.recording_dir == '':
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = self._cfg.recording_dir
video_interval = lambda step: step % self._cfg.recording_interval == 0
video_length = self._cfg.recording_length
self._env.is_vector_env = True
if self._env.metadata is None:
self._env.metadata = {"render_modes": ["rgb_array"], "render_fps": self._cfg.recording_fps}
else:
self._env.metadata["render_modes"] = ["rgb_array"]
self._env.metadata["render_fps"] = self._cfg.recording_fps
self._env = gym.wrappers.RecordVideo(
self._env, video_folder=videos_dir, step_trigger=video_interval, video_length=video_length
)
self._task = initialize_task(self._cfg_dict, self._env, init_sim=False)
self._task_initialized = True
self._task.set_is_extension(True)
def _on_task_select(self, value):
if self._task_initialized and value != self._task_name:
self._task_changed = True
self._task_initialized = False
self._task_name = value
self._parse_config(self._task_name)
self._num_envs_int.set_value(self._cfg.task.env.numEnvs)
self._update_task_file_paths(self._task_name)
def _on_render_mode_select(self, value):
if value == self._render_modes[0]:
self._viewport.updates_enabled = True
window = ui.Workspace.get_window("Viewport")
window.visible = True
if self._env:
self._env.update_viewport = True
self._env.set_render_mode(0)
elif value == self._render_modes[1]:
self._viewport.updates_enabled = False
window = ui.Workspace.get_window("Viewport")
window.visible = False
if self._env:
self._env.update_viewport = False
self._env.set_render_mode(1)
elif value == self._render_modes[2]:
self._viewport.updates_enabled = False
window = ui.Workspace.get_window("Viewport")
window.visible = False
if self._env:
self._env.update_viewport = False
self._env.set_render_mode(2)
def _on_render_cb_update(self, value):
self._render = value
print("updates enabled", value)
self._viewport.updates_enabled = value
if self._env:
self._env.update_viewport = value
if value:
window = ui.Workspace.get_window("Viewport")
window.visible = True
else:
window = ui.Workspace.get_window("Viewport")
window.visible = False
def _on_single_env_cb_update(self, value):
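        # When enabled, hide every cloned environment except env_0 and frame the camera on it;
        # when disabled, restore the visibility of all environments and the default camera view.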
visibility = "invisible" if value else "inherited"
stage = omni.usd.get_context().get_stage()
env_root = stage.GetPrimAtPath("/World/envs")
if env_root.IsValid():
for i, p in enumerate(env_root.GetChildren()):
p.GetAttribute("visibility").Set(visibility)
if value:
stage.GetPrimAtPath("/World/envs/env_0").GetAttribute("visibility").Set("inherited")
env_pos = self._task._env_pos[0].cpu().numpy().tolist()
camera_pos = [env_pos[0] + 10, env_pos[1] + 10, 3]
camera_target = [env_pos[0], env_pos[1], env_pos[2]]
else:
camera_pos = [10, 10, 3]
camera_target = [0, 0, 0]
camera_state = ViewportCameraState("/OmniverseKit_Persp", get_active_viewport())
camera_state.set_position_world(Gf.Vec3d(*camera_pos), True)
camera_state.set_target_world(Gf.Vec3d(*camera_target), True)
def _on_test_cb_update(self, value):
self._test = value
if value is True and self._checkpoint_path.strip() == "":
module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
self._checkpoint_str.set_value(os.path.join(module_path, f"runs/{self._task_name}/nn/{self._task_name}.pth"))
def _on_resume_cb_update(self, value):
self._resume = value
if value is True and self._checkpoint_path.strip() == "":
module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
self._checkpoint_str.set_value(os.path.join(module_path, f"runs/{self._task_name}/nn/{self._task_name}.pth"))
def _on_evaluate_cb_update(self, value):
self._evaluate = value
def _on_checkpoint_update(self, value):
self._checkpoint_path = value.get_value_as_string()
async def _on_load_world_async(self, use_existing_stage):
# initialize task if not initialized
if not self._task_initialized or not omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid():
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int())
self.create_task()
else:
# update config
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int())
self._task.update_config(self._sim_config)
# clear scene
# self._env.world.scene.clear()
self._env.world._sim_params = self._sim_config.get_physics_params()
await self._env.world.initialize_simulation_context_async()
set_camera_view(eye=[10, 10, 3], target=[0, 0, 0], camera_prim_path="/OmniverseKit_Persp")
if not use_existing_stage:
# clear scene
self._env.world.scene.clear()
# clear environments added to world
omni.usd.get_context().get_stage().RemovePrim("/World/collisions")
omni.usd.get_context().get_stage().RemovePrim("/World/envs")
# create scene
await self._env.world.reset_async_set_up_scene()
# update num_envs in envs
self._env.update_task_params()
else:
self._task.initialize_views(self._env.world.scene)
def _on_load_world(self):
# stop simulation before updating stage
self._timeline.stop()
asyncio.ensure_future(self._on_load_world_async(use_existing_stage=False))
def _on_train_stop(self):
if self._task_initialized:
asyncio.ensure_future(self._env.world.stop_async())
async def _on_train_async(self, overrides=None):
try:
# initialize task if not initialized
print("task initialized:", self._task_initialized)
if not self._task_initialized:
# if this is the first launch of the extension, we do not want to re-create stage if stage already exists
use_existing_stage = False
if omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid():
use_existing_stage = True
                print("use existing stage:", use_existing_stage)
await self._on_load_world_async(use_existing_stage)
# update config
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int(), overrides=overrides)
sim_config = SimConfig(self._cfg_dict)
self._task.update_config(sim_config)
cfg_dict = omegaconf_to_dict(self._cfg)
# sets seed. if seed is -1 will pick a random one
self._cfg.seed = set_seed(self._cfg.seed, torch_deterministic=self._cfg.torch_deterministic)
cfg_dict["seed"] = self._cfg.seed
self._checkpoint_path = self._checkpoint_str.get_value_as_string()
if self._resume or self._test:
self._cfg.checkpoint = self._checkpoint_path
self._cfg.test = self._test
self._cfg.evaluation = self._evaluate
cfg_dict["checkpoint"] = self._cfg.checkpoint
cfg_dict["test"] = self._cfg.test
cfg_dict["evaluation"] = self._cfg.evaluation
rlg_trainer = RLGTrainer(self._cfg, cfg_dict)
if not rlg_trainer._bad_checkpoint:
trainer = Trainer(rlg_trainer, self._env)
await self._env.world.reset_async_no_set_up_scene()
# this is needed to enable rendering for viewport recording
for _ in range(5):
await self._env.world.render_async()
self._env.set_render_mode(self._render_dropdown.get_item_value_model().as_int)
await self._env.run(trainer)
await omni.kit.app.get_app().next_update_async()
except Exception as e:
print(traceback.format_exc())
finally:
self._is_training = False
if self._task._dr_randomizer.randomize:
await self._task._dr_randomizer.rep.orchestrator.stop_async()
self._task._dr_randomizer.rep.orchestrator._orchestrator.shutdown()
def _on_train(self):
# stop simulation if still running
self._timeline.stop()
self._on_render_mode_select(self._render_modes[self._render_dropdown.get_item_value_model().as_int])
if not self._is_training:
self._is_training = True
asyncio.ensure_future(self._on_train_async())
return
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def _on_window(self, status):
return
def on_shutdown(self):
self._extra_frames = []
if self._menu_items is not None:
self._sample_window_cleanup()
self.shutdown_cleanup()
global ext_instance
ext_instance = None
return
def shutdown_cleanup(self):
return
def _sample_window_cleanup(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
self._menu_items = None
self._buttons = None
self._load_env_button = None
self._task_dropdown = None
self._cbs = None
self._checkpoint_str = None
return
def get_instance():
return ext_instance
| 24,151 | Python | 43.234432 | 155 | 0.539646 |
elharirymatteo/RANS/omniisaacgymenvs/__init__.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
try:
from .extension import RLExtension, get_instance
# import omniisaacgymenvs.tests
except Exception as e:
pass
# print(e)
# print(traceback.format_exc())
| 1,753 | Python | 46.405404 | 80 | 0.775242 |
elharirymatteo/RANS/omniisaacgymenvs/envs/vec_env_rlgames_mt.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.gym.vec_env import TaskStopException, VecEnvMT
from .vec_env_rlgames import VecEnvRLGames
# VecEnv Wrapper for RL training
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
def _parse_data(self, data):
self._obs = data["obs"]
self._rew = data["rew"].to(self._task.rl_device)
self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
self._resets = data["reset"].to(self._task.rl_device)
self._extras = data["extras"]
def step(self, actions):
if self._stop:
raise TaskStopException()
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(
actions=actions, reset_buf=self._task.reset_buf
)
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)
self.send_actions(actions)
data = self.get_data()
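        # get_data() waits for the simulation thread to finish the step; the returned data is
        # routed through _parse_data() above to fill the obs/reward/reset buffers used below.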
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf
)
self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
obs_dict = {}
obs_dict["obs"] = self._obs
obs_dict["states"] = self._states
return obs_dict, self._rew, self._resets, self._extras
| 3,109 | Python | 42.194444 | 118 | 0.705693 |
elharirymatteo/RANS/omniisaacgymenvs/envs/vec_env_rlgames.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import numpy as np
import torch
from omni.isaac.gym.vec_env import VecEnvBase
# VecEnv Wrapper for RL training
class VecEnvRLGames(VecEnvBase):
def _process_data(self):
if type(self._obs) is dict:
if type(self._task.clip_obs) is dict:
for k, v in self._obs.items():
if k in self._task.clip_obs.keys():
self._obs[k] = v.float() / 255.0
self._obs[k] = torch.clamp(
                            self._obs[k], -self._task.clip_obs[k], self._task.clip_obs[k]
).to(self._task.rl_device)
else:
self._obs[k] = v
else:
self._obs = torch.clamp(
self._obs, -self._task.clip_obs, self._task.clip_obs
).to(self._task.rl_device)
self._states = torch.clamp(
self._states, -self._task.clip_obs, self._task.clip_obs
).to(self._task.rl_device)
self._rew = self._rew.to(self._task.rl_device)
self._resets = self._resets.to(self._task.rl_device)
self._extras = self._extras
def set_task(
self,
task,
backend="numpy",
sim_params=None,
init_sim=True,
rendering_dt=1.0 / 60.0,
) -> None:
super().set_task(task, backend, sim_params, init_sim, rendering_dt)
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
# only enable rendering when we are recording, or if the task already has it enabled
to_render = self._render
if self._record:
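            # A new recording clip starts every "recording_interval" steps and rendering stays on
            # for the following "recording_length" steps (both read from the task config).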
if not hasattr(self, "step_count"):
self.step_count = 0
if self.step_count % self._task.cfg["recording_interval"] == 0:
self.is_recording = True
self.record_length = 0
if self.is_recording:
self.record_length += 1
if self.record_length > self._task.cfg["recording_length"]:
self.is_recording = False
if self.is_recording:
to_render = True
else:
if (
self._task.cfg["headless"]
and not self._task.enable_cameras
and not self._task.cfg["enable_livestream"]
):
to_render = False
self.step_count += 1
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(
actions=actions, reset_buf=self._task.reset_buf
)
actions = torch.clamp(
actions, -self._task.clip_actions, self._task.clip_actions
).to(self._task.device)
self._task.pre_physics_step(actions)
if (
self.sim_frame_count + self._task.control_frequency_inv
) % self._task.rendering_interval == 0:
for _ in range(self._task.control_frequency_inv - 1):
self._world.step(render=False)
self.sim_frame_count += 1
self._world.step(render=to_render)
self.sim_frame_count += 1
else:
for _ in range(self._task.control_frequency_inv):
self._world.step(render=False)
self.sim_frame_count += 1
self._obs, self._rew, self._resets, self._extras = (
self._task.post_physics_step()
)
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(device=self._task.rl_device),
reset_buf=self._task.reset_buf,
)
self._states = self._task.get_states()
self._process_data()
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
def reset(self, seed=None, options=None):
"""Resets the task and applies default zero actions to recompute observations and states."""
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}] Running RL reset")
self._task.reset()
actions = torch.zeros(
(self.num_envs, self._task.num_actions), device=self._task.rl_device
)
obs_dict, _, _, _ = self.step(actions)
return obs_dict
| 6,014 | Python | 38.572368 | 100 | 0.594779 |
elharirymatteo/RANS/omniisaacgymenvs/envs/vec_env_rlgames_mfp.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.gym.vec_env import VecEnvBase
from datetime import datetime
import numpy as np
import torch
# VecEnv Wrapper for RL training
class VecEnvRLGames(VecEnvBase):
def _process_data(self):
if type(self._obs) is dict:
if type(self._task.clip_obs) is dict:
for k, v in self._obs.items():
if k in self._task.clip_obs.keys():
self._obs[k] = v.float() / 255.0
self._obs[k] = torch.clamp(
                            self._obs[k], -self._task.clip_obs[k], self._task.clip_obs[k]
).to(self._task.rl_device)
else:
self._obs[k] = v
else:
self._obs = torch.clamp(
self._obs, -self._task.clip_obs, self._task.clip_obs
).to(self._task.rl_device)
self._states = torch.clamp(
self._states, -self._task.clip_obs, self._task.clip_obs
).to(self._task.rl_device)
self._rew = self._rew.to(self._task.rl_device)
self._resets = self._resets.to(self._task.rl_device)
self._extras = self._extras
def set_task(
self,
task,
backend="numpy",
sim_params=None,
init_sim=True,
rendering_dt=1.0 / 60.0,
) -> None:
super().set_task(task, backend, sim_params, init_sim, rendering_dt)
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
# only enable rendering when we are recording, or if the task already has it enabled
to_render = self._render
if self._record:
if not hasattr(self, "step_count"):
self.step_count = 0
if self.step_count % self._task.cfg["recording_interval"] == 0:
self.is_recording = True
self.record_length = 0
if self.is_recording:
self.record_length += 1
if self.record_length > self._task.cfg["recording_length"]:
self.is_recording = False
if self.is_recording:
to_render = True
else:
if (
self._task.cfg["headless"]
and not self._task.enable_cameras
and not self._task.cfg["enable_livestream"]
):
to_render = False
self.step_count += 1
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(
actions=actions, reset_buf=self._task.reset_buf
)
actions = torch.clamp(
actions, -self._task.clip_actions, self._task.clip_actions
).to(self._task.device)
self._task.pre_physics_step(actions)
for _ in range(self._task.control_frequency_inv - 1):
self._task.apply_forces()
self._world.step(render=False)
self._task.update_state()
self.sim_frame_count += 1
self._task.apply_forces()
if (
self.sim_frame_count + self._task.control_frequency_inv
) % self._task.rendering_interval == 0:
self._world.step(render=to_render)
else:
self._world.step(render=False)
self._task.update_state()
self.sim_frame_count += 1
(
self._obs,
self._rew,
self._resets,
self._extras,
) = self._task.post_physics_step()
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(device=self._task.rl_device),
reset_buf=self._task.reset_buf,
)
self._states = self._task.get_states()
self._process_data()
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
def reset(self):
"""Resets the task and applies default zero actions to recompute observations and states."""
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}] Running RL reset")
self._task.reset()
actions = torch.zeros(
(self.num_envs, self._task.num_actions), device=self._task.rl_device
)
obs_dict, _, _, _ = self.step(actions)
return obs_dict
| 6,045 | Python | 37.75641 | 100 | 0.59206 |
elharirymatteo/RANS/omniisaacgymenvs/ros/ros_node.py | from typing import Callable, NamedTuple, Optional, Union, List, Dict
from collections import deque
import numpy as np
import datetime
import torch
import os
import rospy
from std_msgs.msg import ByteMultiArray
from geometry_msgs.msg import PoseStamped, Point, Pose
from omniisaacgymenvs.ros.ros_utills import derive_velocities
from omniisaacgymenvs.mujoco_envs.controllers.hl_controllers import (
PoseController,
PositionController,
VelocityTracker,
DockController,
)
from omniisaacgymenvs.mujoco_envs.environments.disturbances import (
RandomKillThrusters,
Disturbances,
)
class RLPlayerNode:
def __init__(
self,
hl_controller: Union[PositionController, PoseController, VelocityTracker, DockController],
cfg: dict,
map: List[int] = [2, 5, 4, 7, 6, 1, 0, 3],
debug: bool = False,
) -> None:
"""
Args:
hl_controller (Union[PositionController, PoseController, VelocityTracker]): The high-level controller.
map (List[int], optional): The map of the thrusters. Defaults to [2, 5, 4, 7, 6, 1, 0, 3].
platform (Dict[str, Union[bool, dict, float, str, int]], optional): The platform configuration. Defaults to None.
disturbances (Dict[str, Union[bool, float]], optional): The disturbances. Defaults to None.
"""
platform = cfg["task"]["env"]["platform"]
disturbances = cfg["task"]["env"]["disturbances"]
self.play_rate = 1 / (
cfg["task"]["env"]["controlFrequencyInv"] * cfg["task"]["sim"]["dt"]
)
self.run_time = cfg["task"]["env"]["maxEpisodeLength"] / self.play_rate
self.DR = Disturbances(disturbances, platform["seed"])
self.TK = RandomKillThrusters(
{
"num_thrusters_to_kill": platform["randomization"]["max_thruster_kill"]
* platform["randomization"]["kill_thrusters"],
"seed": platform["seed"],
}
)
# Initialize variables
self.buffer_size = 30 # Number of samples for differentiation
self.pose_buffer = deque(maxlen=self.buffer_size)
self.time_buffer = deque(maxlen=self.buffer_size)
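        # Poses and timestamps are buffered so that linear and angular velocities can be
        # estimated by finite differences (see derive_velocities) once the buffer is full.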
self.debug = debug
self.map = map
self.hl_controller = hl_controller
self.reset()
# Initialize Subscriber and Publisher
self.pose_sub = rospy.Subscriber(
"/vrpn_client_node/FP_exp_RL/pose", PoseStamped, self.pose_callback
)
self.goal_sub = rospy.Subscriber(
"/spacer_floating_platform/goal", Point, self.goal_callback
)
self.action_pub = rospy.Publisher(
"/spacer_floating_platform/valves/input", ByteMultiArray, queue_size=1
)
# Initialize ROS message for thrusters
self.thruster_msg = ByteMultiArray()
rospy.on_shutdown(self.shutdown)
def getObs(self) -> Dict[str, np.ndarray]:
"""
        Returns an up-to-date observation buffer.
Returns:
Dict[str, np.ndarray]: A dictionary containing the state of the simulation.
"""
state = {}
state["angular_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.ang_vel
)
state["linear_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.lin_vel
)
state["position"] = self.DR.noisy_observations.add_noise_on_pos(self.pos)
state["quaternion"] = self.quat
return state
def reset(self) -> None:
"""
Resets the goal and the buffers."""
self.ready = False
self.hl_controller.initializeLoggers()
self.hl_controller.time = 0
self.count = 0
def shutdown(self) -> None:
"""
Shutdown the node and kills the thrusters while leaving the air-bearing on."""
self.thruster_msg.data = [1, 0, 0, 0, 0, 0, 0, 0, 0]
self.action_pub.publish(self.thruster_msg)
rospy.sleep(1)
self.thruster_msg.data = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.action_pub.publish(self.thruster_msg)
def remap_actions(self, actions: torch.Tensor) -> List[float]:
"""
Remaps the actions from the RL algorithm to the thrusters of the platform.
Args:
actions (torch.Tensor): The actions from the RL algorithm.
Returns:
List[float]: The actions for the thrusters."""
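        # Reorder the RL actions so that output channel j is driven by action index self.map[j],
        # matching the wiring of the platform's thruster valves.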
return [actions[i] for i in self.map]
    def pose_callback(self, msg: PoseStamped) -> None:
        """
        Callback for the pose topic. It updates the state of the agent.
        Args:
            msg (PoseStamped): The pose message."""
# current_time = rospy.Time.now()
current_time = msg.header.stamp
# Add current pose and time to the buffer
self.pose_buffer.append(msg)
self.time_buffer.append(current_time)
# Calculate velocities if buffer is filled
if len(self.pose_buffer) == self.buffer_size:
self.get_state_from_optitrack(msg)
self.ready = True
    def get_state_from_optitrack(self, msg: PoseStamped) -> None:
        """
        Converts a ROS message to an observation.
        Args:
            msg (PoseStamped): The pose message."""
pos = msg.pose.position
quat = msg.pose.orientation
self.pos = [pos.x, pos.y, pos.z]
self.quat = [quat.w, quat.x, quat.y, quat.z]
self.lin_vel, self.ang_vel = derive_velocities(
self.time_buffer, self.pose_buffer
)
def goal_callback(self, msg: Point) -> None:
"""
Callback for the goal topic. It updates the task data with the new goal data.
Args:
msg (Point): The goal message."""
self.hl_controller.setGoal(np.array([msg.x, msg.y, msg.z]))
def get_action(self, run_time: float, lifting_active: int = 1) -> None:
"""
Gets the action from the RL algorithm and publishes it to the thrusters.
        Args:
            run_time (float): The elapsed run time in seconds, forwarded to the high-level controller.
            lifting_active (int, optional): Whether or not the lifting thruster is active. Defaults to 1.
"""
self.state = self.getObs()
self.action = self.hl_controller.getAction(self.state, time=run_time)
# self.action = self.action * self.thruster_mask
action = self.remap_actions(self.action)
lifting_active = 1
action.insert(0, lifting_active)
self.thruster_msg.data = action
# self.action_pub.publish(self.thruster_msg)
def print_logs(self) -> None:
"""
Prints the logs."""
print("=========================================")
for key, value in self.hl_controller.logs.items():
print(f"{key}: {value[-1]}")
def run(self) -> None:
"""
Runs the RL algorithm."""
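        # Run the control loop at play_rate until ROS shuts down or the configured run time elapses.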
self.update_once = True
self.rate = rospy.Rate(self.play_rate)
start_time = rospy.Time.now()
run_time = rospy.Time.now() - start_time
while (not rospy.is_shutdown()) and (run_time.to_sec() < self.run_time):
if self.ready:
self.get_action(run_time.to_sec())
self.count += 1
if self.debug:
self.print_logs()
run_time = rospy.Time.now() - start_time
self.rate.sleep()
# Kills the thrusters once done
self.shutdown()
| 7,395 | Python | 33.082949 | 125 | 0.588235 |
elharirymatteo/RANS/omniisaacgymenvs/ros/run_ros_from_Isaac.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omni.isaac.kit import SimulationApp
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omegaconf import DictConfig, OmegaConf
import hydra
import os
from omniisaacgymenvs.mujoco_envs.controllers.discrete_LQR_controller import (
DiscreteController,
parseControllerConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import (
RLGamesModel,
)
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import (
MuJoCoFloatingPlatform,
parseEnvironmentConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.hl_controllers import hlControllerFactory
from omniisaacgymenvs.ros.ros_utills import enable_ros_extension
@hydra.main(config_name="config_mujoco", config_path="../cfg")
def run(cfg: DictConfig):
""" "
Run the simulation.
Args:
cfg (DictConfig): A dictionary containing the configuration of the simulation.
"""
# print_dict(cfg)
cfg_dict = omegaconf_to_dict(cfg)
simulation_app = SimulationApp({"headless": True})
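    # The ROS bridge extension can only be enabled once the SimulationApp is running, which is
    # why the ROS node and rospy are imported below instead of at the top of the file.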
enable_ros_extension()
from omniisaacgymenvs.ros.ros_node import RLPlayerNode
import rospy
rospy.init_node("RL_player")
# Create the environment
env = MuJoCoFloatingPlatform(**parseEnvironmentConfig(cfg_dict))
# Get the low-level controller
if cfg_dict["use_rl"]:
assert os.path.exists(
cfg_dict["checkpoint"]
), "A correct path to a neural network must be provided to infer an RL agent."
ll_controller = RLGamesModel(
config=cfg_dict["train"], model_path=cfg_dict["checkpoint"]
)
else:
ll_controller = DiscreteController(**parseControllerConfig(cfg_dict, env))
dt = cfg_dict["task"]["sim"]["dt"]
# Get the high-level controller
hl_controller = hlControllerFactory(cfg_dict, ll_controller, dt)
node = RLPlayerNode(
hl_controller,
cfg=cfg_dict,
debug=True,
)
# Run the node.
node.run()
hl_controller.saveSimulationData()
hl_controller.plotSimulation()
# Close the simulationApp.
simulation_app.close()
if __name__ == "__main__":
# Initialize ROS node
run()
| 2,546 | Python | 27.943182 | 87 | 0.694423 |
elharirymatteo/RANS/omniisaacgymenvs/ros/ros_utills.py | import numpy as np
import os
from typing import List, Tuple
#from geometry_msgs.msg import Pose
#import rospy
def enable_ros_extension(env_var: str = "ROS_DISTRO"):
"""
Enable the ROS extension.
Args:
env_var (str): The environment variable that contains the ROS distro."""
import omni.ext
ROS_DISTRO: str = os.environ.get(env_var, "noetic")
assert ROS_DISTRO in [
"noetic",
"foxy",
"humble",
], f"${env_var} must be one of [noetic, foxy, humble]"
# Get the extension manager and list of available extensions
extension_manager = omni.kit.app.get_app().get_extension_manager()
extensions = extension_manager.get_extensions()
# Determine the selected ROS extension id
if ROS_DISTRO == "noetic":
ros_extension = [ext for ext in extensions if "ros_bridge" in ext["id"]][0]
    elif ROS_DISTRO == "humble":
ros_extension = [
ext
for ext in extensions
if "ros2_bridge" in ext["id"] and "humble" in ext["id"]
][0]
elif ROS_DISTRO == "foxy":
ros_extension = [ext for ext in extensions if "ros2_bridge" in ext["id"]][0]
# Load the ROS extension if it is not already loaded
if not extension_manager.is_extension_enabled(ros_extension["id"]):
extension_manager.set_extension_enabled_immediate(ros_extension["id"], True)
def angular_velocities(q: np.ndarray, dt: np.ndarray, N: int = 1) -> np.ndarray:
    """
    Calculate the angular velocities from a sequence of quaternions.
    Args:
        q (np.ndarray): The quaternions (w, x, y, z), one per row.
        dt (np.ndarray): The time difference between consecutive quaternions.
        N (int, optional): Subsampling factor applied to the quaternion sequence. Defaults to 1.
    Returns:
        np.ndarray: The angular velocities."""
q = q[0::N]
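    # Each component below is the vector part of conj(q_k) * q_{k+1}, scaled by 2 / dt: a
    # finite-difference approximation of the (body-frame) angular velocity between samples.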
return (2 / dt) * np.array([
q[:-1,0]*q[1:,1] - q[:-1,1]*q[1:,0] - q[:-1,2]*q[1:,3] + q[:-1,3]*q[1:,2],
q[:-1,0]*q[1:,2] + q[:-1,1]*q[1:,3] - q[:-1,2]*q[1:,0] - q[:-1,3]*q[1:,1],
q[:-1,0]*q[1:,3] - q[:-1,1]*q[1:,2] + q[:-1,2]*q[1:,1] - q[:-1,3]*q[1:,0]])
def derive_velocities(time_buffer: list, pose_buffer: list) -> Tuple[np.ndarray, np.ndarray]:
"""
Derive the velocities from the pose and time buffers.
Args:
time_buffer (List[rospy.Time]): The time buffer.
pose_buffer (List[Pose]): The pose buffer.
Returns:
Tuple(np.ndarray, np.ndarray): The linear and angular velocities."""
dt = (time_buffer[-1] - time_buffer[0]).to_sec() # Time difference between first and last pose
# Calculate linear velocities
linear_positions = np.array([[pose.pose.position.x, pose.pose.position.y, pose.pose.position.z] for pose in pose_buffer])
linear_velocities = np.diff(linear_positions, axis=0) / (dt/len(time_buffer))
average_linear_velocity = np.mean(linear_velocities, axis=0)
# Calculate angular velocities
angular_orientations = np.array([[pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z] for pose in pose_buffer])
dt_buff = np.ones((angular_orientations.shape[0] - 1)) * dt / (angular_orientations.shape[0] - 1)
ang_vel = angular_velocities(angular_orientations, dt_buff)
average_angular_velocity = np.mean(ang_vel, axis=1)
return average_linear_velocity, average_angular_velocity | 3,312 | Python | 37.97647 | 163 | 0.626208 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/allegro_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.allegro_hand import AllegroHand
from omniisaacgymenvs.robots.articulations.views.allegro_hand_view import AllegroHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class AllegroHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
if not (self.obs_type in ["full_no_vel", "full"]):
raise Exception("Unknown type of observations!\nobservationType should be one of: [full_no_vel, full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full_no_vel": 50,
"full": 72,
}
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 16
self._num_states = 0
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782], device=self.device)
self.pose_dy, self.pose_dz = -0.2, 0.06
def get_hand(self):
allegro_hand = AllegroHand(
prim_path=self.default_zero_env_path + "/allegro_hand",
name="allegro_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"allegro_hand",
get_prim_at_path(allegro_hand.prim_path),
self._sim_config.parse_actor_config("allegro_hand"),
)
allegro_hand_prim = self._stage.GetPrimAtPath(allegro_hand.prim_path)
allegro_hand.set_allegro_hand_properties(stage=self._stage, allegro_hand_prim=allegro_hand_prim)
allegro_hand.set_motor_control_mode(
stage=self._stage, allegro_hand_path=self.default_zero_env_path + "/allegro_hand"
)
def get_hand_view(self, scene):
return AllegroHandView(prim_paths_expr="/World/envs/.*/allegro_hand", name="allegro_hand_view")
def get_observations(self):
self.get_object_goal_observations()
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, 16:19] = self.object_pos
self.obs_buf[:, 19:23] = self.object_rot
self.obs_buf[:, 23:26] = self.goal_pos
self.obs_buf[:, 26:30] = self.goal_rot
self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 34:50] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 32:35] = self.object_pos
self.obs_buf[:, 35:39] = self.object_rot
self.obs_buf[:, 39:42] = self.object_linvel
self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 45:48] = self.goal_pos
self.obs_buf[:, 48:52] = self.goal_rot
self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 56:72] = self.actions
| 6,329 | Python | 42.655172 | 115 | 0.658872 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP2D_Virtual_Dock_RGBD.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP2D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.sensors.exteroceptive.camera import (
camera_factory,
)
from omniisaacgymenvs.robots.articulations.views.MFP2D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances,
)
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import omni
import time
import math
import torch
from torchvision.utils import make_grid
from torchvision.transforms.functional import to_pil_image as ToPILImage
import os
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP2DVirtual_Dock_RGBD(RLTask):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
    The idea is to extend it to multitask RL in the future."""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._platform_cfg = self._task_cfg["env"]["platform"]
self._dock_cfg = self._task_cfg["env"]["dock"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.step = 0
self.iteration = 0
self.save_image_counter = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0, 0, 0.45])
self._dock_view = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces."""
# Defines the observation space
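        # The policy observes the proprioceptive state, the per-thruster transforms and action
        # masks of the virtual platform, the randomized masses/CoM, and the onboard RGB-D images.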
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 5)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
"rgb": spaces.Box(
np.ones(
(
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* -np.Inf,
np.ones(
(
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* np.Inf,
),
"depth": spaces.Box(
np.ones(
(
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* -np.Inf,
np.ones(
(
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* np.Inf,
),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
def add_stats(self, names: List[str]) -> None:
"""
Adds training statistics to be recorded during training.
Args:
names (List[str]): list of names of the statistics to be recorded."""
for name in names:
torch_zeros = lambda: torch.zeros(
self._num_envs,
dtype=torch.float,
device=self._device,
requires_grad=False,
)
            if name not in self.episode_sums:
self.episode_sums[name] = torch_zeros()
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 5),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 3),
device=self._device,
dtype=torch.float,
),
"rgb": torch.zeros(
(
self._num_envs,
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
),
device=self._device,
dtype=torch.float,
),
"depth": torch.zeros(
(
self._num_envs,
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
self.extras_wandb = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
            scene (Scene): the scene to which the task's assets are added."""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
if self._task_cfg["sim"].get("add_lab", False):
self.get_zero_g_lab()
RLTask.set_up_scene(self, scene, replicate_physics=False)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path,
name="modular_floating_platform_view",
track_contact_force=True,
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add rigidprim view of docking station to the scene
scene, self._dock_view = self.task.add_dock_to_scene(scene)
# Link replicator to existing onboard cameras
self.collect_camera()
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene."""
fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def get_target(self) -> None:
"""
Adds the visualization target to the scene."""
self.task.generate_target(
self.default_zero_env_path,
self._default_marker_position,
self._dock_cfg,
)
def get_zero_g_lab(self) -> None:
"""
Adds the Zero-G-lab to the scene."""
usd_path = os.path.join(os.getcwd(), self._task_cfg["lab_usd_path"])
prim = add_reference_to_stage(usd_path, self._task_cfg["lab_path"])
applyCollider(prim, True)
def collect_camera(self) -> None:
"""
Collect active cameras to generate synthetic images in batch."""
active_sensors = []
active_camera_source_path = self._task_cfg["env"]["sensors"]["RLCamera"][
"prim_path"
]
for i in range(self._num_envs):
# swap env_0 to env_i
sensor_path = active_camera_source_path.split("/")
sensor_path[3] = f"env_{i}"
self._task_cfg["env"]["sensors"]["RLCamera"]["prim_path"] = "/".join(
sensor_path
)
rl_sensor = camera_factory.get("RLCamera")(
self._task_cfg["env"]["sensors"]["RLCamera"],
self.rep,
)
active_sensors.append(rl_sensor)
self.active_sensors = active_sensors
def update_state(self) -> None:
"""
Updates the state of the system."""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.base.get_world_poses(
clone=True
)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.base.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Cast quaternion to Yaw
siny_cosp = 2 * (
self.root_quats[:, 0] * self.root_quats[:, 3]
+ self.root_quats[:, 1] * self.root_quats[:, 2]
)
cosy_cosp = 1 - 2 * (
self.root_quats[:, 2] * self.root_quats[:, 2]
+ self.root_quats[:, 3] * self.root_quats[:, 3]
)
orient_z = torch.arctan2(siny_cosp, cosy_cosp)
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
orient_z = self.DR.noisy_observations.add_noise_on_heading(
orient_z, step=self.step
)
# Compute the heading
self.heading[:, 0] = torch.cos(orient_z)
self.heading[:, 1] = torch.sin(orient_z)
# Update goal pose
self.update_goal_state()
# Update FP contact state
net_contact_forces = self.compute_contact_forces()
# Dump to state
self.current_state = {
"position": root_positions[:, :2],
"orientation": self.heading,
"linear_velocity": root_velocities[:, :2],
"angular_velocity": root_velocities[:, -1],
"net_contact_forces": net_contact_forces,
}
def update_goal_state(self) -> None:
"""
Updates the goal state of the task."""
target_positions, target_orientations = self._dock_view.base.get_world_poses(
clone=True
)
self.task.set_goals(
self.all_indices.long(),
target_positions - self._env_pos,
target_orientations,
self.step,
)
def compute_contact_forces(self) -> torch.Tensor:
"""
Get the contact forces of the platform.
Returns:
net_contact_forces_norm (torch.Tensor): the norm of the net contact forces.
"""
net_contact_forces = self._platforms.base.get_net_contact_forces(clone=False)
return torch.norm(net_contact_forces, dim=-1)
def get_observations(self) -> Dict[str, torch.Tensor]:
"""
Gets the observations of the task to be passed to the policy.
Returns:
observations: a dictionary containing the observations of the task."""
# implement logic to retrieve observation states
self.update_state()
# Get the state
self.obs_buf["state"] = self.task.get_state_observations(self.current_state)
# Get thruster transforms
self.obs_buf["transforms"] = self.virtual_platform.current_transforms
# Get the action masks
self.obs_buf["masks"] = self.virtual_platform.action_masks
self.obs_buf["masses"] = self.DR.mass_disturbances.get_masses_and_com()
# Get the camera data
rgb_obs, depth_obs = self.get_rgbd_data()
self.obs_buf["rgb"] = rgb_obs
self.obs_buf["depth"] = depth_obs
if (
self._task_cfg["env"]["sensors"]["save_to_log"]
and self._cfg["wandb_activate"]
):
if (
self.save_image_counter
% self._task_cfg["env"]["sensors"]["save_frequency"]
== 0
):
rgb_grid = ToPILImage(make_grid(rgb_obs, nrow=5))
depth_grid = ToPILImage(make_grid(depth_obs, nrow=5))
wandb.log(
{
"rgb": wandb.Image(rgb_grid, caption="rgb"),
"depth": wandb.Image(depth_grid, caption="depth"),
}
)
self.save_image_counter += 1
observations = {self._platforms.name: {"obs_buf": self.obs_buf}}
return observations
def get_rgbd_data(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
return batched rgbd data.
Returns:
rgb (torch.Tensor): batched rgb data
depth (torch.Tensor): batched depth data
"""
rs_obs = [sensor.get_observation() for sensor in self.active_sensors]
rgb = torch.stack([ob["rgb"] for ob in rs_obs]).to(self._device)
depth = torch.stack([ob["depth"] for ob in rs_obs]).to(self._device)
rgb = self.DR.noisy_rgb_images.add_noise_on_image(rgb, step=self.step)
depth = self.DR.noisy_depth_images.add_noise_on_image(depth, step=self.step)
return rgb, depth
def pre_physics_step(self, actions: torch.Tensor) -> None:
"""
This function implements the logic to be performed before physics steps.
Args:
actions (torch.Tensor): the actions to be applied to the platform."""
# If is not playing skip
if not self._env._world.is_playing():
return
# Check which environment need to be reset
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
# Reset the environments (Robots)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Collect actions
actions = actions.clone().to(self._device)
self.actions = actions
# Remap actions to the correct values
if self._discrete_actions == "MultiDiscrete":
# If actions are multidiscrete [0, 1]
thrust_cmds = self.actions.float()
elif self._discrete_actions == "Continuous":
# Transform continuous actions to [0, 1] discrete actions.
thrust_cmds = torch.clamp((self.actions + 1) / 2, min=0.0, max=1.0)
else:
raise NotImplementedError("")
# Applies the thrust multiplier
thrusts = self.virtual_platform.thruster_cfg.thrust_force * thrust_cmds
# Adds random noise on the actions
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts, step=self.step)
# clear actions for reset envs
thrusts[reset_env_ids] = 0
# If split thrust, equally shares the maximum amount of thrust across thrusters.
if self.split_thrust:
factor = torch.max(
torch.sum(self.actions, -1),
torch.ones((self._num_envs), dtype=torch.float32, device=self._device),
)
self.positions, self.forces = self.virtual_platform.project_forces(
thrusts / factor.view(self._num_envs, 1)
)
else:
self.positions, self.forces = self.virtual_platform.project_forces(thrusts)
return
def apply_forces(self) -> None:
"""
Applies all the forces to the platform and its thrusters."""
# Applies actions from the thrusters
self._platforms.thrusters.apply_forces_and_torques_at_pos(
forces=self.forces, positions=self.positions, is_global=False
)
# Applies the domain randomization
floor_forces = self.DR.force_disturbances.get_force_disturbance(self.root_pos)
torque_disturbance = self.DR.torque_disturbances.get_torque_disturbance(
self.root_pos
)
self._platforms.base.apply_forces_and_torques_at_pos(
forces=floor_forces,
torques=torque_disturbance,
positions=self.root_pos,
is_global=True,
)
def post_reset(self):
"""
This function implements the logic to be performed after a reset."""
# implement any logic required for simulation on-start here
self.root_pos, self.root_rot = self._platforms.get_world_poses()
self.root_velocities = self._platforms.get_velocities()
self._platforms.get_CoM_indices()
self._platforms.get_plane_lock_indices()
self._dock_view.get_plane_lock_indices()
self.initial_root_pos, self.initial_root_rot = (
self.root_pos.clone(),
self.root_rot.clone(),
)
self.initial_pin_pos = self._env_pos
self.initial_pin_rot = torch.zeros(
(self._num_envs, 4), dtype=torch.float32, device=self._device
)
self.initial_pin_rot[:, 0] = 1
# control parameters
self.thrusts = torch.zeros(
(self._num_envs, self._max_actions, 3),
dtype=torch.float32,
device=self._device,
)
self.set_targets(self.all_indices)
def set_targets(self, env_ids: torch.Tensor):
"""
Sets the targets for the task.
Args:
env_ids (torch.Tensor): the indices of the environments for which to set the targets.
"""
num_resets = len(env_ids)
env_long = env_ids.long()
# Randomizes the position and orientation of the dock on the x y axis
target_positions, target_orientation = self.task.get_goals(
env_long,
self.step,
)
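        # yaw of the goal orientation from its w-first (w, x, y, z) quaternion: atan2(2*w*z, 1 - 2*z**2), assuming zero roll and pitch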
siny_cosp = 2 * target_orientation[:, 0] * target_orientation[:, 3]
cosy_cosp = 1 - 2 * (target_orientation[:, 3] * target_orientation[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# apply resets
dof_pos = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_pos[:, self._dock_view.lock_indices[0]] = target_positions[:, 0]
dof_pos[:, self._dock_view.lock_indices[1]] = target_positions[:, 1]
dof_pos[:, self._dock_view.lock_indices[2]] = h
self._dock_view.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_vel[:, self._dock_view.lock_indices[0]] = 0.0
dof_vel[:, self._dock_view.lock_indices[1]] = 0.0
dof_vel[:, self._dock_view.lock_indices[2]] = 0.0
self._dock_view.set_joint_velocities(dof_vel, indices=env_ids)
def reset_idx(self, env_ids: torch.Tensor) -> None:
"""
Resets the environments with the given indices.
Args:
env_ids (torch.Tensor): the indices of the environments to be reset."""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.task.reset(env_ids)
self.set_targets(env_ids)
self.virtual_platform.randomize_thruster_state(env_ids, num_resets)
self.DR.force_disturbances.generate_forces(env_ids, num_resets, step=self.step)
self.DR.torque_disturbances.generate_torques(
env_ids, num_resets, step=self.step
)
self.DR.mass_disturbances.randomize_masses(env_ids, step=self.step)
CoM_shift = self.DR.mass_disturbances.get_CoM(env_ids)
random_mass = self.DR.mass_disturbances.get_masses(env_ids)
# Randomizes the starting position of the platform
pos, quat, vel = self.task.get_initial_conditions(env_ids, step=self.step)
siny_cosp = 2 * quat[:, 0] * quat[:, 3]
cosy_cosp = 1 - 2 * (quat[:, 3] * quat[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# Randomizes mass of the dock
if hasattr(self.task._task_parameters, "spawn_dock_mass_curriculum"):
mass = self.task.get_dock_masses(env_ids, step=self.step)
self._dock_view.base.set_masses(mass, indices=env_ids)
# apply joint resets
dof_pos = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
# self._platforms.CoM.set_masses(random_mass, indices=env_ids)
dof_pos[:, self._platforms.lock_indices[0]] = pos[:, 0]
dof_pos[:, self._platforms.lock_indices[1]] = pos[:, 1]
dof_pos[:, self._platforms.lock_indices[2]] = h
dof_pos[:, self._platforms.CoM_shifter_indices[0]] = CoM_shift[:, 0]
dof_pos[:, self._platforms.CoM_shifter_indices[1]] = CoM_shift[:, 1]
self._platforms.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
dof_vel[:, self._platforms.lock_indices[0]] = vel[:, 0]
dof_vel[:, self._platforms.lock_indices[1]] = vel[:, 1]
dof_vel[:, self._platforms.lock_indices[2]] = vel[:, 5]
self._platforms.set_joint_velocities(dof_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
# fill `extras`
self.extras["episode"] = {}
self.extras_wandb = {}
for key in self.episode_sums.keys():
value = (
torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
)
if key in self._penalties.get_stats_name():
self.extras_wandb[key] = value
elif key in self.task.log_with_wandb:
self.extras_wandb[key] = value
else:
self.extras["episode"][key] = value
self.episode_sums[key][env_ids] = 0.0
def update_state_statistics(self) -> None:
"""
        Updates the running statistics of the training state."""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.abs(
self.current_state["angular_velocity"]
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
def calculate_metrics(self) -> None:
"""
Calculates the metrics of the training.
        That is, the rewards, penalties, and other performance statistics."""
reward = self.task.compute_reward(self.current_state, self.actions)
self.iteration += 1
self.step += 1 / self._task_cfg["env"]["horizon_length"]
penalties = self._penalties.compute_penalty(
self.current_state, self.actions, self.step
)
self.rew_buf[:] = reward - penalties
self.episode_sums = self.task.update_statistics(self.episode_sums)
self.episode_sums = self._penalties.update_statistics(self.episode_sums)
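        # collect and (if enabled) push the accumulated logs to wandb once per completed horizon of environment steps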
if self.iteration / self._task_cfg["env"]["horizon_length"] % 1 == 0:
self.extras_wandb["wandb_step"] = int(self.step)
for key, value in self._penalties.get_logs().items():
self.extras_wandb[key] = value
for key, value in self.task.get_logs(self.step).items():
self.extras_wandb[key] = value
for key, value in self.DR.get_logs(self.step).items():
self.extras_wandb[key] = value
if self._cfg["wandb_activate"]:
wandb.log(self.extras_wandb)
self.extras_wandb = {}
self.update_state_statistics()
def is_done(self) -> None:
"""
Checks if the episode is done."""
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = self.task.update_kills()
# resets due to episode length
self.reset_buf[:] = torch.where(
self.progress_buf >= self._max_episode_length - 1, ones, die
)
| 30,179 | Python | 37.791774 | 102 | 0.549322 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/ball_balance.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.maths import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.balance_bot import BalanceBot
from pxr import PhysxSchema
class BallBalanceTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 12 + 12
self._num_actions = 3
self.anchored = False
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = self._task_cfg["sim"]["dt"]
self._table_position = torch.tensor([0, 0, 0.56])
self._ball_position = torch.tensor([0.0, 0.0, 1.0])
self._ball_radius = 0.1
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
def set_up_scene(self, scene) -> None:
self.get_balance_table()
self.add_ball()
super().set_up_scene(scene, replicate_physics=False)
self.set_up_table_anchors()
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("balance_bot_view"):
scene.remove_object("balance_bot_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
def get_balance_table(self):
balance_table = BalanceBot(
prim_path=self.default_zero_env_path + "/BalanceBot", name="BalanceBot", translation=self._table_position
)
self._sim_config.apply_articulation_settings(
"table", get_prim_at_path(balance_table.prim_path), self._sim_config.parse_actor_config("table")
)
def add_ball(self):
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball/ball",
translation=self._ball_position,
name="ball_0",
radius=self._ball_radius,
color=torch.tensor([0.9, 0.6, 0.2]),
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
def set_up_table_anchors(self):
from pxr import Gf
height = 0.08
stage = get_current_stage()
for i in range(self._num_envs):
base_path = f"{self.default_base_env_path}/env_{i}/BalanceBot"
for j, leg_offset in enumerate([(0.4, 0, height), (-0.2, 0.34641, 0), (-0.2, -0.34641, 0)]):
# fix the legs to ground
leg_path = f"{base_path}/lower_leg{j}"
ground_joint_path = leg_path + "_ground"
env_pos = stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}").GetAttribute("xformOp:translate").Get()
anchor_pos = env_pos + Gf.Vec3d(*leg_offset)
self.fix_to_ground(stage, ground_joint_path, leg_path, anchor_pos)
def fix_to_ground(self, stage, joint_path, prim_path, anchor_pos):
from pxr import UsdPhysics, Gf
# D6 fixed joint
d6FixedJoint = UsdPhysics.Joint.Define(stage, joint_path)
d6FixedJoint.CreateBody0Rel().SetTargets(["/World/defaultGroundPlane"])
d6FixedJoint.CreateBody1Rel().SetTargets([prim_path])
d6FixedJoint.CreateLocalPos0Attr().Set(anchor_pos)
d6FixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
d6FixedJoint.CreateLocalPos1Attr().Set(Gf.Vec3f(0, 0, 0.18))
d6FixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
        # lock all translational DOFs (a limit with low greater than high is treated as locked)
d6Prim = stage.GetPrimAtPath(joint_path)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
def get_observations(self) -> dict:
ball_positions, ball_orientations = self._balls.get_world_poses(clone=False)
ball_positions = ball_positions[:, 0:3] - self._env_pos
ball_velocities = self._balls.get_velocities(clone=False)
ball_linvels = ball_velocities[:, 0:3]
ball_angvels = ball_velocities[:, 3:6]
dof_pos = self._balance_bots.get_joint_positions(clone=False)
dof_vel = self._balance_bots.get_joint_velocities(clone=False)
sensor_force_torques = self._balance_bots.get_measured_joint_forces(joint_indices=self._sensor_indices) # (num_envs, num_sensors, 6)
self.obs_buf[..., 0:3] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 3:6] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 6:9] = ball_positions
self.obs_buf[..., 9:12] = ball_linvels
self.obs_buf[..., 12:15] = sensor_force_torques[..., 0] / 20.0
self.obs_buf[..., 15:18] = sensor_force_torques[..., 3] / 20.0
self.obs_buf[..., 18:21] = sensor_force_torques[..., 4] / 20.0
self.obs_buf[..., 21:24] = sensor_force_torques[..., 5] / 20.0
self.ball_positions = ball_positions
self.ball_linvels = ball_linvels
observations = {"ball_balance": {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += (
self._dt * self._action_speed_scale * actions.to(self.device)
)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits
)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = 0
self._balance_bots.set_joint_position_targets(self.dof_position_targets) # .clone())
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.001 # min horizontal dist from origin
max_d = 0.4 # max horizontal dist from origin
min_height = 1.0
max_height = 2.0
min_horizontal_speed = 0
max_horizontal_speed = 2
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
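        # initial horizontal velocity points back toward the tray center and grows with the spawn distance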
speedscales = (dists - min_d) / (max_d - min_d)
hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device)
hvels = -speedscales * hspeeds * dirs
vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze()
ball_pos = self.initial_ball_pos.clone()
ball_rot = self.initial_ball_rot.clone()
# position
ball_pos[env_ids_64, 0:2] += hpos[..., 0:2]
ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
ball_rot[env_ids_64, 0] = 1
ball_rot[env_ids_64, 1:] = 0
ball_velocities = self.initial_ball_velocities.clone()
# linear
ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2]
ball_velocities[env_ids_64, 2] = vspeeds
# angular
ball_velocities[env_ids_64, 3:6] = 0
# reset root state for bbots and balls in selected envs
self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32)
self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32)
# reset root pose and velocity
self._balance_bots.set_world_poses(
self.initial_bot_pos[env_ids_64].clone(), self.initial_bot_rot[env_ids_64].clone(), indices=env_ids_32
)
self._balance_bots.set_velocities(self.initial_bot_velocities[env_ids_64].clone(), indices=env_ids_32)
# reset DOF states for bbots in selected envs
self._balance_bots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
dof_limits = self._balance_bots.get_dof_limits()
self.bbot_dof_lower_limits, self.bbot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
self.initial_dof_positions = self._balance_bots.get_joint_positions()
self.initial_bot_pos, self.initial_bot_rot = self._balance_bots.get_world_poses()
# self.initial_bot_pos[..., 2] = 0.559 # tray_height
self.initial_bot_velocities = self._balance_bots.get_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_ball_velocities = self._balls.get_velocities()
self.dof_position_targets = torch.zeros(
(self.num_envs, self._balance_bots.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
actuated_joints = ["lower_leg0", "lower_leg1", "lower_leg2"]
self.actuated_dof_indices = torch.tensor(
[self._balance_bots._dof_indices[j] for j in actuated_joints], device=self._device, dtype=torch.long
)
force_links = ["upper_leg0", "upper_leg1", "upper_leg2"]
self._sensor_indices = torch.tensor(
[self._balance_bots._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
def calculate_metrics(self) -> None:
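        # reward peaks when the ball is stationary at a point 0.7 m above the env origin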
ball_dist = torch.sqrt(
self.ball_positions[..., 0] * self.ball_positions[..., 0]
+ (self.ball_positions[..., 2] - 0.7) * (self.ball_positions[..., 2] - 0.7)
+ (self.ball_positions[..., 1]) * self.ball_positions[..., 1]
)
ball_speed = torch.sqrt(
self.ball_linvels[..., 0] * self.ball_linvels[..., 0]
+ self.ball_linvels[..., 1] * self.ball_linvels[..., 1]
+ self.ball_linvels[..., 2] * self.ball_linvels[..., 2]
)
pos_reward = 1.0 / (1.0 + ball_dist)
speed_reward = 1.0 / (1.0 + ball_speed)
self.rew_buf[:] = pos_reward * speed_reward
def is_done(self) -> None:
reset = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
reset = torch.where(
self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset
)
self.reset_buf[:] = reset
| 13,952 | Python | 44.15534 | 140 | 0.630447 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/cartpole_camera.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gym import spaces
import numpy as np
import torch
import omni.usd
from pxr import UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleCameraTask(CartpoleTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = self.camera_width * self.camera_height * 3
self._num_actions = 1
# use multi-dimensional observation for camera RGB
self.observation_space = spaces.Box(
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * -np.Inf,
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * np.Inf)
RLTask.__init__(self, name, env)
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self.camera_type = self._task_cfg["env"].get("cameraType", 'rgb')
self.camera_width = self._task_cfg["env"]["cameraWidth"]
self.camera_height = self._task_cfg["env"]["cameraHeight"]
self.camera_channels = 3
self._export_images = self._task_cfg["env"]["exportImages"]
def cleanup(self) -> None:
# initialize remaining buffers
RLTask.cleanup(self)
# override observation buffer for camera data
self.obs_buf = torch.zeros(
(self.num_envs, self.camera_width, self.camera_height, 3), device=self.device, dtype=torch.float)
def add_camera(self) -> None:
stage = get_current_stage()
camera_path = f"/World/envs/env_0/Camera"
camera_xform = stage.DefinePrim(f'{camera_path}_Xform', 'Xform')
# set up transforms for parent and camera prims
position = (-4.2, 0.0, 3.0)
rotation = (0, -6.1155, -180)
UsdGeom.Xformable(camera_xform).AddTranslateOp()
UsdGeom.Xformable(camera_xform).AddRotateXYZOp()
camera_xform.GetAttribute('xformOp:translate').Set(position)
camera_xform.GetAttribute('xformOp:rotateXYZ').Set(rotation)
camera = stage.DefinePrim(f'{camera_path}_Xform/Camera', 'Camera')
UsdGeom.Xformable(camera).AddRotateXYZOp()
camera.GetAttribute("xformOp:rotateXYZ").Set((90, 0, 90))
# set camera properties
camera.GetAttribute('focalLength').Set(24)
camera.GetAttribute('focusDistance').Set(400)
# hide other environments in the background
camera.GetAttribute("clippingRange").Set((0.01, 20.0))
def set_up_scene(self, scene) -> None:
self.get_cartpole()
self.add_camera()
RLTask.set_up_scene(self, scene)
# start replicator to capture image data
self.rep.orchestrator._orchestrator._is_started = True
# set up cameras
self.render_products = []
env_pos = self._env_pos.cpu()
camera_paths = [f"/World/envs/env_{i}/Camera_Xform/Camera" for i in range(self._num_envs)]
for i in range(self._num_envs):
render_product = self.rep.create.render_product(camera_paths[i], resolution=(self.camera_width, self.camera_height))
self.render_products.append(render_product)
# initialize pytorch writer for vectorized collection
self.pytorch_listener = self.PytorchListener()
self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
self.pytorch_writer.attach(self.render_products)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
# retrieve RGB data from all render products
images = self.pytorch_listener.get_rgb_data()
if images is not None:
if self._export_images:
from torchvision.utils import save_image, make_grid
img = images/255
                save_image(make_grid(img, nrow=2), 'cartpole_export.png')
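            # reorder the batched images to (num_envs, width, height, channels) to match the observation space and scale to [0, 1]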
self.obs_buf = torch.swapaxes(images, 1, 3).clone().float()/255.0
else:
print("Image tensor is NONE!")
return self.obs_buf
| 6,899 | Python | 42.396226 | 128 | 0.674301 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import *
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
from pxr import UsdLux, UsdPhysics
class AnymalTerrainTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.height_samples = None
self.custom_origins = False
self.init_done = False
self._env_spacing = 0.0
self._num_observations = 188
self._num_actions = 12
self.update_config(sim_config)
RLTask.__init__(self, name, env)
self.height_points = self.init_height_points()
self.measured_heights = None
# joint positions offsets
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
# reward episode sums
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"lin_vel_xy": torch_zeros(),
"lin_vel_z": torch_zeros(),
"ang_vel_z": torch_zeros(),
"ang_vel_xy": torch_zeros(),
"orient": torch_zeros(),
"torques": torch_zeros(),
"joint_acc": torch_zeros(),
"base_height": torch_zeros(),
"air_time": torch_zeros(),
"collision": torch_zeros(),
"stumble": torch_zeros(),
"action_rate": torch_zeros(),
"hip": torch_zeros(),
}
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.height_meas_scale = self._task_cfg["env"]["learn"]["heightMeasurementScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["termination"] = self._task_cfg["env"]["learn"]["terminalReward"]
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["ang_vel_xy"] = self._task_cfg["env"]["learn"]["angularVelocityXYRewardScale"]
self.rew_scales["orient"] = self._task_cfg["env"]["learn"]["orientationRewardScale"]
self.rew_scales["torque"] = self._task_cfg["env"]["learn"]["torqueRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["base_height"] = self._task_cfg["env"]["learn"]["baseHeightRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["hip"] = self._task_cfg["env"]["learn"]["hipRewardScale"]
self.rew_scales["fallen_over"] = self._task_cfg["env"]["learn"]["fallenOverRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
self.base_init_state = pos + rot + v_lin + v_ang
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.decimation = self._task_cfg["env"]["control"]["decimation"]
self.dt = self.decimation * self._task_cfg["sim"]["dt"]
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.push_interval = int(self._task_cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
self.curriculum = self._task_cfg["env"]["terrain"]["curriculum"]
self.base_threshold = 0.2
self.knee_threshold = 0.1
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._task_cfg["sim"]["default_physics_material"]["static_friction"] = self._task_cfg["env"]["terrain"][
"staticFriction"
]
self._task_cfg["sim"]["default_physics_material"]["dynamic_friction"] = self._task_cfg["env"]["terrain"][
"dynamicFriction"
]
self._task_cfg["sim"]["default_physics_material"]["restitution"] = self._task_cfg["env"]["terrain"][
"restitution"
]
self._task_cfg["sim"]["add_ground_plane"] = False
def _get_noise_scale_vec(self, cfg):
noise_vec = torch.zeros_like(self.obs_buf[0])
self.add_noise = self._task_cfg["env"]["learn"]["addNoise"]
noise_level = self._task_cfg["env"]["learn"]["noiseLevel"]
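        # 188-dim observation layout: lin vel (3), ang vel (3), projected gravity (3), commands (3),
        # dof pos (12), dof vel (12), terrain heights (140), previous actions (12)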
noise_vec[:3] = self._task_cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale
noise_vec[3:6] = self._task_cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale
noise_vec[6:9] = self._task_cfg["env"]["learn"]["gravityNoise"] * noise_level
noise_vec[9:12] = 0.0 # commands
noise_vec[12:24] = self._task_cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale
noise_vec[24:36] = self._task_cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale
noise_vec[36:176] = (
self._task_cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale
)
noise_vec[176:188] = 0.0 # previous actions
return noise_vec
def init_height_points(self):
# 1mx1.6m rectangle (without center line)
y = 0.1 * torch.tensor(
[-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False
) # 10-50cm on each side
x = 0.1 * torch.tensor(
[-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False
) # 20-80cm on each side
grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
self.num_height_points = grid_x.numel()
points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False)
points[:, :, 0] = grid_x.flatten()
points[:, :, 1] = grid_y.flatten()
return points
def _create_trimesh(self, create_mesh=True):
self.terrain = Terrain(self._task_cfg["env"]["terrain"], num_robots=self.num_envs)
vertices = self.terrain.vertices
triangles = self.terrain.triangles
position = torch.tensor([-self.terrain.border_size, -self.terrain.border_size, 0.0])
if create_mesh:
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position)
self.height_samples = (
torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device)
)
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
self.get_terrain()
self.get_anymal()
super().set_up_scene(scene, collision_filter_global_paths=["/World/terrain"])
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def initialize_views(self, scene):
# initialize terrain variables even if we do not need to re-create the terrain mesh
self.get_terrain(create_mesh=False)
super().initialize_views(scene)
if scene.object_exists("anymal_view"):
scene.remove_object("anymal_view", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_terrain(self, create_mesh=True):
self.env_origins = torch.zeros((self.num_envs, 3), device=self.device, requires_grad=False)
if not self.curriculum:
self._task_cfg["env"]["terrain"]["maxInitMapLevel"] = self._task_cfg["env"]["terrain"]["numLevels"] - 1
self.terrain_levels = torch.randint(
0, self._task_cfg["env"]["terrain"]["maxInitMapLevel"] + 1, (self.num_envs,), device=self.device
)
self.terrain_types = torch.randint(
0, self._task_cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device
)
self._create_trimesh(create_mesh=create_mesh)
self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float)
def get_anymal(self):
anymal_translation = torch.tensor([0.0, 0.0, 0.66])
anymal_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0])
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal",
name="anymal",
translation=anymal_translation,
orientation=anymal_orientation,
)
self._sim_config.apply_articulation_settings(
"anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("anymal")
)
anymal.set_anymal_properties(self._stage, anymal.prim)
anymal.prepare_contacts(self._stage, anymal.prim)
self.dof_names = anymal.dof_names
for i in range(self.num_actions):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
def post_reset(self):
self.base_init_state = torch.tensor(
self.base_init_state, dtype=torch.float, device=self.device, requires_grad=False
)
self.timeout_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# initialize some data used later on
self.up_axis_idx = 2
self.common_step_counter = 0
self.extras = {}
self.noise_scale_vec = self._get_noise_scale_vec(self._task_cfg)
self.commands = torch.zeros(
self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False
) # x vel, y vel, yaw vel, heading
self.commands_scale = torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
device=self.device,
requires_grad=False,
)
self.gravity_vec = torch.tensor(
get_axis_params(-1.0, self.up_axis_idx), dtype=torch.float, device=self.device
).repeat((self.num_envs, 1))
self.forward_vec = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float, device=self.device).repeat(
(self.num_envs, 1)
)
self.torques = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.last_actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)
self.last_dof_vel = torch.zeros((self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.num_envs):
self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]]
self.num_dof = self._anymals.num_dof
self.dof_pos = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.base_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device)
self.base_quat = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device)
self.base_velocities = torch.zeros((self.num_envs, 6), dtype=torch.float, device=self.device)
self.knee_pos = torch.zeros((self.num_envs * 4, 3), dtype=torch.float, device=self.device)
self.knee_quat = torch.zeros((self.num_envs * 4, 4), dtype=torch.float, device=self.device)
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
self.init_done = True
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
self.dof_vel[env_ids] = velocities
self.update_terrain_level(env_ids)
self.base_pos[env_ids] = self.base_init_state[0:3]
self.base_pos[env_ids, 0:3] += self.env_origins[env_ids]
self.base_pos[env_ids, 0:2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device)
self.base_quat[env_ids] = self.base_init_state[3:7]
self.base_velocities[env_ids] = self.base_init_state[7:]
self._anymals.set_world_poses(
positions=self.base_pos[env_ids].clone(), orientations=self.base_quat[env_ids].clone(), indices=indices
)
self._anymals.set_velocities(velocities=self.base_velocities[env_ids].clone(), indices=indices)
self._anymals.set_joint_positions(positions=self.dof_pos[env_ids].clone(), indices=indices)
self._anymals.set_joint_velocities(velocities=self.dof_vel[env_ids].clone(), indices=indices)
self.commands[env_ids, 0] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 1] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 3] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(
1
) # set small commands to zero
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
self.feet_air_time[env_ids] = 0.0
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 1
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"]["rew_" + key] = (
torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s
)
self.episode_sums[key][env_ids] = 0.0
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float())
def update_terrain_level(self, env_ids):
if not self.init_done or not self.curriculum:
# do not change on initial reset
return
root_pos, _ = self._anymals.get_world_poses(clone=False)
distance = torch.norm(root_pos[env_ids, :2] - self.env_origins[env_ids, :2], dim=1)
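        # curriculum: demote robots that covered less than 25% of the commanded distance,
        # promote those that walked past half the terrain tile length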
self.terrain_levels[env_ids] -= 1 * (
distance < torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s * 0.25
)
self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2)
self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows
self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]]
def refresh_dof_state_tensors(self):
self.dof_pos = self._anymals.get_joint_positions(clone=False)
self.dof_vel = self._anymals.get_joint_velocities(clone=False)
def refresh_body_state_tensors(self):
self.base_pos, self.base_quat = self._anymals.get_world_poses(clone=False)
self.base_velocities = self._anymals.get_velocities(clone=False)
self.knee_pos, self.knee_quat = self._anymals._knees.get_world_poses(clone=False)
def pre_physics_step(self, actions):
if not self.world.is_playing():
return
self.actions = actions.clone().to(self.device)
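        # joint-space PD control, recomputed for each of the `decimation` physics substeps:
        # tau = Kp * (action_scale * a + q_default - q) - Kd * q_dot, clipped to +/- 80 Nm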
for i in range(self.decimation):
if self.world.is_playing():
torques = torch.clip(
self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos)
- self.Kd * self.dof_vel,
-80.0,
80.0,
)
self._anymals.set_joint_efforts(torques)
self.torques = torques
SimulationContext.step(self.world, render=False)
self.refresh_dof_state_tensors()
def post_physics_step(self):
self.progress_buf[:] += 1
if self.world.is_playing():
self.refresh_dof_state_tensors()
self.refresh_body_state_tensors()
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
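            # proportional heading controller: yaw-rate command = 0.5 * wrapped heading error, clipped to [-1, 1]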
self.commands[:, 2] = torch.clip(0.5 * wrap_to_pi(self.commands[:, 3] - heading), -1.0, 1.0)
self.check_termination()
self.get_states()
self.calculate_metrics()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.get_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def push_robots(self):
self.base_velocities[:, 0:2] = torch_rand_float(
-1.0, 1.0, (self.num_envs, 2), device=self.device
) # lin vel x/y
self._anymals.set_velocities(self.base_velocities)
def check_termination(self):
self.timeout_buf = torch.where(
self.progress_buf >= self.max_episode_length - 1,
torch.ones_like(self.timeout_buf),
torch.zeros_like(self.timeout_buf),
)
knee_contact = (
torch.norm(self._anymals._knees.get_net_contact_forces(clone=False).view(self._num_envs, 4, 3), dim=-1)
> 1.0
)
self.has_fallen = (torch.norm(self._anymals._base.get_net_contact_forces(clone=False), dim=1) > 1.0) | (
torch.sum(knee_contact, dim=-1) > 1.0
)
self.reset_buf = self.has_fallen.clone()
self.reset_buf = torch.where(self.timeout_buf.bool(), torch.ones_like(self.reset_buf), self.reset_buf)
def calculate_metrics(self):
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
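        # exponential tracking kernel: reward is largest when the squared velocity error is zero (decay scale 0.25)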
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
# other base velocity penalties
rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"]
# orientation penalty
rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"]
# base height penalty
rew_base_height = torch.square(self.base_pos[:, 2] - 0.52) * self.rew_scales["base_height"]
# torque penalty
rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"]
# joint acc penalty
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"]
# fallen over penalty
rew_fallen_over = self.has_fallen * self.rew_scales["fallen_over"]
# action rate penalty
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
# cosmetic penalty for hip motion
rew_hip = (
torch.sum(torch.abs(self.dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["hip"]
)
# total reward
self.rew_buf = (
rew_lin_vel_xy
+ rew_ang_vel_z
+ rew_lin_vel_z
+ rew_ang_vel_xy
+ rew_orient
+ rew_base_height
+ rew_torque
+ rew_joint_acc
+ rew_action_rate
+ rew_hip
+ rew_fallen_over
)
self.rew_buf = torch.clip(self.rew_buf, min=0.0, max=None)
# add termination reward
self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf
# log episode reward sums
self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy
self.episode_sums["ang_vel_z"] += rew_ang_vel_z
self.episode_sums["lin_vel_z"] += rew_lin_vel_z
self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy
self.episode_sums["orient"] += rew_orient
self.episode_sums["torques"] += rew_torque
self.episode_sums["joint_acc"] += rew_joint_acc
self.episode_sums["action_rate"] += rew_action_rate
self.episode_sums["base_height"] += rew_base_height
self.episode_sums["hip"] += rew_hip
def get_observations(self):
self.measured_heights = self.get_heights()
heights = (
torch.clip(self.base_pos[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.0) * self.height_meas_scale
)
self.obs_buf = torch.cat(
(
self.base_lin_vel * self.lin_vel_scale,
self.base_ang_vel * self.ang_vel_scale,
self.projected_gravity,
self.commands[:, :3] * self.commands_scale,
self.dof_pos * self.dof_pos_scale,
self.dof_vel * self.dof_vel_scale,
heights,
self.actions,
),
dim=-1,
)
def get_ground_heights_below_knees(self):
points = self.knee_pos.reshape(self.num_envs, 4, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_ground_heights_below_base(self):
points = self.base_pos.reshape(self.num_envs, 1, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_heights(self, env_ids=None):
        if env_ids is not None:
points = quat_apply_yaw(
self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]
) + (self.base_pos[env_ids, 0:3]).unsqueeze(1)
else:
points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (
self.base_pos[:, 0:3]
).unsqueeze(1)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
@torch.jit.script
def quat_apply_yaw(quat, vec):
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, 1:3] = 0.0
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def wrap_to_pi(angles):
angles %= 2 * np.pi
angles -= 2 * np.pi * (angles > np.pi)
return angles
def get_axis_params(value, axis_idx, x_value=0.0, dtype=float, n_dims=3):
"""construct arguments to `Vec` according to axis index."""
zs = np.zeros((n_dims,))
assert axis_idx < n_dims, "the axis dim should be within the vector dimensions"
zs[axis_idx] = 1.0
params = np.where(zs == 1.0, value, zs)
params[0] = x_value
return list(params.astype(dtype))
| 29,313 | Python | 45.530159 | 120 | 0.609218 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/shadow_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.shadow_hand import ShadowHand
from omniisaacgymenvs.robots.articulations.views.shadow_hand_view import ShadowHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class ShadowHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
if not (self.obs_type in ["openai", "full_no_vel", "full", "full_state"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]"
)
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"openai": 42,
"full_no_vel": 77,
"full": 157,
"full_state": 187,
}
self.asymmetric_obs = self._task_cfg["env"]["asymmetric_observations"]
self.use_vel_obs = False
self.fingertip_obs = True
self.fingertips = [
"robot0:ffdistal",
"robot0:mfdistal",
"robot0:rfdistal",
"robot0:lfdistal",
"robot0:thdistal",
]
self.num_fingertips = len(self.fingertips)
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self.force_torque_obs_scale = 10.0
num_states = 0
if self.asymmetric_obs:
num_states = 187
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 20
self._num_states = num_states
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.pose_dy, self.pose_dz = -0.39, 0.10
def get_hand(self):
shadow_hand = ShadowHand(
prim_path=self.default_zero_env_path + "/shadow_hand",
name="shadow_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"shadow_hand",
get_prim_at_path(shadow_hand.prim_path),
self._sim_config.parse_actor_config("shadow_hand"),
)
shadow_hand.set_shadow_hand_properties(stage=self._stage, shadow_hand_prim=shadow_hand.prim)
shadow_hand.set_motor_control_mode(stage=self._stage, shadow_hand_path=shadow_hand.prim_path)
def get_hand_view(self, scene):
hand_view = ShadowHandView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view")
scene.add(hand_view._fingers)
return hand_view
def get_observations(self):
self.get_object_goal_observations()
self.fingertip_pos, self.fingertip_rot = self._hands._fingers.get_world_poses(clone=False)
self.fingertip_pos -= self._env_pos.repeat((1, self.num_fingertips)).reshape(
self.num_envs * self.num_fingertips, 3
)
self.fingertip_velocities = self._hands._fingers.get_velocities(clone=False)
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_state" or self.asymmetric_obs:
self.vec_sensor_tensor = self._hands.get_measured_joint_forces(
joint_indices=self._hands._sensor_indices
).view(self._num_envs, -1)
if self.obs_type == "openai":
self.compute_fingertip_observations(True)
elif self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
elif self.obs_type == "full_state":
self.compute_full_state(False)
else:
print("Unkown observations type!")
if self.asymmetric_obs:
self.compute_full_state(True)
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_fingertip_observations(self, no_vel=False):
if no_vel:
# Per https://arxiv.org/pdf/1808.00177.pdf Table 2
# Fingertip positions
# Object Position, but not orientation
# Relative target orientation
# 3*self.num_fingertips = 15
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15)
self.obs_buf[:, 15:18] = self.object_pos
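            # relative rotation between the current object orientation and the goal orientation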
self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 22:42] = self.actions
else:
# 13*self.num_fingertips = 65
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 15:35] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 35:65] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 65:68] = self.object_pos
self.obs_buf[:, 68:72] = self.object_rot
self.obs_buf[:, 72:75] = self.object_linvel
self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 78:81] = self.goal_pos
self.obs_buf[:, 81:85] = self.goal_rot
self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 89:109] = self.actions
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
            self.obs_buf[:, 24:27] = self.object_pos
self.obs_buf[:, 27:31] = self.object_rot
self.obs_buf[:, 31:34] = self.goal_pos
self.obs_buf[:, 34:38] = self.goal_rot
self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 57:77] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 48:51] = self.object_pos
self.obs_buf[:, 51:55] = self.object_rot
self.obs_buf[:, 55:58] = self.object_linvel
self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 61:64] = self.goal_pos
self.obs_buf[:, 64:68] = self.goal_rot
self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# (7+6)*self.num_fingertips = 65
self.obs_buf[:, 72:87] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 87:107] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 107:137] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 137:157] = self.actions
def compute_full_state(self, asymm_obs=False):
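        # State layout (assuming num_hand_dofs = 24): 48 joint positions/velocities, 13 object
        # terms, 11 goal terms, 65 fingertip states, 30 force-torque readings and 20 actions,
        # i.e. 167 + 20 = 187 values for the asymmetric critic state; the actor variant below
        # additionally carries 24 scaled joint-force terms (211 values in total).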
if asymm_obs:
self.states_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.states_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
# self.states_buf[:, 2*self.num_hand_dofs:3*self.num_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 2 * self.num_hand_dofs # 48
self.states_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.states_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.states_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.states_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 61
self.states_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.states_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.states_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
fingertip_obs_start = goal_obs_start + 11 # 72
self.states_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
# obs_end = 72 + 65 + 30 = 167
# obs_total = obs_end + num_actions = 187
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.states_buf[:, obs_end : obs_end + self.num_actions] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 2 * self.num_hand_dofs : 3 * self.num_hand_dofs] = (
self.force_torque_obs_scale * self.dof_force_tensor
)
            obj_obs_start = 3 * self.num_hand_dofs  # 72
self.obs_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.obs_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.obs_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.obs_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
            goal_obs_start = obj_obs_start + 13  # 85
self.obs_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.obs_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.obs_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
            fingertip_obs_start = goal_obs_start + 11  # 96
self.obs_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
            # obs_end = 96 + 65 + 30 = 191
            # obs_total = obs_end + num_actions = 211
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.obs_buf[:, obs_end : obs_end + self.num_actions] = self.actions
| 15,107 | Python | 48.211726 | 129 | 0.609188 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/franka_cabinet.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import numpy as np
import torch
from omni.isaac.cloner import Cloner
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cabinet import Cabinet
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.cabinet_view import CabinetView
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from pxr import Usd, UsdGeom
class FrankaCabinetTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self.distX_offset = 0.04
self.dt = 1 / 60.0
self._num_observations = 23
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.action_scale = self._task_cfg["env"]["actionScale"]
self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
self.num_props = self._task_cfg["env"]["numProps"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
def set_up_scene(self, scene) -> None:
self.get_franka()
self.get_cabinet()
if self.num_props > 0:
self.get_props()
super().set_up_scene(scene, filter_collisions=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("cabinet_view"):
scene.remove_object("cabinet_view", registry_only=True)
if scene.object_exists("drawers_view"):
scene.remove_object("drawers_view", registry_only=True)
if scene.object_exists("prop_view"):
scene.remove_object("prop_view", registry_only=True)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
def get_franka(self):
franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka")
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
def get_cabinet(self):
cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet")
self._sim_config.apply_articulation_settings(
"cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet")
)
def get_props(self):
prop_cloner = Cloner()
drawer_pos = torch.tensor([0.0515, 0.0, 0.7172])
prop_color = torch.tensor([0.2, 0.4, 0.6])
props_per_row = int(math.ceil(math.sqrt(self.num_props)))
prop_size = 0.08
prop_spacing = 0.09
xmin = -0.5 * prop_spacing * (props_per_row - 1)
zmin = -0.5 * prop_spacing * (props_per_row - 1)
prop_count = 0
prop_pos = []
for j in range(props_per_row):
prop_up = zmin + j * prop_spacing
for k in range(props_per_row):
if prop_count >= self.num_props:
break
propx = xmin + k * prop_spacing
prop_pos.append([propx, prop_up, 0.0])
prop_count += 1
prop = DynamicCuboid(
prim_path=self.default_zero_env_path + "/prop/prop_0",
name="prop",
color=prop_color,
size=prop_size,
density=100.0,
)
self._sim_config.apply_articulation_settings(
"prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop")
)
prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)]
prop_cloner.clone(
source_prim_path=self.default_zero_env_path + "/prop/prop_0",
prim_paths=prop_paths,
positions=np.array(prop_pos) + drawer_pos.numpy(),
replicate_physics=False,
)
def init_data(self) -> None:
def get_env_local_pose(env_pos, xformable, device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
stage = get_current_stage()
hand_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")),
self._device,
)
lfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")),
self._device,
)
rfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")),
self._device,
)
finger_pose = torch.zeros(7, device=self._device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3])
grasp_pose_axis = 1
franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(
hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]
)
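        # franka_local_grasp_* is the grasp frame (midpoint between the two fingers) expressed
        # in the hand (panda_link7) frame: the hand pose is inverted and composed with the
        # finger midpoint pose computed above.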
franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device)
self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1))
self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1))
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.franka_default_dof_pos = torch.tensor(
[1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False)
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False)
self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
(
self.franka_grasp_rot,
self.franka_grasp_pos,
self.drawer_grasp_rot,
self.drawer_grasp_pos,
) = self.compute_grasp_transforms(
hand_rot,
hand_pos,
self.franka_local_grasp_rot,
self.franka_local_grasp_pos,
drawer_rot,
drawer_pos,
self.drawer_local_grasp_rot,
self.drawer_local_grasp_pos,
)
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
dof_pos_scaled = (
2.0
* (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
to_target = self.drawer_grasp_pos - self.franka_grasp_pos
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
to_target,
self.cabinet_dof_pos[:, 3].unsqueeze(-1),
self.cabinet_dof_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
observations = {self._frankas.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
# reset franka
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0)
+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
self.franka_dof_lower_limits,
self.franka_dof_upper_limits,
)
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
# reset cabinet
self._cabinets.set_joint_positions(
torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices
)
self._cabinets.set_joint_velocities(
torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices
)
# reset props
if self.num_props > 0:
self._props.set_world_poses(
self.default_prop_pos[self.prop_indices[env_ids].flatten()],
self.default_prop_rot[self.prop_indices[env_ids].flatten()],
self.prop_indices[env_ids].flatten().to(torch.int32),
)
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
if self.num_props > 0:
self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses()
self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view(
self._num_envs, self.num_props
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = self.compute_franka_reward(
self.reset_buf,
self.progress_buf,
self.actions,
self.cabinet_dof_pos,
self.franka_grasp_pos,
self.drawer_grasp_pos,
self.franka_grasp_rot,
self.drawer_grasp_rot,
self.franka_lfinger_pos,
self.franka_rfinger_pos,
self.gripper_forward_axis,
self.drawer_inward_axis,
self.gripper_up_axis,
self.drawer_up_axis,
self._num_envs,
self.dist_reward_scale,
self.rot_reward_scale,
self.around_handle_reward_scale,
self.open_reward_scale,
self.finger_dist_reward_scale,
self.action_penalty_scale,
self.distX_offset,
self._max_episode_length,
self.franka_dof_pos,
self.finger_close_reward_scale,
)
def is_done(self) -> None:
# reset if drawer is open or max length reached
self.reset_buf = torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
def compute_grasp_transforms(
self,
hand_rot,
hand_pos,
franka_local_grasp_rot,
franka_local_grasp_pos,
drawer_rot,
drawer_pos,
drawer_local_grasp_rot,
drawer_local_grasp_pos,
):
global_franka_rot, global_franka_pos = tf_combine(
hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos
)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
)
return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
def compute_franka_reward(
self,
reset_buf,
progress_buf,
actions,
cabinet_dof_pos,
franka_grasp_pos,
drawer_grasp_pos,
franka_grasp_rot,
drawer_grasp_rot,
franka_lfinger_pos,
franka_rfinger_pos,
gripper_forward_axis,
drawer_inward_axis,
gripper_up_axis,
drawer_up_axis,
num_envs,
dist_reward_scale,
rot_reward_scale,
around_handle_reward_scale,
open_reward_scale,
finger_dist_reward_scale,
action_penalty_scale,
distX_offset,
max_episode_length,
joint_positions,
finger_close_reward_scale,
):
        # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor, float) -> Tensor
# distance from hand to the drawer
d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
dist_reward = 1.0 / (1.0 + d**2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
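        # Inverse-square shaping: dist_reward = (1 / (1 + d^2))^2 is close to 1 when the grasp
        # point reaches the handle and decays with distance; it is doubled within 2 cm of the handle.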
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = (
torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of forward axis for gripper
dot2 = (
torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2)
# bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward
),
around_handle_reward,
)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist),
finger_dist_reward,
),
finger_dist_reward,
)
finger_close_reward = torch.zeros_like(rot_reward)
finger_close_reward = torch.where(
d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward
)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions**2, dim=-1)
# how far the cabinet has been opened out
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = (
dist_reward_scale * dist_reward
+ rot_reward_scale * rot_reward
+ around_handle_reward_scale * around_handle_reward
+ open_reward_scale * open_reward
+ finger_dist_reward_scale * finger_dist_reward
- action_penalty_scale * action_penalty
+ finger_close_reward * finger_close_reward_scale
)
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
# # prevent bad style in opening drawer
# rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
# rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
return rewards
| 22,933 | Python | 41.313653 | 222 | 0.599922 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/crazyflie.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.crazyflie import Crazyflie
from omniisaacgymenvs.robots.articulations.views.crazyflie_view import CrazyflieView
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class CrazyflieTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 18
self._num_actions = 4
self._crazyflie_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
# parameters for the crazyflie
self.arm_length = 0.05
# parameters for the controller
self.motor_damp_time_up = 0.15
self.motor_damp_time_down = 0.15
# I use the multiplier 4, since 4*T ~ time for a step response to finish, where
# T is a time constant of the first-order filter
self.motor_tau_up = 4 * self.dt / (self.motor_damp_time_up + EPS)
self.motor_tau_down = 4 * self.dt / (self.motor_damp_time_down + EPS)
# thrust max
self.mass = 0.028
self.thrust_to_weight = 1.9
self.motor_assymetry = np.array([1.0, 1.0, 1.0, 1.0])
# re-normalizing to sum-up to 4
self.motor_assymetry = self.motor_assymetry * 4.0 / np.sum(self.motor_assymetry)
self.grav_z = -1.0 * self._task_cfg["sim"]["gravity"][2]
def set_up_scene(self, scene) -> None:
self.get_crazyflie()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("crazyflie_view"):
scene.remove_object("crazyflie_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
for i in range(1, 5):
scene.remove_object(f"m{i}_prop_view", registry_only=True)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
def get_crazyflie(self):
copter = Crazyflie(
prim_path=self.default_zero_env_path + "/Crazyflie", name="crazyflie", translation=self._crazyflie_position
)
self._sim_config.apply_articulation_settings(
"crazyflie", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("crazyflie")
)
def get_target(self):
radius = 0.2
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = self.target_positions - root_positions
self.obs_buf[..., 3:6] = rot_x
self.obs_buf[..., 6:9] = rot_y
self.obs_buf[..., 9:12] = rot_z
self.obs_buf[..., 12:15] = root_linvels
self.obs_buf[..., 15:18] = root_angvels
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
self.actions = actions
# clamp to [-1.0, 1.0]
thrust_cmds = torch.clamp(actions, min=-1.0, max=1.0)
# scale to [0.0, 1.0]
thrust_cmds = (thrust_cmds + 1.0) / 2.0
# filtering the thruster and adding noise
motor_tau = self.motor_tau_up * torch.ones((self._num_envs, 4), dtype=torch.float32, device=self._device)
motor_tau[thrust_cmds < self.thrust_cmds_damp] = self.motor_tau_down
motor_tau[motor_tau > 1.0] = 1.0
# Since NN commands thrusts we need to convert to rot vel and back
thrust_rot = thrust_cmds**0.5
self.thrust_rot_damp = motor_tau * (thrust_rot - self.thrust_rot_damp) + self.thrust_rot_damp
self.thrust_cmds_damp = self.thrust_rot_damp**2
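        # The first-order lag is applied in rotor-speed space (square root of the thrust command)
        # because thrust is roughly proportional to the square of the rotor angular velocity, so
        # the motor dynamics are approximately linear in rotor speed rather than in thrust.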
## Adding noise
thrust_noise = 0.01 * torch.randn(4, dtype=torch.float32, device=self._device)
thrust_noise = thrust_cmds * thrust_noise
self.thrust_cmds_damp = torch.clamp(self.thrust_cmds_damp + thrust_noise, min=0.0, max=1.0)
thrusts = self.thrust_max * self.thrust_cmds_damp
# thrusts given rotation
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
rot_matrix = torch.cat((rot_x, rot_y, rot_z), 1).reshape(-1, 3, 3)
force_x = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_y = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_xy = torch.cat((force_x, force_y), 1).reshape(-1, 4, 2)
thrusts = thrusts.reshape(-1, 4, 1)
thrusts = torch.cat((force_xy, thrusts), 2)
thrusts_0 = thrusts[:, 0]
thrusts_0 = thrusts_0[:, :, None]
thrusts_1 = thrusts[:, 1]
thrusts_1 = thrusts_1[:, :, None]
thrusts_2 = thrusts[:, 2]
thrusts_2 = thrusts_2[:, :, None]
thrusts_3 = thrusts[:, 3]
thrusts_3 = thrusts_3[:, :, None]
mod_thrusts_0 = torch.matmul(rot_matrix, thrusts_0)
mod_thrusts_1 = torch.matmul(rot_matrix, thrusts_1)
mod_thrusts_2 = torch.matmul(rot_matrix, thrusts_2)
mod_thrusts_3 = torch.matmul(rot_matrix, thrusts_3)
self.thrusts[:, 0] = torch.squeeze(mod_thrusts_0)
self.thrusts[:, 1] = torch.squeeze(mod_thrusts_1)
self.thrusts[:, 2] = torch.squeeze(mod_thrusts_2)
self.thrusts[:, 3] = torch.squeeze(mod_thrusts_3)
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
# spin spinning rotors
prop_rot = self.thrust_cmds_damp * self.prop_max_rot
self.dof_vel[:, 0] = prop_rot[:, 0]
self.dof_vel[:, 1] = -1.0 * prop_rot[:, 1]
self.dof_vel[:, 2] = prop_rot[:, 2]
self.dof_vel[:, 3] = -1.0 * prop_rot[:, 3]
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(4):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
thrust_max = self.grav_z * self.mass * self.thrust_to_weight * self.motor_assymetry / 4.0
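        # Per-motor maximum thrust: total max thrust = m * g * thrust_to_weight, split across the
        # four rotors according to motor_assymetry (evenly for the default [1, 1, 1, 1]). With
        # m = 0.028 kg, TWR = 1.9 and g ~ 9.81 m/s^2 this is roughly 0.52 N in total, about 0.13 N per motor.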
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_max = torch.tensor(thrust_max, device=self._device, dtype=torch.float32)
self.motor_linearity = 1.0
self.prop_max_rot = 433.3
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.actions = torch.zeros((self._num_envs, 4), device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
# Extra info
self.extras = {}
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"rew_pos": torch_zeros(),
"rew_orient": torch_zeros(),
"rew_effort": torch_zeros(),
"rew_spin": torch_zeros(),
"raw_dist": torch_zeros(),
"raw_orient": torch_zeros(),
"raw_effort": torch_zeros(),
"raw_spin": torch_zeros(),
}
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control parameters
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.set_targets(self.all_indices)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
        # set the target position to x, y = (0, 0) and z = 2.0
self.target_positions[envs_long, 0:2] = torch.zeros((num_sets, 2), device=self._device)
self.target_positions[envs_long, 2] = torch.ones(num_sets, device=self._device) * 2.0
        # optional vertical offset so the ball marker visually aligns with the target (currently zero)
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.0
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.0, 0.0, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.thrust_cmds_damp[env_ids] = 0
self.thrust_rot_damp[env_ids] = 0
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
self.episode_sums[key][env_ids] = 0.0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# pos reward
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# orient reward
ups = quat_axis(root_quats, 2)
self.orient_z = ups[..., 2]
up_reward = torch.clamp(ups[..., 2], min=0.0, max=1.0)
# effort reward
effort = torch.square(self.actions).sum(-1)
effort_reward = 0.05 * torch.exp(-0.5 * effort)
# spin reward
spin = torch.square(root_angvels).sum(-1)
spin_reward = 0.01 * torch.exp(-1.0 * spin)
# combined reward
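        # The orientation and spin terms are gated by pos_reward, so they only contribute once the
        # drone is near the target, while the effort penalty is always applied.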
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spin_reward) - effort_reward
# log episode reward sums
self.episode_sums["rew_pos"] += pos_reward
self.episode_sums["rew_orient"] += up_reward
self.episode_sums["rew_effort"] += effort_reward
self.episode_sums["rew_spin"] += spin_reward
# log raw info
self.episode_sums["raw_dist"] += target_dist
self.episode_sums["raw_orient"] += ups[..., 2]
self.episode_sums["raw_effort"] += effort
self.episode_sums["raw_spin"] += spin
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 5.0, ones, die)
# z >= 0.5 & z <= 5.0 & up > 0
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
die = torch.where(self.root_positions[..., 2] > 5.0, ones, die)
die = torch.where(self.orient_z < 0.0, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 16,824 | Python | 41.487374 | 120 | 0.619413 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP2D_Virtual.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP2D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.articulations.views.MFP2D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances,
)
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP2DVirtual(RLTask):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
The idea being to extend it to multitask RL in the future.
"""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._enable_wandb_logs = self._task_cfg["enable_wandb_log"]
self._platform_cfg = self._task_cfg["env"]["platform"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.iteration = 0
self.step = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0, 0, 1.0])
self._marker = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
self.contact_state = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces.
"""
# Defines the observation space
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 5)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
def add_stats(self, names: List[str]) -> None:
"""
Adds training statistics to be recorded during training.
Args:
names (List[str]): list of names of the statistics to be recorded.
"""
for name in names:
torch_zeros = lambda: torch.zeros(
self._num_envs,
dtype=torch.float,
device=self._device,
requires_grad=False,
)
            if name not in self.episode_sums:
self.episode_sums[name] = torch_zeros()
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection.
"""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 5),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 3),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
self.extras_wandb = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
scene (Usd.Stage): the USD scene to be set up.
"""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
RLTask.set_up_scene(self, scene, replicate_physics=False)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path,
name="modular_floating_platform_view",
track_contact_force=True,
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add arrows to scene if task is go to pose
scene, self._marker = self.task.add_visual_marker_to_scene(scene)
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene.
"""
fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def get_target(self) -> None:
"""
Adds the visualization target to the scene.
"""
self.task.generate_target(
self.default_zero_env_path, self._default_marker_position
)
def update_state(self) -> None:
"""
Updates the state of the system.
"""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.base.get_world_poses(
clone=True
)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.base.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Cast quaternion to Yaw
siny_cosp = 2 * (
self.root_quats[:, 0] * self.root_quats[:, 3]
+ self.root_quats[:, 1] * self.root_quats[:, 2]
)
cosy_cosp = 1 - 2 * (
self.root_quats[:, 2] * self.root_quats[:, 2]
+ self.root_quats[:, 3] * self.root_quats[:, 3]
)
orient_z = torch.arctan2(siny_cosp, cosy_cosp)
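        # Standard quaternion-to-yaw extraction (quaternions stored in (w, x, y, z) order):
        # yaw = atan2(2 * (w*z + x*y), 1 - 2 * (y^2 + z^2)).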
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
orient_z = self.DR.noisy_observations.add_noise_on_heading(
orient_z, step=self.step
)
net_contact_forces = self.compute_contact_forces()
# Compute the heading
self.heading[:, 0] = torch.cos(orient_z)
self.heading[:, 1] = torch.sin(orient_z)
# Dump to state
self.current_state = {
"position": root_positions[:, :2],
"orientation": self.heading,
"linear_velocity": root_velocities[:, :2],
"angular_velocity": root_velocities[:, -1],
"net_contact_forces": net_contact_forces,
}
def compute_contact_forces(self) -> torch.Tensor:
"""
Get the contact forces of the platform.
Returns:
net_contact_forces_norm (torch.Tensor): the norm of the net contact forces.
"""
net_contact_forces = self._platforms.base.get_net_contact_forces(clone=False)
return torch.norm(net_contact_forces, dim=-1)
def get_observations(self) -> Dict[str, torch.Tensor]:
"""
Gets the observations of the task to be passed to the policy.
Returns:
observations: a dictionary containing the observations of the task.
"""
# implement logic to retrieve observation states
self.update_state()
# Get the state
self.obs_buf["state"] = self.task.get_state_observations(self.current_state)
# Get thruster transforms
self.obs_buf["transforms"] = self.virtual_platform.current_transforms
# Get the action masks
self.obs_buf["masks"] = self.virtual_platform.action_masks
self.obs_buf["masses"] = self.DR.mass_disturbances.get_masses_and_com()
observations = {self._platforms.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions: torch.Tensor) -> None:
"""
This function implements the logic to be performed before physics steps.
Args:
actions (torch.Tensor): the actions to be applied to the platform.
"""
# If is not playing skip
if not self._env._world.is_playing():
return
# Check which environment need to be reset
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
# Reset the environments (Robots)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Collect actions
actions = actions.clone().to(self._device)
self.actions = actions
# Remap actions to the correct values
if self._discrete_actions == "MultiDiscrete":
# If actions are multidiscrete [0, 1]
thrust_cmds = self.actions.float()
elif self._discrete_actions == "Continuous":
# Transform continuous actions to [0, 1] discrete actions.
thrust_cmds = torch.clamp((self.actions + 1) / 2, min=0.0, max=1.0)
else:
raise NotImplementedError("")
# Applies the thrust multiplier
thrusts = self.virtual_platform.thruster_cfg.thrust_force * thrust_cmds
# Adds random noise on the actions
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts, step=self.step)
# clear actions for reset envs
thrusts[reset_env_ids] = 0
# If split thrust, equally shares the maximum amount of thrust across thrusters.
if self.split_thrust:
factor = torch.max(
torch.sum(self.actions, -1),
torch.ones((self._num_envs), dtype=torch.float32, device=self._device),
)
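            # factor is (at least 1 and) the summed action command; for binary thrusters this is the
            # number of active thrusters, so the total commanded thrust stays bounded by a single
            # thruster's maximum force.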
self.positions, self.forces = self.virtual_platform.project_forces(
thrusts / factor.view(self._num_envs, 1)
)
else:
self.positions, self.forces = self.virtual_platform.project_forces(thrusts)
return
def apply_forces(self) -> None:
"""
Applies all the forces to the platform and its thrusters.
"""
# Applies actions from the thrusters
self._platforms.thrusters.apply_forces_and_torques_at_pos(
forces=self.forces, positions=self.positions, is_global=False
)
# Applies the domain randomization
floor_forces = self.DR.force_disturbances.get_force_disturbance(self.root_pos)
torque_disturbance = self.DR.torque_disturbances.get_torque_disturbance(
self.root_pos
)
self._platforms.base.apply_forces_and_torques_at_pos(
forces=floor_forces,
torques=torque_disturbance,
positions=self.root_pos,
is_global=True,
)
def post_reset(self):
"""
This function implements the logic to be performed after a reset.
"""
# implement any logic required for simulation on-start here
self.root_pos, self.root_rot = self._platforms.base.get_world_poses()
self.root_velocities = self._platforms.base.get_velocities()
self.dof_pos = self._platforms.get_joint_positions()
self.dof_vel = self._platforms.get_joint_velocities()
self._platforms.get_CoM_indices()
self._platforms.get_plane_lock_indices()
# Set initial conditions
self.initial_root_pos, self.initial_root_rot = (
self.root_pos.clone(),
self.root_rot.clone(),
)
self.initial_pin_pos = self._env_pos
self.initial_pin_rot = torch.zeros(
(self._num_envs, 4), dtype=torch.float32, device=self._device
)
self.initial_pin_rot[:, 0] = 1
# Set the initial contact state
self.contact_state = torch.zeros(
(self._num_envs),
dtype=torch.float32,
device=self._device,
)
# control parameters
self.thrusts = torch.zeros(
(self._num_envs, self._max_actions, 3),
dtype=torch.float32,
device=self._device,
)
self.set_targets(self.all_indices)
def set_targets(self, env_ids: torch.Tensor):
"""
Sets the targets for the task.
Args:
env_ids (torch.Tensor): the indices of the environments for which to set the targets.
"""
num_sets = len(env_ids)
env_long = env_ids.long()
# Randomizes the position of the ball on the x y axes
target_positions, target_orientation = self.task.get_goals(
env_long,
step=self.step,
)
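        # Some tasks return a whole set of goals per environment (shape [num_envs, num_goals, 3]);
        # in that case the goal poses are flattened so each marker instance receives its own pose.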
if len(target_positions.shape) == 3:
position = (
target_positions
+ self.initial_pin_pos[env_long]
.view(num_sets, 1, 3)
.expand(*target_positions.shape)
).reshape(-1, 3)
a = (env_long * target_positions.shape[1]).repeat_interleave(
target_positions.shape[1]
)
b = torch.arange(target_positions.shape[1], device=self._device).repeat(
target_positions.shape[0]
)
env_long = a + b
target_orientation = target_orientation.reshape(-1, 4)
else:
position = target_positions + self.initial_pin_pos[env_long]
        # Apply the new goals
if self._marker:
self._marker.set_world_poses(
position,
target_orientation,
indices=env_long,
)
def reset_idx(self, env_ids: torch.Tensor) -> None:
"""
Resets the environments with the given indices.
Args:
env_ids (torch.Tensor): the indices of the environments to be reset.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.task.reset(env_ids)
self.set_targets(env_ids)
self.virtual_platform.randomize_thruster_state(env_ids, num_resets)
self.DR.force_disturbances.generate_forces(env_ids, num_resets, step=self.step)
self.DR.torque_disturbances.generate_torques(
env_ids, num_resets, step=self.step
)
self.DR.mass_disturbances.randomize_masses(env_ids, step=self.step)
CoM_shift = self.DR.mass_disturbances.get_CoM(env_ids)
random_mass = self.DR.mass_disturbances.get_masses(env_ids)
# Randomizes the starting position of the platform
pos, quat, vel = self.task.get_initial_conditions(env_ids, step=self.step)
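        # Yaw extraction assuming a planar initial orientation (rotation about z only, so
        # qx = qy = 0): yaw = atan2(2 * qw * qz, 1 - 2 * qz^2).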
siny_cosp = 2 * quat[:, 0] * quat[:, 3]
cosy_cosp = 1 - 2 * (quat[:, 3] * quat[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# apply resets
dof_pos = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
# Resets the contacts
self.contact_state[env_ids] = 0
# self._platforms.CoM.set_masses(random_mass, indices=env_ids)
dof_pos[:, self._platforms.lock_indices[0]] = pos[:, 0]
dof_pos[:, self._platforms.lock_indices[1]] = pos[:, 1]
dof_pos[:, self._platforms.lock_indices[2]] = h
dof_pos[:, self._platforms.CoM_shifter_indices[0]] = CoM_shift[:, 0]
dof_pos[:, self._platforms.CoM_shifter_indices[1]] = CoM_shift[:, 1]
self._platforms.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
dof_vel[:, self._platforms.lock_indices[0]] = vel[:, 0]
dof_vel[:, self._platforms.lock_indices[1]] = vel[:, 1]
dof_vel[:, self._platforms.lock_indices[2]] = vel[:, 5]
self._platforms.set_joint_velocities(dof_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
# fill `extras`
self.extras["episode"] = {}
self.extras_wandb = {}
for key in self.episode_sums.keys():
value = (
torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
)
if key in self._penalties.get_stats_name():
self.extras_wandb[key] = value
elif key in self.task.log_with_wandb:
self.extras_wandb[key] = value
else:
self.extras["episode"][key] = value
self.episode_sums[key][env_ids] = 0.0
def update_state_statistics(self) -> None:
"""
Updates the statistics of the state of the training.
"""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.abs(
self.current_state["angular_velocity"]
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
def calculate_metrics(self) -> None:
"""
Calculates the metrics of the training.
        That is, the rewards, penalties, and other performance statistics.
"""
position_reward = self.task.compute_reward(self.current_state, self.actions)
self.iteration += 1
self.step += 1 / self._task_cfg["env"]["horizon_length"]
penalties = self._penalties.compute_penalty(
self.current_state, self.actions, self.step
)
self.rew_buf[:] = position_reward - penalties
self.episode_sums = self.task.update_statistics(self.episode_sums)
self.episode_sums = self._penalties.update_statistics(self.episode_sums)
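        # The nested check below fires once per full rollout: iteration / horizon_length
        # % 1 == 0 holds only when a whole number of horizons has elapsed, so wandb
        # logs are emitted once per horizon rather than at every simulation step.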
if self._enable_wandb_logs:
if self.iteration / self._task_cfg["env"]["horizon_length"] % 1 == 0:
self.extras_wandb["wandb_step"] = int(self.step)
for key, value in self._penalties.get_logs().items():
self.extras_wandb[key] = value
for key, value in self.task.get_logs(self.step).items():
self.extras_wandb[key] = value
for key, value in self.DR.get_logs(self.step).items():
self.extras_wandb[key] = value
wandb.log(self.extras_wandb)
self.extras_wandb = {}
self.update_state_statistics()
def is_done(self) -> None:
"""
Checks if the episode is done.
"""
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = self.task.update_kills()
# resets due to episode length
self.reset_buf[:] = torch.where(
self.progress_buf >= self._max_episode_length - 1, ones, die
)
| 23,705 | Python | 36.990385 | 102 | 0.581101 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class HumanoidLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTask.set_up_scene(self, scene)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("humanoid_view"):
scene.remove_object("humanoid_view", registry_only=True)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
def get_humanoid(self):
humanoid = Humanoid(
prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions
)
self._sim_config.apply_articulation_settings(
"Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid")
)
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = torch.tensor(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
)
self.max_motor_effort = torch.max(self.joint_gears)
self.motor_effort_ratio = self.joint_gears / self.max_motor_effort
dof_limits = self._humanoids.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = torch.tensor(
[self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, motor_effort_ratio, joints_at_limit_cost_scale):
# type: (Tensor, Tensor, float) -> Tensor
scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
dof_at_limit_cost = torch.sum(
(torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1
)
return dof_at_limit_cost
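# Illustrative reading of the cost above (not part of the task logic): the joint
# observations in obs_buf[:, 12:33] are normalized to roughly [-1, 1], and only
# joints with |obs| > 0.98 are penalized. The penalty ramps linearly from 0 at
# |obs| = 0.98 to joints_at_limit_cost_scale at |obs| = 1.0, weighted by each
# joint's motor_effort_ratio (e.g. |obs| = 0.99 contributes 0.5 * scale * ratio).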
| 5,980 | Python | 41.119718 | 117 | 0.651003 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/franka_deformable.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim
from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView
from omni.physx.scripts import deformableUtils, physicsUtils
import carb
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema
class FrankaDeformableTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
self.dt = 1/60.
self._num_observations = 39
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["actionScale"]
def set_up_scene(self, scene) -> None:
self.stage = get_current_stage()
self.assets_root_path = get_assets_root_path()
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self.get_franka()
self.get_beaker()
self.get_deformable_tube()
super().set_up_scene(scene=scene, replicate_physics=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self.deformableView)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("deformabletube_view"):
scene.remove_object("deformabletube_view", registry_only=True)
self._frankas = FrankaView(
prim_paths_expr="/World/envs/.*/franka", name="franka_view"
)
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self.deformableView)
def get_franka(self):
franka = Franka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
translation=torch.tensor([0.0, 0.0, 0.0]),
)
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
franka.set_franka_properties(stage=self.stage, prim=franka.prim)
def get_beaker(self):
_usd_path = self.assets_root_path + "/Isaac/Props/Beaker/beaker_500ml.usd"
mesh_path = self.default_zero_env_path + "/beaker"
add_reference_to_stage(_usd_path, mesh_path)
beaker = RigidPrim(
prim_path=mesh_path+"/beaker",
name="beaker",
position=torch.tensor([0.5, 0.2, 0.095]),
)
self._sim_config.apply_articulation_settings("beaker", beaker.prim, self._sim_config.parse_actor_config("beaker"))
def get_deformable_tube(self):
_usd_path = self.assets_root_path + "/Isaac/Props/DeformableTube/tube.usd"
mesh_path = self.default_zero_env_path + "/deformableTube/tube"
add_reference_to_stage(_usd_path, mesh_path)
skin_mesh = get_prim_at_path(mesh_path)
physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh)
physicsUtils.set_or_add_translate_op(skin_mesh, (0.6, 0.0, 0.005))
physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([0, 0, 1]), 90).GetQuat())
def get_observations(self) -> dict:
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
dof_pos_scaled = (
2.0 * (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
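        # dof_pos_scaled maps each joint angle to [-1, 1]: the lower limit maps to
        # -1, the upper limit to +1 and the midpoint to 0 (a sketch, assuming the
        # limits returned by get_dof_limits() are finite for every joint).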
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
tube_positions = self.deformableView.get_simulation_mesh_nodal_positions(clone=False)
tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities(clone=False)
self.tube_front_positions = tube_positions[:, 200, :] - self._env_pos
self.tube_front_velocities = tube_velocities[:, 200, :]
self.tube_back_positions = tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = tube_velocities[:, -1, :]
front_to_gripper = self.tube_front_positions - self.gripper_site_pos
to_front_goal = self.front_goal_pos - self.tube_front_positions
to_back_goal = self.back_goal_pos - self.tube_back_positions
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
front_to_gripper,
to_front_goal,
to_back_goal,
self.tube_front_positions,
self.tube_front_velocities,
self.tube_back_positions,
self.tube_back_velocities,
),
dim=-1,
)
observations = {
self._frankas.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
self.franka_dof_targets[:, -1] = self.franka_dof_targets[:, -2]
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
pos = self.franka_default_dof_pos
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
self.deformableView.set_simulation_mesh_nodal_positions(self.initial_tube_positions[env_ids], indices)
self.deformableView.set_simulation_mesh_nodal_velocities(self.initial_tube_velocities[env_ids], indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.franka_default_dof_pos = torch.tensor(
[0.00, 0.63, 0.00, -2.15, 0.00, 2.76, 0.75, 0.02, 0.02], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
self.front_goal_pos = torch.tensor([0.36, 0.0, 0.23], device=self._device).repeat((self._num_envs, 1))
self.back_goal_pos = torch.tensor([0.5, 0.2, 0.0], device=self._device).repeat((self._num_envs, 1))
self.goal_hand_rot = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self._device).repeat((self.num_envs, 1))
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
self.initial_tube_positions = self.deformableView.get_simulation_mesh_nodal_positions()
self.initial_tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities()
self.tube_front_positions = self.initial_tube_positions[:, 0, :] - self._env_pos
self.tube_front_velocities = self.initial_tube_velocities[:, 0, :]
self.tube_back_positions = self.initial_tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = self.initial_tube_velocities[:, -1, :]
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
goal_distance_error = torch.norm(self.tube_back_positions[:, 0:2] - self.back_goal_pos[:, 0:2], p = 2, dim = -1)
goal_dist_reward = 1.0 / (5*goal_distance_error + .025)
current_z_level = self.tube_back_positions[:, 2:3]
z_lift_level = torch.where(
goal_distance_error < 0.07, torch.zeros_like(current_z_level), torch.ones_like(current_z_level)*0.18
)
front_lift_error = torch.norm(current_z_level - z_lift_level, p = 2, dim = -1)
front_lift_reward = 1.0 / (5*front_lift_error + .025)
rewards = goal_dist_reward + 4*front_lift_reward
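        # Both terms are inverse-distance shaped rewards of the form 1 / (5 * err + 0.025):
        # a perfect placement (err = 0) yields 40, while err ~ 0.195 m already drops the
        # term to ~1.0, so most of the reward gradient lives close to the goal.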
self.rew_buf[:] = rewards
def is_done(self) -> None:
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] < 0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] < -1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
| 13,316 | Python | 42.805921 | 136 | 0.641108 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP2D_Virtual_Dock.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP2D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.articulations.views.MFP2D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances,
)
from omniisaacgymenvs.robots.sensors.exteroceptive.camera import (
camera_factory,
)
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import omni
import time
import math
import torch
import os
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP2DVirtual_Dock(RLTask):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
    The idea is to extend it to multitask RL in the future."""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._platform_cfg = self._task_cfg["env"]["platform"]
self._dock_cfg = self._task_cfg["env"]["dock"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.step = 0
self.iteration = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0, 0, 0.45])
self._dock_view = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces."""
# Defines the observation space
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 5)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
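            # With this space each thruster gets its own Discrete(2) (off/on), so a
            # sampled action is a binary vector of length _max_actions, e.g.
            # [1, 0, 0, 1, ...]; these values are later consumed as thrust commands
            # in pre_physics_step.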
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
def add_stats(self, names: List[str]) -> None:
"""
Adds training statistics to be recorded during training.
Args:
names (List[str]): list of names of the statistics to be recorded."""
for name in names:
torch_zeros = lambda: torch.zeros(
self._num_envs,
dtype=torch.float,
device=self._device,
requires_grad=False,
)
if not name in self.episode_sums.keys():
self.episode_sums[name] = torch_zeros()
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 5),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 3),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
self.extras_wandb = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
scene (Usd.Stage): the USD scene to be set up."""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
if self._task_cfg["sim"].get("add_lab", False):
self.get_zero_g_lab()
RLTask.set_up_scene(self, scene, replicate_physics=False)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path,
name="modular_floating_platform_view",
track_contact_force=True,
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add rigidprim view of docking station to the scene
scene, self._dock_view = self.task.add_dock_to_scene(scene)
# Collects replicator camera
if self._task_cfg["env"].get("sensors", None) is not None:
self.collect_camera()
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene."""
fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def get_target(self) -> None:
"""
Adds the visualization target to the scene."""
self.task.generate_target(
self.default_zero_env_path,
self._default_marker_position,
self._dock_cfg,
)
def get_zero_g_lab(self) -> None:
"""
Adds the Zero-G-lab to the scene."""
usd_path = os.path.join(os.getcwd(), self._task_cfg["lab_usd_path"])
prim = add_reference_to_stage(usd_path, self._task_cfg["lab_path"])
applyCollider(prim, True)
def collect_camera(self) -> None:
"""
Collect active cameras to generate synthetic images in batch."""
active_sensors = []
active_camera_source_path = self._task_cfg["env"]["sensors"]["RLCamera"]["prim_path"]
for i in range(self._num_envs):
# swap env_0 to env_i
sensor_path = active_camera_source_path.split("/")
sensor_path[3] = f"env_{i}"
self._task_cfg["env"]["sensors"]["RLCamera"]["prim_path"] = (
"/".join(sensor_path)
)
rl_sensor = camera_factory.get("RLCamera")(
self._task_cfg["env"]["sensors"]["RLCamera"],
self.rep,
)
active_sensors.append(rl_sensor)
self.active_sensors = active_sensors
def update_state(self) -> None:
"""
Updates the state of the system."""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.base.get_world_poses(clone=True)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.base.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Cast quaternion to Yaw
siny_cosp = 2 * (
self.root_quats[:, 0] * self.root_quats[:, 3]
+ self.root_quats[:, 1] * self.root_quats[:, 2]
)
cosy_cosp = 1 - 2 * (
self.root_quats[:, 2] * self.root_quats[:, 2]
+ self.root_quats[:, 3] * self.root_quats[:, 3]
)
orient_z = torch.arctan2(siny_cosp, cosy_cosp)
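        # This is the standard planar yaw formula for a (w, x, y, z) quaternion:
        # yaw = atan2(2 * (w * z + x * y), 1 - 2 * (y**2 + z**2)). Unlike the reduced
        # form used in the reset code below, it stays valid when x and y are not
        # exactly zero.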
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
orient_z = self.DR.noisy_observations.add_noise_on_heading(
orient_z, step=self.step
)
# Compute the heading
self.heading[:, 0] = torch.cos(orient_z)
self.heading[:, 1] = torch.sin(orient_z)
# Update goal pose
self.update_goal_state()
# Update FP contact state
net_contact_forces = self.compute_contact_forces()
# Dump to state
self.current_state = {
"position": root_positions[:, :2],
"orientation": self.heading,
"linear_velocity": root_velocities[:, :2],
"angular_velocity": root_velocities[:, -1],
"net_contact_forces": net_contact_forces,
}
def update_goal_state(self) -> None:
"""
Updates the goal state of the task."""
target_positions, target_orientations = self._dock_view.base.get_world_poses(clone=True)
self.task.set_goals(self.all_indices.long(), target_positions-self._env_pos, target_orientations, self.step)
def compute_contact_forces(self) -> torch.Tensor:
"""
Get the contact forces of the platform.
Returns:
net_contact_forces_norm (torch.Tensor): the norm of the net contact forces.
"""
net_contact_forces = self._platforms.base.get_net_contact_forces(clone=False)
return torch.norm(net_contact_forces, dim=-1)
def get_observations(self) -> Dict[str, torch.Tensor]:
"""
Gets the observations of the task to be passed to the policy.
Returns:
observations: a dictionary containing the observations of the task."""
# implement logic to retrieve observation states
self.update_state()
# Get the state
self.obs_buf["state"] = self.task.get_state_observations(self.current_state)
# Get thruster transforms
self.obs_buf["transforms"] = self.virtual_platform.current_transforms
# Get the action masks
self.obs_buf["masks"] = self.virtual_platform.action_masks
self.obs_buf["masses"] = self.DR.mass_disturbances.get_masses_and_com()
observations = {self._platforms.name: {"obs_buf": self.obs_buf}}
return observations
def get_rgbd_data(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
return batched rgbd data.
Returns:
rgb (torch.Tensor): batched rgb data
depth (torch.Tensor): batched depth data
"""
rs_obs = [sensor.get_observation() for sensor in self.active_sensors]
rgb = torch.stack([ob["rgb"] for ob in rs_obs]).to(self._device)
depth = torch.stack([ob["depth"] for ob in rs_obs]).to(self._device)
rgb = self.DR.noisy_rgb_images.add_noise_on_image(rgb, step=self.step)
depth = self.DR.noisy_depth_images.add_noise_on_image(depth, step=self.step)
return rgb, depth
def pre_physics_step(self, actions: torch.Tensor) -> None:
"""
This function implements the logic to be performed before physics steps.
Args:
actions (torch.Tensor): the actions to be applied to the platform."""
# If is not playing skip
if not self._env._world.is_playing():
return
# Check which environment need to be reset
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
# Reset the environments (Robots)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Collect actions
actions = actions.clone().to(self._device)
self.actions = actions
# Remap actions to the correct values
if self._discrete_actions == "MultiDiscrete":
# If actions are multidiscrete [0, 1]
thrust_cmds = self.actions.float()
elif self._discrete_actions == "Continuous":
# Transform continuous actions to [0, 1] discrete actions.
thrust_cmds = torch.clamp((self.actions + 1) / 2, min=0.0, max=1.0)
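            # Sketch of the remapping: an action of -1 maps to a 0.0 thrust command,
            # 0 maps to 0.5 and +1 maps to 1.0, so the continuous policy output is
            # squashed into the same [0, 1] range as the MultiDiscrete commands.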
else:
            raise NotImplementedError("The requested action mode is not supported.")
# Applies the thrust multiplier
thrusts = self.virtual_platform.thruster_cfg.thrust_force * thrust_cmds
# Adds random noise on the actions
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts, step=self.step)
# clear actions for reset envs
thrusts[reset_env_ids] = 0
# If split thrust, equally shares the maximum amount of thrust across thrusters.
if self.split_thrust:
factor = torch.max(
torch.sum(self.actions, -1),
torch.ones((self._num_envs), dtype=torch.float32, device=self._device),
)
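            # With binary commands, factor is the number of active thrusters (at
            # least 1), so dividing the per-thruster forces by it keeps the total
            # commanded thrust bounded by roughly one thruster's maximum force.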
self.positions, self.forces = self.virtual_platform.project_forces(
thrusts / factor.view(self._num_envs, 1)
)
else:
self.positions, self.forces = self.virtual_platform.project_forces(thrusts)
return
def apply_forces(self) -> None:
"""
Applies all the forces to the platform and its thrusters."""
# Applies actions from the thrusters
self._platforms.thrusters.apply_forces_and_torques_at_pos(
forces=self.forces, positions=self.positions, is_global=False
)
# Applies the domain randomization
floor_forces = self.DR.force_disturbances.get_force_disturbance(self.root_pos)
torque_disturbance = self.DR.torque_disturbances.get_torque_disturbance(
self.root_pos
)
self._platforms.base.apply_forces_and_torques_at_pos(
forces=floor_forces,
torques=torque_disturbance,
positions=self.root_pos,
is_global=True,
)
def post_reset(self):
"""
This function implements the logic to be performed after a reset."""
# implement any logic required for simulation on-start here
self.root_pos, self.root_rot = self._platforms.get_world_poses()
self.root_velocities = self._platforms.get_velocities()
self._platforms.get_CoM_indices()
self._platforms.get_plane_lock_indices()
self._dock_view.get_plane_lock_indices()
self.initial_root_pos, self.initial_root_rot = (
self.root_pos.clone(),
self.root_rot.clone(),
)
self.initial_pin_pos = self._env_pos
self.initial_pin_rot = torch.zeros(
(self._num_envs, 4), dtype=torch.float32, device=self._device
)
self.initial_pin_rot[:, 0] = 1
# control parameters
self.thrusts = torch.zeros(
(self._num_envs, self._max_actions, 3),
dtype=torch.float32,
device=self._device,
)
self.set_targets(self.all_indices)
def set_targets(self, env_ids: torch.Tensor):
"""
Sets the targets for the task.
Args:
env_ids (torch.Tensor): the indices of the environments for which to set the targets.
"""
num_resets = len(env_ids)
env_long = env_ids.long()
# Randomizes the position and orientation of the dock on the x y axis
target_positions, target_orientation = self.task.get_goals(
env_long,
self.step,
)
siny_cosp = 2 * target_orientation[:, 0] * target_orientation[:, 3]
cosy_cosp = 1 - 2 * (target_orientation[:, 3] * target_orientation[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# apply resets
dof_pos = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_pos[:, self._dock_view.lock_indices[0]] = target_positions[:, 0]
dof_pos[:, self._dock_view.lock_indices[1]] = target_positions[:, 1]
dof_pos[:, self._dock_view.lock_indices[2]] = h
self._dock_view.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_vel[:, self._dock_view.lock_indices[0]] = 0.0
dof_vel[:, self._dock_view.lock_indices[1]] = 0.0
dof_vel[:, self._dock_view.lock_indices[2]] = 0.0
self._dock_view.set_joint_velocities(dof_vel, indices=env_ids)
def reset_idx(self, env_ids: torch.Tensor) -> None:
"""
Resets the environments with the given indices.
Args:
env_ids (torch.Tensor): the indices of the environments to be reset."""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.task.reset(env_ids)
self.set_targets(env_ids)
self.virtual_platform.randomize_thruster_state(env_ids, num_resets)
self.DR.force_disturbances.generate_forces(env_ids, num_resets, step=self.step)
self.DR.torque_disturbances.generate_torques(
env_ids, num_resets, step=self.step
)
self.DR.mass_disturbances.randomize_masses(env_ids, step=self.step)
CoM_shift = self.DR.mass_disturbances.get_CoM(env_ids)
random_mass = self.DR.mass_disturbances.get_masses(env_ids)
# Randomizes the starting position of the platform
pos, quat, vel = self.task.get_initial_conditions(env_ids, step=self.step)
siny_cosp = 2 * quat[:, 0] * quat[:, 3]
cosy_cosp = 1 - 2 * (quat[:, 3] * quat[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# Randomizes mass of the dock
if hasattr(self.task._task_parameters, "spawn_dock_mass_curriculum"):
mass = self.task.get_dock_masses(env_ids, step=self.step)
self._dock_view.base.set_masses(mass, indices=env_ids)
# apply joint resets
dof_pos = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
# self._platforms.CoM.set_masses(random_mass, indices=env_ids)
dof_pos[:, self._platforms.lock_indices[0]] = pos[:, 0]
dof_pos[:, self._platforms.lock_indices[1]] = pos[:, 1]
dof_pos[:, self._platforms.lock_indices[2]] = h
dof_pos[:, self._platforms.CoM_shifter_indices[0]] = CoM_shift[:, 0]
dof_pos[:, self._platforms.CoM_shifter_indices[1]] = CoM_shift[:, 1]
self._platforms.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
dof_vel[:, self._platforms.lock_indices[0]] = vel[:, 0]
dof_vel[:, self._platforms.lock_indices[1]] = vel[:, 1]
dof_vel[:, self._platforms.lock_indices[2]] = vel[:, 5]
self._platforms.set_joint_velocities(dof_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
# fill `extras`
self.extras["episode"] = {}
self.extras_wandb = {}
for key in self.episode_sums.keys():
value = (
torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
)
if key in self._penalties.get_stats_name():
self.extras_wandb[key] = value
elif key in self.task.log_with_wandb:
self.extras_wandb[key] = value
else:
self.extras["episode"][key] = value
self.episode_sums[key][env_ids] = 0.0
def update_state_statistics(self) -> None:
"""
Updates the statistics of the state of the training."""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.abs(
self.current_state["angular_velocity"]
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
def calculate_metrics(self) -> None:
"""
Calculates the metrics of the training.
        That is the rewards, penalties, and other performance statistics."""
reward = self.task.compute_reward(self.current_state, self.actions)
self.iteration += 1
self.step += 1 / self._task_cfg["env"]["horizon_length"]
penalties = self._penalties.compute_penalty(
self.current_state, self.actions, self.step
)
self.rew_buf[:] = reward - penalties
self.episode_sums = self.task.update_statistics(self.episode_sums)
self.episode_sums = self._penalties.update_statistics(self.episode_sums)
if self.iteration / self._task_cfg["env"]["horizon_length"] % 1 == 0:
self.extras_wandb["wandb_step"] = int(self.step)
for key, value in self._penalties.get_logs().items():
self.extras_wandb[key] = value
for key, value in self.task.get_logs(self.step).items():
self.extras_wandb[key] = value
for key, value in self.DR.get_logs(self.step).items():
self.extras_wandb[key] = value
if self._cfg["wandb_activate"]:
wandb.log(self.extras_wandb)
self.extras_wandb = {}
self.update_state_statistics()
def is_done(self) -> None:
"""
Checks if the episode is done."""
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = self.task.update_kills()
# resets due to episode length
self.reset_buf[:] = torch.where(
self.progress_buf >= self._max_episode_length - 1, ones, die
        )
 | 26,095 | Python | 38.479576 | 116 | 0.58904 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class AntLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = torch.tensor([0, 0, 0.5])
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTask.set_up_scene(self, scene)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("ant_view"):
scene.remove_object("ant_view", registry_only=True)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings(
"Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant")
)
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = torch.tensor([15, 15, 15, 15, 15, 15, 15, 15], dtype=torch.float32, device=self._device)
dof_limits = self._ants.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = torch.tensor(
[self._ants._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self._ants.num_dof)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, num_dof):
# type: (Tensor, int) -> Tensor
return torch.sum(obs_buf[:, 12 : 12 + num_dof] > 0.99, dim=-1)
| 4,691 | Python | 41.654545 | 115 | 0.69708 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("cartpole_view"):
scene.remove_object("cartpole_view", registry_only=True)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
def get_cartpole(self):
cartpole = Cartpole(
prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole")
)
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
self.obs_buf[:, 0] = self.cart_pos
self.obs_buf[:, 1] = self.cart_vel
self.obs_buf[:, 2] = self.pole_pos
self.obs_buf[:, 3] = self.pole_vel
observations = {self._cartpoles.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.to(self._device)
forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device)
forces[:, self._cart_dof_idx] = self._max_push_effort * actions[:, 0]
indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device)
self._cartpoles.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions
dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
# randomize DOF velocities
dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
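        # The 1.0 - 2.0 * rand terms draw uniformly from (-1, 1]: cart positions are
        # sampled within +/- 1 m, pole angles within +/- 0.125 * pi rad (~22.5 deg),
        # and velocities within +/- 0.5 m/s and +/- 0.25 * pi rad/s respectively.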
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._cartpoles.set_joint_positions(dof_pos, indices=indices)
self._cartpoles.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
# randomize all envs
indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
reward = 1.0 - self.pole_pos * self.pole_pos - 0.01 * torch.abs(self.cart_vel) - 0.005 * torch.abs(self.pole_vel)
reward = torch.where(torch.abs(self.cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
reward = torch.where(torch.abs(self.pole_pos) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
self.rew_buf[:] = reward
def is_done(self) -> None:
resets = torch.where(torch.abs(self.cart_pos) > self._reset_dist, 1, 0)
resets = torch.where(torch.abs(self.pole_pos) > math.pi / 2, 1, resets)
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets)
self.reset_buf[:] = resets
| 7,250 | Python | 42.945454 | 121 | 0.65931 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP3D_Virtual.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP3D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.articulations.views.MFP3D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_disturbances import (
Disturbances,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
quat_to_mat,
)
from omniisaacgymenvs.tasks.MFP2D_Virtual import MFP2DVirtual
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import torch
import omni
import time
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP3DVirtual(MFP2DVirtual):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
    The idea is to extend it to multitask RL in the future."""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._platform_cfg = self._task_cfg["env"]["platform"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.iteration = 0
self.step = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0.0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0.0, 0.0, 0.0])
self._marker = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces.
"""
# Defines the observation space
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 10)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(4,)),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection.
"""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 10),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 4),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
scene (Usd.Stage): The USD stage to setup.
"""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
RLTask.set_up_scene(self, scene)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path, name="modular_floating_platform_view"
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add arrows to scene if task is go to pose
scene, self._marker = self.task.add_visual_marker_to_scene(scene)
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene.
"""
self._fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(self._fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def update_state(self) -> None:
"""
Updates the state of the system.
"""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.get_world_poses(clone=True)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
        # Compute the orientation as a rotation matrix (used as the heading representation)
heading = quat_to_mat(self.root_quats)
# Dump to state
self.current_state = {
"position": root_positions,
"orientation": heading,
"linear_velocity": root_velocities[:, :3],
"angular_velocity": root_velocities[:, 3:],
}
def set_targets(self, env_ids: torch.Tensor) -> None:
"""
Sets the targets for the task.
Args:
env_ids: The indices of the environments to set the targets for."""
env_long = env_ids.long()
# Randomizes the position of the ball on the x y z axes
target_positions, target_orientation = self.task.get_goals(
env_long,
self.initial_pin_pos.clone(),
self.initial_pin_rot.clone(),
step=self.step,
)
# Apply the new goals
if self._marker:
self._marker.set_world_poses(
target_positions[env_long],
target_orientation[env_long],
indices=env_long,
)
def update_state_statistics(self) -> None:
"""
Updates the statistics of the state of the training."""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.norm(
self.current_state["angular_velocity"], dim=-1
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
| 11,320 | Python | 35.519355 | 102 | 0.593286 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/quadcopter.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.quadcopter import Quadcopter
from omniisaacgymenvs.robots.articulations.views.quadcopter_view import QuadcopterView
class QuadcopterTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 21
self._num_actions = 12
self._copter_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
max_thrust = 2.0
self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_copter()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("quadcopter_view"):
scene.remove_object("quadcopter_view", registry_only=True)
if scene.object_exists("rotors_view"):
scene.remove_object("rotors_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
def get_copter(self):
copter = Quadcopter(
prim_path=self.default_zero_env_path + "/Quadcopter", name="quadcopter", translation=self._copter_position
)
self._sim_config.apply_articulation_settings(
"copter", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("copter")
)
def get_target(self):
radius = 0.05
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
self.obs_buf[..., 13:21] = self.dof_pos
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
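        # Resets terminated environments, integrates action deltas into DOF position targets and rotor
        # thrusts, then applies both to the articulation before the physics step.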
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.clone().to(self._device)
dof_action_speed_scale = 8 * math.pi
self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits
)
thrust_action_speed_scale = 100
self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]
self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)
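        # Apply each clamped thrust along the local z-axis of its rotor body.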
self.forces[:, 0, 2] = self.thrusts[:, 0]
self.forces[:, 1, 2] = self.thrusts[:, 1]
self.forces[:, 2, 2] = self.thrusts[:, 2]
self.forces[:, 3, 2] = self.thrusts[:, 3]
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0.0
self.forces[reset_env_ids] = 0.0
self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids]
# apply actions
self._copters.set_joint_position_targets(self.dof_position_targets)
self._copters.rotors.apply_forces(self.forces, is_global=False)
def post_reset(self):
# control tensors
self.dof_position_targets = torch.zeros(
(self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False)
self.forces = torch.zeros(
(self._num_envs, self._copters.rotors.count // self._num_envs, 3),
dtype=torch.float32,
device=self._device,
requires_grad=False,
)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device)
self.target_positions[:, 2] = 1.0
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
self.dof_vel = self._copters.get_joint_velocities(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
dof_limits = self._copters.get_dof_limits()
self.dof_lower_limits = dof_limits[0][:, 0].to(device=self._device)
self.dof_upper_limits = dof_limits[0][:, 1].to(device=self._device)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
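        # Reward: proximity to the target, modulated by uprightness and low yaw spin.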
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + 3 * target_dist * target_dist) # 2
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage)
rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1 / 400))
rew = torch.clip(rew, 0.0, None)
self.rew_buf[:] = rew
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 3.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.3, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 11,492 | Python | 42.866412 | 120 | 0.640707 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/ingenuity.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ingenuity import Ingenuity
from omniisaacgymenvs.robots.articulations.views.ingenuity_view import IngenuityView
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
import numpy as np
import torch
import math
class IngenuityTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
self.thrust_limit = 2000
self.thrust_lateral_component = 0.2
self._num_observations = 13
self._num_actions = 6
self._ingenuity_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_ingenuity()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("ingenuity_view"):
scene.remove_object("ingenuity_view", registry_only=True)
for i in range(2):
if scene.object_exists(f"physics_rotor_{i}_view"):
scene.remove_object(f"physics_rotor_{i}_view", registry_only=True)
if scene.object_exists(f"visual_rotor_{i}_view"):
scene.remove_object(f"visual_rotor_{i}_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
def get_ingenuity(self):
copter = Ingenuity(prim_path=self.default_zero_env_path + "/Ingenuity", name="ingenuity", translation=self._ingenuity_position)
self._sim_config.apply_articulation_settings("ingenuity", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("ingenuity"))
def get_target(self):
radius = 0.1
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball"))
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
observations = {
self._copters.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
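        # Resample a new target position for every environment whose progress counter hits a multiple of 500 steps.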
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
lateral_fraction_prop_0 = torch.clamp(
actions[:, 0:2] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
lateral_fraction_prop_1 = torch.clamp(
actions[:, 3:5] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
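        # Compose per-rotor force vectors: vertical thrust scaled by dt, plus a lateral component proportional to it.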
self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0
self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0
self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1
self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
# spin spinning rotors
self.dof_vel[:, self.spinning_indices[0]] = 50
self.dof_vel[:, self.spinning_indices[1]] = -50
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(2):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
self.spinning_indices = torch.tensor([1, 3], device=self._device)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control tensors
self.thrusts = torch.zeros((self._num_envs, 2, 3), dtype=torch.float32, device=self._device)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
# set target position randomly with x, y in (-1, 1) and z in (1, 2)
self.target_positions[envs_long, 0:2] = torch.rand((num_sets, 2), device=self._device) * 2 - 1
self.target_positions[envs_long, 2] = torch.rand(num_sets, device=self._device) + 1
# shift the target up so it visually aligns better
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.4
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, 1] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_pos[env_ids, 3] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + 2.5 * target_dist * target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 30 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 10 * spinnage * spinnage)
# combined reward
# uprightness and spinning only matter when close to the target
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spinnage_reward)
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 20.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 12,385 | Python | 42.921986 | 151 | 0.635204 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/anymal.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
class AnymalTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 48
self._num_actions = 12
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["cosmetic"] = self._task_cfg["env"]["learn"]["cosmeticRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = state
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.dt = 1 / 60
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._anymal_translation = torch.tensor([0.0, 0.0, 0.62])
self._env_spacing = self._task_cfg["env"]["envSpacing"]
def set_up_scene(self, scene) -> None:
self.get_anymal()
super().set_up_scene(scene)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("anymalview"):
scene.remove_object("anymalview", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_anymal(self):
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal", name="Anymal", translation=self._anymal_translation
)
self._sim_config.apply_articulation_settings(
"Anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("Anymal")
)
# Configure joint properties
joint_paths = []
for quadrant in ["LF", "LH", "RF", "RH"]:
for component, abbrev in [("HIP", "H"), ("THIGH", "K")]:
joint_paths.append(f"{quadrant}_{component}/{quadrant}_{abbrev}FE")
joint_paths.append(f"base/{quadrant}_HAA")
for joint_path in joint_paths:
set_drive(f"{anymal.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000)
def get_observations(self) -> dict:
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity) * self.lin_vel_scale
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity) * self.ang_vel_scale
projected_gravity = quat_rotate(torso_rotation, self.gravity_vec)
dof_pos_scaled = (dof_pos - self.default_dof_pos) * self.dof_pos_scale
commands_scaled = self.commands * torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
requires_grad=False,
device=self.commands.device,
)
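        # 48-dim observation: base linear/angular velocity, projected gravity, scaled commands,
        # scaled joint positions and velocities, and the previous actions.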
obs = torch.cat(
(
base_lin_vel,
base_ang_vel,
projected_gravity,
commands_scaled,
dof_pos_scaled,
dof_vel * self.dof_vel_scale,
self.actions,
),
dim=-1,
)
self.obs_buf[:] = obs
observations = {self._anymals.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self.world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
indices = torch.arange(self._anymals.count, dtype=torch.int32, device=self._device)
self.actions[:] = actions.clone().to(self._device)
current_targets = self.current_targets + self.action_scale * self.actions * self.dt
self.current_targets[:] = tensor_clamp(
current_targets, self.anymal_dof_lower_limits, self.anymal_dof_upper_limits
)
self._anymals.set_joint_position_targets(self.current_targets, indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF velocities
velocities = torch_rand_float(-0.1, 0.1, (num_resets, self._anymals.num_dof), device=self._device)
dof_pos = self.default_dof_pos[env_ids]
dof_vel = velocities
self.current_targets[env_ids] = dof_pos[:]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._anymals.set_joint_positions(dof_pos, indices)
self._anymals.set_joint_velocities(dof_vel, indices)
self._anymals.set_world_poses(
self.initial_root_pos[env_ids].clone(), self.initial_root_rot[env_ids].clone(), indices
)
self._anymals.set_velocities(root_vel, indices)
self.commands_x[env_ids] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_y[env_ids] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_yaw[env_ids] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (num_resets, 1), device=self._device
).squeeze()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
def post_reset(self):
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
dof_names = self._anymals.dof_names
for i in range(self.num_actions):
name = dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
self.initial_root_pos, self.initial_root_rot = self._anymals.get_world_poses()
self.current_targets = self.default_dof_pos.clone()
dof_limits = self._anymals.get_dof_limits()
self.anymal_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.anymal_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.commands = torch.zeros(self._num_envs, 3, dtype=torch.float, device=self._device, requires_grad=False)
self.commands_y = self.commands.view(self._num_envs, 3)[..., 1]
self.commands_x = self.commands.view(self._num_envs, 3)[..., 0]
self.commands_yaw = self.commands.view(self._num_envs, 3)[..., 2]
# initialize some data used later on
self.extras = {}
self.gravity_vec = torch.tensor([0.0, 0.0, -1.0], device=self._device).repeat((self._num_envs, 1))
self.actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.last_dof_vel = torch.zeros(
(self._num_envs, 12), dtype=torch.float, device=self._device, requires_grad=False
)
self.last_actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.time_out_buf = torch.zeros_like(self.reset_buf)
# randomize all envs
indices = torch.arange(self._anymals.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
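        # Reward: exponential tracking of the commanded planar and yaw velocities, plus scaled terms for
        # vertical velocity, joint acceleration, action rate and a cosmetic joint-position term
        # (negative scales in the task config turn these into penalties); falling over yields -1.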
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity)
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity)
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
rew_lin_vel_z = torch.square(base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - dof_vel), dim=1) * self.rew_scales["joint_acc"]
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
rew_cosmetic = (
torch.sum(torch.abs(dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["cosmetic"]
)
total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_joint_acc + rew_action_rate + rew_cosmetic + rew_lin_vel_z
total_reward = torch.clip(total_reward, 0.0, None)
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = dof_vel[:]
self.fallen_over = self._anymals.is_base_below_threshold(threshold=0.51, ground_heights=0.0)
total_reward[torch.nonzero(self.fallen_over)] = -1
self.rew_buf[:] = total_reward.detach()
def is_done(self) -> None:
# reset agents
time_out = self.progress_buf >= self.max_episode_length - 1
self.reset_buf[:] = time_out | self.fallen_over
| 14,344 | Python | 44.539682 | 118 | 0.630996 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_xy_seq.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughXYSequenceReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughXYSequenceParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin import VisualPin
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import colorsys
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughXYSequenceTask(Core):
"""
    Implements the GoThroughXYSequence task. The robot has to reach a sequence of points in the 2D plane
    at a given velocity, and it must do so while looking at the current target point. Unlike the GoThroughXY
    task, which targets a single point, the robot has to traverse the whole sequence in order.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoThroughXYSequence task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughXYSequenceTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughXYSequenceParameters(**task_param)
self._reward_parameters = GoThroughXYSequenceReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._all = torch.arange(self._num_envs, device=self._device)
self._trajectory_completed = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, self._task_parameters.num_points, 2),
device=self._device,
dtype=torch.float32,
)
self._target_index = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.long
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._target_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._delta_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 5
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = (
self._target_positions[self._all, self._target_index]
- current_state["position"]
).squeeze()
# linear velocity error (normed velocity)
self.linear_velocity_err = self._target_velocities - torch.norm(
current_state["linear_velocity"], dim=-1
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
# Compute target heading as the angle required to be looking at the target
self._target_headings = torch.arctan2(
self._position_error[:, 1], self._position_error[:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = self.linear_velocity_err
# position of the other points in the sequence
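        # The remaining waypoints are expressed relative to the robot; entries past the end of the sequence are zeroed out.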
for i in range(self._task_parameters.num_points - 1):
overflowing = self._target_index + i + 1 >= self._task_parameters.num_points
indices = self._target_index + (i + 1) * (1 - overflowing.int())
self._task_data[:, 5 + 2 * i : 5 + 2 * i + 2] = (
self._target_positions[self._all, indices] - current_state["position"]
) * (1 - overflowing.int()).view(-1, 1)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.abs(self.linear_velocity_err)
position_progress = (
self._previous_position_dist - self.position_dist
) / torch.abs(self._target_velocities)
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# Heading
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
goal_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
reached_ids = goal_reached.nonzero(as_tuple=False).squeeze(-1)
# if the goal is reached, the target index is updated
self._target_index = self._target_index + goal_reached
self._trajectory_completed = (
self._target_index >= self._task_parameters.num_points
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
self.linear_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
self.linear_velocity_dist,
)
self._previous_position_dist = self.position_dist.clone()
# If goal is reached make next progress null
self._previous_position_dist[reached_ids] = 0
return (
self.progress_reward
+ self.heading_reward
+ self.linear_velocity_reward
- self.boundary_penalty
- self._reward_parameters.time_penalty
+ self._trajectory_completed * self._reward_parameters.terminal_reward
)
def update_kills(self) -> torch.Tensor:
"""
        Determines whether the platforms should be killed (reset) or not.
        Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._trajectory_completed, dtype=torch.long)
ones = torch.ones_like(self._trajectory_completed, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._trajectory_completed > 0, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._trajectory_completed[env_ids] = 0
self._target_index[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
for i in range(self._task_parameters.num_points):
if i == 0:
self._target_positions[env_ids, i] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
else:
r = self._spawn_position_sampler.sample(
num_goals, step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
point = torch.zeros((num_goals, 2), device=self._device)
point[:, 0] = r * torch.cos(theta)
point[:, 1] = r * torch.sin(theta)
self._target_positions[env_ids, i] = (
self._target_positions[env_ids, i - 1] + point
)
# Randomize heading
self._delta_headings[env_ids] = self._spawn_heading_sampler.sample(
num_goals, step, device=self._device
)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
self._target_velocities[env_ids] = r
# Creates tensors to save position and orientation
p = torch.zeros(
(num_goals, self._task_parameters.num_points, 3), device=self._device
)
q = torch.zeros(
(num_goals, self._task_parameters.num_points, 4),
device=self._device,
dtype=torch.float32,
)
q[:, :, 0] = 1
p[:, :, :2] = self._target_positions[env_ids]
p[:, :, 2] = 2
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 0, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
target_position_local = (
self._target_positions[env_ids, 0, :2] - initial_position[:, :2]
)
target_heading = torch.arctan2(
target_position_local[:, 1], target_position_local[:, 0]
)
theta = target_heading + self._delta_headings[env_ids]
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        A colored pin is generated for each point of the sequence to represent the positions to be reached by the agent.
        Args:
            path (str): The path where the pins are to be generated.
            position (torch.Tensor): The position of the pins.
"""
for i in range(self._task_parameters.num_points):
color = torch.tensor(
colorsys.hsv_to_rgb(i / self._task_parameters.num_points, 1, 1)
)
ball_radius = 0.2
poll_radius = 0.025
poll_length = 2
VisualPin(
prim_path=path + "/pin_" + str(i),
translation=position,
name="target_" + str(i),
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/pin_[0-5]")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 23,063 | Python | 35.32126 | 106 | 0.580454 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_rewards.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import torch
from dataclasses import dataclass
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYZReward:
""" "
Reward function and parameters for the GoToXY task."""
name: str = "GoToXYZ"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
position_error: torch.Tensor,
) -> torch.Tensor:
"""
        Defines the function used to compute the reward for the GoToXYZ task."""
if self.reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error)
elif self.reward_mode.lower() == "square":
position_reward = 1.0 / (1.0 + position_error * position_error)
elif self.reward_mode.lower() == "exponential":
position_reward = torch.exp(-position_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return position_reward
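    # Worked example (hypothetical values): with reward_mode="exponential" and
    # exponential_reward_coeff=0.25, a position error of 0.5 m gives
    # exp(-0.5 / 0.25) ≈ 0.14, so the reward decays quickly with distance.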
@dataclass
class GoToPoseReward:
"""
Reward function and parameters for the GoToPose task."""
name: str = "GoToPose"
position_reward_mode: str = "linear"
heading_reward_mode: str = "linear"
position_exponential_reward_coeff: float = 0.25
heading_exponential_reward_coeff: float = 0.25
position_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.position_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_error: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
Defines the function used to compute the reward for the GoToPose task."""
if self.position_reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error) * self.position_scale
elif self.position_reward_mode.lower() == "square":
            position_reward = (
                1.0 / (1.0 + position_error * position_error) * self.position_scale
            )
elif self.position_reward_mode.lower() == "exponential":
position_reward = (
torch.exp(-position_error / self.position_exponential_reward_coeff)
* self.position_scale
)
else:
raise ValueError("Unknown reward type.")
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward
@dataclass
class TrackXYZVelocityReward:
"""
    Reward function and parameters for the TrackXYZVelocity task."""
name: str = "TrackXYZVelocity"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
velocity_error: torch.Tensor,
    ) -> torch.Tensor:
"""
        Defines the function used to compute the reward for the TrackXYZVelocity task."""
if self.reward_mode.lower() == "linear":
velocity_reward = 1.0 / (1.0 + velocity_error)
elif self.reward_mode.lower() == "square":
velocity_reward = 1.0 / (1.0 + velocity_error * velocity_error)
elif self.reward_mode.lower() == "exponential":
velocity_reward = torch.exp(-velocity_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return velocity_reward
@dataclass
class Track6DoFVelocityReward:
"""
    Reward function and parameters for the Track6DoFVelocity task."""
name: str = "Track6DVelocity"
linear_reward_mode: str = "linear"
angular_reward_mode: str = "linear"
linear_exponential_reward_coeff: float = 0.25
angular_exponential_reward_coeff: float = 0.25
linear_scale: float = 1.0
angular_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.linear_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.angular_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
linear_velocity_error: torch.Tensor,
angular_velocity_error: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        Defines the function used to compute the reward for the Track6DoFVelocity task.
"""
if self.linear_reward_mode.lower() == "linear":
linear_reward = 1.0 / (1.0 + linear_velocity_error) * self.linear_scale
elif self.linear_reward_mode.lower() == "square":
linear_reward = 1.0 / (1.0 + linear_velocity_error) * self.linear_scale
elif self.linear_reward_mode.lower() == "exponential":
linear_reward = (
torch.exp(-linear_velocity_error / self.linear_exponential_reward_coeff)
* self.linear_scale
)
else:
raise ValueError("Unknown reward type.")
if self.angular_reward_mode.lower() == "linear":
angular_reward = 1.0 / (1.0 + angular_velocity_error) * self.angular_scale
elif self.angular_reward_mode.lower() == "square":
angular_reward = 1.0 / (1.0 + angular_velocity_error) * self.angular_scale
elif self.angular_reward_mode.lower() == "exponential":
angular_reward = (
torch.exp(
-angular_velocity_error / self.angular_exponential_reward_coeff
)
* self.angular_scale
)
else:
raise ValueError("Unknown reward type.")
return linear_reward, angular_reward
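# Illustrative usage sketch (editor's addition; not part of the original module).
# It shows how these reward dataclasses are meant to be driven: build one from a
# config dict, then call compute_reward with batched error tensors. The config
# values, batch size, and passing None for the unused state/action arguments are
# assumptions made for the sketch.
if __name__ == "__main__":
    cfg = {"reward_mode": "exponential", "exponential_reward_coeff": 0.25}
    reward_fn = GoToXYZReward(**cfg)
    position_error = torch.rand(16)  # distance to the goal, one entry per environment
    reward = reward_fn.compute_reward(None, None, position_error)
    print(reward.shape)  # -> torch.Size([16])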
| 7,793 | Python | 33.794643 | 88 | 0.589632 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_disturbances_parameters.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
@dataclass
class MassDistributionDisturbanceParameters:
"""
This class provides an interface to adjust the hyperparameters of the mass distribution disturbance.
"""
mass_curriculum: CurriculumParameters = field(default_factory=dict)
com_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
def __post_init__(self):
self.mass_curriculum = CurriculumParameters(**self.mass_curriculum)
self.com_curriculum = CurriculumParameters(**self.com_curriculum)
@dataclass
class ForceDisturbanceParameters:
"""
This class provides an interface to adjust the hyperparameters of the force disturbance.
"""
force_curriculum: CurriculumParameters = field(default_factory=dict)
use_sinusoidal_patterns: bool = False
min_freq: float = 0.1
max_freq: float = 5.0
min_offset: float = 0.0
max_offset: float = 1.0
enable: bool = False
def __post_init__(self):
self.force_curriculum = CurriculumParameters(**self.force_curriculum)
assert self.min_freq > 0, "The minimum frequency must be positive."
assert self.max_freq > 0, "The maximum frequency must be positive."
assert (
self.max_freq > self.min_freq
), "The maximum frequency must be larger than the minimum frequency."
@dataclass
class TorqueDisturbanceParameters:
"""
    This class provides an interface to adjust the hyperparameters of the torque disturbance.
"""
torque_curriculum: CurriculumParameters = field(default_factory=dict)
use_sinusoidal_patterns: bool = False
min_freq: float = 0.1
max_freq: float = 5.0
min_offset: float = 0.0
max_offset: float = 1.0
enable: bool = False
def __post_init__(self):
self.torque_curriculum = CurriculumParameters(**self.torque_curriculum)
assert self.min_freq > 0, "The minimum frequency must be positive."
assert self.max_freq > 0, "The maximum frequency must be positive."
assert (
self.max_freq > self.min_freq
), "The maximum frequency must be larger than the minimum frequency."
@dataclass
class NoisyObservationsParameters:
"""
This class provides an interface to adjust the hyperparameters of the observation noise.
"""
position_curriculum: CurriculumParameters = field(default_factory=dict)
velocity_curriculum: CurriculumParameters = field(default_factory=dict)
orientation_curriculum: CurriculumParameters = field(default_factory=dict)
enable_position_noise: bool = False
enable_velocity_noise: bool = False
enable_orientation_noise: bool = False
def __post_init__(self):
self.position_curriculum = CurriculumParameters(**self.position_curriculum)
self.velocity_curriculum = CurriculumParameters(**self.velocity_curriculum)
self.orientation_curriculum = CurriculumParameters(
**self.orientation_curriculum
)
@dataclass
class NoisyActionsParameters:
"""
This class provides an interface to adjust the hyperparameters of the action noise.
"""
action_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
def __post_init__(self):
self.action_curriculum = CurriculumParameters(**self.action_curriculum)
@dataclass
class NoisyImagesParameters:
"""
    This class provides an interface to adjust the hyperparameters of the image noise.
"""
image_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
modality: bool = "rgb"
def __post_init__(self):
self.image_curriculum = CurriculumParameters(**self.image_curriculum)
@dataclass
class DisturbancesParameters:
"""
Collection of disturbances.
"""
mass_disturbance: MassDistributionDisturbanceParameters = field(
default_factory=dict
)
force_disturbance: ForceDisturbanceParameters = field(default_factory=dict)
torque_disturbance: TorqueDisturbanceParameters = field(default_factory=dict)
observations_disturbance: NoisyObservationsParameters = field(default_factory=dict)
actions_disturbance: NoisyActionsParameters = field(default_factory=dict)
rgb_disturbance: NoisyImagesParameters = field(default_factory=dict)
depth_disturbance: NoisyImagesParameters = field(default_factory=dict)
def __post_init__(self):
self.mass_disturbance = MassDistributionDisturbanceParameters(
**self.mass_disturbance
)
self.force_disturbance = ForceDisturbanceParameters(**self.force_disturbance)
self.torque_disturbance = TorqueDisturbanceParameters(**self.torque_disturbance)
self.observations_disturbance = NoisyObservationsParameters(
**self.observations_disturbance
)
self.actions_disturbance = NoisyActionsParameters(**self.actions_disturbance)
self.rgb_disturbance = NoisyImagesParameters(**self.rgb_disturbance)
self.depth_disturbance = NoisyImagesParameters(**self.depth_disturbance) | 5,477 | Python | 35.039473 | 104 | 0.710973 |
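# Illustrative usage sketch (editor's addition; not part of the original module).
# It shows how the nested parameter dataclasses are expected to be built from a
# plain (e.g. hydra) config dict. Leaving a curriculum sub-dict empty falls back
# to the CurriculumParameters defaults; the keys and values shown here are
# assumptions made for the sketch, not values from the original configs.
if __name__ == "__main__":
    cfg = {
        "force_disturbance": {
            "enable": True,
            "use_sinusoidal_patterns": False,
            "force_curriculum": {},
        },
    }
    params = DisturbancesParameters(**cfg)
    print(params.force_disturbance.enable)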
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_disturbances.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
NoisyImagesParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
import omni
class MassDistributionDisturbances:
"""
Creates disturbances on the platform by simulating a mass distribution on the
platform.
"""
def __init__(
self,
parameters: MassDistributionDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.mass_sampler = CurriculumSampler(parameters.mass_curriculum)
self.CoM_sampler = CurriculumSampler(parameters.com_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the mass disturbances.
"""
self.platforms_mass = (
torch.ones((self._num_envs, 1), device=self._device, dtype=torch.float32)
* self.mass_sampler.get_min()
)
self.platforms_CoM = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
def randomize_masses(self, env_ids: torch.Tensor, step: int = 0) -> None:
"""
Randomizes the masses of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
step (int): The current step of the learning process.
"""
if self.parameters.enable:
num_resets = len(env_ids)
self.platforms_mass[env_ids, 0] = self.mass_sampler.sample(
num_resets, step, device=self._device
)
r = self.CoM_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
self.platforms_CoM[env_ids, 0] = torch.cos(theta) * r
self.platforms_CoM[env_ids, 1] = torch.sin(theta) * r
def get_masses(
self,
env_ids: torch.Tensor,
) -> torch.Tensor:
"""
        Returns the masses of the platforms.
        Args:
            env_ids (torch.Tensor): The ids of the environments to reset (currently unused).
        Returns:
            torch.Tensor: The masses of the platforms.
"""
return self.platforms_mass[:, 0]
def get_masses_and_com(
self,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns the masses and CoM of the platforms.
Returns:
            torch.Tensor: The masses and CoM of the platforms, concatenated along the last dimension.
"""
return torch.cat((self.platforms_mass, self.platforms_CoM), axis=1)
def get_CoM(self, env_ids: torch.Tensor) -> torch.Tensor:
"""
Returns the CoM of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
Returns:
torch.Tensor: The CoM of the platforms.
"""
return self.platforms_CoM[env_ids]
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
mass = self.platforms_mass.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(mass, bins=32)
ax.set_title("Mass disturbance")
ax.set_xlim(
self.mass_sampler.get_min_bound(), self.mass_sampler.get_max_bound()
)
ax.set_xlabel("mass (Kg)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/mass_disturbance"] = wandb.Image(data)
if self.parameters.enable:
com = torch.norm(self.platforms_CoM.cpu(), axis=-1).numpy().flatten()
            fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(com, bins=32)
ax.set_title("CoM disturbance")
ax.set_xlim(
self.CoM_sampler.get_min_bound(), self.CoM_sampler.get_max_bound()
)
ax.set_xlabel("Displacement (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/CoM_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/mass_disturbance_rate"] = self.mass_sampler.get_rate(step)
dict["disturbance/CoM_disturbance_rate"] = self.CoM_sampler.get_rate(step)
return dict
class ForceDisturbance:
"""
Creates disturbances by applying random forces.
"""
def __init__(
self,
parameters: ForceDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.parameters = parameters
self.force_sampler = CurriculumSampler(self.parameters.force_curriculum)
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the force disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_forces = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.forces = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_forces(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the forces using a sinusoidal pattern or not.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_forces[env_ids] = self.force_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.force_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
self.forces[env_ids, 0] = torch.cos(theta) * r
self.forces[env_ids, 1] = torch.sin(theta) * r
def get_force_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the forces given the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The floor forces.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[:, 0] = (
torch.sin(root_pos[:, 0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[:, 1] = (
torch.sin(root_pos[:, 1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
return self.forces
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
force = self.force_sampler.sample(self._num_envs, step, device=self._device)
force = force.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(force, bins=32)
ax.set_title("Force disturbance")
ax.set_xlim(0, self.force_sampler.get_max_bound())
ax.set_xlabel("force (N)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/force_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/force_disturbance_rate"] = self.force_sampler.get_rate(
step
)
return dict
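# Illustrative sketch (editor's addition; not part of the original module).
# It restates the sinusoidal force-field pattern used by ForceDisturbance when
# `use_sinusoidal_patterns` is enabled: each horizontal force component is a
# sine of the platform position, so the disturbance varies smoothly across the
# arena. The frequencies, offsets and amplitude below are made-up values.
def _sinusoidal_force_sketch(root_pos: torch.Tensor) -> torch.Tensor:
    freq_x, freq_y = 0.5, 0.8  # assumed spatial frequencies (rad/m)
    off_x, off_y = 0.1, 0.3  # assumed phase offsets (rad)
    max_force = 0.25  # assumed force amplitude (N)
    forces = torch.zeros((root_pos.shape[0], 3), device=root_pos.device)
    forces[:, 0] = torch.sin(root_pos[:, 0] * freq_x + off_x) * max_force
    forces[:, 1] = torch.sin(root_pos[:, 1] * freq_y + off_y) * max_force
    return forces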
class TorqueDisturbance:
"""
Creates disturbances by applying a torque to its center.
"""
def __init__(
self,
parameters: TorqueDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.parameters = parameters
self.torque_sampler = CurriculumSampler(self.parameters.torque_curriculum)
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the torque disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._torque_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_torques = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.torques = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_torques(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the torque disturbance.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current step of the training. Default to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
# use the same min/max frequencies and offsets for the floor
self._torque_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_torques[env_ids] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
else:
self.torques[env_ids, 2] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
def get_torque_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the torques given the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The torque disturbance."""
if self.parameters.use_sinusoidal_patterns:
self.torques[:, 2] = (
torch.sin(root_pos * self._torque_freq + self._torque_offset)
* self._max_torques
)
return self.torques
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
torque = self.torque_sampler.sample(
self._num_envs, step, device=self._device
)
torque = torque.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(torque, bins=32)
ax.set_title("Torque disturbance")
ax.set_xlim(
self.torque_sampler.get_min_bound(), self.torque_sampler.get_max_bound()
)
ax.set_xlabel("torque (Nm)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/torque_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/torque_disturbance_rate"] = self.torque_sampler.get_rate(
step
)
return dict
class NoisyObservations:
"""
Adds noise to the observations of the robot.
"""
def __init__(
self,
parameters: NoisyObservationsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.position_sampler = CurriculumSampler(parameters.position_curriculum)
self.velocity_sampler = CurriculumSampler(parameters.velocity_curriculum)
self.orientation_sampler = CurriculumSampler(parameters.orientation_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_pos(self, pos: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the position of the robot.
Args:
pos (torch.Tensor): The position of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The position of the robot with noise.
"""
if self.parameters.enable_position_noise:
self.pos_shape = pos.shape
pos += self.position_sampler.sample(
self._num_envs * pos.shape[1], step, device=self._device
).reshape(-1, self.pos_shape[1])
return pos
def add_noise_on_vel(self, vel: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the velocity of the robot.
Args:
vel (torch.Tensor): The velocity of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The velocity of the robot with noise.
"""
if self.parameters.enable_velocity_noise:
self.vel_shape = vel.shape
vel += self.velocity_sampler.sample(
self._num_envs * vel.shape[1], step, device=self._device
).reshape(-1, self.vel_shape[1])
return vel
def add_noise_on_heading(
self, heading: torch.Tensor, step: int = 0
) -> torch.Tensor:
"""
Adds noise to the heading of the robot.
Args:
heading (torch.Tensor): The heading of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The heading of the robot with noise.
"""
if self.parameters.enable_orientation_noise:
heading += self.orientation_sampler.sample(
self._num_envs, step, device=self._device
)
return heading
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable_position_noise:
position = self.position_sampler.sample(
self._num_envs * self.pos_shape[1], step, device=self._device
)
position = position.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(position, bins=32)
ax.set_title("Position noise")
ax.set_xlim(
self.position_sampler.get_min_bound(),
self.position_sampler.get_max_bound(),
)
ax.set_xlabel("noise (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/position_noise"] = wandb.Image(data)
if self.parameters.enable_velocity_noise:
velocity = self.velocity_sampler.sample(
self._num_envs * self.vel_shape[1], step, device=self._device
)
velocity = velocity.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(velocity, bins=32)
ax.set_title("Velocity noise")
ax.set_xlim(
self.velocity_sampler.get_min_bound(),
                self.velocity_sampler.get_max_bound(),
)
ax.set_xlabel("noise (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/velocity_noise"] = wandb.Image(data)
if self.parameters.enable_orientation_noise:
orientation = self.orientation_sampler.sample(
self._num_envs, step, device=self._device
)
orientation = orientation.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(orientation, bins=32)
ax.set_title("Orientation noise")
ax.set_xlim(
self.orientation_sampler.get_min_bound(),
self.orientation_sampler.get_max_bound(),
)
ax.set_xlabel("noise (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/orientation_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable_position_noise:
dict["disturbance/position_disturbance_rate"] = (
self.position_sampler.get_rate(step)
)
if self.parameters.enable_velocity_noise:
dict["disturbance/velocity_disturbance_rate"] = (
self.velocity_sampler.get_rate(step)
)
if self.parameters.enable_orientation_noise:
dict["disturbance/orientation_disturbance_rate"] = (
self.orientation_sampler.get_rate(step)
)
return dict
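# Illustrative sketch (editor's addition; not part of the original module).
# It documents the sampling/reshaping convention used by NoisyObservations: one
# scalar is drawn per environment and per observation dimension, and the flat
# draw is reshaped back to (num_envs, dim) before being added. The uniform
# stand-in sampler and the scale value are assumptions made for the sketch.
def _noise_like_sketch(obs: torch.Tensor, scale: float = 0.01) -> torch.Tensor:
    num_envs, dim = obs.shape
    flat = (torch.rand(num_envs * dim, device=obs.device) - 0.5) * 2.0 * scale
    return obs + flat.reshape(num_envs, dim)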
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
parameters: NoisyActionsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyActionsParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_act(self, act: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the actions of the robot.
Args:
act (torch.Tensor): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The actions of the robot with noise.
"""
if self.parameters.enable:
self.shape = act.shape
act += self.action_sampler.sample(
self._num_envs * act.shape[1], step, device=self._device
).reshape(-1, self.shape[1])
return act
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
action = self.action_sampler.sample(
self._num_envs * self.shape[1], step, device=self._device
).reshape(-1, self.shape[1])
action = action.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(action, bins=32)
ax.set_title("Action noise")
ax.set_xlim(
self.action_sampler.get_min_bound(), self.action_sampler.get_max_bound()
)
ax.set_xlabel("noise (N)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/action_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/action_disturbance_rate"] = self.action_sampler.get_rate(
step
)
return dict
class NoisyImages:
"""
    Adds noise to the image observations of the robot."""
def __init__(
self,
parameters: NoisyImagesParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyImagesParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.image_sampler = CurriculumSampler(parameters.image_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_image(self, image: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
        Adds noise to the image observations of the robot.
Args:
image (torch.Tensor): The image observation of the robot. Shape is (num_envs, channel, height, width).
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The image observation of the robot with noise.
"""
if self.parameters.enable:
self.shape = image.shape
image += self.image_sampler.sample(
self._num_envs * self.shape[1] * self.shape[2] * self.shape[3], step, device=self._device
).reshape(-1, self.shape[1], self.shape[2], self.shape[3])
return image
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
image = self.image_sampler.sample(
self._num_envs * self.shape[1] * self.shape[2] * self.shape[3], step, device=self._device
).reshape(-1, self.shape[1], self.shape[2], self.shape[3])
image = image.squeeze().cpu().numpy()[0]
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.imshow(image)
ax.set_title("Action noise")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict[f"disturbance/{self.parameters.modality}_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict[f"disturbance/{self.parameters.modality}_disturbance_rate"] = self.image_sampler.get_rate(
step
)
return dict
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.parameters.mass_disturbance,
num_envs,
device,
)
self.force_disturbances = ForceDisturbance(
self.parameters.force_disturbance,
num_envs,
device,
)
self.torque_disturbances = TorqueDisturbance(
self.parameters.torque_disturbance,
num_envs,
device,
)
self.noisy_observations = NoisyObservations(
self.parameters.observations_disturbance,
num_envs,
device,
)
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
self.noisy_rgb_images = NoisyImages(
self.parameters.rgb_disturbance,
num_envs,
device
)
self.noisy_depth_images = NoisyImages(
self.parameters.depth_disturbance,
num_envs,
device
)
def get_logs(self, step: int) -> dict:
"""
Collects logs for all the disturbances.
Args:
step (int): The current training step.
Returns:
dict: The logs for all used disturbances.
"""
dict = {}
dict = {**dict, **self.mass_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.force_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.torque_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.noisy_observations.get_scalar_logs(step)}
dict = {**dict, **self.noisy_actions.get_scalar_logs(step)}
if step % 50 == 0:
dict = {**dict, **self.mass_disturbances.get_image_logs(step)}
dict = {**dict, **self.force_disturbances.get_image_logs(step)}
dict = {**dict, **self.torque_disturbances.get_image_logs(step)}
dict = {**dict, **self.noisy_observations.get_image_logs(step)}
dict = {**dict, **self.noisy_actions.get_image_logs(step)}
return dict
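# Illustrative usage sketch (editor's addition; not part of the original module).
# The Disturbances wrapper above is meant to be built once per task from a
# config dict and queried by the environment. With an empty config every
# disturbance stays disabled and falls back to its curriculum defaults; this
# behaviour, and the tensor shapes below, are assumptions made for the sketch.
if __name__ == "__main__":
    disturbances = Disturbances({}, num_envs=4, device="cpu")
    actions = torch.zeros((4, 8))
    noisy_actions = disturbances.noisy_actions.add_noise_on_act(actions)
    print(noisy_actions.shape, list(disturbances.get_logs(step=0).keys()))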
| 32,339 | Python | 32.757829 | 114 | 0.551192 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_pose_seq.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughPoseSequenceReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughPoseSequenceParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow import VisualArrow
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import colorsys
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughPoseSequenceTask(Core):
"""
    Implements the GoThroughPoseSequence task. The robot has to pass through a sequence of poses
    (position and heading) in the 2D plane at a given velocity, matching each target's heading as it
    passes. Unlike the GoThroughPose task, the robot has to traverse several targets in order.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the GoThroughPoseSequence task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughPoseSequenceTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughPoseSequenceParameters(**task_param)
self._reward_parameters = GoThroughPoseSequenceReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._all = torch.arange(self._num_envs, device=self._device)
self._trajectory_completed = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, self._task_parameters.num_points, 2),
device=self._device,
dtype=torch.float32,
)
self._target_headings = torch.zeros(
(self._num_envs, self._task_parameters.num_points),
device=self._device,
dtype=torch.float32,
)
self._target_index = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.long
)
self._target_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._delta_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 5
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = (
self._target_positions[self._all, self._target_index]
- current_state["position"]
).squeeze()
# linear velocity error (normed velocity)
self.linear_velocity_err = self._target_velocities - torch.norm(
current_state["linear_velocity"], dim=-1
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings[self._all, self._target_index] - heading),
torch.cos(self._target_headings[self._all, self._target_index] - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = self.linear_velocity_err
# position of the other points in the sequence
for i in range(self._task_parameters.num_points - 1):
overflowing = (
self._target_index + i + 1 >= self._task_parameters.num_points
).int()
indices = self._target_index + (i + 1) * (1 - overflowing)
self._task_data[:, 5 + 4 * i : 5 + 4 * i + 2] = (
self._target_positions[self._all, indices] - current_state["position"]
) * (1 - overflowing).view(-1, 1)
heading_error = torch.arctan2(
torch.sin(
self._target_headings[self._all, indices]
- self._target_headings[self._all, indices - 1]
),
torch.cos(
self._target_headings[self._all, indices]
- self._target_headings[self._all, indices - 1]
),
)
self._task_data[:, 5 + 4 * i + 2] = torch.cos(heading_error) * (
1 - overflowing
)
            self._task_data[:, 5 + 4 * i + 3] = torch.sin(heading_error) * (
1 - overflowing
)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.abs(self.linear_velocity_err)
position_progress = (
self._previous_position_dist - self.position_dist
) / torch.abs(self._target_velocities)
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# Heading
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
position_goal_reached = (
self.position_dist < self._task_parameters.position_tolerance
)
heading_goal_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
)
goal_reached = (position_goal_reached * heading_goal_reached).int()
reached_ids = goal_reached.nonzero(as_tuple=False).squeeze(-1)
# if the goal is reached, the target index is updated
self._target_index = self._target_index + goal_reached
self._trajectory_completed = (
self._target_index >= self._task_parameters.num_points
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
self.linear_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
self.linear_velocity_dist,
)
self._previous_position_dist = self.position_dist.clone()
# If goal is reached make next progress null
self._previous_position_dist[reached_ids] = 0
return (
self.progress_reward
+ self.heading_reward
+ self.linear_velocity_reward
- self.boundary_penalty
- self._reward_parameters.time_penalty
+ self._trajectory_completed * self._reward_parameters.terminal_reward
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
torch.Tensor: Wether the platforms should be killed or not.
"""
die = torch.zeros_like(self._trajectory_completed, dtype=torch.long)
ones = torch.ones_like(self._trajectory_completed, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._trajectory_completed > 0, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._trajectory_completed[env_ids] = 0
self._target_index[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
for i in range(self._task_parameters.num_points):
if i == 0:
self._target_positions[env_ids, i] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
else:
r = self._spawn_position_sampler.sample(
num_goals, step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
point = torch.zeros((num_goals, 2), device=self._device)
point[:, 0] = r * torch.cos(theta)
point[:, 1] = r * torch.sin(theta)
self._target_positions[env_ids, i] = (
self._target_positions[env_ids, i - 1] + point
)
self._target_headings[env_ids, i] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
# Randomize heading
self._delta_headings[env_ids] = self._spawn_heading_sampler.sample(
num_goals, step, device=self._device
)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
self._target_velocities[env_ids] = r
# Creates tensors to save position and orientation
p = torch.zeros(
(num_goals, self._task_parameters.num_points, 3), device=self._device
)
q = torch.zeros(
(num_goals, self._task_parameters.num_points, 4),
device=self._device,
dtype=torch.float32,
)
q[:, :, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
q[:, :, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
p[:, :, :2] = self._target_positions[env_ids]
p[:, :, 2] = 2
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 0, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
target_position_local = (
self._target_positions[env_ids, 0, :2] - initial_position[:, :2]
)
target_heading = torch.arctan2(
target_position_local[:, 1], target_position_local[:, 0]
)
theta = target_heading + self._delta_headings[env_ids]
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
An arrow is generated to represent the 3DoF pose to be reached by the agent.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the arrow.
"""
for i in range(self._task_parameters.num_points):
color = torch.tensor(
colorsys.hsv_to_rgb(i / self._task_parameters.num_points, 1, 1)
)
body_radius = 0.1
body_length = 0.5
head_radius = 0.2
head_length = 0.5
poll_radius = 0.025
poll_length = 2
VisualArrow(
prim_path=path + "/arrow_" + str(i),
translation=position,
name="target_" + str(i),
body_radius=body_radius,
body_length=body_length,
poll_radius=poll_radius,
poll_length=poll_length,
head_radius=head_radius,
head_length=head_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/arrow_[0-5]")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
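# Illustrative sketch (editor's addition; not part of the original module).
# It documents the task-data packing produced by get_state_observations above
# for a sequence of num_points targets. The index values follow the code; the
# helper itself is hypothetical and only spells out the layout convention.
def _task_data_layout(num_points: int) -> dict:
    layout = {
        0: "x error to the current target",
        1: "y error to the current target",
        2: "cos(heading error to the current target)",
        3: "sin(heading error to the current target)",
        4: "normed linear velocity error",
    }
    for i in range(num_points - 1):
        base = 5 + 4 * i
        layout[base + 0] = f"x offset to the target {i + 1} step(s) ahead"
        layout[base + 1] = f"y offset to the target {i + 1} step(s) ahead"
        layout[base + 2] = f"cos(heading change at the target {i + 1} step(s) ahead)"
        layout[base + 3] = f"sin(heading change at the target {i + 1} step(s) ahead)"
    return layout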
| 24,339 | Python | 35.601504 | 106 | 0.573976 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_parameters.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BoundaryPenalty,
)
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYZParameters:
"""
    Parameters for the GoToXYZ task.
"""
name: str = "GoToXYZ"
position_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoToPoseParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToPose"
position_tolerance: float = 0.01
orientation_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.orientation_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class TrackXYZVelocityParameters:
"""
    Parameters for the TrackXYZVelocity task.
"""
name: str = "TrackXYZVelocity"
lin_vel_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_velocity: float = 0.75
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_velocity >= 0, "Goal random velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class Track6DoFVelocityParameters:
"""
    Parameters for the Track6DoFVelocity task.
"""
name: str = "Track6DoFVelocity"
lin_vel_tolerance: float = 0.01
ang_vel_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_linear_velocity: float = 0.75
goal_random_angular_velocity: float = 1
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
target_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.ang_vel_tolerance > 0
), "Angular velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
        assert (
            self.goal_random_linear_velocity >= 0
        ), "Goal random linear velocity must be non-negative."
        assert (
            self.goal_random_angular_velocity >= 0
        ), "Goal random angular velocity must be non-negative."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.target_angular_velocity_curriculum = CurriculumParameters(
**self.target_angular_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
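# Hypothetical usage sketch (not part of the original file): these dataclasses are
# normally instantiated from the nested task config dictionary. The call below assumes
# that CurriculumParameters can be built from an empty dict (i.e. all of its fields
# have defaults), since the curriculum fields default to empty dicts here.
if __name__ == "__main__":
    params = Track6DoFVelocityParameters(
        lin_vel_tolerance=0.02,
        ang_vel_tolerance=0.05,
        kill_after_n_steps_in_tolerance=100,
    )
    print(params.name, params.lin_vel_tolerance, params.ang_vel_tolerance)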
| 7,201 | Python | 35.744898 | 88 | 0.669768 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_close_proximity_dock.py | __author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
CloseProximityDockParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.dock import Dock, DockView
from omni.isaac.core.articulations import ArticulationView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class CloseProximityDockTask(Core):
"""
Implements the CloseProximityDock task. The robot has to reach a target position and heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
super(CloseProximityDockTask, self).__init__(num_envs, device)
# Observation buffers
        self._dim_task_data = 4  # data to be used to fulfill the task (floats) [6:10]
self._num_observations = 10
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, self._dim_task_data),
device=self._device,
dtype=torch.float32,
)
# Task and reward parameters
self._task_parameters = CloseProximityDockParameters(**task_param)
self._reward_parameters = GoToPoseReward(**reward_param)
# Curriculum samplers
self._fp_footprint_diameter_sampler = CurriculumSampler(
self._task_parameters.fp_footprint_diameter_curriculum
)
self._spawn_dock_mass_sampler = CurriculumSampler(
self._task_parameters.spawn_dock_mass_curriculum
)
self._spawn_dock_space_sampler = CurriculumSampler(
self._task_parameters.spawn_dock_space_curriculum
)
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self.spawn_relative_angle_sampler = CurriculumSampler(
self._task_parameters.spawn_relative_angle_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._anchor_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_orientations = torch.zeros(
(self._num_envs, 4), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.relative_angle = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._goal_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 6
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:2] = current_state["orientation"]
self._obs_buffer[:, 2:4] = current_state["linear_velocity"]
self._obs_buffer[:, 4] = current_state["angular_velocity"]
self._obs_buffer[:, 5] = self._task_label
self._obs_buffer[:, 6:10] = self._task_data
return self._obs_buffer
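    # Observation layout (10 floats), as filled above and in get_state_observations:
    #   [0:2]  platform heading as (cos, sin)
    #   [2:4]  linear velocity
    #   [4]    angular velocity
    #   [5]    task label (6 for CloseProximityDock)
    #   [6:8]  position error to the (offset) docking target
    #   [8:10] (cos, sin) of the relaxed heading error toward the penalty anchor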
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "position_reward" in stats.keys():
stats["position_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.relative_angle_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.contact_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.relative_angle_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.contact_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# heading distance
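        # The heading goal is not the dock heading itself: an anchor point is placed
        # goal_to_penalty_anchor_dist in front of the dock along its facing direction,
        # and the platform is asked to point toward that anchor. This is intended as a
        # relaxed heading target that tightens as the platform approaches the dock.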
self._anchor_positions = self._target_positions.clone()
self._anchor_positions[:, 0] += self._task_parameters.goal_to_penalty_anchor_dist * torch.cos(self._target_headings)
self._anchor_positions[:, 1] += self._task_parameters.goal_to_penalty_anchor_dist * torch.sin(self._target_headings)
self._goal_headings = torch.atan2(
(self._anchor_positions - current_state["position"])[:, 1],
(self._anchor_positions - current_state["position"])[:, 0]
)
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
# relaxed heading error
self._heading_error = torch.abs(
torch.arctan2(
torch.sin(self._goal_headings - heading),
torch.cos(self._goal_headings - heading),
)
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
return self.update_observation_tensor(current_state)
def compute_relative_angle(self, fp_position:torch.Tensor):
"""
Compute relative angle between FP and anchor point of cone-shape penalty.
Args:
fp_position: position of the FP in env coordinate.
Returns:
relative_angle: relative angle between FP and anchor point.
"""
self._anchor_positions = self._target_positions.clone()
self._anchor_positions[:, 0] += self._task_parameters.goal_to_penalty_anchor_dist * torch.cos(self._target_headings)
self._anchor_positions[:, 1] += self._task_parameters.goal_to_penalty_anchor_dist * torch.sin(self._target_headings)
relative_angle = torch.atan2((fp_position - self._anchor_positions)[:, 1], (fp_position - self._anchor_positions)[:, 0]) - self._target_headings
relative_angle = torch.atan2(torch.sin(relative_angle), torch.cos(relative_angle)) # normalize angle within (-pi, pi)
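        # Toy example (illustrative values): with the goal at the origin and a target
        # heading of 0 rad, the anchor sits at (goal_to_penalty_anchor_dist, 0). A
        # platform at (goal_to_penalty_anchor_dist, 1) yields atan2(1, 0) - 0 = pi / 2,
        # i.e. it sits 90 degrees off the dock's approach axis.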
return relative_angle
def compute_reward(
self,
current_state: dict,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
        This method differs from the GoToPose task because the objective here is docking.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# compute reward mask
self.relative_angle = self.compute_relative_angle(current_state["position"])
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
# heading error
self.heading_dist = self._heading_error
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# cone shape penalty on fp-dock relative angle
relative_angle_penalty = self._task_parameters.relative_angle_penalty.compute_penalty(
self.relative_angle, step
)
# contact penalty
contact_penalty, self._contact_kills = (
self._task_parameters.contact_penalty.compute_penalty(
current_state["net_contact_forces"], step
)
)
# Checks if the goal is reached
position_goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
).int()
goal_is_reached = position_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.position_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist, self.heading_dist
)
return (
self.position_reward
+ self.heading_reward
- boundary_penalty
- relative_angle_penalty
- contact_penalty
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
die = torch.where(self._contact_kills, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["position_reward"] += self.position_reward
stats["heading_reward"] += self.heading_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
stats = self._task_parameters.relative_angle_penalty.update_statistics(stats)
stats = self._task_parameters.contact_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task."""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations in env coordinate.
"""
num_goals = len(env_ids)
target_positions = torch.zeros(
(num_goals, 3), device=self._device, dtype=torch.float32
)
target_orientations = torch.zeros(
(num_goals, 4), device=self._device, dtype=torch.float32
)
# Randomizes the target position (completely random)
        dock_space = self._spawn_dock_space_sampler.sample(num_goals, step, device=self._device)  # free space between the dock face nearest the wall and the wall surface
self._target_positions[env_ids, 0] = \
2*(self._task_parameters.env_x/2 - self._task_parameters.dock_footprint_diameter - dock_space) * torch.rand((num_goals,), device=self._device) \
- (self._task_parameters.env_x/2 - self._task_parameters.dock_footprint_diameter - dock_space)
self._target_positions[env_ids, 1] = \
2*(self._task_parameters.env_y/2 - self._task_parameters.dock_footprint_diameter - dock_space) * torch.rand((num_goals,), device=self._device) \
- (self._task_parameters.env_y/2 - self._task_parameters.dock_footprint_diameter - dock_space)
# Randomizes the target heading
        # First, make the dock face the center of the environment.
self._target_headings[env_ids] = torch.atan2(self._target_positions[env_ids, 1], self._target_positions[env_ids, 0]) + math.pi # facing center
self._target_orientations[env_ids, 0] = torch.cos(
self._target_headings[env_ids] * 0.5
)
self._target_orientations[env_ids, 3] = torch.sin(
self._target_headings[env_ids] * 0.5
)
# Retrieve the target positions and orientations at batch index = env_ids
target_positions[:, :2] = self._target_positions[env_ids]
target_positions[:, 2] = torch.ones(num_goals, device=self._device) * 0.45
target_orientations[:] = self._target_orientations[env_ids]
# Add offset to the local target position
fp_foot_print_diameter = self._fp_footprint_diameter_sampler.sample(num_goals, step, device=self._device)
self._target_positions[env_ids, 0] += (fp_foot_print_diameter / 2) * torch.cos(self._target_headings[env_ids])
self._target_positions[env_ids, 1] += (fp_foot_print_diameter / 2) * torch.sin(self._target_headings[env_ids])
return target_positions, target_orientations
def set_goals(self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step:int = 0) -> None:
"""
Update goal attribute of task class.
Args:
env_ids: The environment ids for which the goal is set.
target_positions: The target positions for the robots in env coordinate (world position - env_position).
target_orientations: The target orientations for the robots."""
self._target_positions[env_ids] = target_positions[:, :2]
siny_cosp = 2 * target_orientations[env_ids, 0] * target_orientations[env_ids, 3]
cosy_cosp = 1 - 2 * (target_orientations[env_ids, 3] * target_orientations[env_ids, 3])
self._target_headings[env_ids] = torch.arctan2(siny_cosp, cosy_cosp)
# Add offset to the local target position
fp_foot_print_diameter = self._fp_footprint_diameter_sampler.sample(len(env_ids), step, device=self._device)
self._target_positions[env_ids, 0] += (fp_foot_print_diameter / 2) * torch.cos(self._target_headings[env_ids])
self._target_positions[env_ids, 1] += (fp_foot_print_diameter / 2) * torch.sin(self._target_headings[env_ids])
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates spawning positions for the robots following a curriculum.
[Warmup] Randomize only position, but FP always faces center of FP.
[In curriculum] Randomize position and orientation.
[After curriculum] Max position and orientation.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step."""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the initial position and orientation
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
relative_angle = self.spawn_relative_angle_sampler.sample(num_resets, step, device=self._device)
initial_position[:, 0] = self._target_positions[env_ids, 0] + r * torch.cos(self._target_headings[env_ids] + relative_angle)
initial_position[:, 1] = self._target_positions[env_ids, 1] + r * torch.sin(self._target_headings[env_ids] + relative_angle)
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
heading_noise = self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
heading_angle = self._target_headings[env_ids] + relative_angle + math.pi + heading_noise
initial_orientation[:, 0] = torch.cos(heading_angle * 0.5)
initial_orientation[:, 3] = torch.sin(heading_angle * 0.5)
### Randomize linear and angular velocity ###
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def get_dock_masses(self, env_ids: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Generates a random mass for the dock.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The mass of the dock.
"""
mass = self._spawn_dock_mass_sampler.sample(len(env_ids), step, device=self._device)
return mass
def generate_target(self, path, position: torch.Tensor, dock_param: dict = None):
"""
Generate a docking station where the FP will dock to.
Args:
path (str): path to the prim
position (torch.Tensor): position of the docking station
dock_param (dict, optional): dictionary of DockParameters. Defaults to None.
"""
Dock(
prim_path=path+"/dock",
name="dock",
position=position,
dock_params=dock_param,
)
def add_dock_to_scene(
self, scene: Usd.Stage
)->Tuple[Usd.Stage, ArticulationView]:
"""
        Adds the articulation view and rigid prim view of the docking station to the scene.
Args:
scene (Usd.Stage): The scene to add the docking station to."""
dock = DockView(prim_paths_expr="/World/envs/.*/dock")
scene.add(dock)
scene.add(dock.base)
return scene, dock
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
delta_angle = self.spawn_relative_angle_sampler.sample(num_resets, step, device=self._device)
heading_noise = self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
dock_space = self._spawn_dock_space_sampler.sample(num_resets, step, device=self._device)
dock_mass = self._spawn_dock_mass_sampler.sample(num_resets, step, device=self._device)
r = r.cpu().numpy()
delta_angle = delta_angle.cpu().numpy()
heading_noise = heading_noise.cpu().numpy()
dock_space = dock_space.cpu().numpy()
dock_mass = dock_mass.cpu().numpy()
### Plot spawn mass ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(dock_mass, bins=32)
ax.set_title("Dock mass")
ax.set_xlim(
self._spawn_dock_mass_sampler.get_min_bound(),
self._spawn_dock_mass_sampler.get_max_bound(),
)
ax.set_xlabel("mass (kg)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/dock_mass"] = wandb.Image(data)
### Plot spawn position ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
### Plot spawn relative heading ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(delta_angle, bins=32)
ax.set_title("Initial relative heading")
ax.set_xlim(
self.spawn_relative_angle_sampler.get_min_bound(),
self.spawn_relative_angle_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_relative_heading"] = wandb.Image(data)
### Plot spawn heading noise ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading_noise, bins=32)
ax.set_title("Initial heading noise")
ax.set_xlim(
            self._spawn_heading_sampler.get_min_bound(),
            self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading_noise"] = wandb.Image(data)
### Plot dock space ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(dock_space, bins=32)
ax.set_title("Dock space")
ax.set_xlim(
self._spawn_dock_space_sampler.get_min_bound(),
self._spawn_dock_space_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/dock_space"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
return {}
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict | 26,482 | Python | 38.409226 | 166 | 0.594668 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/spacecraft_definition.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from typing import List
import math
@dataclass
class CoreParameters:
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
def __post_init__(self):
assert self.shape in [
"cylinder",
"sphere",
"asset",
], "The shape must be 'cylinder', 'sphere' or 'asset'."
assert self.radius > 0, "The radius must be larger than 0."
assert self.height > 0, "The height must be larger than 0."
assert self.mass > 0, "The mass must be larger than 0."
assert len(self.CoM) == 3, "The length of the CoM coordinates must be 3."
assert self.refinement > 0, "The refinement level must be larger than 0."
self.refinement = int(self.refinement)
@dataclass
class ThrusterParameters:
"""
The definition of a basic thruster.
"""
max_force: float = 1.0
position: tuple = (0, 0, 0)
orientation: tuple = (0, 0, 0)
delay: float = 0.0
response_order: int = 0
tau: float = 1.0
def __post_init__(self):
assert self.tau > 0, "The response time of the system must be larger than 0"
assert self.response_order in [
0,
1,
], "The response order of the system must be 0 or 1."
assert (
self.delay >= 0
), "The delay in system response must larger or equal to 0."
@dataclass
class ReactionWheelParameters:
"""
The definition of a basic reaction wheel.
"""
mass: float = 0.250
inertia: float = 0.3
position: tuple = (0, 0, 0)
orientation: tuple = (0, 0, 0)
max_speed: float = 5000
delay: float = 0.0
response_order: float = 1
tau: float = 1.0
def __post_init__(self):
assert self.tau > 0, "The response time of the system must be larger than 0"
assert self.response_order in [
0,
1,
], "The response order of the system must be 0 or 1."
assert (
self.delay >= 0
), "The delay in system response must larger or equal to 0."
assert (
self.max_speed > 0
), "The maximum speed of the reaction wheel must be larger than 0."
@dataclass
class FloatingPlatformParameters:
"""
Thruster configuration parameters.
"""
use_four_configurations: bool = False
num_anchors: int = 4
offset: float = math.pi / 4
thrust_force: float = 1.0
visualize: bool = False
save_path: str = "thruster_configuration.png"
thruster_model: ThrusterParameters = field(default_factory=dict)
reaction_wheel_model: ReactionWheelParameters = field(default_factory=dict)
def __post_init__(self):
        assert self.num_anchors > 1, "num_anchors must be larger than or equal to 2."
    def generate_anchors_2D(self, radius):
        # Not implemented yet: anchors would be evenly spaced around the platform,
        # one every 2 * pi / num_anchors radians at the given radius.
        pass
def generate_anchors_3D(self, radius):
pass
@dataclass
class SpaceCraftDefinition:
"""
The definition of the spacecraft / floating platform.
"""
use_floating_platform_generation = True
core: CoreParameters = field(default_factory=dict)
floating_platform: FloatingPlatformParameters = field(default_factory=dict)
thrusters: List[ThrusterParameters] = field(default_factory=list)
reaction_wheels: List[ReactionWheelParameters] = field(default_factory=list)
def __post_init__(self):
self.core = CoreParameters(**self.core)
        if not self.use_floating_platform_generation:
            raise NotImplementedError
@dataclass
class PlatformRandomization:
"""
Platform randomization parameters.
"""
random_permutation: bool = False
random_offset: bool = False
randomize_thruster_position: bool = False
min_random_radius: float = 0.125
max_random_radius: float = 0.25
random_theta: float = 0.125
randomize_thrust_force: bool = False
min_thrust_force: float = 0.5
max_thrust_force: float = 1.0
kill_thrusters: bool = False
max_thruster_kill: int = 1
def compute_actions(cfg_param: FloatingPlatformParameters):
"""
Computes the number of actions for the thruster configuration.
"""
if cfg_param.use_four_configurations:
return 10
else:
return cfg_param.num_anchors * 4
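# Minimal sketch (illustrative, not part of the original file): with the default
# anchor count of 4, four thrusters are generated per anchor, so the action space
# has 16 dimensions; the special use_four_configurations mode always exposes 10.
if __name__ == "__main__":
    cfg = FloatingPlatformParameters(num_anchors=4)
    assert compute_actions(cfg) == 16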
| 4,767 | Python | 27.722891 | 84 | 0.624921 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_thruster_generator.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
euler_angles_to_matrix,
)
from typing import List, Tuple
from dataclasses import dataclass, field
import torch
import math
@dataclass
class ConfigurationParameters:
"""
Thruster configuration parameters."""
use_four_configurations: bool = False
num_anchors: int = 4
offset: float = math.pi / 4
thrust_force: float = 1.0
visualize: bool = False
save_path: str = "thruster_configuration.png"
def __post_init__(self):
        assert self.num_anchors > 1, "num_anchors must be larger than or equal to 2."
@dataclass
class PlatformParameters:
"""
Platform physical parameters."""
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
enable_collision: bool = False
@dataclass
class PlatformRandomization:
"""
Platform randomization parameters."""
random_permutation: bool = False
random_offset: bool = False
randomize_thruster_position: bool = False
min_random_radius: float = 0.125
max_random_radius: float = 0.25
random_theta: float = 0.125
randomize_thrust_force: bool = False
min_thrust_force: float = 0.5
max_thrust_force: float = 1.0
kill_thrusters: bool = False
max_thruster_kill: int = 1
def compute_actions(cfg_param: ConfigurationParameters):
"""
Computes the number of actions for the thruster configuration."""
if cfg_param.use_four_configurations:
return 10
else:
return cfg_param.num_anchors * 4
class VirtualPlatform:
"""
Generates a virtual floating platform with thrusters."""
def __init__(self, num_envs: int, platform_cfg: dict, device: str) -> None:
self._num_envs = num_envs
self._device = device
# Generates dataclasses from the configuration file
self.core_cfg = PlatformParameters(**platform_cfg["core"])
self.rand_cfg = PlatformRandomization(**platform_cfg["randomization"])
self.thruster_cfg = ConfigurationParameters(**platform_cfg["configuration"])
# Computes the number of actions
self._max_thrusters = compute_actions(self.thruster_cfg)
# Sets the empty buffers
self.transforms3D = torch.zeros(
(num_envs, self._max_thrusters, 4, 4),
device=self._device,
dtype=torch.float32,
)
self.current_transforms = torch.zeros(
(num_envs, self._max_thrusters, 10),
device=self._device,
dtype=torch.float32,
)
self.action_masks = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.long
)
self.thrust_force = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.float32
)
# Creates a unit vector to project the forces
self.create_unit_vector()
# Generates a visualization file for the provided thruster configuration
if True: # self.thruster_cfg.visualize:
self.generate_base_platforms(self._num_envs, torch.arange(self._num_envs))
self.visualize(self.thruster_cfg.save_path)
def create_unit_vector(self) -> None:
"""
Creates a unit vector to project the forces.
        Each thruster applies its force along its local x-axis, so the unit vector is a 3D vector with a single non-zero x component."""
tmp_x = torch.ones(
(self._num_envs, self._max_thrusters, 1),
device=self._device,
dtype=torch.float32,
)
tmp_y = torch.zeros(
(self._num_envs, self._max_thrusters, 2),
device=self._device,
dtype=torch.float32,
)
self.unit_vector = torch.cat([tmp_x, tmp_y], dim=-1)
def project_forces(self, forces: torch.Tensor) -> list:
"""
Projects the forces on the platform."""
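        # Input: forces has shape (num_envs, max_thrusters). Output (per the reshapes
        # below): the positions and the projected forces are both flattened to
        # (num_envs * max_thrusters, 3), one row per thruster across all environments.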
# Applies force scaling, applies action masking
rand_forces = forces * self.thrust_force * (1 - self.action_masks)
# Split transforms into translation and rotation
R = self.transforms3D[:, :, :3, :3].reshape(-1, 3, 3)
T = self.transforms3D[:, :, 3, :3].reshape(-1, 3)
        # Create a zero tensor (unused here: the positions are already 3D)
zero = torch.zeros((T.shape[0], 1), device=self._device, dtype=torch.float32)
# Generate positions
positions = T
# Project forces
force_vector = self.unit_vector * rand_forces.view(
self._num_envs, self._max_thrusters, 1
)
projected_forces = torch.matmul(R, force_vector.view(-1, 3, 1))
return positions, projected_forces[:, :, 0]
def randomize_thruster_state(self, env_ids: torch.Tensor, num_resets: int) -> None:
"""
        Randomizes the spatial configuration of the thrusters."""
self.generate_base_platforms(num_resets, env_ids)
def generate_base_platforms(self, num_envs: int, env_ids: torch.Tensor) -> None:
"""
        Generates the spatial configuration of the thrusters."""
# ====================
# Basic thruster positioning
# ====================
# Generates a fixed offset between the heading and the first generated thruster
random_offset = (
torch.ones((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
/ self.thruster_cfg.num_anchors
)
# Adds a random offset to each simulated platform between the heading and the first generated thruster
if self.rand_cfg.random_offset:
random_offset += (
torch.rand((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
* 2
)
        # Generates a 180 degree offset between two consecutive thrusters (+/- 90 degrees).
thrust_90_x = torch.zeros(
(self._num_envs, self._max_thrusters), device=self._device
)
thrust_90_y = (
(
torch.concat(
[
torch.ones(2, device=self._device) / 2.0,
torch.arange(2, device=self._device),
]
)
.repeat(self._max_thrusters // 4)
.expand(self._num_envs, self._max_thrusters)
* 2
- 1
)
* math.pi
/ 2
)
thrust_90_z = (
(
torch.concat(
[
torch.arange(2, device=self._device),
torch.ones(2, device=self._device) / 2.0,
]
)
.repeat(self._max_thrusters // 4)
.expand(self._num_envs, self._max_thrusters)
* 2
- 1
)
* math.pi
/ 2
)
# Generates N, four by four thruster
thrust_offset = (
torch.arange(self.thruster_cfg.num_anchors, device=self._device)
.repeat_interleave(4)
.expand(self._num_envs, self._max_thrusters)
/ self.thruster_cfg.num_anchors
* math.pi
* 2
)
# Generates a mask indicating if the thrusters are usable or not. Used by the transformer to mask the sequence.
mask = torch.ones((self._num_envs, self._max_thrusters), device=self._device)
# ====================
# Random thruster killing
# ====================
# Kill thrusters:
if self.rand_cfg.kill_thrusters:
# Generates 0 and 1 to decide how many thrusters will be killed
weights = torch.ones((self._num_envs, 2), device=self._device)
kills = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=True
)
# Selects L indices to set to N+1
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
kill_ids = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=False
)
# Multiplies kill or not kill with the ids.
# If no kill, then the value is set to max_thrusters + 1, such that it can be filtered out later
final_kill_ids = kills * kill_ids + (1 - kills) * self._max_thrusters
# Creates a mask from the kills:
kill_mask = torch.sum(
torch.nn.functional.one_hot(final_kill_ids, self._max_thrusters + 1),
dim=1,
)
# Removes the duplicates
kill_mask = 1 - kill_mask[:, : self._max_thrusters]
if self.thruster_cfg.use_four_configurations:
mask[self._num_envs // 4 :] = (
mask[self._num_envs // 4 :] * kill_mask[self._num_envs // 4 :]
)
else:
mask = mask * kill_mask
# Generates the transforms and masks
transforms3D = torch.zeros_like(self.transforms3D) # Used to project the forces
action_masks = torch.zeros_like(self.action_masks) # Used to mask actions
current_transforms = torch.zeros_like(
self.current_transforms
) # Used to feed to the transformer
# ====================
# Randomizes the thruster poses and characteristics.
# ====================
# Randomizes the thrust force:
if self.rand_cfg.randomize_thrust_force:
thrust_force = (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_thrust_force - self.rand_cfg.min_thrust_force)
+ self.rand_cfg.min_thrust_force
)
else:
thrust_force = torch.ones(
(self._num_envs, self._max_thrusters), device=self._device
)
# Thruster angular position with regards to the center of mass.
theta2 = random_offset + thrust_offset
# Randomizes thruster poses if requested:
if self.rand_cfg.randomize_thruster_position:
radius = self.core_cfg.radius * (
1
+ torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_random_radius + self.rand_cfg.min_random_radius)
- self.rand_cfg.min_random_radius
)
theta2 += (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.random_theta * 2)
- self.rand_cfg.random_theta
)
else:
radius = self.core_cfg.radius
# Thruster angle:
thrust_90_z = theta2 + thrust_90_z
# ====================
# Computes the 3D transforms of the thruster locations.
# ====================
euler = torch.concatenate(
[
thrust_90_x.view(thrust_90_x.shape + (1,)),
thrust_90_y.view(thrust_90_x.shape + (1,)),
thrust_90_z.view(thrust_90_x.shape + (1,)),
],
axis=-1,
)
# 3D transforms defining the thruster locations.
transforms3D[:, :, :3, :3] = euler_angles_to_matrix(euler, "XYZ")
transforms3D[:, :, 3, 0] = torch.cos(theta2) * radius
transforms3D[:, :, 3, 1] = torch.sin(theta2) * radius
transforms3D[:, :, 3, 2] = 0
transforms3D[:, :, 3, 3] = 1
transforms3D = transforms3D * mask.view(
mask.shape
+ (
1,
1,
)
)
# Actions masks to define which thrusters can be used.
action_masks[:, :] = 1 - mask.long()
# Transforms to feed to the transformer.
current_transforms[:, :, :6] = transforms3D[:, :, :2, :3].reshape(
self._num_envs, self._max_thrusters, 6
)
current_transforms[:, :, 6:9] = transforms3D[:, :, 3, :3]
current_transforms[:, :, 9] = thrust_force
current_transforms = current_transforms * mask.view(mask.shape + (1,))
# Applies random permutations to the thrusters while keeping the non-used thrusters at the end of the sequence.
if self.rand_cfg.random_permutation:
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
selected_thrusters = torch.multinomial(
weights, num_samples=self._max_thrusters, replacement=False
)
mask = torch.gather(1 - mask, 1, selected_thrusters)
_, sorted_idx = mask.sort(1)
selected_thrusters = torch.gather(selected_thrusters, 1, sorted_idx)
transforms3D = torch.gather(
transforms3D,
1,
selected_thrusters.view(
self._num_envs, self._max_thrusters, 1, 1
).expand(self._num_envs, self._max_thrusters, 4, 4),
)
current_transforms = torch.gather(
current_transforms,
1,
selected_thrusters.view(self._num_envs, self._max_thrusters, 1).expand(
self._num_envs, self._max_thrusters, 10
),
)
action_masks = torch.gather(action_masks, 1, selected_thrusters)
thrust_force = torch.gather(thrust_force, 1, selected_thrusters)
# Updates the proper indices
self.thrust_force[env_ids] = thrust_force[env_ids]
self.action_masks[env_ids] = action_masks[env_ids]
self.current_transforms[env_ids] = current_transforms[env_ids]
self.transforms3D[env_ids] = transforms3D[env_ids]
def visualize(self, save_path: str = None):
"""
Visualizes the thruster configuration."""
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import get_test_data
import numpy as np
# Creates a list of color
cmap = cm.get_cmap("hsv")
colors = []
for i in range(self._max_thrusters):
colors.append(cmap(i / self._max_thrusters))
# Split into 1/4th of the envs, so that we can visualize all the configs in use_four_configuration mode.
env_ids = [
0,
1,
2,
3,
self._num_envs // 4,
self._num_envs // 4 + 1,
self._num_envs // 4 + 2,
self._num_envs // 4 + 3,
2 * self._num_envs // 4,
2 * self._num_envs // 4 + 1,
2 * self._num_envs // 4 + 2,
2 * self._num_envs // 4 + 3,
3 * self._num_envs // 4,
3 * self._num_envs // 4 + 1,
3 * self._num_envs // 4 + 2,
3 * self._num_envs // 4 + 3,
]
# Generates a thrust on all the thrusters
forces = torch.ones(
(self._num_envs, self._max_thrusters),
device=self._device,
dtype=torch.float32,
)
# Project
p, f = self.project_forces(forces)
# Reshape and get only the 2D values for plot.
p = p.reshape(self._num_envs, self._max_thrusters, 3)
f = f.reshape(self._num_envs, self._max_thrusters, 3)
p = np.array(p.cpu())
f = np.array(f.cpu())
def repeatForEach(elements, times):
return [e for e in elements for _ in range(times)]
def renderColorsForQuiver3d(colors):
colors = list(filter(lambda x: x != (0.0, 0.0, 0.0), colors))
return colors + repeatForEach(colors, 2)
fig = plt.figure()
fig.set_size_inches(20, 20)
for i in range(4):
for j in range(4):
idx = env_ids[i * 4 + j]
ax = fig.add_subplot(4, 4, i * 4 + (j + 1), projection="3d")
ax.quiver(
p[idx, :, 0],
p[idx, :, 1],
p[idx, :, 2],
f[idx, :, 0],
f[idx, :, 1],
f[idx, :, 2],
color=renderColorsForQuiver3d(colors),
length=0.2,
normalize=True,
)
ax.set_xlim([-0.4, 0.4])
ax.set_ylim([-0.4, 0.4])
ax.set_zlim([-0.4, 0.4])
fig.tight_layout()
fig.savefig(save_path, dpi=300)
plt.close()
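# Hypothetical usage sketch (not part of the original file). The nested config keys
# mirror what __init__ reads; the empty dicts rely on the dataclass defaults above.
# Note that construction also renders the thruster layout to disk because of the
# hard-coded `if True:` in __init__, so matplotlib must be available.
if __name__ == "__main__":
    cfg = {"core": {}, "randomization": {}, "configuration": {}}
    platform = VirtualPlatform(num_envs=16, platform_cfg=cfg, device="cpu")
    forces = torch.ones((16, platform._max_thrusters), device="cpu")
    positions, projected = platform.project_forces(forces)
    print(positions.shape, projected.shape)  # both (16 * 16, 3) with the defaults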
| 17,104 | Python | 35.784946 | 119 | 0.534846 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_go_to_pose.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
quat_to_mat,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
GoToPoseParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask as GoToPoseTask2D,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow3D import VisualArrow3D
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToPoseTask(GoToPoseTask2D, Core):
"""
Implements the GoToPose task. The robot has to reach a target position and heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToPose task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = GoToPoseParameters(**task_param)
self._reward_parameters = GoToPoseReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs, 3, 3), device=self._device, dtype=torch.float32
)
self._target_quat = torch.zeros(
(self._num_envs, 4), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# heading distance
self._heading_error = torch.bmm(
torch.transpose(current_state["orientation"], -2, -1), self._target_headings
)
# Encode task data
self._task_data[:, :3] = self._position_error
self._task_data[:, 3:] = self._heading_error[:, :2, :].reshape(
self._num_envs, 6
)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
            step (int, optional): The current training step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
trace = (
self._heading_error[:, 0, 0]
+ self._heading_error[:, 1, 1]
+ self._heading_error[:, 2, 2]
)
self.heading_dist = torch.arccos((trace - 1) / 2)
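        # Geodesic distance on SO(3): the error matrix R_err satisfies
        # trace(R_err) = 1 + 2 * cos(angle), so arccos((trace - 1) / 2) recovers the
        # rotation angle between the current and target orientations.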
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
position_goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.orientation_tolerance
).int()
goal_is_reached = position_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.position_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist, self.heading_dist
)
return self.position_reward + self.heading_reward - self.boundary_penalty
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions of the environments.
target_orientations (torch.Tensor): The target orientations of the environments.
step (int, optional): The current training step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 3), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
target_positions[env_ids, :3] += self._target_positions[env_ids]
# Randomize heading
uvw = torch.rand((num_goals, 3), device=self._device)
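        # Shoemake's method: three independent uniform samples mapped through the
        # square-root / trigonometric expressions below give quaternions distributed
        # uniformly over the rotation group.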
quat = torch.zeros((num_goals, 4), device=self._device)
quat[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(uvw[:, 2] * 2 * math.pi)
quat[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(uvw[:, 1] * 2 * math.pi)
quat[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(uvw[:, 1] * 2 * math.pi)
quat[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(uvw[:, 2] * 2 * math.pi)
target_orientations[env_ids] = quat
# cast quaternions to rotation matrix
self._target_quat[env_ids] = quat
self._target_headings[env_ids] = quat_to_mat(quat)
return target_positions, target_orientations
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates spawning positions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current training step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial positions, orientations, and velocities.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self._goal_reached[env_ids] = 0
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) * torch.sin(phi) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) * torch.sin(phi) + self._target_positions[env_ids, 1]
)
initial_position[:, 2] = r * torch.cos(phi) + self._target_positions[env_ids, 2]
# Randomizes the orientation of the platform
# We want to sample something that's not too far from the original orientation
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Projects the angular distance on a sphere in the RPY space
u = (
torch.rand(num_resets, device=self._device, dtype=torch.float32)
* math.pi
* 2
)
v = torch.rand(num_resets, device=self._device, dtype=torch.float32) * math.pi
roll = r * torch.cos(u) * torch.sin(v)
pitch = r * torch.sin(u) * torch.sin(v)
yaw = r * torch.cos(v)
# Cast the displacement in the Quaternion space
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
w0 = cr * cp * cy + sr * sp * sy
x0 = sr * cp * cy - cr * sp * sy
y0 = cr * sp * cy + sr * cp * sy
z0 = cr * cp * sy - sr * sp * cy
w1 = self._target_quat[env_ids, 0]
x1 = self._target_quat[env_ids, 1]
y1 = self._target_quat[env_ids, 2]
z1 = self._target_quat[env_ids, 3]
# Quaternion multiplication with the target orientation
initial_orientation[:, 0] = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1
initial_orientation[:, 1] = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1
initial_orientation[:, 2] = w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1
initial_orientation[:, 3] = w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1
initial_orientation /= torch.norm(
initial_orientation + EPS, dim=-1, keepdim=True
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return initial_position, initial_orientation, initial_velocity
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        An arrow is generated to represent the pose to be reached by the agent."""
color = torch.tensor([1, 0, 0])
body_radius = 0.025
body_length = 1.5
head_radius = 0.075
head_length = 0.5
VisualArrow3D(
prim_path=path + "/arrow",
translation=position,
name="target_0",
body_radius=body_radius,
body_length=body_length,
head_radius=head_radius,
head_length=head_length,
color=color,
)
def add_visual_marker_to_scene(self, scene):
"""
Adds the visual marker to the scene."""
arrows = XFormPrimView(prim_paths_expr="/World/envs/.*/arrow")
scene.add(arrows)
return scene, arrows
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocity.cpu().numpy()
angular_velocities = angular_velocity.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
| 16,881 | Python | 36.683036 | 113 | 0.590072 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_to_pose.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoToPoseParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow import VisualArrow
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToPoseTask(Core):
"""
Implements the GoToPose task. The robot has to reach a target position and heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToPose task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoToPoseTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoToPoseParameters(**task_param)
self._reward_parameters = GoToPoseReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "position_reward" in stats.keys():
stats["position_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
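        # arctan2(sin(dtheta), cos(dtheta)) wraps the heading difference into
        # [-pi, pi], so the error is always the shortest signed angular distance.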
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
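        # boundary_dist is the remaining margin to the kill boundary (kill_dist);
        # the penalty term below discourages the platform from drifting toward it.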
# Checks if the goal is reached
position_goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
).int()
goal_is_reached = position_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.position_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist, self.heading_dist
)
return self.position_reward + self.heading_reward - self.boundary_penalty
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["position_reward"] += self.position_reward
stats["heading_reward"] += self.heading_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, :2] += self._target_positions[env_ids]
p[:, 2] = 2
# Randomize heading
self._target_headings[env_ids] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
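        # Build the goal orientation as a pure yaw rotation; with the (w, x, y, z)
        # quaternion convention used here this is (cos(theta/2), 0, 0, sin(theta/2)).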
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
q[:, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 1]
)
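        # The platform is spawned on a circle of radius r centred on the target
        # position, with r drawn from the spawn-position curriculum sampler above.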
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = (
self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
+ self._target_headings[env_ids]
)
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
An arrow is generated to represent the 3DoF pose to be reached by the agent.
Args:
            path (str): The path where the arrow is to be generated.
position (torch.Tensor): The position of the arrow.
"""
color = torch.tensor([1, 0, 0])
body_radius = 0.1
body_length = 0.5
head_radius = 0.2
head_length = 0.5
poll_radius = 0.025
poll_length = 2
VisualArrow(
prim_path=path + "/arrow",
translation=position,
name="target_0",
body_radius=body_radius,
body_length=body_length,
poll_radius=poll_radius,
poll_length=poll_length,
head_radius=head_radius,
head_length=head_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
arrows = XFormPrimView(prim_paths_expr="/World/envs/.*/arrow")
scene.add(arrows)
return scene, arrows
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
return {}
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 17,739 | Python | 33.181117 | 93 | 0.583517 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_track_6DoF_velocity.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
Track6DoFVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
Track6DoFVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xyo_velocity import (
TrackXYOVelocityTask as TrackXYOVelocityTask2D,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYOVelocityTask(TrackXYOVelocityTask2D, Core):
"""
    Implements the Track6DoFVelocity task. The robot has to track a target linear
    and angular velocity.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
) -> None:
"""
        Initializes the Track6DoFVelocity task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = Track6DoFVelocityParameters(**task_param)
self._reward_parameters = Track6DoFVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._target_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.target_angular_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_linear_velocities = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._target_angular_velocities = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 3
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._linear_velocity_error = (
self._target_linear_velocities - current_state["linear_velocity"]
)
self._angular_velocity_error = (
self._target_angular_velocities - current_state["angular_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :3] = self._linear_velocity_error
self._task_data[:, 3:6] = self._angular_velocity_error
return self.update_observation_tensor(current_state)
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.sqrt(
torch.square(self._linear_velocity_error).sum(-1)
)
self.angular_velocity_dist = torch.sqrt(
torch.square(self._angular_velocity_error).sum(-1)
)
# Checks if the goal is reached
lin_goal_is_reached = (
self.linear_velocity_dist < self._task_parameters.lin_vel_tolerance
).int()
ang_goal_is_reached = (
self.angular_velocity_dist < self._task_parameters.ang_vel_tolerance
).int()
goal_is_reached = lin_goal_is_reached * ang_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# Rewards
(
self.linear_velocity_reward,
self.angular_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
self.linear_velocity_dist,
self.angular_velocity_dist,
)
return self.linear_velocity_reward + self.angular_velocity_reward
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions.
target_orientations (torch.Tensor): The target orientations.
step (int, optional): The current step. Defaults to 0.
Returns:
list: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
phi = torch.rand((num_goals,), device=self._device) * math.pi
self._target_linear_velocities[env_ids, 0] = (
r * torch.cos(theta) * torch.sin(phi)
)
self._target_linear_velocities[env_ids, 1] = (
r * torch.sin(theta) * torch.sin(phi)
)
self._target_linear_velocities[env_ids, 2] = r * torch.cos(phi)
# Randomizes the target angular velocity
r = self._target_angular_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
phi = torch.rand((num_goals,), device=self._device) * math.pi
self._target_angular_velocities[env_ids, 0] = (
r * torch.cos(theta) * torch.sin(phi)
)
self._target_angular_velocities[env_ids, 1] = (
r * torch.sin(theta) * torch.sin(phi)
)
self._target_angular_velocities[env_ids, 2] = r * torch.cos(phi)
        # The returned pose is unused by this task; the inputs are passed through unchanged.
return target_positions, target_orientations
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
uvw = torch.rand((num_resets, 3), device=self._device)
initial_orientation[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(
uvw[:, 2] * 2 * math.pi
)
initial_orientation[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(
uvw[:, 2] * 2 * math.pi
)
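        # The uvw construction above samples uniformly distributed unit quaternions
        # (Shoemake's method), giving an unbiased random initial attitude.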
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return (
initial_position,
initial_orientation,
initial_velocity,
)
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the target angular velocity
d = self._target_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
d = d.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(r, bins=32)
ax[0].set_title("Target normed linear velocity")
ax[0].set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(d, bins=32)
ax[1].set_title("Target normed angular velocity")
ax[1].set_xlim(
self._target_angular_velocity_sampler.get_min_bound(),
self._target_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
| 14,089 | Python | 34.943877 | 87 | 0.595003 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_factory.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_go_to_xyz import GoToXYZTask
from omniisaacgymenvs.tasks.MFP.MFP3D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_track_xyz_velocity import (
TrackXYZVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_track_6DoF_velocity import (
TrackXYOVelocityTask,
)
class TaskFactory:
"""
Factory class to create tasks."""
def __init__(self):
self.creators = {}
def register(self, name: str, task):
"""
Registers a new task."""
self.creators[name] = task
def get(
self, task_dict: dict, reward_dict: dict, num_envs: int, device: str
) -> object:
"""
Returns a task."""
assert (
task_dict["name"] == reward_dict["name"]
), "The mode of both the task and the reward must match."
mode = task_dict["name"]
assert task_dict["name"] in self.creators.keys(), "Unknown task mode."
return self.creators[mode](task_dict, reward_dict, num_envs, device)
task_factory = TaskFactory()
task_factory.register("GoToXYZ", GoToXYZTask)
task_factory.register("GoToPose", GoToPoseTask)
task_factory.register("TrackXYZVelocity", TrackXYZVelocityTask)
task_factory.register("Track6DoFVelocity", TrackXYOVelocityTask)
# task_factory.register("TrackXYVelocityHeading", TrackXYVelocityHeadingTask)
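# Minimal usage sketch (illustrative only): the task and reward dictionaries normally
# come from the Hydra task configuration, and the keys shown here are assumptions.
#
#   task_cfg = {"name": "GoToPose", ...}    # parsed into the matching *Parameters dataclass
#   reward_cfg = {"name": "GoToPose", ...}  # parsed into the matching *Reward dataclass
#   task = task_factory.get(task_cfg, reward_cfg, num_envs=1024, device="cuda")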
| 1,673 | Python | 29.999999 | 82 | 0.673042 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_go_to_xyz.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
GoToXYZReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
GoToXYZParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_xy import (
GoToXYTask as GoToXYTask2D,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin3D import VisualPin3D
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToXYZTask(GoToXYTask2D, Core):
"""
    Implements the GoToXYZ task. The robot has to reach a target position in 3D."""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToXYZ task.
Args:
task_param (dict): Dictionary containing the task parameters.
reward_param (dict): Dictionary containing the reward parameters.
num_envs (int): Number of environments.
device (str): Device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = GoToXYZParameters(**task_param)
self._reward_parameters = GoToXYZReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._position_error = self._target_positions - current_state["position"]
self._task_data[:, :3] = self._position_error
return self.update_observation_tensor(current_state)
def get_goals(
self,
env_ids: torch.Tensor,
targets_position: torch.Tensor,
targets_orientation: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
            targets_position (torch.Tensor): The target positions.
            targets_orientation (torch.Tensor): The target orientations.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
self._target_positions[env_ids] = (
torch.rand((num_goals, 3), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
targets_position[env_ids, :3] += self._target_positions[env_ids]
return targets_position, targets_orientation
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates spawning positions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self._goal_reached[env_ids] = 0
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_position[:, 0] = r * torch.cos(theta) * torch.sin(phi)
initial_position[:, 1] = r * torch.sin(theta) * torch.sin(phi)
        initial_position[:, 2] = r * torch.cos(phi)
# Randomizes the orientation of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
uvw = torch.rand((num_resets, 3), device=self._device)
initial_orientation[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(
uvw[:, 2] * 2 * math.pi
)
initial_orientation[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(
uvw[:, 2] * 2 * math.pi
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return initial_position, initial_orientation, initial_velocity
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
A pin is generated to represent the 3D position to be reached by the agent."""
color = torch.tensor([1, 0, 0])
ball_radius = 0.05
poll_radius = 0.025
poll_length = 2
VisualPin3D(
prim_path=path + "/pin",
translation=position,
name="target_0",
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Spawn radius")
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/spawn_position"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
        ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
        ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
| 10,907 | Python | 35.481605 | 93 | 0.602182 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_task_rewards.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import torch
from dataclasses import dataclass
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoThroughXYReward:
"""
Reward function and parameters for the GoThroughXY task."""
name: str = "GoThroughXY"
heading_reward_mode: str = "linear"
velocity_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
velocity_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
velocity_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.velocity_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
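        # dt is scaled by the action repeat so that dividing the progress made towards
        # the target by dt expresses it per simulated second rather than per policy step.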
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
velocity_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughXY task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
if self.velocity_reward_mode.lower() == "linear":
            velocity_reward = 1.0 / (1.0 + velocity_error) * self.velocity_scale
elif self.velocity_reward_mode.lower() == "square":
            velocity_reward = (
                1.0 / (1.0 + velocity_error * velocity_error) * self.velocity_scale
            )
elif self.velocity_reward_mode.lower() == "exponential":
velocity_reward = (
torch.exp(-velocity_error / self.velocity_exponential_reward_coeff)
* self.velocity_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward, velocity_reward
@dataclass
class GoThroughXYSequenceReward:
"""
Reward function and parameters for the GoThroughXYSequence task."""
name: str = "GoThroughXYSequence"
heading_reward_mode: str = "linear"
velocity_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
velocity_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
velocity_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.velocity_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
velocity_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughXYSequence task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
if self.velocity_reward_mode.lower() == "linear":
            velocity_reward = 1.0 / (1.0 + velocity_error) * self.velocity_scale
elif self.velocity_reward_mode.lower() == "square":
            velocity_reward = (
                1.0 / (1.0 + velocity_error * velocity_error) * self.velocity_scale
            )
elif self.velocity_reward_mode.lower() == "exponential":
velocity_reward = (
torch.exp(-velocity_error / self.velocity_exponential_reward_coeff)
* self.velocity_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward, velocity_reward
@dataclass
class GoThroughPoseReward:
"""
    Reward function and parameters for the GoThroughPose task."""
name: str = "GoThroughPose"
heading_reward_mode: str = "linear"
velocity_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
velocity_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
velocity_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.velocity_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
velocity_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughPose task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
if self.velocity_reward_mode.lower() == "linear":
            velocity_reward = 1.0 / (1.0 + velocity_error) * self.velocity_scale
elif self.velocity_reward_mode.lower() == "square":
            velocity_reward = (
                1.0 / (1.0 + velocity_error * velocity_error) * self.velocity_scale
            )
elif self.velocity_reward_mode.lower() == "exponential":
velocity_reward = (
torch.exp(-velocity_error / self.velocity_exponential_reward_coeff)
* self.velocity_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward, velocity_reward
@dataclass
class GoThroughPoseSequenceReward:
"""
    Reward function and parameters for the GoThroughPoseSequence task."""
name: str = "GoThroughPoseSequence"
heading_reward_mode: str = "linear"
velocity_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
velocity_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
velocity_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.velocity_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
velocity_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughPoseSequence task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
if self.velocity_reward_mode.lower() == "linear":
            velocity_reward = 1.0 / (1.0 + velocity_error) * self.velocity_scale
elif self.velocity_reward_mode.lower() == "square":
            velocity_reward = (
                1.0 / (1.0 + velocity_error * velocity_error) * self.velocity_scale
            )
elif self.velocity_reward_mode.lower() == "exponential":
velocity_reward = (
torch.exp(-velocity_error / self.velocity_exponential_reward_coeff)
* self.velocity_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward, velocity_reward
@dataclass
class GoThroughGateReward:
"""
    Reward function and parameters for the GoThroughGate task."""
name: str = "GoThroughGate"
heading_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
reverse_penalty: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughGate task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward
@dataclass
class GoThroughGateSequenceReward:
"""
    Reward function and parameters for the GoThroughGateSequence task."""
name: str = "GoThroughGate"
heading_reward_mode: str = "linear"
heading_exponential_reward_coeff: float = 0.25
time_penalty: float = 0.0
terminal_reward: float = 0.0
reverse_penalty: float = 0.0
dt: float = 0.02
action_repeat: int = 10
position_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
self.dt = self.dt * self.action_repeat
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_progress: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the GoThroughGateSequence task."""
position_reward = self.position_scale * position_progress / self.dt
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward
@dataclass
class GoToXYReward:
""" "
Reward function and parameters for the GoToXY task."""
name: str = "GoToXY"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
position_error: torch.Tensor,
) -> torch.Tensor:
"""
Defines the function used to compute the reward for the GoToXY task."""
if self.reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error)
elif self.reward_mode.lower() == "square":
position_reward = 1.0 / (1.0 + position_error * position_error)
elif self.reward_mode.lower() == "exponential":
position_reward = torch.exp(-position_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return position_reward
@dataclass
class GoToPoseReward:
"""
Reward function and parameters for the GoToPose task."""
name: str = "GoToPose"
position_reward_mode: str = "linear"
heading_reward_mode: str = "linear"
position_exponential_reward_coeff: float = 0.25
heading_exponential_reward_coeff: float = 0.25
position_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.position_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_error: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
Defines the function used to compute the reward for the GoToPose task."""
if self.position_reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error) * self.position_scale
elif self.position_reward_mode.lower() == "square":
            position_reward = (
                1.0 / (1.0 + position_error * position_error) * self.position_scale
            )
elif self.position_reward_mode.lower() == "exponential":
position_reward = (
torch.exp(-position_error / self.position_exponential_reward_coeff)
* self.position_scale
)
else:
raise ValueError("Unknown reward type.")
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward
@dataclass
class TrackXYVelocityReward:
"""
Reward function and parameters for the TrackXYVelocity task."""
name: str = "TrackXYVelocity"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
velocity_error: torch.Tensor,
) -> None:
"""
Defines the function used to compute the reward for the TrackXYVelocity task."""
if self.reward_mode.lower() == "linear":
velocity_reward = 1.0 / (1.0 + velocity_error)
elif self.reward_mode.lower() == "square":
velocity_reward = 1.0 / (1.0 + velocity_error * velocity_error)
elif self.reward_mode.lower() == "exponential":
velocity_reward = torch.exp(-velocity_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return velocity_reward
@dataclass
class TrackXYOVelocityReward:
"""
Reward function and parameters for the TrackXYOVelocity task.
"""
name: str = "TrackXYOVelocity"
linear_reward_mode: str = "linear"
angular_reward_mode: str = "linear"
linear_exponential_reward_coeff: float = 0.25
angular_exponential_reward_coeff: float = 0.25
linear_scale: float = 1.0
angular_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.linear_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.angular_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
linear_velocity_error: torch.Tensor,
angular_velocity_error: torch.Tensor,
) -> None:
"""
Defines the function used to compute the reward for the TrackXYOVelocity task.
"""
if self.linear_reward_mode.lower() == "linear":
linear_reward = 1.0 / (1.0 + linear_velocity_error) * self.linear_scale
elif self.linear_reward_mode.lower() == "square":
            linear_reward = (
                1.0 / (1.0 + linear_velocity_error * linear_velocity_error)
                * self.linear_scale
            )
elif self.linear_reward_mode.lower() == "exponential":
linear_reward = (
torch.exp(-linear_velocity_error / self.linear_exponential_reward_coeff)
* self.linear_scale
)
else:
raise ValueError("Unknown reward type.")
if self.angular_reward_mode.lower() == "linear":
angular_reward = 1.0 / (1.0 + angular_velocity_error) * self.angular_scale
elif self.angular_reward_mode.lower() == "square":
            angular_reward = (
                1.0 / (1.0 + angular_velocity_error * angular_velocity_error)
                * self.angular_scale
            )
elif self.angular_reward_mode.lower() == "exponential":
angular_reward = (
torch.exp(
-angular_velocity_error / self.angular_exponential_reward_coeff
)
* self.angular_scale
)
else:
raise ValueError("Unknown reward type.")
return linear_reward, angular_reward
@dataclass
class TrackXYVelocityHeadingReward:
"""
    Reward function and parameters for the TrackXYVelocityHeading task."""
name: str = "TrackXYVelocityHeading"
velocity_reward_mode: str = "linear"
heading_reward_mode: str = "linear"
velocity_exponential_reward_coeff: float = 0.25
heading_exponential_reward_coeff: float = 0.25
velocity_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.velocity_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
velocity_error: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
        Defines the function used to compute the reward for the TrackXYVelocityHeading task."""
if self.velocity_reward_mode.lower() == "linear":
velocity_reward = 1.0 / (1.0 + velocity_error) * self.velocity_scale
elif self.velocity_reward_mode.lower() == "square":
            velocity_reward = (
                1.0 / (1.0 + velocity_error * velocity_error) * self.velocity_scale
            )
elif self.velocity_reward_mode.lower() == "exponential":
velocity_reward = (
torch.exp(-velocity_error / self.velocity_exponential_reward_coeff)
* self.velocity_scale
)
else:
raise ValueError("Unknown reward type.")
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return velocity_reward, heading_reward
| 24,663 | Python | 34.284692 | 88 | 0.591534 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_xy.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughXYReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughXYParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin import VisualPin
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughXYTask(Core):
"""
    Implements the GoThroughXY task. The robot has to reach a point in the 2D plane
    at a given velocity while looking at the target. Unlike the GoToXY task,
the robot has to go through the target point and keep moving.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the GoThroughXY task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughXYTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughXYParameters(**task_param)
self._reward_parameters = GoThroughXYReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._target_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._delta_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# linear velocity error (normed velocity)
self.linear_velocity_err = self._target_velocities - torch.norm(
current_state["linear_velocity"], dim=-1
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
# Compute target heading as the angle required to be looking at the target
self._target_headings = torch.arctan2(
self._position_error[:, 1], self._position_error[:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = self.linear_velocity_err
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.abs(self.linear_velocity_err)
position_progress = (
self._previous_position_dist - self.position_dist
) / torch.abs(self._target_velocities)
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# boundary penalty
self.heading_dist = torch.abs(self._heading_error)
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
self._goal_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
self.linear_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
self.linear_velocity_dist,
)
self._previous_position_dist = self.position_dist.clone()
return (
self.progress_reward
+ self.heading_reward
+ self.linear_velocity_reward
- self.boundary_penalty
- self._reward_parameters.time_penalty
+ self._reward_parameters.terminal_reward * self._goal_reached
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._goal_reached > 0, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, :2] += self._target_positions[env_ids]
p[:, 2] = 2
# Randomize heading
self._delta_headings[env_ids] = self._spawn_heading_sampler.sample(
num_goals, step, device=self._device
)
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
self._target_velocities[env_ids] = r
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
target_position_local = (
self._target_positions[env_ids, :2] - initial_position[:, :2]
)
target_heading = torch.arctan2(
target_position_local[:, 1], target_position_local[:, 0]
)
theta = target_heading + self._delta_headings[env_ids]
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        A pin is generated to represent the 2D target point to be reached by the agent.
Args:
path (str): The path where the pin is to be generated.
            position (torch.Tensor): The position of the pin.
"""
color = torch.tensor([1, 0, 0])
ball_radius = 0.2
poll_radius = 0.025
poll_length = 2
VisualPin(
prim_path=path + "/pin",
translation=position,
name="target_0",
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/pin")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
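# Hedged sketch (not part of the original task, helper name and values are illustrative):
# the progress term in compute_reward divides the per-step reduction in distance by the
# commanded speed, so a platform closing distance at its target velocity earns a similar
# progress reward whether the commanded speed is slow or fast.
def _demo_velocity_normalised_progress() -> torch.Tensor:
    previous_dist = torch.tensor([5.00, 5.00])
    current_dist = torch.tensor([4.98, 4.90])  # slow and fast platforms after one step
    target_speed = torch.tensor([0.2, 1.0])
    # Both entries come out to roughly 0.1, mirroring the normalisation used above.
    return (previous_dist - current_dist) / torch.abs(target_speed)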
| 20,544 | Python | 34.179794 | 93 | 0.587422 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_core.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import torch
from dataclasses import dataclass
from omniisaacgymenvs.tasks.MFP.MFP2D_core import Core as Core2D
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
def quat_to_mat(quat: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of quaternions to a batch of rotation matrices.
Args:
quat (torch.Tensor): Batch of quaternions.
Returns:
torch.Tensor: The batch of rotation matrices.
"""
w, x, y, z = torch.unbind(quat, -1)
two_s = 2.0 / ((quat * quat).sum(-1) + EPS)
R = torch.stack(
(
1 - two_s * (y * y + z * z),
two_s * (x * y - z * w),
two_s * (x * z + y * w),
two_s * (x * y + z * w),
1 - two_s * (x * x + z * z),
two_s * (y * z - x * w),
two_s * (x * z - y * w),
two_s * (y * z + x * w),
1 - two_s * (x * x + y * y),
),
-1,
)
return R.reshape(quat.shape[:-1] + (3, 3))
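# Hedged usage sketch (not part of the original module, helper name is illustrative):
# the identity quaternion [w, x, y, z] = [1, 0, 0, 0] maps to (approximately) the 3x3
# identity matrix, up to the EPS regularisation above.
def _demo_quat_to_mat_identity() -> torch.Tensor:
    q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    return quat_to_mat(q)  # shape (1, 3, 3), close to torch.eye(3)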
def mat_to_quat(mat: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of rotation matrices to a batch of quaternions.
Args:
mat (torch.Tensor): Batch of rotation matrices.
Returns:
        torch.Tensor: The batch of quaternions, in [w, x, y, z] order.
"""
quat = torch.zeros((mat.shape[0], 4), dtype=mat.dtype, device=mat.device)
t = mat[..., 0, 0] + mat[..., 1, 1] + mat[..., 2, 2]
r = torch.sqrt(1 + t) + EPS
s = 0.5 / r
quat[:, 0] = 0.5 * r
    # The off-diagonal differences scaled by s give the vector part of the quaternion.
    quat[:, 1] = (mat[..., 2, 1] - mat[..., 1, 2]) * s
    quat[:, 2] = (mat[..., 0, 2] - mat[..., 2, 0]) * s
    quat[:, 3] = (mat[..., 1, 0] - mat[..., 0, 1]) * s
return quat
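# Hedged round-trip sketch (not part of the original module): a small rotation about Z,
# converted to a matrix and back, should approximately recover the original quaternion.
def _demo_quat_mat_round_trip() -> torch.Tensor:
    half_angle = torch.tensor([0.1])
    q = torch.stack(
        [torch.cos(half_angle), torch.zeros(1), torch.zeros(1), torch.sin(half_angle)],
        dim=-1,
    )  # [w, x, y, z] for a yaw of 0.2 rad
    return mat_to_quat(quat_to_mat(q))  # approximately equal to q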
def axis_angle_rotation(angle: torch.Tensor, axis: str) -> torch.Tensor:
"""
Returns the rotation matrix for a given angle and axis.
Args:
angle (torch.Tensor): The angle of rotation.
axis (str): The axis of rotation.
Returns:
torch.Tensor: The rotation matrix.
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
elif axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
elif axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
else:
raise ValueError("letter must be either X, Y or Z.")
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
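# Hedged sketch (illustrative only, not used by the module): a 90 degree rotation about
# Z maps the x-axis onto the y-axis.
def _demo_axis_angle_rotation() -> torch.Tensor:
    angle = torch.tensor([1.5707963267948966])  # pi / 2
    Rz = axis_angle_rotation(angle, "Z")  # shape (1, 3, 3)
    x_axis = torch.tensor([[1.0, 0.0, 0.0]])
    return torch.matmul(Rz, x_axis.unsqueeze(-1)).squeeze(-1)  # approximately [[0, 1, 0]]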
def euler_angles_to_quat(euler_angles: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of euler angles to a batch of quaternions.
Args:
euler_angles (torch.Tensor): Batch of euler angles.
convention (str): The convention to use for the conversion.
Returns:
torch.Tensor: The batch of quaternions.
"""
roll, pitch, yaw = torch.unbind(euler_angles, -1)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
quat = torch.zeros(
(euler_angles.shape[0], 4), dtype=euler_angles.dtype, device=euler_angles.device
)
quat[:, 0] = cr * cp * cy + sr * sp * sy
quat[:, 1] = sr * cp * cy - cr * sp * sy
quat[:, 2] = cr * sp * cy + sr * cp * sy
quat[:, 3] = cr * cp * sy - sr * sp * cy
return quat
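# Hedged sketch (illustrative only): a pure yaw rotation yields the quaternion
# [cos(yaw / 2), 0, 0, sin(yaw / 2)] under the [w, x, y, z] convention used above.
def _demo_euler_to_quat_yaw_only() -> torch.Tensor:
    euler = torch.tensor([[0.0, 0.0, 1.0]])  # roll, pitch, yaw in radians
    return euler_angles_to_quat(euler)  # approximately [[cos(0.5), 0, 0, sin(0.5)]]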
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
"""
Converts a batch of euler angles to a batch of rotation matrices.
Args:
euler_angles (torch.Tensor): Batch of euler angles.
convention (str): The convention to use for the conversion.
Returns:
torch.Tensor: The batch of rotation matrices.
"""
matrices = [
axis_angle_rotation(e, c)
for c, e in zip(convention, torch.unbind(euler_angles, -1))
]
return torch.matmul(torch.matmul(matrices[2], matrices[1]), matrices[0])
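# Hedged consistency sketch (illustrative only): for a pure yaw rotation, the matrix built
# directly with euler_angles_to_matrix (convention "XYZ") should match the one obtained by
# going through euler_angles_to_quat and quat_to_mat.
def _demo_euler_matrix_consistency() -> torch.Tensor:
    euler = torch.tensor([[0.0, 0.0, 0.3]])
    R_direct = euler_angles_to_matrix(euler, "XYZ")
    R_via_quat = quat_to_mat(euler_angles_to_quat(euler))
    return (R_direct - R_via_quat).abs().max()  # close to zero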
class Core(Core2D):
"""
The base class that implements the core of the task.
"""
def __init__(self, num_envs: int, device: str) -> None:
"""
Initializes the core of the task.
Args:
num_envs (int): Number of environments.
device (str): Device to run the code on.
"""
self._num_envs = num_envs
self._device = device
# Dimensions of the observation tensors
        self._dim_orientation = 6  # first two rows of the rotation matrix, flattened [0:6]
        self._dim_velocity = 3  # linear velocity in the world frame (x_dot, y_dot, z_dot) [6:9]
        self._dim_omega = 3  # angular velocity (wx, wy, wz) [9:12]
        self._dim_task_label = 1  # label of the task to be executed (int) [12]
        self._dim_task_data = 9  # data used to fulfill the task (floats) [13:22]
# Observation buffers
self._num_observations = 22
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, 9), device=self._device, dtype=torch.float32
)
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:6] = current_state["orientation"][:, :2, :].reshape(
self._num_envs, 6
)
self._obs_buffer[:, 6:9] = current_state["linear_velocity"]
self._obs_buffer[:, 9:12] = current_state["angular_velocity"]
self._obs_buffer[:, 12] = self._task_label
self._obs_buffer[:, 13:] = self._task_data
return self._obs_buffer
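# Hedged sketch (illustrative only, not used by the module): the 22-dimensional observation
# layout written by Core.update_observation_tensor, expressed as index slices.
OBS_LAYOUT_SKETCH = {
    "orientation_6d": slice(0, 6),  # first two rows of the rotation matrix, flattened
    "linear_velocity": slice(6, 9),
    "angular_velocity": slice(9, 12),
    "task_label": slice(12, 13),
    "task_data": slice(13, 22),
}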
class TaskDict:
"""
    A class that stores the mapping from task names to integer task labels.
"""
def __init__(self) -> None:
self.gotoxy = 0
self.gotopose = 1
self.trackxyvel = 2
self.trackxyovel = 3
self.trackxyvelheading = 4
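# Hedged usage sketch (illustrative only): looking up the integer label of a task.
def _demo_task_label_lookup() -> int:
    return TaskDict().trackxyvel  # -> 2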
| 6,690 | Python | 29.004484 | 93 | 0.554858 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/__init__.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
| 293 | Python | 28.399997 | 82 | 0.648464 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_gate_seq.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughGateSequenceReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughGateSequenceParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.gate import FixedGate
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import colorsys
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughGateSequenceTask(Core):
"""
    Implements the GoThroughGateSequence task. The robot has to pass through a sequence of
    gates in the 2D plane, and it must do so while looking at the current target gate. Unlike
    the GoThroughGate task, the robot has to traverse several gates in order.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the GoThroughGateSequence task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughGateSequenceTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughGateSequenceParameters(**task_param)
self._reward_parameters = GoThroughGateSequenceReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
self._spawn_gate_delta_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_gate_heading_curriculum
)
self._spawn_gate_delta_position_sampler = CurriculumSampler(
self._task_parameters.spawn_gate_position_curriculum
)
# Buffers
self._all = torch.arange(self._num_envs, device=self._device)
self._trajectory_completed = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._is_in_reverse = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, self._task_parameters.num_points, 2),
device=self._device,
dtype=torch.float32,
)
self._target_headings = torch.zeros(
(self._num_envs, self._task_parameters.num_points),
device=self._device,
dtype=torch.float32,
)
self._target_index = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.long
)
self._R = torch.zeros(
(self._num_envs, self._task_parameters.num_points, 2, 2),
device=self._device,
dtype=torch.float32,
)
self._previous_is_after_gate = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._previous_is_before_gate = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 5
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.contact_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.contact_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = (
current_state["position"]
- self._target_positions[self._all, self._target_index]
).squeeze()
# Compute target heading as the angle required to be looking at the target
look_at_target_headings = torch.arctan2(
-self._position_error[:, 1], -self._position_error[:, 0]
)
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(look_at_target_headings - heading),
torch.cos(look_at_target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = torch.cos(
self._target_headings[self._all, self._target_index]
)
self._task_data[:, 5] = torch.sin(
self._target_headings[self._all, self._target_index]
)
# position of the other points in the sequence
for i in range(self._task_parameters.num_points - 1):
overflowing = (
self._target_index + i + 1 >= self._task_parameters.num_points
).int()
indices = self._target_index + (i + 1) * (1 - overflowing)
self._task_data[:, 6 + 4 * i : 6 + 4 * i + 2] = (
self._target_positions[self._all, indices] - current_state["position"]
) * (1 - overflowing).view(-1, 1)
heading_error = torch.arctan2(
torch.sin(
self._target_headings[self._all, indices]
- self._target_headings[self._all, indices - 1]
),
torch.cos(
self._target_headings[self._all, indices]
- self._target_headings[self._all, indices - 1]
),
)
self._task_data[:, 6 + 4 * i + 2] = torch.cos(heading_error) * (
1 - overflowing
)
            self._task_data[:, 6 + 4 * i + 3] = torch.sin(heading_error) * (
                1 - overflowing
            )
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
position_progress = self._previous_position_dist - self.position_dist
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# Heading
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# contact penalty
contact_penalty, self._contact_kills = (
self._task_parameters.contact_penalty.compute_penalty(
current_state["net_contact_forces"], step
)
)
# Project the position error into the gate frame
pos_proj = torch.matmul(
self._R[self._all, self._target_index], self._position_error.unsqueeze(-1)
).squeeze(-1)
is_after_gate = torch.logical_and(
torch.logical_and(
pos_proj[:, 0] >= 0,
pos_proj[:, 0] < self._task_parameters.gate_width / 4,
),
torch.abs(pos_proj[:, 1]) < self._task_parameters.gate_width / 2,
)
is_before_gate = torch.logical_and(
torch.logical_and(
pos_proj[:, 0] < 0,
pos_proj[:, 0] > -self._task_parameters.gate_width / 4,
),
torch.abs(pos_proj[:, 1]) < self._task_parameters.gate_width / 2,
)
# Checks if the goal is reached
goal_reached = torch.logical_and(
is_after_gate, self._previous_is_before_gate
).int()
reached_ids = goal_reached.nonzero(as_tuple=False).squeeze(-1)
# if the goal is reached, the target index is updated
self._target_index = self._target_index + goal_reached
self._trajectory_completed = (
self._target_index >= self._task_parameters.num_points
).int()
# Check if the robot goes through the gate in the wrong direction
self._is_in_reverse = torch.logical_and(
self._previous_is_after_gate, is_before_gate
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
)
self._previous_position_dist = self.position_dist.clone()
self._previous_is_after_gate = is_after_gate.clone()
self._previous_is_before_gate = is_before_gate.clone()
# If goal is reached make next progress null
self._previous_position_dist[reached_ids] = 0
self._previous_is_after_gate[reached_ids] = 0
self._previous_is_before_gate[reached_ids] = 0
return (
self.progress_reward
+ self.heading_reward
- self.boundary_penalty
- contact_penalty
- self._reward_parameters.time_penalty
+ goal_reached * self._reward_parameters.terminal_reward
+ self._trajectory_completed * self._reward_parameters.terminal_reward
- self._reward_parameters.reverse_penalty * self._is_in_reverse
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._trajectory_completed, dtype=torch.long)
ones = torch.ones_like(self._trajectory_completed, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._trajectory_completed > 0, ones, die)
die = torch.where(self._is_in_reverse > 0, ones, die)
die = torch.where(self._contact_kills, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
stats = self._task_parameters.contact_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._trajectory_completed[env_ids] = 0
self._target_index[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
for i in range(self._task_parameters.num_points):
# Initial position is random
if i == 0:
self._target_positions[env_ids, i] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
self._target_headings[env_ids, i] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
# The other positions are generated from the previous one
else:
# Randomizes the position of the gate
# Distance from previous gate
r = self._spawn_position_sampler.sample(
num_goals, step, device=self._device
)
# Deviation from previous gate's heading
delta_theta_position = self._spawn_gate_delta_position_sampler.sample(
num_goals, step, device=self._device
)
theta = delta_theta_position + self._target_headings[env_ids, i - 1]
point = torch.zeros((num_goals, 2), device=self._device)
point[:, 0] = r * torch.cos(theta)
point[:, 1] = r * torch.sin(theta)
self._target_positions[env_ids, i] = (
self._target_positions[env_ids, i - 1] + point
)
# Randomizes the heading of the new gate with respect to the previous one
delta_theta_heading = self._spawn_gate_delta_heading_sampler.sample(
num_goals, step, device=self._device
)
self._target_headings[env_ids, i] = (
self._target_headings[env_ids, i - 1] + delta_theta_heading
)
# Compute the rotation matrix of the gate
self._R[env_ids, i, 0, 0] = torch.cos(self._target_headings[env_ids, i])
self._R[env_ids, i, 0, 1] = torch.sin(self._target_headings[env_ids, i])
self._R[env_ids, i, 1, 0] = -torch.sin(self._target_headings[env_ids, i])
self._R[env_ids, i, 1, 1] = torch.cos(self._target_headings[env_ids, i])
# Creates tensors to save position and orientation
p = torch.zeros(
(num_goals, self._task_parameters.num_points, 3), device=self._device
)
q = torch.zeros(
(num_goals, self._task_parameters.num_points, 4),
device=self._device,
dtype=torch.float32,
)
q[:, :, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
q[:, :, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
p[:, :, :2] = self._target_positions[env_ids]
p[:, :, 2] = 0.5
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = (
self._spawn_gate_delta_position_sampler.sample(
num_resets, step, device=self._device
)
+ self._target_headings[env_ids, 0]
+ math.pi
)
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 0, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
target_position_local = (
self._target_positions[env_ids, 0, :2] - initial_position[:, :2]
)
target_heading = torch.arctan2(
target_position_local[:, 1], target_position_local[:, 0]
)
theta = target_heading + self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        A fixed gate is generated for each point in the sequence to represent the crossings
        to be traversed by the agent.
        Args:
            path (str): The path where the gates are to be generated.
            position (torch.Tensor): The position of the gates.
"""
for i in range(self._task_parameters.num_points):
FixedGate(
prim_path=path + "/gate_" + str(i),
translation=position,
name="target_" + str(i),
gate_width=self._task_parameters.gate_width,
gate_thickness=self._task_parameters.gate_thickness,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/gate_[0-5]")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
        dict = self._task_parameters.boundary_penalty.get_logs()
        dict = {**dict, **self._task_parameters.contact_penalty.get_logs()}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
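# Hedged sketch (not part of the original task, names and values are illustrative):
# projecting a world-frame position error (robot minus gate centre) into the gate frame
# with the same 2x2 rotation convention used above. A positive first component means the
# platform sits on the "after" side of the gate along its heading.
def _demo_gate_frame_projection() -> torch.Tensor:
    heading = torch.tensor([0.5])
    R = torch.stack(
        [
            torch.stack([torch.cos(heading), torch.sin(heading)], dim=-1),
            torch.stack([-torch.sin(heading), torch.cos(heading)], dim=-1),
        ],
        dim=-2,
    )  # shape (1, 2, 2)
    position_error = torch.tensor([[1.0, 0.0]])
    return torch.matmul(R, position_error.unsqueeze(-1)).squeeze(-1)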
| 25,944 | Python | 36.765648 | 106 | 0.570729 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_gate.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughGateReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughGateParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.gate import FixedGate
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughGateTask(Core):
"""
    Implements the GoThroughGate task. The robot has to pass through a gate in the 2D plane,
    and it must do so while looking at the gate. Unlike the GoToXY task, the robot has to pass
    through the gate in the correct direction and keep moving.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the GoThroughGate task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughGateTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughGateParameters(**task_param)
self._reward_parameters = GoThroughGateReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._is_in_reverse = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._R = torch.zeros(
(self._num_envs, 2, 2), device=self._device, dtype=torch.float32
)
self._previous_is_after_gate = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._previous_is_before_gate = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.contact_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.contact_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = current_state["position"] - self._target_positions
# Compute target heading as the angle required to be looking at the target
look_at_target_headings = torch.arctan2(
-self._position_error[:, 1], -self._position_error[:, 0]
)
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(look_at_target_headings - heading),
torch.cos(look_at_target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = torch.cos(self._target_headings)
self._task_data[:, 5] = torch.sin(self._target_headings)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
position_progress = self._previous_position_dist - self.position_dist
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# Compute heading error
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# contact penalty
contact_penalty, self._contact_kills = (
self._task_parameters.contact_penalty.compute_penalty(
current_state["net_contact_forces"], step
)
)
# Project the position error into the gate frame
pos_proj = torch.matmul(self._R, self._position_error.unsqueeze(-1)).squeeze(-1)
is_after_gate = torch.logical_and(
torch.logical_and(
pos_proj[:, 0] >= 0,
pos_proj[:, 0] < self._task_parameters.gate_width / 4,
),
torch.abs(pos_proj[:, 1]) < self._task_parameters.gate_width / 2,
)
is_before_gate = torch.logical_and(
torch.logical_and(
pos_proj[:, 0] < 0,
pos_proj[:, 0] > -self._task_parameters.gate_width / 4,
),
torch.abs(pos_proj[:, 1]) < self._task_parameters.gate_width / 2,
)
# Checks if the goal is reached
self._goal_reached = torch.logical_and(
is_after_gate, self._previous_is_before_gate
).int()
# Check if the robot goes through the gate in the wrong direction
self._is_in_reverse = torch.logical_and(
self._previous_is_after_gate, is_before_gate
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
)
self._previous_position_dist = self.position_dist.clone()
self._previous_is_after_gate = is_after_gate.clone()
self._previous_is_before_gate = is_before_gate.clone()
return (
self.progress_reward
+ self.heading_reward
- boundary_penalty
- contact_penalty
- self._reward_parameters.time_penalty
+ self._reward_parameters.terminal_reward * self._goal_reached
- self._reward_parameters.reverse_penalty * self._is_in_reverse
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
torch.Tensor: Wether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._is_in_reverse > 0, ones, die)
die = torch.where(self._goal_reached > 0, ones, die)
die = torch.where(self._contact_kills, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
stats = self._task_parameters.contact_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
self._is_in_reverse[env_ids] = 0
self._previous_position_dist[env_ids] = 0
self._previous_is_after_gate[env_ids] = 0
self._previous_is_before_gate[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, :2] += self._target_positions[env_ids]
p[:, 2] = 0.5
# Randomize heading
self._target_headings[env_ids] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
# Compute the rotation matrix
self._R[env_ids, 0, 0] = torch.cos(self._target_headings[env_ids])
self._R[env_ids, 0, 1] = torch.sin(self._target_headings[env_ids])
self._R[env_ids, 1, 0] = -torch.sin(self._target_headings[env_ids])
self._R[env_ids, 1, 1] = torch.cos(self._target_headings[env_ids])
# Compute the quaternion
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
        q[:, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
        q[:, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = (
self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
+ self._target_headings[env_ids]
)
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        A fixed gate is generated to represent the crossing to be traversed by the agent.
        Args:
            path (str): The path where the gate is to be generated.
            position (torch.Tensor): The position of the gate.
"""
color = torch.tensor([1, 0, 0])
FixedGate(
prim_path=path + "/gate",
translation=position,
name="target_0",
gate_width=self._task_parameters.gate_width,
gate_thickness=self._task_parameters.gate_thickness,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/gate")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
        dict = self._task_parameters.boundary_penalty.get_logs()
        dict = {**dict, **self._task_parameters.contact_penalty.get_logs()}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
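# Hedged sketch (not part of the original task, values are illustrative): a gate crossing
# is registered only when the platform was in the "before" slab on the previous step and
# is in the "after" slab on the current one, mirroring the logic in compute_reward above.
def _demo_gate_crossing_detection() -> torch.Tensor:
    previous_is_before_gate = torch.tensor([1, 0, 1], dtype=torch.int32)
    is_after_gate = torch.tensor([1, 1, 0], dtype=torch.int32)
    return torch.logical_and(is_after_gate, previous_is_before_gate).int()  # [1, 0, 0]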
| 21,160 | Python | 35.17265 | 93 | 0.585161 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_pose.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughPoseParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow import VisualArrow
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughPoseTask(Core):
"""
    Implements the GoThroughPose task. The robot has to go through a target pose in the 2D
    plane at a given velocity, matching the target heading as it passes. Unlike the GoToPose
    task, the robot has to go through the target and keep moving.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the GoThroughPose task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughPoseTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughPoseParameters(**task_param)
self._reward_parameters = GoThroughPoseReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._target_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._delta_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# linear velocity error (normed velocity)
self.linear_velocity_err = self._target_velocities - torch.norm(
current_state["linear_velocity"], dim=-1
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
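        # Note: arctan2(sin(a - b), cos(a - b)) wraps the signed heading difference into [-pi, pi].
        # E.g. a target heading of 3.0 rad and a current heading of -3.0 rad give a raw difference of
        # 6.0 rad, but the wrapped error is about -0.28 rad (the short way around the circle).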
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = self.linear_velocity_err
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.abs(self.linear_velocity_err)
position_progress = (
self._previous_position_dist - self.position_dist
) / torch.abs(self._target_velocities)
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
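        # The progress term is the distance covered toward the target this step, expressed in units of
        # the commanded speed; multiplying by (1 - was_killed) zeroes the term on the first step after a
        # reset, where _previous_position_dist is still 0 and the difference would be meaningless.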
# boundary penalty
self.heading_dist = torch.abs(self._heading_error)
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
position_goal_reached = (
self.position_dist < self._task_parameters.position_tolerance
)
heading_goal_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
)
self._goal_reached = (position_goal_reached * heading_goal_reached).int()
# rewards
(
self.progress_reward,
self.heading_reward,
self.linear_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
self.linear_velocity_dist,
)
self._previous_position_dist = self.position_dist.clone()
return (
self.progress_reward
+ self.heading_reward
+ self.linear_velocity_reward
- self.boundary_penalty
- self._reward_parameters.time_penalty
+ self._reward_parameters.terminal_reward * self._goal_reached
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._goal_reached > 0, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, :2] += self._target_positions[env_ids]
p[:, 2] = 2
# Randomize heading
self._target_headings[env_ids] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
        # Yaw-only quaternion (w, x, y, z): w = cos(theta / 2), z = sin(theta / 2)
        q[:, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
        q[:, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
self._target_velocities[env_ids] = r
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = (
self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
+ self._target_headings[env_ids]
)
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
An arrow is generated to represent the 3DoF pose to be reached by the agent.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the arrow.
"""
color = torch.tensor([1, 0, 0])
body_radius = 0.1
body_length = 0.5
head_radius = 0.2
head_length = 0.5
poll_radius = 0.025
poll_length = 2
VisualArrow(
prim_path=path + "/arrow",
translation=position,
name="target_0",
body_radius=body_radius,
body_length=body_length,
poll_radius=poll_radius,
poll_length=poll_length,
head_radius=head_radius,
head_length=head_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/arrow")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 20,720 | Python | 34.179966 | 93 | 0.586824 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_track_xyo_velocity.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
TrackXYOVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
TrackXYOVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYOVelocityTask(Core):
"""
    Implements the TrackXYOVelocity task. The robot has to track a target linear velocity and a target angular velocity.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
) -> None:
"""
        Initializes the TrackXYOVelocityTask.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(TrackXYOVelocityTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = TrackXYOVelocityParameters(**task_param)
self._reward_parameters = TrackXYOVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._target_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.target_angular_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_linear_velocities = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_angular_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 3
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "angular_velocity_reward" in stats.keys():
stats["angular_velocity_reward"] = torch_zeros()
if not "angular_velocity_error" in stats.keys():
stats["angular_velocity_error"] = torch_zeros()
self.log_with_wandb = []
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._linear_velocity_error = (
self._target_linear_velocities - current_state["linear_velocity"]
)
self._angular_velocity_error = (
self._target_angular_velocities - current_state["angular_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :2] = self._linear_velocity_error
self._task_data[:, 2] = self._angular_velocity_error
return self.update_observation_tensor(current_state)
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.sqrt(
torch.square(self._linear_velocity_error).sum(-1)
)
self.angular_velocity_dist = torch.abs(self._angular_velocity_error)
# Checks if the goal is reached
lin_goal_is_reached = (
self.linear_velocity_dist < self._task_parameters.lin_vel_tolerance
).int()
ang_goal_is_reached = (
self.angular_velocity_dist < self._task_parameters.ang_vel_tolerance
).int()
goal_is_reached = lin_goal_is_reached * ang_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# Rewards
(
self.linear_velocity_reward,
self.angular_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
self.linear_velocity_dist,
self.angular_velocity_dist,
)
return self.linear_velocity_reward + self.angular_velocity_reward
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["angular_velocity_reward"] += self.angular_velocity_reward
stats["angular_velocity_error"] += self.angular_velocity_dist
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
self._target_linear_velocities[env_ids, 0] = r * torch.cos(theta)
self._target_linear_velocities[env_ids, 1] = r * torch.sin(theta)
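        # The target planar velocity is drawn in polar form: a curriculum-sampled magnitude r and a
        # uniformly random direction theta, so directions stay uniform regardless of the magnitude curriculum.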
# Randomizes the target angular velocity
omega = self._target_angular_velocity_sampler.sample(
num_goals, step, device=self._device
)
self._target_angular_velocities[env_ids] = omega
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, 2] = 2
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the target.
"""
pass
def add_visual_marker_to_scene(self, scene: Usd.Stage) -> Tuple[Usd.Stage, None]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, None]: The scene and the visual marker.
"""
return scene, None
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the target angular velocity
d = self._target_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
d = d.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(r, bins=32)
ax[0].set_title("Target normed linear velocity")
ax[0].set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(d, bins=32)
ax[1].set_title("Target normed angular velocity")
ax[1].set_xlim(
self._target_angular_velocity_sampler.get_min_bound(),
self._target_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = {}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 15,555 | Python | 33.114035 | 93 | 0.592093 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_thruster_generator.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass
import torch
import math
@dataclass
class ConfigurationParameters:
"""
Thruster configuration parameters."""
use_four_configurations: bool = False
num_anchors: int = 4
offset: float = math.pi / 4
thrust_force: float = 1.0
visualize: bool = False
save_path: str = "thruster_configuration.png"
def __post_init__(self):
        assert self.num_anchors > 1, "num_anchors must be greater than or equal to 2."
@dataclass
class PlatformParameters:
"""
Platform physical parameters."""
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
enable_collision: bool = False
@dataclass
class PlatformRandomization:
"""
Platform randomization parameters."""
random_permutation: bool = False
random_offset: bool = False
randomize_thruster_position: bool = False
min_random_radius: float = 0.125
max_random_radius: float = 0.25
random_theta: float = 0.125
randomize_thrust_force: bool = False
min_thrust_force: float = 0.5
max_thrust_force: float = 1.0
kill_thrusters: bool = False
max_thruster_kill: int = 1
def compute_actions(cfg_param: ConfigurationParameters):
"""
Computes the number of actions for the thruster configuration."""
if cfg_param.use_four_configurations:
return 10
else:
return cfg_param.num_anchors * 2
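# For instance, with the defaults (use_four_configurations=False, num_anchors=4) the platform exposes
# 4 anchors with 2 opposed thrusters each, i.e. 8 thruster actions; the four-configuration mode always
# reserves 10 action slots so that 4-, 6-, 8- and 10-thruster platforms can share one action space.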
class VirtualPlatform:
"""
Generates a virtual floating platform with thrusters."""
def __init__(self, num_envs: int, platform_cfg: dict, device: str) -> None:
self._num_envs = num_envs
self._device = device
# Generates dataclasses from the configuration file
self.core_cfg = PlatformParameters(**platform_cfg["core"])
self.rand_cfg = PlatformRandomization(**platform_cfg["randomization"])
self.thruster_cfg = ConfigurationParameters(**platform_cfg["configuration"])
# Computes the number of actions
self._max_thrusters = compute_actions(self.thruster_cfg)
# Sets the empty buffers
self.transforms2D = torch.zeros(
(num_envs, self._max_thrusters, 3, 3),
device=self._device,
dtype=torch.float32,
)
self.current_transforms = torch.zeros(
(num_envs, self._max_thrusters, 5), device=self._device, dtype=torch.float32
)
self.action_masks = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.long
)
self.thrust_force = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.float32
)
# Creates a unit vector to project the forces
self.create_unit_vector()
# Generates a visualization file for the provided thruster configuration
if self.thruster_cfg.visualize:
self.generate_base_platforms(self._num_envs, torch.arange(self._num_envs))
self.visualize(self.thruster_cfg.save_path)
def create_unit_vector(self) -> None:
"""
Creates a unit vector to project the forces.
The forces are in 2D so the unit vector is a 2D vector."""
tmp_x = torch.ones(
(self._num_envs, self._max_thrusters, 1),
device=self._device,
dtype=torch.float32,
)
tmp_y = torch.zeros(
(self._num_envs, self._max_thrusters, 1),
device=self._device,
dtype=torch.float32,
)
self.unit_vector = torch.cat([tmp_x, tmp_y], dim=-1)
def project_forces(self, forces: torch.Tensor) -> list:
"""
Projects the forces on the platform."""
# Applies force scaling, applies action masking
rand_forces = forces * self.thrust_force * (1 - self.action_masks)
# Split transforms into translation and rotation
R = self.transforms2D[:, :, :2, :2].reshape(-1, 2, 2)
T = self.transforms2D[:, :, 2, :2].reshape(-1, 2)
        # Create a zero tensor to add the 3rd dimension
zero = torch.zeros((T.shape[0], 1), device=self._device, dtype=torch.float32)
# Generate positions
positions = torch.cat([T, zero], dim=-1)
# Project forces
force_vector = self.unit_vector * rand_forces.view(-1, self._max_thrusters, 1)
rotated_forces = torch.matmul(R.reshape(-1, 2, 2), force_vector.view(-1, 2, 1))
projected_forces = torch.cat([rotated_forces[:, :, 0], zero], dim=-1)
return positions, projected_forces
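    # A short sketch of what project_forces relies on (illustration only): each 3x3 entry of transforms2D
    # stores the thruster rotation in its upper-left 2x2 block, [[cos t, -sin t], [sin t, cos t]], and the
    # thruster position (x, y, 1) in its last row, both expressed relative to the platform's centre of
    # mass. The method therefore returns, per thruster, the application point (x, y, 0) and the rotated
    # force R @ [f, 0] padded with a zero z-component, suitable for a rigid-body force/torque API.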
def randomize_thruster_state(self, env_ids: torch.Tensor, num_resets: int) -> None:
"""
Randomizes the spatial configuration of the thruster."""
self.generate_base_platforms(num_resets, env_ids)
def generate_base_platforms(self, num_envs: int, env_ids: torch.Tensor) -> None:
"""
Generates the spatial configuration of the thruster."""
# ====================
# Basic thruster positioning
# ====================
# Generates a fixed offset between the heading and the first generated thruster
random_offset = (
torch.ones((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
/ self.thruster_cfg.num_anchors
)
# Adds a random offset to each simulated platform between the heading and the first generated thruster
if self.rand_cfg.random_offset:
random_offset += (
torch.rand((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
* 2
)
# Generates a 180 degrees offset between two consecutive thruster (+/- 90 degrees).
thrust_90 = (
(
torch.arange(2, device=self._device)
.repeat(self._max_thrusters // 2)
.expand(self._num_envs, self._max_thrusters)
* 2
- 1
)
* math.pi
/ 2
)
# If four configurations, it generates platforms with 4, 6, 8, and 10 thrusters.
if self.thruster_cfg.use_four_configurations:
# Generates N, two by two thruster
thrust_offset1 = (
torch.arange(5, device=self._device)
.repeat_interleave(2)
.expand(self._num_envs // 4, self._max_thrusters)
/ 2
* math.pi
* 2
)
thrust_offset2 = (
torch.arange(5, device=self._device)
.repeat_interleave(2)
.expand(self._num_envs // 4, self._max_thrusters)
/ 3
* math.pi
* 2
)
thrust_offset3 = (
torch.arange(5, device=self._device)
.repeat_interleave(2)
.expand(self._num_envs // 4, self._max_thrusters)
/ 4
* math.pi
* 2
)
thrust_offset4 = (
torch.arange(5, device=self._device)
.repeat_interleave(2)
.expand(self._num_envs // 4, self._max_thrusters)
/ 5
* math.pi
* 2
)
thrust_offset = torch.cat(
[thrust_offset1, thrust_offset2, thrust_offset3, thrust_offset4], 0
)
# Generates a mask indicating if the thrusters are usable or not. Used by the transformer to mask the sequence.
mask1 = torch.ones(
(self._num_envs // 4, self._max_thrusters), device=self._device
)
mask2 = torch.ones(
(self._num_envs // 4, self._max_thrusters), device=self._device
)
mask3 = torch.ones(
(self._num_envs // 4, self._max_thrusters), device=self._device
)
mask4 = torch.ones(
(self._num_envs // 4, self._max_thrusters), device=self._device
)
# TODO document this
mask1[:, 4:] = 0
mask2[:, 6:] = 0
mask3[:, 8:] = 0
mask = torch.cat([mask1, mask2, mask3, mask4], 0)
else:
# Generates N, two by two thruster
thrust_offset = (
torch.arange(self.thruster_cfg.num_anchors, device=self._device)
.repeat_interleave(2)
.expand(self._num_envs, self._max_thrusters)
/ self.thruster_cfg.num_anchors
* math.pi
* 2
)
# Generates a mask indicating if the thrusters are usable or not. Used by the transformer to mask the sequence.
mask = torch.ones(
(self._num_envs, self._max_thrusters), device=self._device
)
# ====================
# Random thruster killing
# ====================
# Kill thrusters:
if self.rand_cfg.kill_thrusters:
# Generates 0 and 1 to decide how many thrusters will be killed
weights = torch.ones((self._num_envs, 2), device=self._device)
kills = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=True
)
# Selects L indices to set to N+1
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
kill_ids = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=False
)
# Multiplies kill or not kill with the ids.
            # If there is no kill, the index is set to max_thrusters (an extra dummy slot) so it can be filtered out later
final_kill_ids = kills * kill_ids + (1 - kills) * self._max_thrusters
# Creates a mask from the kills:
kill_mask = torch.sum(
torch.nn.functional.one_hot(final_kill_ids, self._max_thrusters + 1),
dim=1,
)
# Removes the duplicates
kill_mask = 1 - kill_mask[:, : self._max_thrusters]
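            # E.g. with max_thrusters = 8 and final_kill_ids = [2, 8] for one platform, only thruster 2 is
            # disabled: index 8 lands in the dummy extra column of the one-hot encoding and is sliced away.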
if self.thruster_cfg.use_four_configurations:
mask[self._num_envs // 4 :] = (
mask[self._num_envs // 4 :] * kill_mask[self._num_envs // 4 :]
)
else:
mask = mask * kill_mask
# Generates the transforms and masks
transforms2D = torch.zeros_like(self.transforms2D) # Used to project the forces
action_masks = torch.zeros_like(self.action_masks) # Used to mask actions
current_transforms = torch.zeros_like(
self.current_transforms
) # Used to feed to the transformer
# ====================
# Randomizes the thruster poses and characteristics.
# ====================
# Randomizes the thrust force:
if self.rand_cfg.randomize_thrust_force:
thrust_force = (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_thrust_force - self.rand_cfg.min_thrust_force)
+ self.rand_cfg.min_thrust_force
)
else:
thrust_force = torch.ones(
(self._num_envs, self._max_thrusters), device=self._device
)
# Thruster angular position with regards to the center of mass.
theta2 = random_offset + thrust_offset
# Randomizes thruster poses if requested:
if self.rand_cfg.randomize_thruster_position:
radius = self.core_cfg.radius * (
1
+ torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_random_radius + self.rand_cfg.min_random_radius)
- self.rand_cfg.min_random_radius
)
theta2 += (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.random_theta * 2)
- self.rand_cfg.random_theta
)
else:
radius = self.core_cfg.radius
# Thruster angle:
theta = theta2 + thrust_90
# ====================
# Computes the 2D transforms of the thruster locations.
# ====================
# 2D transforms defining the thruster locations.
transforms2D[:, :, 0, 0] = torch.cos(theta) * mask
transforms2D[:, :, 0, 1] = torch.sin(-theta) * mask
transforms2D[:, :, 1, 0] = torch.sin(theta) * mask
transforms2D[:, :, 1, 1] = torch.cos(theta) * mask
transforms2D[:, :, 2, 0] = torch.cos(theta2) * radius * mask
transforms2D[:, :, 2, 1] = torch.sin(theta2) * radius * mask
transforms2D[:, :, 2, 2] = 1 * mask
# Actions masks to define which thrusters can be used.
action_masks[:, :] = 1 - mask.long()
# Transforms to feed to the transformer.
current_transforms[:, :, 0] = torch.cos(theta) * mask
current_transforms[:, :, 1] = torch.sin(-theta) * mask
current_transforms[:, :, 2] = torch.cos(theta2) * radius * mask
current_transforms[:, :, 3] = torch.sin(theta2) * radius * mask
current_transforms[:, :, 4] = thrust_force * mask
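        # Each row of current_transforms summarises one thruster with 5 features:
        # [cos(theta), -sin(theta), x, y, thrust_force], where theta is the thrust direction and (x, y) the
        # mounting point; masked (unused or killed) thrusters are zeroed so that a sequence model can attend
        # over a fixed-length input of max_thrusters entries.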
# Applies random permutations to the thrusters while keeping the non-used thrusters at the end of the sequence.
if self.rand_cfg.random_permutation:
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
selected_thrusters = torch.multinomial(
weights, num_samples=self._max_thrusters, replacement=False
)
mask = torch.gather(1 - mask, 1, selected_thrusters)
_, sorted_idx = mask.sort(1)
selected_thrusters = torch.gather(selected_thrusters, 1, sorted_idx)
transforms2D = torch.gather(
transforms2D,
1,
selected_thrusters.view(
self._num_envs, self._max_thrusters, 1, 1
).expand(self._num_envs, self._max_thrusters, 3, 3),
)
current_transforms = torch.gather(
current_transforms,
1,
selected_thrusters.view(self._num_envs, self._max_thrusters, 1).expand(
self._num_envs, self._max_thrusters, 5
),
)
action_masks = torch.gather(action_masks, 1, selected_thrusters)
thrust_force = torch.gather(thrust_force, 1, selected_thrusters)
# Updates the proper indices
self.thrust_force[env_ids] = thrust_force[env_ids]
self.action_masks[env_ids] = action_masks[env_ids]
self.current_transforms[env_ids] = current_transforms[env_ids]
self.transforms2D[env_ids] = transforms2D[env_ids]
def visualize(self, save_path: str = None):
"""
Visualizes the thruster configuration."""
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
        # Creates a list of colors
cmap = cm.get_cmap("hsv")
colors = []
for i in range(self._max_thrusters):
colors.append(cmap(i / self._max_thrusters))
# Split into 1/4th of the envs, so that we can visualize all the configs in use_four_configuration mode.
env_ids = [
0,
1,
2,
3,
self._num_envs // 4,
self._num_envs // 4 + 1,
self._num_envs // 4 + 2,
self._num_envs // 4 + 3,
2 * self._num_envs // 4,
2 * self._num_envs // 4 + 1,
2 * self._num_envs // 4 + 2,
2 * self._num_envs // 4 + 3,
3 * self._num_envs // 4,
3 * self._num_envs // 4 + 1,
3 * self._num_envs // 4 + 2,
3 * self._num_envs // 4 + 3,
]
# Generates a thrust on all the thrusters
forces = torch.ones(
(self._num_envs, self._max_thrusters),
device=self._device,
dtype=torch.float32,
)
# Project
p, f = self.project_forces(forces)
# Reshape and get only the 2D values for plot.
p = p.reshape(self._num_envs, self._max_thrusters, 3)[:, :, :2]
f = f.reshape(self._num_envs, self._max_thrusters, 3)[:, :, :2]
p = np.array(p.cpu())
f = np.array(f.cpu())
# Plot
fig, axs = plt.subplots(4, 4)
fig.set_size_inches(20, 20)
for i in range(4):
for j in range(4):
idx = env_ids[i * 4 + j]
axs[i, j].quiver(
p[idx, :, 0],
p[idx, :, 1],
f[idx, :, 0],
f[idx, :, 1],
color=colors,
scale=4,
scale_units="xy",
angles="xy",
)
axs[i, j].set_xlim([-0.75, 0.75])
axs[i, j].set_ylim([-0.75, 0.75])
fig.tight_layout()
fig.savefig(save_path, dpi=300)
plt.close()
| 17,821 | Python | 37.162741 | 123 | 0.534538 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_core.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Tuple
from pxr import Usd
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
def quat_addition(q1, q2):
    """Composes two quaternions (w, x, y, z) with the Hamilton product and returns the normalized result."""
    q3 = torch.zeros_like(q1)
q3[:, 0] = (
q1[:, 0] * q2[:, 0]
- q1[:, 1] * q2[:, 1]
- q1[:, 2] * q2[:, 2]
- q1[:, 3] * q2[:, 3]
)
q3[:, 1] = (
q1[:, 0] * q2[:, 1]
+ q1[:, 1] * q2[:, 0]
+ q1[:, 2] * q2[:, 3]
- q1[:, 3] * q2[:, 2]
)
q3[:, 2] = (
q1[:, 0] * q2[:, 2]
- q1[:, 1] * q2[:, 3]
+ q1[:, 2] * q2[:, 0]
+ q1[:, 3] * q2[:, 1]
)
q3[:, 3] = (
q1[:, 0] * q2[:, 3]
+ q1[:, 1] * q2[:, 2]
- q1[:, 2] * q2[:, 1]
+ q1[:, 3] * q2[:, 0]
)
    q3 = q3 / (torch.norm(q3, dim=-1, keepdim=True) + EPS)
    return q3
class Core:
"""
The base class that implements the core of the task.
"""
def __init__(self, num_envs: int, device: str) -> None:
"""
The base class for the different subtasks.
Args:
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
# Dimensions of the observation tensors
self._dim_orientation = (
2 # theta heading in the world frame (cos(theta), sin(theta)) [0:2]
)
self._dim_velocity = 2 # velocity in the world (x_dot, y_dot) [2:4]
self._dim_omega = 1 # rotation velocity (theta_dot) [4]
self._dim_task_label = 1 # label of the task to be executed (int) [5]
        self._dim_task_data = 22 # data to be used to fulfill the task (floats) [6:28]
# Observation buffers
self._num_observations = 28
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, self._dim_task_data),
device=self._device,
dtype=torch.float32,
)
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:2] = current_state["orientation"]
self._obs_buffer[:, 2:4] = current_state["linear_velocity"]
self._obs_buffer[:, 4] = current_state["angular_velocity"]
self._obs_buffer[:, 5] = self._task_label
self._obs_buffer[:, 6:28] = self._task_data
return self._obs_buffer
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
raise NotImplementedError
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
raise NotImplementedError
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
raise NotImplementedError
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
raise NotImplementedError
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
raise NotImplementedError
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
raise NotImplementedError
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions.
target_orientations (torch.Tensor): The target orientations.
Returns:
list: The target positions and orientations.
"""
raise NotImplementedError
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
raise NotImplementedError
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the target.
"""
raise NotImplementedError
def add_visual_marker_to_scene(self, scene: Usd.Stage) -> Tuple[Usd.Stage, None]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, None]: The scene and the visual marker.
"""
raise NotImplementedError
class TaskDict:
"""
A class to store the task dictionary. It is used to pass the task data to the task class.
"""
def __init__(self) -> None:
self.gotoxy = 0
self.gotopose = 1
self.trackxyvel = 2
self.trackxyovel = 3
self.trackxyvelheading = 4
| 7,370 | Python | 27.792969 | 93 | 0.555631 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_penalties.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BasePenalty,
EnergyPenalty,
LinearVelocityPenalty,
scaling_functions,
BoundaryPenalty,
)
from dataclasses import dataclass, field
from typing import Dict
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class AngularVelocityPenalty(BasePenalty):
"""
This class has access to the angular velocity and applies a penalty based on its norm.
"""
weight: float = 0.1
scaling_function: str = "linear"
scaling_parameter: float = 1.0
min_value: float = 0
max_value: float = float("inf")
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(
self, state: Dict["str", torch.Tensor], actions: torch.Tensor, step: int
):
"""
Computes the penalty based on the norm of the angular velocity.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
# compute the norm of the angular velocity
norm = torch.norm(state["angular_velocity"], dim=-1) - self.min_value
# apply ranging function
norm[norm < 0] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
return norm * self.last_rate * self.weight
else:
return torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
penalty_classes = {
"energy_penalty": EnergyPenalty,
"linear_velocity_penalty": LinearVelocityPenalty,
"angular_velocity_penalty": AngularVelocityPenalty,
}
@dataclass
class EnvironmentPenalties:
energy_penalty: EnergyPenalty = field(default_factory=dict)
linear_velocity_penalty: LinearVelocityPenalty = field(default_factory=dict)
angular_velocity_penalty: AngularVelocityPenalty = field(default_factory=dict)
def __post_init__(self):
self.penalties = []
self.energy_penalty = EnergyPenalty(**self.energy_penalty)
if self.energy_penalty.enable:
self.penalties.append(self.energy_penalty)
self.linear_velocity_penalty = LinearVelocityPenalty(
**self.linear_velocity_penalty
)
if self.linear_velocity_penalty.enable:
self.penalties.append(self.linear_velocity_penalty)
self.angular_velocity_penalty = AngularVelocityPenalty(
**self.angular_velocity_penalty
)
if self.angular_velocity_penalty.enable:
self.penalties.append(self.angular_velocity_penalty)
def compute_penalty(
self, state: Dict[str, torch.Tensor], actions: torch.Tensor, step: int
) -> torch.Tensor:
"""
Computes the penalties.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
penalties = torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
for penalty in self.penalties:
penalties += penalty.compute_penalty(state, actions, step)
return penalties
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
names = []
for penalty in self.penalties:
names.append("penalties/" + penalty.name)
return names
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
for penalty in self.penalties:
stats["penalties/" + penalty.name] += penalty.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
dict = {}
for penalty in self.penalties:
dict["penalties/" + penalty.name + "_weight"] = (
penalty.get_last_rate() * penalty.weight
)
return dict
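# Usage sketch (illustrative only; the configuration values shown are assumptions, not fixed defaults):
#
#   penalties = EnvironmentPenalties(
#       energy_penalty={"enable": True, "weight": 0.05, "curriculum": {}},
#       linear_velocity_penalty={},
#       angular_velocity_penalty={},
#   )
#   reward = task_reward - penalties.compute_penalty(state, actions, step)
#
# Only the penalties whose "enable" flag is set contribute; each is scaled by its curriculum rate and
# its weight before being summed.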
| 5,361 | Python | 29.64 | 90 | 0.602873 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_penalties.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumRateParameters,
)
from dataclasses import dataclass, field
from typing import Dict
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
scaling_functions = {
"linear": lambda x, p=0.0: x * p,
"constant": lambda x, p=0.0: (x > 0) * p,
"log": lambda x, p=0.0: torch.log(x + EPS),
"exp": lambda x, p=0.0: torch.exp(x),
"sqrt": lambda x, p=0.0: torch.sqrt(x),
"square": lambda x, p=0.0: torch.pow(x, 2),
"cube": lambda x, p=0.0: torch.pow(x, 3),
"inv_exp": lambda x, p=1.0: torch.exp(-x / (p + EPS)),
}
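# For example, BoundaryPenalty below uses "inv_exp" with scaling_parameter 0.5, so a distance d to the
# kill boundary maps to exp(-d / 0.5): a platform 1 m away collects roughly exp(-2) ~= 0.135 before
# weighting, while one at the boundary collects exp(0) = 1; the result is then clamped to
# saturation_value and scaled by the curriculum rate and the weight.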
@dataclass
class BasePenalty:
"""
This class implements the base for all penalties
"""
curriculum: CurriculumRateParameters = field(default_factory=dict)
enable: bool = False
def __post_init__(self):
self.curriculum = CurriculumRateParameters(**self.curriculum)
self.name = "".join(
[
"_" + c.lower() if (c.isupper() and i != 0) else c.lower()
for i, c in enumerate(type(self).__name__)
]
)
self.last_rate = None
self.last_penalties = None
def get_rate(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.curriculum.get(step)
def compute_penalty(self, value, step):
raise NotImplementedError
def get_unweigthed_penalties(self):
if self.last_rate is not None:
return self.last_penalties
def get_last_rate(self) -> float:
if self.last_rate is not None:
return self.last_rate
@dataclass
class EnergyPenalty(BasePenalty):
"""
This class has access to the actions and applies a penalty based on how many actions are taken.
"""
weight: float = 0.1
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
def compute_penalty(
self, state: Dict["str", torch.Tensor], actions: torch.Tensor, step: int
):
"""
Computes the penalty based on the number of actions taken.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
self.last_penalties = torch.sum(torch.abs(actions), -1)
return self.last_penalties * self.last_rate * self.weight
else:
return torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
@dataclass
class LinearVelocityPenalty(BasePenalty):
"""
This class has access to the linear velocity and applies a penalty based on its norm.
"""
weight: float = 0.1
scaling_function: str = "linear"
scaling_parameter: float = 1.0
min_value: float = 0
max_value: float = float("inf")
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(
self, state: Dict["str", torch.Tensor], actions: torch.Tensor, step: int
):
"""
Computes the penalty based on the norm of the linear velocity.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
# compute the norm of the linear velocity
norm = torch.norm(state["linear_velocity"], dim=-1) - self.min_value
# apply ranging function
norm[norm < 0] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
return norm * self.last_rate * self.weight
else:
return torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
@dataclass
class AngularVelocityPenalty(BasePenalty):
"""
This class has access to the angular velocity and applies a penalty based on its norm.
"""
weight: float = 0.1
scaling_function: str = "linear"
scaling_parameter: float = 1.0
min_value: float = 0
max_value: float = float("inf")
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(
self, state: Dict["str", torch.Tensor], actions: torch.Tensor, step: int
):
"""
Computes the penalty based on the norm of the angular velocity.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
# compute the norm of the angular velocity
norm = torch.abs(state["angular_velocity"]) - self.min_value
# apply ranging function
norm[norm < 0] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
return norm * self.last_rate * self.weight
else:
return torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
penalty_classes = {
"energy_penalty": EnergyPenalty,
"linear_velocity_penalty": LinearVelocityPenalty,
"angular_velocity_penalty": AngularVelocityPenalty,
}
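# Illustrative sketch, not part of the original module: `penalty_classes` maps
# configuration keys to penalty classes. The helper below shows one hypothetical
# way a nested config dict (e.g. {"energy_penalty": {...}}) could be resolved
# into enabled penalty objects; it assumes `enable` is a field of BasePenalty,
# as used throughout this file, and is never called by the module itself.
def _build_penalties_from_config(cfg: dict) -> list:
    """Hypothetical helper: instantiate every enabled penalty found in cfg."""
    penalties = []
    for key, params in cfg.items():
        if key in penalty_classes:
            penalty = penalty_classes[key](**params)
            if penalty.enable:
                penalties.append(penalty)
    return penalties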
@dataclass
class EnvironmentPenalties:
energy_penalty: EnergyPenalty = field(default_factory=dict)
linear_velocity_penalty: LinearVelocityPenalty = field(default_factory=dict)
angular_velocity_penalty: AngularVelocityPenalty = field(default_factory=dict)
def __post_init__(self):
self.penalties = []
self.energy_penalty = EnergyPenalty(**self.energy_penalty)
if self.energy_penalty.enable:
self.penalties.append(self.energy_penalty)
self.linear_velocity_penalty = LinearVelocityPenalty(
**self.linear_velocity_penalty
)
if self.linear_velocity_penalty.enable:
self.penalties.append(self.linear_velocity_penalty)
self.angular_velocity_penalty = AngularVelocityPenalty(
**self.angular_velocity_penalty
)
if self.angular_velocity_penalty.enable:
self.penalties.append(self.angular_velocity_penalty)
def compute_penalty(
self, state: Dict[str, torch.Tensor], actions: torch.Tensor, step: int
) -> torch.Tensor:
"""
Computes the penalties.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
penalties = torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
for penalty in self.penalties:
penalties += penalty.compute_penalty(state, actions, step)
return penalties
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
names = []
for penalty in self.penalties:
names.append("penalties/" + penalty.name)
return names
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
for penalty in self.penalties:
stats["penalties/" + penalty.name] += penalty.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
dict = {}
for penalty in self.penalties:
dict["penalties/" + penalty.name + "_weight"] = (
penalty.get_last_rate() * penalty.weight
)
return dict
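# Illustrative sketch, not part of the original module: a minimal example of how
# EnvironmentPenalties could be wired into a task's reward computation. `cfg` is
# assumed to be the nested penalty configuration (e.g. loaded from YAML); the
# function only shows the data flow and is never called by the module itself.
def _example_environment_penalties_step(cfg: dict, state: dict, actions: torch.Tensor, step: int) -> torch.Tensor:
    """Hypothetical usage: subtract the aggregated penalties from a task reward."""
    penalties = EnvironmentPenalties(**cfg)
    penalty = penalties.compute_penalty(state, actions, step)
    # a real task would add its own positive task reward here
    return -penalty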
@dataclass
class BoundaryPenalty(BasePenalty):
"""
This class has access to the state and applies a penalty based on the distance to the boundaries.
"""
weight: float = 10.0
scaling_function: str = "inv_exp"
scaling_parameter: float = 0.5
saturation_value: float = 2.0
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert self.saturation_value > 0, "Saturation value must be positive"
self.scaling_function = scaling_functions[self.scaling_function]
    def compute_penalty(self, distance: torch.Tensor, step: int):
        """
        Computes the penalty based on the distance to the boundaries.
        Args:
            distance (torch.Tensor): Distance to the boundaries.
            step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
self.last_rate = self.get_rate(step)
self.last_penalties = torch.clamp(
self.scaling_function(distance, self.scaling_parameter),
0,
self.saturation_value,
)
return self.last_penalties * self.last_rate * self.weight
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
return ["penalties/" + self.name]
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
stats["penalties/" + self.name] += self.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
return {
"penalties/" + self.name + "_weight": self.get_last_rate() * self.weight
}
@dataclass
class ContactPenalty(BasePenalty):
"""
    This class has access to the contact forces and applies a penalty when collisions occur.
"""
weight: float = 10.0
scaling_function: str = "constant"
scaling_parameter: float = 1.0
kill_on_contact: bool = False
kill_threshold: float = 1.0
min_value: float = 0
max_value: float = float("inf")
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(self, forces: torch.Tensor, step: int):
"""
        Computes the penalty based on the contact forces.
Args:
forces (torch.Tensor): The contact forces.
step (int): Current step.
Returns:
Tuple(torch.Tensor, torch.Tensor): Penalty, kills.
"""
self.last_rate = self.get_rate(step)
if self.enable:
# computes the threshold for the contact forces
norm = forces - self.min_value
# apply ranging function
norm[norm < EPS] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
# kill the episode if the contact forces are too high
if self.kill_on_contact:
kills = forces > self.kill_threshold
else:
kills = torch.zeros(
[norm.shape[0]], dtype=torch.float32, device=norm.device
)
return norm * self.last_rate * self.weight, kills
else:
self.last_penalties = torch.zeros(
[forces.shape[0]], dtype=torch.float32, device=forces.device
)
return torch.zeros(
[forces.shape[0]], dtype=torch.float32, device=forces.device
), torch.zeros([forces.shape[0]], dtype=torch.bool, device=forces.device)
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
return ["penalties/" + self.name]
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
stats["penalties/" + self.name] += self.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
return {
"penalties/" + self.name + "_weight": self.get_last_rate() * self.weight
}
@dataclass
class ConeShapePenalty(BasePenalty):
"""
    This class has access to the relative angle between the FP and the dock and applies a penalty based on its magnitude.
"""
weight: float = 0.1
scaling_function: str = "linear"
scaling_parameter: float = 1.0
min_value: float = 0.1745 # pi/18=10deg
max_value: float = 3.1415
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(self, relative_angle: torch.Tensor, step: int):
"""
        Computes the penalty based on the relative angle between the platform and the dock.
        Args:
            relative_angle (torch.Tensor): Relative angle between the platform and the dock.
            step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
            # compute how far the absolute relative angle exceeds the cone half-angle
norm = torch.abs(relative_angle) - self.min_value
# apply ranging function
norm[norm < 0] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
return norm * self.last_rate * self.weight
else:
return torch.zeros(
[relative_angle.shape[0]],
dtype=torch.float32,
device=relative_angle.device,
)
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
return ["penalties/" + self.name]
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
stats["penalties/" + self.name] += self.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
return {
"penalties/" + self.name + "_weight": self.get_last_rate() * self.weight
}
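# Illustrative sketch, not part of the original module: ConeShapePenalty returns
# zero while the absolute relative angle stays below `min_value` (the cone
# half-angle) and saturates once it exceeds `max_value`, scaled by the curriculum
# rate and the weight. The angles below are made up for illustration and the
# helper is never called by the module itself.
def _example_cone_shape_penalty(penalty: ConeShapePenalty, step: int = 0) -> torch.Tensor:
    """Hypothetical evaluation of the cone-shape penalty on a batch of angles (radians)."""
    relative_angles = torch.tensor([0.0, 0.2, 1.0, 3.0])
    return penalty.compute_penalty(relative_angles, step)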
| 17,843 | Python | 29.398637 | 102 | 0.575576 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_track_xyz_velocity.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
TrackXYZVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
TrackXYZVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xy_velocity import (
TrackXYVelocityTask as TrackXYVelocityTask2D,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYZVelocityTask(TrackXYVelocityTask2D, Core):
"""
    Implements the TrackXYZVelocity task. The robot has to reach a target 3D linear velocity.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
):
"""
Initializes the task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = TrackXYZVelocityParameters(**task_param)
self._reward_parameters = TrackXYZVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_velocities = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 2
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._velocity_error = (
self._target_velocities - current_state["linear_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :3] = self._velocity_error
return self.update_observation_tensor(current_state)
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions.
target_orientations (torch.Tensor): The target orientations.
step (int, optional): The current step. Defaults to 0.
Returns:
list: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
phi = torch.rand((num_goals,), device=self._device) * math.pi
self._target_velocities[env_ids, 0] = r * torch.cos(theta) * torch.sin(phi)
self._target_velocities[env_ids, 1] = r * torch.sin(theta) * torch.sin(phi)
self._target_velocities[env_ids, 2] = r * torch.cos(phi)
        # Target positions and orientations are not used by this task; return them unchanged.
return target_positions, target_orientations
def get_initial_conditions(
self, env_ids: torch.Tensor, step: int = 0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
uvw = torch.rand((num_resets, 3), device=self._device)
initial_orientation[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(
uvw[:, 2] * 2 * math.pi
)
initial_orientation[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(
uvw[:, 2] * 2 * math.pi
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return (
initial_position,
initial_orientation,
initial_velocity,
)
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The dictionary containing the spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The dictionary containing the target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
target_velocities = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
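# Illustrative sketch, not part of the original module: get_goals and
# get_initial_conditions both turn a sampled speed `r` and two random angles
# (theta: azimuth, phi: polar angle) into a Cartesian velocity. The helper below
# repeats that conversion in isolation so the convention is easy to check; it is
# not used by the task itself.
def _spherical_to_cartesian(r: torch.Tensor, theta: torch.Tensor, phi: torch.Tensor) -> torch.Tensor:
    """Hypothetical helper mirroring the velocity sampling used in this task."""
    return torch.stack(
        (
            r * torch.cos(theta) * torch.sin(phi),
            r * torch.sin(theta) * torch.sin(phi),
            r * torch.cos(phi),
        ),
        dim=-1,
    )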
| 10,835 | Python | 34.181818 | 89 | 0.601477 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_disturbances.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
ForceDisturbance as ForceDisturbance2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
TorqueDisturbance as TorqueDisturbance2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
NoisyActions as NoisyActions2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
NoisyObservations as NoisyObservations2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
MassDistributionDisturbances as MassDistributionDisturbances2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances as Disturbances2D,
)
from typing import Tuple
import torch
import math
import omni
class MassDistributionDisturbances(MassDistributionDisturbances2D):
"""
    Creates disturbances on the platform by simulating an uneven mass distribution.
"""
def __init__(
self,
parameters: MassDistributionDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(MassDistributionDisturbances, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the mass disturbances.
"""
super().instantiate_buffers()
self.platforms_CoM = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def randomize_masses(self, env_ids: torch.Tensor, step: int = 0) -> None:
"""
Randomizes the masses of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
step (int): The current step of the learning process.
"""
num_resets = len(env_ids)
self.platforms_mass[env_ids, 0] = self.mass_sampler.sample(
num_resets, step, device=self._device
)
r = self.CoM_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device) * math.pi
)
self.platforms_CoM[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.platforms_CoM[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.platforms_CoM[env_ids, 2] = torch.cos(phi) * r
def set_coms(
self,
body: omni.isaac.core.prims.XFormPrimView,
env_ids: torch.Tensor,
        joints_idx: Tuple[int, int, int],
) -> None:
"""
Sets the CoM of the platforms.
Args:
            body (omni.isaac.core.prims.XFormPrimView): The rigid bodies containing the prismatic joints controlling the position of the CoMs.
env_ids (torch.Tensor): The ids of the environments to reset.
            joints_idx (Tuple[int, int, int]): The ids of the x, y and z joints respectively.
"""
joints_position = torch.zeros(
(len(env_ids), 3), device=self._device, dtype=torch.float32
)
joints_position[:, joints_idx[0]] = self.platforms_CoM[env_ids, 0]
joints_position[:, joints_idx[1]] = self.platforms_CoM[env_ids, 1]
joints_position[:, joints_idx[2]] = self.platforms_CoM[env_ids, 2]
if self.parameters.enable:
body.set_joint_positions(joints_position, indices=env_ids)
class ForceDisturbance(ForceDisturbance2D):
"""
Creates disturbances on the platform by simulating an uneven floor.
"""
def __init__(
self, parameters: ForceDisturbanceParameters, num_envs: int, device: str
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The task configuration.
num_envs (int): The number of environments to create.
device (str): The device to use for the computation.
"""
super(ForceDisturbance, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the uneven floor disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_z_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_z_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_forces = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.forces = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_floor(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the uneven floor.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_z_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_z_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_forces[env_ids] = self.force_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.force_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
)
self.forces[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.forces[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.forces[env_ids, 2] = torch.cos(phi) * r
def get_floor_forces(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the floor forces for the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The floor forces to apply to the robot.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[:, 0] = (
torch.sin(root_pos[:, 0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[:, 1] = (
torch.sin(root_pos[:, 1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
self.forces[:, 2] = (
torch.sin(root_pos[:, 2] * self._floor_z_freq + self._floor_z_offset)
* self._max_forces
)
return self.forces
class TorqueDisturbance(TorqueDisturbance2D):
"""
Creates disturbances on the platform by simulating a torque applied to its center.
"""
def __init__(
self, parameters: TorqueDisturbanceParameters, num_envs: int, device: str
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(TorqueDisturbance, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the uneven torque disturbances."""
if self.parameters.use_sinusoidal_patterns:
self._torque_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_z_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_z_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_torques = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.torques = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_torque(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the torque disturbance.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
                # reuse the same min/max frequencies and offsets as the force (floor) disturbance
self._torque_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_z_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._torque_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._torque_z_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_torques[env_ids] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.torque_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
)
self.torques[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.torques[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.torques[env_ids, 2] = torch.cos(phi) * r
def get_torque_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the torque forces for the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The torque forces to apply to the robot.
"""
if self.parameters.use_sinusoidal_patterns:
self.torques[:, 0] = (
torch.sin(root_pos[:, 0] * self._torque_x_freq + self._torque_x_offset)
* self._max_torques
)
self.torques[:, 1] = (
torch.sin(root_pos[:, 1] * self._torque_y_freq + self._torque_y_offset)
* self._max_torques
)
self.torques[:, 2] = (
torch.sin(root_pos[:, 2] * self._torque_z_freq + self._torque_z_offset)
* self._max_torques
)
return self.torques
class NoisyObservations(NoisyObservations2D):
"""
Adds noise to the observations of the robot.
"""
def __init__(
self, parameters: NoisyObservationsParameters, num_envs: int, device: str
) -> None:
"""
Args:
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(NoisyObservations, self).__init__(parameters, num_envs, device)
class NoisyActions(NoisyActions2D):
"""
Adds noise to the actions of the robot.
"""
def __init__(
self, parameters: NoisyActionsParameters, num_envs: int, device: str
) -> None:
"""
Args:
            parameters (NoisyActionsParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(NoisyActions, self).__init__(parameters, num_envs, device)
class Disturbances(Disturbances2D):
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.parameters.mass_disturbance,
num_envs,
device,
)
self.force_disturbances = ForceDisturbance(
self.parameters.force_disturbance,
num_envs,
device,
)
self.torque_disturbances = TorqueDisturbance(
self.parameters.torque_disturbance,
num_envs,
device,
)
self.noisy_observations = NoisyObservations(
self.parameters.observations_disturbance,
num_envs,
device,
)
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
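# Illustrative sketch, not part of the original module: a minimal example of how
# the aggregated Disturbances object could be queried once per physics step. The
# function only shows the data flow; how the returned forces and torques are
# applied to the rigid bodies is left to the task/environment code.
def _example_query_disturbances(disturbances: Disturbances, root_pos: torch.Tensor):
    """Hypothetical per-step query of the force and torque disturbances."""
    forces = disturbances.force_disturbances.get_floor_forces(root_pos)
    torques = disturbances.torque_disturbances.get_torque_disturbance(root_pos)
    return forces, torques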
| 18,366 | Python | 36.560327 | 136 | 0.557661 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_task_parameters.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BoundaryPenalty,
ConeShapePenalty,
ContactPenalty,
)
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYParameters:
"""
Parameters for the GoToXY task.
"""
name: str = "GoToXY"
position_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
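# Illustrative sketch, not part of the original module: the parameter dataclasses
# in this file are built by unpacking a (typically YAML-loaded) dictionary. The
# values below are made up and rely on the defaults of the nested penalty and
# curriculum dataclasses; they only illustrate the expected shape of the config.
def _example_goto_xy_parameters() -> GoToXYParameters:
    """Hypothetical construction of the GoToXY task parameters from a dict."""
    cfg = {
        "position_tolerance": 0.02,
        "kill_after_n_steps_in_tolerance": 100,
        "kill_dist": 8.0,
        "boundary_penalty": {},
        "spawn_position_curriculum": {},
        "spawn_linear_velocity_curriculum": {},
        "spawn_angular_velocity_curriculum": {},
    }
    return GoToXYParameters(**cfg)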
@dataclass
class GoToPoseParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToPose"
position_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughXYParameters:
"""
    Parameters for the GoThroughXY task.
"""
name: str = "GoToThroughXY"
position_tolerance: float = 0.1
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughXYSequenceParameters:
"""
    Parameters for the GoThroughXYSequence task.
"""
name: str = "GoToThroughXYSequence"
position_tolerance: float = 0.1
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
num_points: int = 5
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughPoseParameters:
"""
    Parameters for the GoThroughPose task.
"""
name: str = "GoToThroughPose"
position_tolerance: float = 0.1
heading_tolerance: float = 0.05
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughPoseSequenceParameters:
"""
    Parameters for the GoThroughPoseSequence task.
"""
name: str = "GoToThroughPoseSequence"
position_tolerance: float = 0.1
heading_tolerance: float = 0.05
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
num_points: int = 5
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughGateParameters:
"""
    Parameters for the GoThroughGate task.
"""
name: str = "GoToThroughGate"
goal_random_position: float = 0.0
kill_dist: float = 10.0
gate_width: float = 1.5
gate_thickness: float = 0.2
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.gate_width > 0, "Gate width must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughGateSequenceParameters:
"""
    Parameters for the GoThroughGateSequence task.
"""
name: str = "GoToThroughGate"
goal_random_position: float = 0.0
kill_dist: float = 10.0
gate_width: float = 1.5
gate_thickness: float = 0.2
num_points: int = 5
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_gate_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_gate_heading_curriculum: CurriculumParameters = field(default_factory=dict)
def __post_init__(self) -> None:
assert self.gate_width > 0, "Gate width must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
self.spawn_gate_position_curriculum = CurriculumParameters(
**self.spawn_gate_position_curriculum
)
self.spawn_gate_heading_curriculum = CurriculumParameters(
**self.spawn_gate_heading_curriculum
)
@dataclass
class TrackXYVelocityParameters:
"""
Parameters for the TrackXYVelocity task.
"""
name: str = "TrackXYVelocity"
lin_vel_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_velocity: float = 0.75
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_velocity >= 0, "Goal random velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class TrackXYOVelocityParameters:
"""
Parameters for the TrackXYOVelocity task.
"""
name: str = "TrackXYOVelocity"
lin_vel_tolerance: float = 0.01
ang_vel_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_linear_velocity: float = 0.75
goal_random_angular_velocity: float = 1
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
target_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.ang_vel_tolerance > 0
), "Angular velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert (
self.goal_random_linear_velocity >= 0
), "Goal random linear velocity must be positive."
assert (
self.goal_random_angular_velocity >= 0
), "Goal random angular velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.target_angular_velocity_curriculum = CurriculumParameters(
**self.target_angular_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class TrackXYVelocityHeadingParameters:
"""
Parameters for the TrackXYVelocityHeading task.
"""
name: str = "TrackXYVelocityHeading"
velocity_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.velocity_tolerance > 0, "Velocity tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class CloseProximityDockParameters:
"""
    Parameters for the CloseProximityDock task.
    """
name: str = "CloseProximityDock"
position_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
kill_dist: float = 10.0
dock_footprint_diameter: float = 0.8
goal_to_penalty_anchor_dist: float = 2.0
env_x: float = 3.0
env_y: float = 5.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
relative_angle_penalty: ConeShapePenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
fp_footprint_diameter_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_dock_mass_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_dock_space_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_relative_angle_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert (
self.dock_footprint_diameter > 0
), "Dock footprint diameter must be positive."
assert self.env_x > 0, "Environment x dimension must be positive."
assert self.env_y > 0, "Environment y dimension must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.relative_angle_penalty = ConeShapePenalty(**self.relative_angle_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.fp_footprint_diameter_curriculum = CurriculumParameters(
**self.fp_footprint_diameter_curriculum
)
self.spawn_dock_mass_curriculum = CurriculumParameters(
**self.spawn_dock_mass_curriculum
)
self.spawn_dock_space_curriculum = CurriculumParameters(
**self.spawn_dock_space_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_relative_angle_curriculum = CurriculumParameters(
**self.spawn_relative_angle_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
| 25,090 | Python | 38.327586 | 88 | 0.676006 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/curriculum_helpers.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from inspect import isfunction
import dataclasses
import torch
import math
####################################################################################################
# Curriculum growth functions
####################################################################################################
def curriculum_linear_growth(
step: int = 0, start: int = 0, end: int = 1000, **kwargs
) -> float:
"""
Generates a curriculum with a linear growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = current / (relative_end)
return rate
def curriculum_sigmoid_growth(
step: int = 0, start: int = 100, end: int = 1000, extent: float = 3, **kwargs
) -> float:
"""
Generates a curriculum with a sigmoid growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
extent (float, optional): Extent of the sigmoid function.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = (
math.tanh(((extent * 2 * current / relative_end) - extent) / 2)
- math.tanh(-extent / 2)
) / (math.tanh(extent / 2) - math.tanh(-extent / 2))
return rate
def curriculum_pow_growth(
step: int = 0, start: int = 0, end: int = 1000, alpha: float = 2.0, **kwargs
) -> float:
"""
Generates a curriculum with a power growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
alpha (float, optional): Exponent of the power function.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = (current / relative_end) ** alpha
return rate
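# Illustrative sketch, not part of the original module: the three growth
# functions above all map `step` to a rate in [0, 1] between `start` and `end`.
# The helper below evaluates them side by side for a quick sanity check; the
# step/start/end values are arbitrary and the function is never called here.
def _example_growth_rates(step: int = 500, start: int = 0, end: int = 1000) -> dict:
    """Hypothetical comparison of the curriculum growth schedules."""
    return {
        "linear": curriculum_linear_growth(step=step, start=start, end=end),
        "sigmoid": curriculum_sigmoid_growth(step=step, start=start, end=end),
        "pow": curriculum_pow_growth(step=step, start=start, end=end, alpha=2.0),
    }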
####################################################################################################
# Curriculum sampling functions
####################################################################################################
def norm_cdf(x: float) -> float:
"""
Computes standard normal cumulative distribution function
Args:
x (float): Input value.
Returns:
float: Value of the standard normal cumulative distribution function
"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
def truncated_normal(
n: int = 1,
mean: float = 0.0,
std: float = 0.5,
min_value: float = 0.0,
max_value: float = 1.0,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Method based on https://github.com/pytorch/pytorch/blob/a40812de534b42fcf0eb57a5cecbfdc7a70100cf/torch/nn/init.py#L22
Values are generated by using a truncated uniform distribution and
then using the inverse CDF for the normal distribution.
Args:
n (int, optional): Number of samples to generate.
mean (float, optional): Mean of the normal distribution.
std (float, optional): Standard deviation of the normal distribution.
min_value (float, optional): Minimum value of the truncated distribution.
max_value (float, optional): Maximum value of the truncated distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a truncated normal distribution.
"""
tensor = torch.zeros((n), dtype=torch.float32, device=device)
# Get upper and lower cdf values
l = norm_cdf((min_value - mean) / std)
u = norm_cdf((max_value - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=min_value, max=max_value)
return tensor
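# Illustrative sketch: a quick sanity check of the sampler above. The bounds and
# moments are assumptions chosen for demonstration; the final clamp guarantees
# that every sample stays inside [min_value, max_value].
def _truncated_normal_example() -> torch.Tensor:
    samples = truncated_normal(
        n=1000, mean=0.0, std=0.5, min_value=-0.25, max_value=0.25, device="cpu"
    )
    assert samples.min() >= -0.25 and samples.max() <= 0.25
    return samples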
def normal(
n: int = 1,
mean: float = 0.0,
std: float = 0.5,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a normal distribution.
Args:
n (int, optional): Number of samples to generate.
mean (float, optional): Mean of the normal distribution.
std (float, optional): Standard deviation of the normal distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a normal distribution.
"""
return torch.normal(mean, std, (n,), device=device)
def uniform(
n: int = 1,
min_value: float = 0.0,
max_value: float = 1.1,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a uniform distribution.
Args:
n (int, optional): Number of samples to generate.
min_value (float, optional): Minimum value of the uniform distribution.
max_value (float, optional): Maximum value of the uniform distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a uniform distribution.
"""
return torch.rand((n), device=device) * (max_value - min_value) + min_value
####################################################################################################
# Function dictionaries
####################################################################################################
RateFunctionDict = {
"none": lambda step, start, end, **kwargs: 1.0,
"linear": curriculum_linear_growth,
"sigmoid": curriculum_sigmoid_growth,
"pow": curriculum_pow_growth,
}
SampleFunctionDict = {
"uniform": uniform,
"normal": normal,
"truncated_normal": truncated_normal,
}
@dataclasses.dataclass
class CurriculumRateParameters:
start: int = 50
end: int = 1000
function: str = "none"
extent: float = 3
alpha: float = 2.0
def __post_init__(self):
        assert self.start >= 0, "Start must be greater than or equal to 0"
assert self.end > 0, "End must be greater than 0"
assert self.start < self.end, "Start must be smaller than end"
assert self.function in [
"none",
"linear",
"sigmoid",
"pow",
], "Function must be linear, sigmoid or pow"
assert self.extent > 0, "Extent must be greater than 0"
assert self.alpha > 0, "Alpha must be greater than 0"
self.function = RateFunctionDict[self.function]
self.kwargs = {
key: value for key, value in self.__dict__.items() if not isfunction(value)
}
def get(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.function(
step=step,
**self.kwargs,
)
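# Illustrative sketch: building a rate schedule the same way the task
# configurations do, from plain keyword values. The numbers are assumptions
# chosen for demonstration only.
def _rate_parameters_example() -> list:
    rp = CurriculumRateParameters(start=100, end=1100, function="pow", alpha=2.0)
    # Returns [0.0, 0.0, 0.25, 1.0, 1.0]: flat before start, saturated after end.
    return [rp.get(step) for step in (0, 100, 600, 1100, 2000)]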
@dataclasses.dataclass
class CurriculumSamplingParameters:
distribution: str = "uniform"
start_min_value: float = 0.0 # uniform only
start_max_value: float = 0.0 # uniform only
end_min_value: float = 0.0 # uniform only
end_max_value: float = 0.0 # uniform only
start_mean: float = 0.0 # normal and truncated_normal only
start_std: float = 0.0 # normal and truncated_normal only
end_mean: float = 0.0 # normal and truncated_normal only
end_std: float = 0.0 # normal and truncated_normal only
min_value: float = 0.0 # truncated_normal only
max_value: float = 0.0 # truncated_normal only
def __post_init__(self):
        assert (
            self.min_value <= self.max_value
        ), "min_value must be smaller than or equal to max_value"
        assert (
            self.start_min_value <= self.start_max_value
        ), "start_min_value must be smaller than or equal to start_max_value"
        assert (
            self.end_min_value <= self.end_max_value
        ), "end_min_value must be smaller than or equal to end_max_value"
        assert self.start_std >= 0, "start_std must be greater than or equal to 0"
        assert self.end_std >= 0, "end_std must be greater than or equal to 0"
assert self.distribution in [
"uniform",
"normal",
"truncated_normal",
], "Distribution must be uniform, normal or truncated_normal"
self.function = SampleFunctionDict[self.distribution]
@dataclasses.dataclass
class CurriculumParameters:
rate_parameters: CurriculumRateParameters = dataclasses.field(default_factory=dict)
sampling_parameters: CurriculumSamplingParameters = dataclasses.field(
default_factory=dict
)
def __post_init__(self):
self.rate_parameters = CurriculumRateParameters(**self.rate_parameters)
self.sampling_parameters = CurriculumSamplingParameters(
**self.sampling_parameters
)
class CurriculumSampler:
def __init__(
self,
curriculum_parameters: CurriculumParameters,
):
self.rp = curriculum_parameters.rate_parameters
self.sp = curriculum_parameters.sampling_parameters
def get_rate(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.rp.get(step)
def get_min(self) -> float:
"""
Gets the minimum value for the current step.
Returns:
float: Minimum value.
"""
if self.sp.distribution == "truncated_normal":
return self.sp.start_mean
elif self.sp.distribution == "normal":
return self.sp.start_mean
else:
return self.sp.start_min_value
def get_max(self) -> float:
"""
Gets the maximum value for the current step.
Returns:
float: Maximum value.
"""
if self.sp.distribution == "truncated_normal":
return self.sp.end_mean
elif self.sp.distribution == "normal":
return self.sp.end_mean
else:
return self.sp.end_max_value
def get_min_bound(self) -> float:
if self.sp.distribution == "truncated_normal":
return self.sp.min_value
elif self.sp.distribution == "normal":
return max(
[
self.sp.end_mean - 2 * self.sp.end_std,
self.sp.start_mean - 2 * self.sp.end_std,
]
)
else:
return max([self.sp.end_min_value, self.sp.start_min_value])
def get_max_bound(self) -> float:
if self.sp.distribution == "truncated_normal":
return self.sp.max_value
elif self.sp.distribution == "normal":
return max(
[
self.sp.end_mean + 2 * self.sp.end_std,
self.sp.start_mean + 2 * self.sp.end_std,
]
)
else:
return max([self.sp.end_max_value, self.sp.start_max_value])
def sample(self, n: int, step: int, device: str = "cpu") -> torch.Tensor:
"""
Samples values from the curriculum distribution.
Args:
n (int): Number of samples to generate.
step (int): Current step.
device (str): Device to use for the tensor.
Returns:
torch.Tensor: Tensor with values from the curriculum distribution.
"""
# Get the difficulty for the current step
rate = self.get_rate(step)
# Sample values from the curriculum distribution
if self.sp.distribution == "truncated_normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(
n=n,
mean=mean,
std=std,
min_value=self.sp.min_value,
max_value=self.sp.max_value,
device=device,
)
elif self.sp.distribution == "normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(n=n, mean=mean, std=std, device=device)
else:
min = (
self.sp.start_min_value
+ (self.sp.end_min_value - self.sp.start_min_value) * rate
)
max = (
self.sp.start_max_value
+ (self.sp.end_max_value - self.sp.start_max_value) * rate
)
return self.sp.function(n=n, min_value=min, max_value=max, device=device)
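# Illustrative sketch (not part of the original module): wiring a rate schedule
# and a sampling distribution into a CurriculumSampler. All numeric values are
# assumptions chosen for demonstration only.
def _curriculum_sampler_example() -> tuple:
    params = CurriculumParameters(
        rate_parameters={"function": "sigmoid", "start": 0, "end": 1000, "extent": 3},
        sampling_parameters={
            "distribution": "uniform",
            "start_min_value": 0.0,
            "start_max_value": 0.1,
            "end_min_value": 0.0,
            "end_max_value": 1.0,
        },
    )
    sampler = CurriculumSampler(params)
    # Early in training the samples stay close to the start range; by the end
    # of the schedule they cover the full end range.
    early = sampler.sample(n=16, step=0, device="cpu")
    late = sampler.sample(n=16, step=1000, device="cpu")
    return early, late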
| 13,775 | Python | 29.276923 | 121 | 0.561742 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_to_xy.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToXYReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoToXYParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin import VisualPin
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToXYTask(Core):
"""
Implements the GoToXY task. The robot has to reach a target position.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToXY task.
Args:
task_param (dict): Dictionary containing the task parameters.
reward_param (dict): Dictionary containing the reward parameters.
num_envs (int): Number of environments.
device (str): Device to run the task on.
"""
super(GoToXYTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoToXYParameters(**task_param)
self._reward_parameters = GoToXYReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 0
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): Dictionary containing the statistics.
Returns:
dict: Dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "position_reward" in stats.keys():
stats["position_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._position_error = self._target_positions - current_state["position"]
self._task_data[:, :2] = self._position_error
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# Rewards
self.position_reward = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist
)
return self.position_reward - self.boundary_penalty
def update_kills(self) -> torch.Tensor:
"""
        Updates whether the platforms should be killed or not.
        Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The statistics of the training.
"""
stats["position_reward"] += self.position_reward
stats["position_error"] += self.position_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
self._target_positions[env_ids] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, :2] += self._target_positions[env_ids]
p[:, 2] = 2
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates spawning positions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
        # Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = r * torch.cos(theta)
initial_position[:, 1] = r * torch.sin(theta)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
A pin is generated to represent the 2D position to be reached by the agent.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the pin.
"""
color = torch.tensor([1, 0, 0])
ball_radius = 0.2
poll_radius = 0.025
poll_length = 2
VisualPin(
prim_path=path + "/pin",
translation=position,
name="target_0",
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/pin")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Spawn position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/spawn_position"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_linear_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
return {}
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
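# Illustrative sketch (kept outside the class on purpose): the two-line counter
# trick used in GoToXYTask.compute_reward above. Multiplying by the boolean mask
# resets the counter of platforms that left the tolerance, adding the mask
# increments the counter of platforms that stayed inside it. Values below are
# made up for demonstration.
def _goal_counter_example() -> torch.Tensor:
    goal_reached = torch.tensor([3, 5, 0, 2], dtype=torch.int32)
    goal_is_reached = torch.tensor([1, 0, 1, 1], dtype=torch.int32)
    goal_reached *= goal_is_reached  # platforms outside tolerance restart at 0
    goal_reached += goal_is_reached  # platforms inside tolerance count one more step
    return goal_reached  # tensor([4, 0, 1, 3], dtype=torch.int32)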
| 14,811 | Python | 32.285393 | 93 | 0.587131 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_track_xy_velocity_heading.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
TrackXYVelocityHeadingReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
TrackXYVelocityHeadingParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow import VisualArrow
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYVelocityHeadingTask(Core):
"""
    Implements the TrackXYVelocityHeading task. The robot has to reach a target velocity and a target heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
        Initializes the TrackXYVelocityHeading task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(TrackXYVelocityHeadingTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = TrackXYVelocityHeadingParameters(**task_param)
self._reward_parameters = TrackXYVelocityHeadingReward(**reward_param)
# Curriculum samplers
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_velocities = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 4
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "velocity_reward" in stats.keys():
stats["velocity_reward"] = torch_zeros()
if not "velocity_error" in stats.keys():
stats["velocity_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
self.log_with_wandb = []
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# velocity distance
self._velocity_error = (
self._target_velocities - current_state["linear_velocity"]
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._velocity_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
# Position
self._position_error = current_state["position"]
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# velocity error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.velocity_dist = torch.sqrt(torch.square(self._velocity_error).sum(-1))
self.heading_dist = torch.abs(self._heading_error)
# Checks if the goal is reached
velocity_goal_is_reached = (
self.velocity_dist < self._task_parameters.velocity_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
).int()
goal_is_reached = velocity_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.velocity_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.velocity_dist, self.heading_dist
)
return self.velocity_reward + self.heading_reward
def update_kills(self) -> torch.Tensor:
"""
        Updates whether the platforms should be killed or not.
        Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The statistics of the training.
"""
stats["velocity_reward"] += self.velocity_reward
stats["heading_reward"] += self.heading_reward
stats["velocity_error"] += self.velocity_dist
stats["heading_error"] += self.heading_dist
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
self._target_velocities[env_ids, 0] = r * torch.cos(theta)
self._target_velocities[env_ids, 1] = r * torch.sin(theta)
# Randomize heading
self._target_headings[env_ids] = (
torch.rand(num_goals, device=self._device) * math.pi * 2
)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, 2] = 2
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
q[:, 0] = torch.cos(self._target_headings[env_ids] * 0.5)
q[:, 3] = torch.sin(self._target_headings[env_ids] * 0.5)
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = (
self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
+ self._target_headings[env_ids]
)
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        No marker is generated for this velocity-tracking task, so the method is a no-op.
        Args:
            path (str): The path where the marker would be generated.
            position (torch.Tensor): The position of the marker.
"""
pass
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
return scene, None
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = {}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
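# Illustrative sketch (kept outside the class on purpose): the arctan2(sin, cos)
# construction used in get_state_observations above wraps the heading error into
# [-pi, pi], so a target of +pi and a heading of -pi give an error of 0 instead
# of 2*pi. The values are made up for demonstration.
def _heading_error_wrap_example() -> torch.Tensor:
    target_headings = torch.tensor([math.pi, 0.25])
    headings = torch.tensor([-math.pi, -0.25])
    error = torch.arctan2(
        torch.sin(target_headings - headings), torch.cos(target_headings - headings)
    )
    return error  # approximately tensor([0.0, 0.5])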
| 16,352 | Python | 32.579055 | 105 | 0.588246 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_task_factory.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_xy import GoToXYTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xy_velocity import (
TrackXYVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xyo_velocity import (
TrackXYOVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xy_velocity_heading import (
TrackXYVelocityHeadingTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_close_proximity_dock import (
CloseProximityDockTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_xy import GoThroughXYTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_xy_seq import GoThroughXYSequenceTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_pose import GoThroughPoseTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_pose_seq import (
GoThroughPoseSequenceTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_gate import GoThroughGateTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_gate_seq import (
GoThroughGateSequenceTask,
)
class TaskFactory:
"""
Factory class to create tasks."""
def __init__(self):
self.creators = {}
def register(self, name: str, task):
"""
Registers a new task."""
self.creators[name] = task
def get(
self, task_dict: dict, reward_dict: dict, num_envs: int, device: str
) -> object:
"""
Returns a task."""
assert (
task_dict["name"] == reward_dict["name"]
), "The mode of both the task and the reward must match."
mode = task_dict["name"]
assert task_dict["name"] in self.creators.keys(), "Unknown task mode."
return self.creators[mode](task_dict, reward_dict, num_envs, device)
task_factory = TaskFactory()
task_factory.register("GoToXY", GoToXYTask)
task_factory.register("GoToPose", GoToPoseTask)
task_factory.register("TrackXYVelocity", TrackXYVelocityTask)
task_factory.register("TrackXYOVelocity", TrackXYOVelocityTask)
task_factory.register("TrackXYVelocityHeading", TrackXYVelocityHeadingTask)
task_factory.register("CloseProximityDock", CloseProximityDockTask)
task_factory.register("GoThroughXY", GoThroughXYTask)
task_factory.register("GoThroughXYSequence", GoThroughXYSequenceTask)
task_factory.register("GoThroughPose", GoThroughPoseTask)
task_factory.register("GoThroughPoseSequence", GoThroughPoseSequenceTask)
task_factory.register("GoThroughGate", GoThroughGateTask)
task_factory.register("GoThroughGateSequence", GoThroughGateSequenceTask)
| 2,842 | Python | 35.922077 | 86 | 0.743842 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_track_xy_velocity.py | __author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
TrackXYVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
TrackXYVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYVelocityTask(Core):
"""
Implements the TrackXYVelocity task. The robot has to reach a target linear velocity.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
):
"""
Initializes the task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(TrackXYVelocityTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = TrackXYVelocityParameters(**task_param)
self._reward_parameters = TrackXYVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_velocities = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 2
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "velocity_reward" in stats.keys():
stats["velocity_reward"] = torch_zeros()
if not "velocity_error" in stats.keys():
stats["velocity_error"] = torch_zeros()
self.log_with_wandb = []
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._velocity_error = (
self._target_velocities - current_state["linear_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :2] = self._velocity_error
return self.update_observation_tensor(current_state)
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.velocity_dist = torch.sqrt(torch.square(self._velocity_error).sum(-1))
# Checks if the goal is reached
goal_is_reached = (
self.velocity_dist < self._task_parameters.lin_vel_tolerance
).int()
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# Rewards
self.velocity_reward = self._reward_parameters.compute_reward(
current_state, actions, self.velocity_dist
)
return self.velocity_reward
def update_kills(self) -> torch.Tensor:
"""
        Updates whether the platforms should be killed or not.
        Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The statistics of the training.
"""
stats["velocity_reward"] += self.velocity_reward
stats["velocity_error"] += self.velocity_dist
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
self._target_velocities[env_ids, 0] = r * torch.cos(theta)
self._target_velocities[env_ids, 1] = r * torch.sin(theta)
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, 2] = 2
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
return p, q
def get_initial_conditions(
self, env_ids: torch.Tensor, step: int = 0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
        Generates a visual marker to help visualize the performance of the agent from the UI.
        No marker is generated for this velocity-tracking task, so the method is a no-op.
        Args:
            path (str): The path where the marker would be generated.
            position (torch.Tensor): The position of the target.
"""
pass
def add_visual_marker_to_scene(self, scene: Usd.Stage) -> Tuple[Usd.Stage, None]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, None]: The scene and the visual marker.
"""
return scene, None
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The dictionary containing the spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The dictionary containing the target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = {}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
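# Illustrative sketch (kept outside the class on purpose): how update_kills above
# combines its two termination conditions with torch.where. A platform is killed
# either when it drifts past kill_dist or when it has stayed within tolerance for
# more than kill_after_n_steps_in_tolerance steps. Values are made up.
def _update_kills_example() -> torch.Tensor:
    position_dist = torch.tensor([1.0, 9.0, 2.0])
    goal_reached = torch.tensor([6, 0, 2])
    kill_dist, kill_after_n_steps_in_tolerance = 8.0, 5
    die = torch.zeros_like(goal_reached, dtype=torch.long)
    ones = torch.ones_like(goal_reached, dtype=torch.long)
    die = torch.where(position_dist > kill_dist, ones, die)
    die = torch.where(goal_reached > kill_after_n_steps_in_tolerance, ones, die)
    return die  # tensor([1, 1, 0])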
| 13,384 | Python | 31.646341 | 93 | 0.591079 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/test_curriculum_helpers.py | import numpy as np
import unittest
import torch
import math
import omniisaacgymenvs.tasks.MFP.curriculum_helpers as ch
sigmoid_dict_0 = {"function": "sigmoid", "start": 0, "end": 1000, "extent": 1.5}
sigmoid_dict_1 = {"function": "sigmoid", "start": 100, "end": 1200, "extent": 3.0}
sigmoid_dict_2 = {"function": "sigmoid", "start": 200, "end": 1400, "extent": 4.5}
sigmoid_dict_3 = {"function": "sigmoid", "start": 400, "end": 1800, "extent": 6.0}
none_dict_0 = {"function": "none", "start": 0, "end": 1000}
none_dict_1 = {"function": "none", "start": 100, "end": 1200}
none_dict_2 = {"function": "none", "start": 200, "end": 1400}
none_dict_3 = {"function": "none", "start": 400, "end": 1800}
lin_dict_0 = {"function": "linear", "start": 0, "end": 1000}
lin_dict_1 = {"function": "linear", "start": 100, "end": 1200}
lin_dict_2 = {"function": "linear", "start": 200, "end": 1400}
lin_dict_3 = {"function": "linear", "start": 400, "end": 1800}
pow_dict_0 = {"function": "pow", "start": 0, "end": 1000, "alpha": 0.5}
pow_dict_1 = {"function": "pow", "start": 100, "end": 1200, "alpha": 0.75}
pow_dict_2 = {"function": "pow", "start": 200, "end": 1400, "alpha": 1.5}
pow_dict_3 = {"function": "pow", "start": 400, "end": 1800, "alpha": 3.0}
rate_list = [
sigmoid_dict_0,
sigmoid_dict_1,
sigmoid_dict_2,
sigmoid_dict_3,
none_dict_0,
none_dict_1,
none_dict_2,
none_dict_3,
lin_dict_0,
lin_dict_1,
lin_dict_2,
lin_dict_3,
pow_dict_0,
pow_dict_1,
pow_dict_2,
pow_dict_3,
]
trunc_norm_dict_0 = {
"distribution": "truncated_normal",
"start_mean": -0.5,
"start_std": 0.5,
"end_mean": 5.0,
"end_std": 0.5,
"min_value": -0.5,
"max_value": 0.5,
}
trunc_norm_dict_1 = {
"distribution": "truncated_normal",
"start_mean": 0.0,
"start_std": 0.01,
"end_mean": 4.0,
"end_std": 0.01,
"min_value": 0.25,
"max_value": 6.0,
}
trunc_norm_dict_2 = {
"distribution": "truncated_normal",
"start_mean": 0.25,
"start_std": 0.5,
"end_mean": 3.0,
"end_std": 2.0,
"min_value": 0.25,
"max_value": 3.0,
}
trunc_norm_dict_3 = {
"distribution": "truncated_normal",
"start_mean": 0.5,
"start_std": 0.5,
"end_mean": 2.0,
"end_std": 1.0,
"min_value": 0.25,
"max_value": 4.0,
}
norm_dict_0 = {
"distribution": "normal",
"start_mean": -0.5,
"start_std": 0.5,
"end_mean": 5.0,
"end_std": 0.5,
}
norm_dict_1 = {
"distribution": "normal",
"start_mean": 0.0,
"start_std": 0.01,
"end_mean": 4.0,
"end_std": 0.01,
}
norm_dict_2 = {
"distribution": "normal",
"start_mean": 0.25,
"start_std": 0.5,
"end_mean": 3.0,
"end_std": 2.0,
}
norm_dict_3 = {
"distribution": "normal",
"start_mean": 0.5,
"start_std": 0.5,
"end_mean": 2.0,
"end_std": 1.0,
}
uniform_dict_0 = {
"distribution": "uniform",
"start_min_value": -0.5,
"start_max_value": 0.5,
"end_min_value": 5.0,
"end_max_value": 5.0,
}
uniform_dict_1 = {
"distribution": "uniform",
"start_min_value": 0.0,
"start_max_value": 0.0,
"end_min_value": 1.0,
"end_max_value": 4.0,
}
uniform_dict_2 = {
"distribution": "uniform",
"start_min_value": 0.2,
"start_max_value": 0.3,
"end_min_value": 2.0,
"end_max_value": 3.0,
}
uniform_dict_3 = {
"distribution": "uniform",
"start_min_value": 0.5,
"start_max_value": 0.5,
"end_min_value": -2.0,
"end_max_value": 2.0,
}
dist_list = [
trunc_norm_dict_0,
trunc_norm_dict_1,
trunc_norm_dict_2,
trunc_norm_dict_3,
norm_dict_0,
norm_dict_1,
norm_dict_2,
norm_dict_3,
uniform_dict_0,
uniform_dict_1,
uniform_dict_2,
uniform_dict_3,
]
class TestCurriculumLoaders(unittest.TestCase):
def test_loading_all_rate_loaders(self):
success = False
try:
for rate in rate_list:
ch.CurriculumRateParameters(**rate)
success = True
except:
pass
self.assertTrue(success)
def test_all_sampler_loaders(self):
success = False
try:
for dist in dist_list:
ch.CurriculumSamplingParameters(**dist)
success = True
except:
pass
self.assertTrue(success)
def test_sigmoid_rate_loader(self):
rate = ch.CurriculumRateParameters(**sigmoid_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["sigmoid"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
self.assertEqual(rate.extent, 1.5)
def test_none_rate_loader(self):
rate = ch.CurriculumRateParameters(**none_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["none"])
def test_linear_rate_loader(self):
rate = ch.CurriculumRateParameters(**lin_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["linear"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
def test_pow_rate_loader(self):
rate = ch.CurriculumRateParameters(**pow_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["pow"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
self.assertEqual(rate.alpha, 0.5)
def test_error_handling_rate_loader(self):
success = 1
try:
rate = ch.CurriculumRateParameters(
**{"function": "none", "start": 0, "end": -1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "none", "start": -100, "end": 1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "sigmoid", "start": 100, "end": 1000, "extent": -1}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "sigmoid", "start": 100, "end": 1000, "extent": 0}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "linear", "start": 100, "end": -1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "linear", "start": -1000, "end": -100}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "pow", "start": 100, "end": 1000, "alpha": -1}
)
success *= 0
except:
pass
self.assertTrue(success == 1)
def test_load_empty_rate_loader(self):
success = False
try:
rate = ch.CurriculumRateParameters(**{})
success = True
except:
pass
self.assertTrue(success)
def test_load_empty_sampler_loader(self):
success = False
try:
dist = ch.CurriculumSamplingParameters(**{})
success = True
except:
pass
self.assertTrue(success)
def test_load_trunc_norm_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**trunc_norm_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["truncated_normal"])
self.assertEqual(dist.start_mean, -0.5)
self.assertEqual(dist.start_std, 0.5)
self.assertEqual(dist.end_mean, 5.0)
self.assertEqual(dist.end_std, 0.5)
self.assertEqual(dist.min_value, -0.5)
self.assertEqual(dist.max_value, 0.5)
def test_load_norm_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**norm_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["normal"])
self.assertEqual(dist.start_mean, -0.5)
self.assertEqual(dist.start_std, 0.5)
self.assertEqual(dist.end_mean, 5.0)
self.assertEqual(dist.end_std, 0.5)
def test_load_uniform_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**uniform_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["uniform"])
self.assertEqual(dist.start_min_value, -0.5)
self.assertEqual(dist.start_max_value, 0.5)
self.assertEqual(dist.end_min_value, 5.0)
self.assertEqual(dist.end_max_value, 5.0)
if __name__ == "__main__":
unittest.main()
| 8,633 | Python | 27.401316 | 82 | 0.553226 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/run_tasks_tests.py | from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp()
import unittest
testmodules = [
"omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_curriculum_helpers",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_disturbances",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_parameters",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_rewards",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_core",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_go_to_xy",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_go_to_pose",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_track_xy_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_track_xyo_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_disturbances",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_parameters",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_rewards",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_core",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_go_to_xyz",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_go_to_pose",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_track_xyz_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_track_6d_vel",
]
suite = unittest.TestSuite()
for t in testmodules:
try:
# If the module defines a suite() function, call it to get the suite.
mod = __import__(t, globals(), locals(), ["suite"])
suitefn = getattr(mod, "suite")
suite.addTest(suitefn())
except (ImportError, AttributeError):
# else, just load all the test cases from the module.
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner(verbosity=2).run(suite)
simulation_app.close()
| 2,172 | Python | 50.738094 | 93 | 0.752762 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/test_MFP2D_go_to_pose.py | from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoToPoseParameters,
)
import numpy as np
import unittest
import torch
import math
# =============================================================================
# Default parameters
# =============================================================================
default_params = GoToPoseParameters(
position_tolerance=0.01,
heading_tolerance=0.025,
kill_after_n_steps_in_tolerance=5,
goal_random_position=0.0,
max_spawn_dist=6.0,
min_spawn_dist=3.0,
kill_dist=8.0,
spawn_curriculum=False,
spawn_curriculum_min_dist=0.5,
spawn_curriculum_max_dist=2.5,
spawn_curriculum_mode="linear",
spawn_curriculum_warmup=250,
spawn_curriculum_end=750,
)
default_rewards = GoToPoseReward(
position_reward_mode="linear",
heading_reward_mode="linear",
position_exponential_reward_coeff=0.25,
heading_exponential_reward_coeff=0.25,
position_scale=1.0,
heading_scale=1.0,
)
default_num_envs = 4
default_device = "cuda:0"
# =============================================================================
# create_stats & update_statistics
# =============================================================================
class TestCreateStats(unittest.TestCase):
def setUp(self) -> None:
torch_zeros = lambda: torch.zeros(
default_num_envs,
dtype=torch.float,
device=default_device,
requires_grad=False,
)
self.stats = {
"position_reward": torch_zeros(),
"heading_reward": torch_zeros(),
"position_error": torch_zeros(),
"heading_error": torch_zeros(),
}
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
self.position = 1.0
self.heading = 1.0
self.position_error = 1.0
self.heading_error = 1.0
self.new_stats = {
"position_reward": torch_zeros() + self.position,
"heading_reward": torch_zeros() + self.heading,
"position_error": torch_zeros() + self.position_error,
"heading_error": torch_zeros() + self.heading_error,
}
def test_create_stats(self):
stats = self.obj.create_stats({})
self.assertEqual(stats.keys(), self.stats.keys())
def test_update_statistics(self):
stats = self.obj.create_stats({})
self.obj.position_reward = self.stats["position_reward"]
self.obj.heading_reward = self.stats["heading_reward"]
self.obj.position_dist = self.stats["position_error"]
self.obj.heading_dist = self.stats["heading_error"]
stats = self.obj.update_statistics(self.new_stats)
self.assertTrue(
torch.all(stats["position_reward"] == self.new_stats["position_reward"])
)
self.assertTrue(
torch.all(stats["heading_reward"] == self.new_stats["heading_reward"])
)
self.assertTrue(
torch.all(stats["position_error"] == self.new_stats["position_error"])
)
self.assertTrue(
torch.all(stats["heading_error"] == self.new_stats["heading_error"])
)
# =============================================================================
# get_state_observations
# =============================================================================
class TestGetStateObservation(unittest.TestCase):
def setUp(self):
# Current state of the robots
self.positions = torch.tensor(
[[0, 0], [1, 1], [2, 2], [-1, -1]], dtype=torch.float, device=default_device
)
self.headings = torch.tensor(
[[0], [np.pi / 2], [np.pi], [-np.pi / 2]],
dtype=torch.float,
device=default_device,
)
self.orientations = torch.tensor(
[
[torch.cos(self.headings[0]), torch.sin(self.headings[0])],
[torch.cos(self.headings[1]), torch.sin(self.headings[1])],
[torch.cos(self.headings[2]), torch.sin(self.headings[2])],
[torch.cos(self.headings[3]), torch.sin(self.headings[3])],
],
dtype=torch.float,
device=default_device,
)
        # Target state of the robots
self.target_headings = torch.tensor(
[np.pi * 2, np.pi, np.pi / 2, np.pi / 4],
dtype=torch.float,
device=default_device,
)
self.target_positions = torch.tensor(
[[0, 0], [-1, -1], [-2, 2], [-1, -1]],
dtype=torch.float,
device=default_device,
)
# Expected state observations
self.expected_position = torch.tensor(
[[0, 0], [-2, -2], [-4, 0], [0, 0]],
dtype=torch.float,
device=default_device,
)
self.expected_heading = torch.tensor(
[0, np.pi / 2, -np.pi / 2, np.pi * 3 / 4],
dtype=torch.float,
device=default_device,
)
# Recreate the state dict sent to the task
self.current_state = {
"position": torch.tensor(
self.positions, dtype=torch.float, device=default_device
),
"orientation": torch.tensor(
self.orientations, dtype=torch.float, device=default_device
),
"linear_velocity": torch.zeros(
(default_num_envs, 2), dtype=torch.float, device=default_device
),
"angular_velocity": torch.zeros(
(default_num_envs), dtype=torch.float, device=default_device
),
}
# Generate the task
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
# Overriding the target positions and headings
self.obj._target_headings = self.target_headings
self.obj._target_positions = self.target_positions
def test_get_state_position(self):
# Generate the state observation to be passed to the agent
state_observation = self.obj.get_state_observations(self.current_state)
# Position error in the world frame
gen_position = state_observation[:, 6:8]
self.assertTrue(torch.allclose(gen_position, self.expected_position))
def test_get_state_orientation(self):
# Generate the state observation to be passed to the agent
state_observation = self.obj.get_state_observations(self.current_state)
# Heading error in the world frame (cos(theta), sin(theta))
gen_heading = torch.arctan2(state_observation[:, 9], state_observation[:, 8])
self.assertTrue(
torch.allclose(gen_heading, self.expected_heading, rtol=1e-3, atol=1e-4)
)
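# Worked example of the recovery above, using the setUp data for env 1: current
# heading pi/2 and target heading pi give an error of pi/2, so the observation
# holds (cos, sin) = (0, 1) at indices 8-9 and arctan2(1, 0) returns pi/2.
# Likewise the position error at indices 6-7 is target - position = (-2, -2).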
# =============================================================================
# compute_reward & update_kills
# =============================================================================
class TestComputeReward(unittest.TestCase):
def setUp(self):
# Current state of the robots
self.positions = torch.tensor(
[[0, 0], [1, 1], [2, 2], [-1, -1]], dtype=torch.float, device=default_device
)
self.headings = torch.tensor(
[[0], [np.pi / 2], [np.pi], [-np.pi / 2]],
dtype=torch.float,
device=default_device,
)
self.orientations = torch.tensor(
[
[torch.cos(self.headings[0]), torch.sin(self.headings[0])],
[torch.cos(self.headings[1]), torch.sin(self.headings[1])],
[torch.cos(self.headings[2]), torch.sin(self.headings[2])],
[torch.cos(self.headings[3]), torch.sin(self.headings[3])],
],
dtype=torch.float,
device=default_device,
)
        # Target state of the robots
self.target_headings = torch.tensor(
[0, np.pi, np.pi / 2, np.pi / 4],
dtype=torch.float,
device=default_device,
)
self.target_positions = torch.tensor(
[[0, 0], [-1, -1], [-2, 2], [-1, -1]],
dtype=torch.float,
device=default_device,
)
# Expected state observations
self.expected_position = torch.tensor(
[[0, 0], [-2, -2], [-4, 0], [0, 0]],
dtype=torch.float,
device=default_device,
)
self.expected_heading = torch.tensor(
[0, np.pi / 2, -np.pi / 2, np.pi * 3 / 4],
dtype=torch.float,
device=default_device,
)
# Recreate the state dict sent to the task
self.current_state = {
"position": torch.tensor(
self.positions, dtype=torch.float, device=default_device
),
"orientation": torch.tensor(
self.orientations, dtype=torch.float, device=default_device
),
"linear_velocity": torch.zeros(
(default_num_envs, 2), dtype=torch.float, device=default_device
),
"angular_velocity": torch.zeros(
(default_num_envs), dtype=torch.float, device=default_device
),
}
# Generate the task
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
# Overriding the target positions and headings
self.obj._target_headings = self.target_headings
self.obj._target_positions = self.target_positions
def test_get_compute_reward_goal_logic_1(self):
# Will run 3 steps to check if the condition for goal reached is working
# Tests shifts in position
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 1)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 2)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
self.current_state["position"][0, 0] = 2 # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 0)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
def test_get_compute_reward_goal_logic_2(self):
# Will run 3 steps to check if the condition for goal reached is working
# Tests shifts in heading
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 1)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 2)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
self.current_state["orientation"][0, 0] = np.cos(
np.pi / 2
) # moving away from the goal.
self.current_state["orientation"][0, 1] = np.sin(
np.pi / 2
) # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 0)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
def test_get_compute_reward_position_dist_is_ok(self):
# Checks if the position distance is being computed correctly
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
expected_dist = torch.sqrt(torch.square(self.expected_position).sum(-1))
self.assertTrue(
torch.allclose(self.obj.position_dist, expected_dist, rtol=1e-3, atol=1e-4)
)
def test_get_compute_reward_heading_dist_is_ok(self):
# Checks if the heading distance is being computed correctly
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
expected_dist = torch.abs(self.expected_heading)
self.assertTrue(
torch.allclose(self.obj.heading_dist, expected_dist, rtol=1e-3, atol=1e-4)
)
def test_update_kills_1(self):
# Check if the kill condition is being updated correctly
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die1 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die2 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die3 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die4 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die5 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die6 = self.obj.update_kills()
self.assertTrue(
torch.all(die1 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die2 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die3 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die4 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die5 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die6 == torch.tensor([1, 0, 0, 0], device=default_device))
)
def test_update_kills_2(self):
# Check if the kill condition is being updated correctly
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die1 = self.obj.update_kills()
self.current_state["position"][0, 0] = 20 # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die2 = self.obj.update_kills()
        self.current_state["position"][0, 0] = 0 # moving back to the goal.
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die3 = self.obj.update_kills()
self.assertTrue(
torch.all(die1 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die2 == torch.tensor([1, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die3 == torch.tensor([0, 0, 0, 0], device=default_device))
)
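# Note on the kill logic exercised above: with kill_after_n_steps_in_tolerance=5,
# an environment that starts on its goal (env 0 here) is only flagged for reset on
# the sixth consecutive in-tolerance step (die6), and the counter drops back to
# zero as soon as the agent leaves the position or heading tolerance.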
class TestGetGoals(unittest.TestCase):
def setUp(self):
self.num_envs = 1000
self.obj = GoToPoseTask({}, {}, self.num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._target_positions = torch.zeros(
(self.num_envs, 2), device=default_device
)
self.obj._target_headings = torch.zeros(self.num_envs, device=default_device)
self.target_positions = torch.zeros((self.num_envs, 2), device=default_device)
self.target_orientations = torch.zeros(
(self.num_envs, 4), device=default_device
)
def test_get_goals(self):
        env_ids = torch.arange(
            0, self.num_envs, 1, device=default_device, dtype=torch.int64
        )
target_positions, target_orientations = self.obj.get_goals(
env_ids, self.target_positions, self.target_orientations
)
# Check if target positions and orientations are updated correctly
self.assertTrue(torch.all(target_positions[env_ids, :2] != 0))
self.assertTrue(torch.all(target_orientations[env_ids, 0] != 1))
self.assertTrue(torch.all(target_orientations[env_ids, 3] != 0))
# Check if target positions and orientations are within the specified range
self.assertTrue(
torch.all(
torch.abs(target_positions[env_ids, :2])
<= self.obj._task_parameters.goal_random_position
)
)
self.assertTrue(
torch.all(
(torch.abs(target_orientations[env_ids, 0]) <= 1)
* (torch.abs(target_orientations[env_ids, 3]) <= 1)
)
)
# Check if target headings are within the range of [0, 2*pi]
self.assertTrue(
torch.all(
(self.obj._target_headings[env_ids] >= 0)
* (self.obj._target_headings[env_ids] <= 2 * math.pi)
)
)
if __name__ == "__main__":
unittest.main()
| 19,306 | Python | 39.222917 | 88 | 0.576919 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/warp/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class HumanoidLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTaskWarp.set_up_scene(self, scene)
self._humanoids = ArticulationView(prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False)
scene.add(self._humanoids)
return
def get_humanoid(self):
humanoid = Humanoid(prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions)
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path),
self._sim_config.parse_actor_config("Humanoid"))
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = wp.array(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
dtype=wp.float32
)
self.max_motor_effort = 135.0
self.motor_effort_ratio = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(compute_effort_ratio, dim=self._humanoids._num_dof,
inputs=[self.motor_effort_ratio, self.joint_gears, self.max_motor_effort], device=self._device)
dof_limits = self._humanoids.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._humanoids._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = wp.array([self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._humanoids._num_dof),
inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale], device=self._device)
return self.dof_at_limit_cost
@wp.kernel
def compute_effort_ratio(motor_effort_ratio: wp.array(dtype=wp.float32),
joint_gears: wp.array(dtype=wp.float32),
max_motor_effort: float):
tid = wp.tid()
motor_effort_ratio[tid] = joint_gears[tid] / max_motor_effort
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
dof_limits_upper[tid] = dof_limits[0, tid, 1]
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32),
joints_at_limit_cost_scale: float):
i, j = wp.tid()
dof_i = j + 12
scaled_cost = joints_at_limit_cost_scale * (wp.abs(obs_buf[i, dof_i]) - 0.98) / 0.02
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.98:
cost = scaled_cost * motor_effort_ratio[j]
dof_at_limit_cost[i] = cost
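# Worked example of the two kernels above (values taken from post_reset):
# compute_effort_ratio divides each gear by max_motor_effort = 135, e.g.
# 67.5 -> 0.5, 45 -> 0.333, 90 -> 0.667, 22.5 -> 0.167, 135 -> 1.0.
# get_dof_at_limit_cost then ramps from 0 at |dof pos| = 0.98 up to
# joints_at_limit_cost_scale at |dof pos| = 1.0, weighted by that ratio:
# at 0.99 the cost is 0.5 * joints_at_limit_cost_scale * motor_effort_ratio[j].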
| 6,707 | Python | 42.558441 | 143 | 0.640376 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/warp/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class AntLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = wp.array([0, 0, 0.5], dtype=wp.float32, device="cpu")
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTaskWarp.set_up_scene(self, scene)
self._ants = ArticulationView(prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False)
scene.add(self._ants)
return
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings("Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant"))
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = wp.array([15, 15, 15, 15, 15, 15, 15, 15], dtype=wp.float32, device=self._device)
dof_limits = self._ants.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._ants._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.motor_effort_ratio = wp.array([1, 1, 1, 1, 1, 1, 1, 1], dtype=wp.float32, device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = wp.array([self._ants._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._ants._num_dof),
inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio], device=self._device)
return self.dof_at_limit_cost
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32)):
i, j = wp.tid()
dof_i = j + 12
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.99:
cost = 1.0
dof_at_limit_cost[i] = cost
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
dof_limits_upper[tid] = dof_limits[0, tid, 1] | 5,242 | Python | 44.991228 | 136 | 0.685616 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/warp/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class CartpoleTask(RLTaskWarp):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = wp.array([0.0, 0.0, 2.0], dtype=wp.float32)
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTaskWarp.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False)
scene.add(self._cartpoles)
return
def get_cartpole(self):
cartpole = Cartpole(prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole"))
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, dof_pos, dof_vel, self._cart_dof_idx, self._pole_dof_idx], device=self._device)
observations = {
self._cartpoles.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
forces = wp.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=wp.float32, device=self._device)
wp.launch(compute_forces, dim=self._num_envs,
inputs=[forces, actions_wp, self._cart_dof_idx, self._max_push_effort], device=self._device)
self._cartpoles.set_joint_efforts(forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_idx, num_resets,
inputs=[self.dof_pos, self.dof_vel, indices, self.reset_buf, self.progress_buf, self._cart_dof_idx, self._pole_dof_idx, self._rand_seed],
device=self._device)
# apply resets
self._cartpoles.set_joint_positions(self.dof_pos[indices], indices=indices)
self._cartpoles.set_joint_velocities(self.dof_vel[indices], indices=indices)
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
self.dof_pos = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
# randomize all envs
self.reset_idx()
def calculate_metrics(self) -> None:
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.obs_buf, self.rew_buf, self._reset_dist], device=self._device)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.reset_buf, self.progress_buf, self._reset_dist, self._max_episode_length],
device=self._device)
@wp.kernel
def reset_idx(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
indices: wp.array(dtype=wp.int32),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
cart_dof_idx: int,
pole_dof_idx: int,
rand_seed: int):
i = wp.tid()
idx = indices[i]
rand_state = wp.rand_init(rand_seed, i)
# randomize DOF positions
dof_pos[idx, cart_dof_idx] = 1.0 * (1.0 - 2.0 * wp.randf(rand_state))
dof_pos[idx, pole_dof_idx] = 0.125 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# randomize DOF velocities
dof_vel[idx, cart_dof_idx] = 0.5 * (1.0 - 2.0 * wp.randf(rand_state))
dof_vel[idx, pole_dof_idx] = 0.25 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# bookkeeping
progress_buf[idx] = 0
reset_buf[idx] = 0
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
max_push_effort: float):
i = wp.tid()
forces[i, cart_dof_idx] = max_push_effort * actions[i, 0]
@wp.kernel
def get_observations(obs_buf: wp.array(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
pole_dof_idx: int):
i = wp.tid()
obs_buf[i, 0] = dof_pos[i, cart_dof_idx]
obs_buf[i, 1] = dof_vel[i, cart_dof_idx]
obs_buf[i, 2] = dof_pos[i, pole_dof_idx]
obs_buf[i, 3] = dof_vel[i, pole_dof_idx]
@wp.kernel
def calculate_metrics(obs_buf: wp.array(dtype=wp.float32, ndim=2),
rew_buf: wp.array(dtype=wp.float32),
reset_dist: float):
i = wp.tid()
cart_pos = obs_buf[i, 0]
cart_vel = obs_buf[i, 1]
pole_angle = obs_buf[i, 2]
pole_vel = obs_buf[i, 3]
rew_buf[i] = 1.0 - pole_angle * pole_angle - 0.01 * wp.abs(cart_vel) - 0.005 * wp.abs(pole_vel)
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_angle) > warp_utils.PI / 2.0:
rew_buf[i] = -2.0
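# Worked example of the reward above: with pole_angle = 0.1, cart_vel = 0.5 and
# pole_vel = 1.0 the reward is 1.0 - 0.01 - 0.005 - 0.005 = 0.98; it is replaced
# by -2.0 as soon as |cart_pos| exceeds reset_dist or |pole_angle| exceeds pi/2.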
@wp.kernel
def is_done(obs_buf: wp.array(dtype=wp.float32, ndim=2),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
reset_dist: float,
max_episode_length: int):
i = wp.tid()
cart_pos = obs_buf[i, 0]
pole_pos = obs_buf[i, 2]
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_pos) > warp_utils.PI / 2.0 or progress_buf[i] > max_episode_length:
reset_buf[i] = 1
else:
reset_buf[i] = 0
| 8,665 | Python | 38.390909 | 154 | 0.635661 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/warp/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class LocomotionTask(RLTaskWarp):
def __init__(
self,
name,
env,
offset=None
) -> None:
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._num_sensors = 2
RLTaskWarp.__init__(self, name, env)
return
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces()
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, torso_position, torso_rotation, self._env_pos, velocities, dof_pos, dof_vel,
self.prev_potentials, self.potentials, self.dt, self.target,
self.basis_vec0, self.basis_vec1, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
sensor_force_torques, self.contact_force_scale, self.actions, self.angular_velocity_scale,
self._robots._num_dof, self._num_sensors, self._sensor_indices], device=self._device
)
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
self.actions = actions_wp
wp.launch(compute_forces, dim=(self._num_envs, self._robots._num_dof),
inputs=[self.forces, self.actions, self.joint_gears, self.power_scale], device=self._device)
# applies joint torques
self._robots.set_joint_efforts(self.forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_dofs, dim=(num_resets, self._robots._num_dof),
inputs=[self.dof_pos, self.dof_vel, self.initial_dof_pos, self.dof_limits_lower, self.dof_limits_upper, indices, self._rand_seed],
device=self._device)
wp.launch(reset_idx, dim=num_resets,
inputs=[self.root_pos, self.root_rot, self.initial_root_pos, self.initial_root_rot, self._env_pos,
self.target, self.prev_potentials, self.potentials, self.dt,
self.reset_buf, self.progress_buf, indices, self._rand_seed],
device=self._device)
# apply resets
self._robots.set_joint_positions(self.dof_pos[indices], indices=indices)
self._robots.set_joint_velocities(self.dof_vel[indices], indices=indices)
self._robots.set_world_poses(self.root_pos[indices], self.root_rot[indices], indices=indices)
self._robots.set_velocities(self.root_vel[indices], indices=indices)
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.basis_vec0 = wp.vec3(1, 0, 0)
self.basis_vec1 = wp.vec3(0, 0, 1)
self.target = wp.vec3(1000, 0, 0)
self.dt = 1.0 / 60.0
# initialize potentials
self.potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
self.prev_potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
wp.launch(init_potentials, dim=self._num_envs,
inputs=[self.potentials, self.prev_potentials, self.dt], device=self._device)
self.actions = wp.zeros((self.num_envs, self.num_actions), device=self._device, dtype=wp.float32)
self.forces = wp.zeros((self._num_envs, self._robots._num_dof), dtype=wp.float32, device=self._device)
self.dof_pos = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.root_pos = wp.zeros((self.num_envs, 3), device=self._device, dtype=wp.float32)
self.root_rot = wp.zeros((self.num_envs, 4), device=self._device, dtype=wp.float32)
self.root_vel = wp.zeros((self.num_envs, 6), device=self._device, dtype=wp.float32)
        # randomize all envs
self.reset_idx()
def calculate_metrics(self) -> None:
dof_at_limit_cost = self.get_dof_at_limit_cost()
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.rew_buf, self.obs_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials,
self.actions_cost_scale, self.energy_cost_scale, self.termination_height,
self.death_cost, self._robots.num_dof, dof_at_limit_cost, self.alive_reward_scale, self.motor_effort_ratio],
device=self._device
)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length],
device=self._device
)
#####################################################################
###==========================warp kernels=========================###
#####################################################################
@wp.kernel
def init_potentials(potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
dt: float):
i = wp.tid()
potentials[i] = -1000.0 / dt
prev_potentials[i] = -1000.0 / dt
@wp.kernel
def reset_idx(root_pos: wp.array(dtype=wp.float32, ndim=2),
root_rot: wp.array(dtype=wp.float32, ndim=2),
initial_root_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
initial_root_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
target: wp.vec3,
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i = wp.tid()
idx = indices[i]
# reset root states
for j in range(3):
root_pos[idx, j] = initial_root_pos[idx, j]
for j in range(4):
root_rot[idx, j] = initial_root_rot[idx, j]
# reset potentials
to_target = target - wp.vec3(initial_root_pos[idx, 0] - env_pos[idx, 0], initial_root_pos[idx, 1] - env_pos[idx, 1], target[2])
prev_potentials[idx] = -wp.length(to_target) / dt
potentials[idx] = -wp.length(to_target) / dt
temp = potentials[idx] - prev_potentials[idx]
# bookkeeping
reset_buf[idx] = 0
progress_buf[idx] = 0
@wp.kernel
def reset_dofs(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
initial_dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i, j = wp.tid()
idx = indices[i]
rand_state = wp.rand_init(rand_seed, i * j + j)
# randomize DOF positions and velocities
dof_pos[idx, j] = wp.clamp(wp.randf(rand_state, -0.2, 0.2) + initial_dof_pos[idx, j], dof_limits_lower[j], dof_limits_upper[j])
dof_vel[idx, j] = wp.randf(rand_state, -0.1, 0.1)
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
joint_gears: wp.array(dtype=wp.float32),
power_scale: float):
i, j = wp.tid()
forces[i, j] = actions[i, j] * joint_gears[j] * power_scale
@wp.func
def get_euler_xyz(q: wp.quat):
qx = 0
qy = 1
qz = 2
qw = 3
# roll (x-axis rotation)
sinr_cosp = 2.0 * (q[qw] * q[qx] + q[qy] * q[qz])
cosr_cosp = q[qw] * q[qw] - q[qx] * q[qx] - q[qy] * q[qy] + q[qz] * q[qz]
roll = wp.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (q[qw] * q[qy] - q[qz] * q[qx])
if wp.abs(sinp) >= 1:
pitch = warp_utils.PI / 2.0 * (wp.abs(sinp)/sinp)
else:
pitch = wp.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2.0 * (q[qw] * q[qz] + q[qx] * q[qy])
cosy_cosp = q[qw] * q[qw] + q[qx] * q[qx] - q[qy] * q[qy] - q[qz] * q[qz]
yaw = wp.atan2(siny_cosp, cosy_cosp)
rpy = wp.vec3(roll % (2.0 * warp_utils.PI), pitch % (2.0 * warp_utils.PI), yaw % (2.0 * warp_utils.PI))
return rpy
@wp.func
def compute_up_vec(torso_rotation: wp.quat, vec1: wp.vec3):
up_vec = wp.quat_rotate(torso_rotation, vec1)
return up_vec
@wp.func
def compute_heading_vec(torso_rotation: wp.quat, vec0: wp.vec3):
heading_vec = wp.quat_rotate(torso_rotation, vec0)
return heading_vec
@wp.func
def unscale(x:float, lower:float, upper:float):
return (2.0 * x - upper - lower) / (upper - lower)
@wp.func
def normalize_angle(x: float):
return wp.atan2(wp.sin(x), wp.cos(x))
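# Quick sanity checks for the two helpers above: unscale maps [lower, upper] onto
# [-1, 1], e.g. unscale(0.0, 0.0, 1.0) = -1, unscale(0.5, 0.0, 1.0) = 0 and
# unscale(1.0, 0.0, 1.0) = 1; normalize_angle wraps any angle into (-pi, pi],
# e.g. normalize_angle(3*pi/2) = -pi/2.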
@wp.kernel
def get_observations(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
torso_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
torso_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
velocity: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
target: wp.vec3,
basis_vec0: wp.vec3,
basis_vec1: wp.vec3,
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_vel_scale: float,
sensor_force_torques: wp.indexedarray(dtype=wp.float32, ndim=3),
contact_force_scale: float,
actions: wp.array(dtype=wp.float32, ndim=2),
angular_velocity_scale: float,
num_dofs: int,
num_sensors: int,
sensor_indices: wp.array(dtype=wp.int32)
):
i = wp.tid()
torso_position_x = torso_pos[i, 0] - env_pos[i, 0]
torso_position_y = torso_pos[i, 1] - env_pos[i, 1]
torso_position_z = torso_pos[i, 2] - env_pos[i, 2]
to_target = target - wp.vec3(torso_position_x, torso_position_y, target[2])
prev_potentials[i] = potentials[i]
potentials[i] = -wp.length(to_target) / dt
temp = potentials[i] - prev_potentials[i]
torso_quat = wp.quat(torso_rot[i, 1], torso_rot[i, 2], torso_rot[i, 3], torso_rot[i, 0])
up_vec = compute_up_vec(torso_quat, basis_vec1)
up_proj = up_vec[2]
heading_vec = compute_heading_vec(torso_quat, basis_vec0)
target_dir = wp.normalize(to_target)
heading_proj = wp.dot(heading_vec, target_dir)
lin_velocity = wp.vec3(velocity[i, 0], velocity[i, 1], velocity[i, 2])
ang_velocity = wp.vec3(velocity[i, 3], velocity[i, 4], velocity[i, 5])
rpy = get_euler_xyz(torso_quat)
vel_loc = wp.quat_rotate_inv(torso_quat, lin_velocity)
angvel_loc = wp.quat_rotate_inv(torso_quat, ang_velocity)
walk_target_angle = wp.atan2(target[2] - torso_position_z, target[0] - torso_position_x)
angle_to_target = walk_target_angle - rpy[2] # yaw
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
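    # Worked size check of that layout (12 + 3 * num_dofs + 6 * num_sensors):
    # Humanoid: 12 + 3 * 21 + 6 * 2 = 87; Ant with its four feet: 12 + 3 * 8 + 6 * 4 = 60,
    # matching the _num_observations declared in the respective task classes.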
obs_offset = 0
obs_buf[i, 0] = torso_position_z
obs_offset = obs_offset + 1
for j in range(3):
obs_buf[i, j+obs_offset] = vel_loc[j]
obs_offset = obs_offset + 3
for j in range(3):
obs_buf[i, j+obs_offset] = angvel_loc[j] * angular_velocity_scale
obs_offset = obs_offset + 3
obs_buf[i, obs_offset+0] = normalize_angle(rpy[2])
obs_buf[i, obs_offset+1] = normalize_angle(rpy[0])
obs_buf[i, obs_offset+2] = normalize_angle(angle_to_target)
obs_buf[i, obs_offset+3] = up_proj
obs_buf[i, obs_offset+4] = heading_proj
obs_offset = obs_offset + 5
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = unscale(dof_pos[i, j], dof_limits_lower[j], dof_limits_upper[j])
obs_offset = obs_offset + num_dofs
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = dof_vel[i, j] * dof_vel_scale
obs_offset = obs_offset + num_dofs
for j in range(num_sensors):
sensor_idx = sensor_indices[j]
for k in range(6):
obs_buf[i, obs_offset+j*6+k] = sensor_force_torques[i, sensor_idx, k] * contact_force_scale
obs_offset = obs_offset + (num_sensors * 6)
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = actions[i, j]
@wp.kernel
def is_done(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
termination_height: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
max_episode_length: int
):
i = wp.tid()
if obs_buf[i, 0] < termination_height or progress_buf[i] >= max_episode_length - 1:
reset_buf[i] = 1
else:
reset_buf[i] = 0
@wp.kernel
def calculate_metrics(
rew_buf: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
up_weight: float,
heading_weight: float,
potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
actions_cost_scale: float,
energy_cost_scale: float,
termination_height: float,
death_cost: float,
num_dof: int,
dof_at_limit_cost: wp.array(dtype=wp.float32),
alive_reward_scale: float,
motor_effort_ratio: wp.array(dtype=wp.float32)
):
i = wp.tid()
# heading reward
if obs_buf[i, 11] > 0.8:
heading_reward = heading_weight
else:
heading_reward = heading_weight * obs_buf[i, 11] / 0.8
# aligning up axis of robot and environment
up_reward = 0.0
if obs_buf[i, 10] > 0.93:
up_reward = up_weight
# energy penalty for movement
actions_cost = float(0.0)
electricity_cost = float(0.0)
for j in range(num_dof):
actions_cost = actions_cost + (actions[i, j] * actions[i, j])
electricity_cost = electricity_cost + (wp.abs(actions[i, j] * obs_buf[i, 12+num_dof+j]) * motor_effort_ratio[j])
    # reward for progress toward the target (a constant alive bonus is added below)
    progress_reward = potentials[i] - prev_potentials[i]
total_reward = (
progress_reward
+ alive_reward_scale
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost[i]
)
# adjust reward for fallen agents
if obs_buf[i, 0] < termination_height:
total_reward = death_cost
rew_buf[i] = total_reward
| 18,233 | Python | 39.52 | 147 | 0.624198 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/base/rl_task.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from abc import abstractmethod
import numpy as np
import omni.isaac.core.utils.warp.tensor as wp_utils
import omni.kit
import omni.usd
import torch
import warp as wp
from gym import spaces
from omni.isaac.cloner import GridCloner
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer
from pxr import Gf, UsdGeom, UsdLux
class RLTask(RLTaskInterface):
"""This class provides a PyTorch RL-specific interface for setting up RL tasks.
It includes utilities for setting up RL task related parameters,
cloning environments, and data collection for RL algorithms.
"""
def __init__(self, name, env, offset=None) -> None:
"""Initializes RL parameters, cloner object, and buffers.
Args:
name (str): name of the task.
env (VecEnvBase): an instance of the environment wrapper class to register task.
offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
"""
BaseTask.__init__(self, name=name, offset=offset)
self._rand_seed = self._cfg["seed"]
# optimization flags for pytorch JIT
torch._C._jit_set_nvfuser_enabled(False)
self.test = self._cfg["test"]
self._device = self._cfg["sim_device"]
# set up randomizer for DR
self._dr_randomizer = Randomizer(self._cfg, self._task_cfg)
if self._dr_randomizer.randomize:
import omni.replicator.isaac as dr
self.dr = dr
# set up replicator for camera data collection
self.enable_cameras = self._task_cfg["sim"].get("enable_cameras", False)
if self.enable_cameras:
from omni.replicator.isaac.scripts.writers.pytorch_writer import (
PytorchWriter,
)
from omni.replicator.isaac.scripts.writers.pytorch_listener import (
PytorchListener,
)
import omni.replicator.core as rep
self.rep = rep
self.PytorchWriter = PytorchWriter
self.PytorchListener = PytorchListener
print("Task Device:", self._device)
self.randomize_actions = False
self.randomize_observations = False
self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf)
self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf)
self.rl_device = self._cfg.get("rl_device", "cuda:0")
self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
self.rendering_interval = self._task_cfg.get("renderingInterval", 1)
# parse default viewport camera position and lookat target and resolution (width, height)
self.camera_position = [10, 10, 3]
self.camera_target = [0, 0, 0]
self.viewport_camera_width = 1280
self.viewport_camera_height = 720
if "viewport" in self._task_cfg:
self.camera_position = self._task_cfg["viewport"].get(
"camera_position", self.camera_position
)
self.camera_target = self._task_cfg["viewport"].get(
"camera_target", self.camera_target
)
self.viewport_camera_width = self._task_cfg["viewport"].get(
"viewport_camera_width", self.viewport_camera_width
)
self.viewport_camera_height = self._task_cfg["viewport"].get(
"viewport_camera_height", self.viewport_camera_height
)
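        # Illustrative task-config snippet for the optional "viewport" block parsed
        # above (keys are the ones read here; values shown are the defaults):
        #   viewport:
        #     camera_position: [10, 10, 3]
        #     camera_target: [0, 0, 0]
        #     viewport_camera_width: 1280
        #     viewport_camera_height: 720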
print("RL device: ", self.rl_device)
self._env = env
self.is_extension = False
if not hasattr(self, "_num_agents"):
self._num_agents = 1 # used for multi-agent environments
if not hasattr(self, "_num_states"):
self._num_states = 0
# initialize data spaces (defaults to gym.Box)
if not hasattr(self, "action_space"):
self.action_space = spaces.Box(
np.ones(self.num_actions, dtype=np.float32) * -1.0,
np.ones(self.num_actions, dtype=np.float32) * 1.0,
)
if not hasattr(self, "observation_space"):
self.observation_space = spaces.Box(
np.ones(self.num_observations, dtype=np.float32) * -np.Inf,
np.ones(self.num_observations, dtype=np.float32) * np.Inf,
)
if not hasattr(self, "state_space"):
self.state_space = spaces.Box(
np.ones(self.num_states, dtype=np.float32) * -np.Inf,
np.ones(self.num_states, dtype=np.float32) * np.Inf,
)
self.cleanup()
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = torch.zeros(
(self._num_envs, self.num_observations),
device=self._device,
dtype=torch.float,
)
self.states_buf = torch.zeros(
(self._num_envs, self.num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
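        # Buffer shapes for reference: obs_buf is (num_envs, num_observations),
        # states_buf is (num_envs, num_states), and rew_buf/reset_buf/progress_buf
        # are (num_envs,). reset_buf starts as ones so that every environment is
        # reset on the first simulation step.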
def set_up_scene(
self,
scene,
replicate_physics=True,
collision_filter_global_paths=[],
filter_collisions=True,
copy_from_source=False,
) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
replicate_physics (bool): Clone physics using PhysX API for better performance.
collision_filter_global_paths (list): Prim paths of global objects that should not have collision masked.
filter_collisions (bool): Mask off collision between environments.
copy_from_source (bool): Copy from source prim when cloning instead of inheriting.
"""
super().set_up_scene(scene)
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self.default_base_env_path)
stage = omni.usd.get_context().get_stage()
UsdGeom.Xform.Define(stage, self.default_zero_env_path)
if self._task_cfg["sim"].get("add_ground_plane", True):
self._ground_plane_path = "/World/defaultGroundPlane"
collision_filter_global_paths.append(self._ground_plane_path)
scene.add_default_ground_plane(prim_path=self._ground_plane_path)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths,
replicate_physics=replicate_physics,
copy_from_source=copy_from_source,
)
self._env_pos = torch.tensor(
np.array(self._env_pos), device=self._device, dtype=torch.float
)
if filter_collisions:
self._cloner.filter_collisions(
self._env.world.get_physics_context().prim_path,
"/World/collisions",
prim_paths,
collision_filter_global_paths,
)
if self._env.render_enabled:
self.set_initial_camera_params(
camera_position=self.camera_position, camera_target=self.camera_target
)
if self._task_cfg["sim"].get("add_distant_light", True):
self._create_distant_light(intensity=10000)
# initialize capturer for viewport recording
# this has to be called after initializing replicator for DR
if (
self._cfg.get("enable_recording", False)
and not self._dr_randomizer.randomize
):
self._env.create_viewport_render_product(
resolution=(self.viewport_camera_width, self.viewport_camera_height)
)
def set_initial_camera_params(self, camera_position, camera_target):
from omni.kit.viewport.utility import get_viewport_from_window_name
from omni.kit.viewport.utility.camera_state import ViewportCameraState
viewport_api_2 = get_viewport_from_window_name("Viewport")
viewport_api_2.set_active_camera("/OmniverseKit_Persp")
camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
camera_state.set_position_world(
Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True
)
camera_state.set_target_world(
Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True
)
def _create_distant_light(
self, prim_path="/World/defaultDistantLight", intensity=5000
):
stage = get_current_stage()
light = UsdLux.DistantLight.Define(stage, prim_path)
light.CreateIntensityAttr().Set(intensity)
def initialize_views(self, scene):
"""Optionally implemented by individual task classes to initialize views used in the task.
This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage.
Args:
scene (Scene): Scene to remove existing views and initialize/add new views.
"""
self._cloner = GridCloner(spacing=self._env_spacing)
pos, _ = self._cloner.get_clone_transforms(self._num_envs)
self._env_pos = torch.tensor(
np.array(pos), device=self._device, dtype=torch.float
)
if self._env.render_enabled:
# initialize capturer for viewport recording
if (
self._cfg.get("enable_recording", False)
and not self._dr_randomizer.randomize
):
self._env.create_viewport_render_product(
resolution=(self.viewport_camera_width, self.viewport_camera_height)
)
@property
def default_base_env_path(self):
"""Retrieves default path to the parent of all env prims.
Returns:
default_base_env_path(str): Defaults to "/World/envs".
"""
return "/World/envs"
@property
def default_zero_env_path(self):
"""Retrieves default path to the first env prim (index 0).
Returns:
default_zero_env_path(str): Defaults to "/World/envs/env_0".
"""
return f"{self.default_base_env_path}/env_0"
def reset(self):
"""Flags all environments for reset."""
self.reset_buf = torch.ones_like(self.reset_buf)
def pre_physics_step(self, actions):
"""Optionally implemented by individual task classes to process actions.
Args:
actions (torch.Tensor): Actions generated by RL policy.
"""
pass
def propagate_forces(self):
pass
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
self.progress_buf[:] += 1
if self._env.world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
@property
def world(self):
"""Retrieves the World object for simulation.
Returns:
world(World): Simulation World.
"""
return self._env.world
@property
def cfg(self):
"""Retrieves the main config.
Returns:
cfg(dict): Main config dictionary.
"""
return self._cfg
def set_is_extension(self, is_extension):
self.is_extension = is_extension
class RLTaskWarp(RLTask):
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = wp.zeros(
(self._num_envs, self.num_observations),
device=self._device,
dtype=wp.float32,
)
self.states_buf = wp.zeros(
(self._num_envs, self.num_states), device=self._device, dtype=wp.float32
)
self.rew_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.float32)
self.reset_buf = wp_utils.ones(
self._num_envs, device=self._device, dtype=wp.int32
)
self.progress_buf = wp.zeros(
self._num_envs, device=self._device, dtype=wp.int32
)
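        # zero_states_buf_torch is returned by get_states() when num_states == 0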
self.zero_states_buf_torch = torch.zeros(
(self._num_envs, self.num_states), device=self._device, dtype=torch.float32
)
self.extras = {}
def reset(self):
"""Flags all environments for reset."""
wp.launch(
reset_progress,
dim=self._num_envs,
inputs=[self.progress_buf],
device=self._device,
)
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
wp.launch(
increment_progress,
dim=self._num_envs,
inputs=[self.progress_buf],
device=self._device,
)
if self._env.world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
obs_buf_torch = wp.to_torch(self.obs_buf)
rew_buf_torch = wp.to_torch(self.rew_buf)
reset_buf_torch = wp.to_torch(self.reset_buf)
return obs_buf_torch, rew_buf_torch, reset_buf_torch, self.extras
def get_states(self):
"""API for retrieving states buffer, used for asymmetric AC training.
Returns:
states_buf(torch.Tensor): States buffer.
"""
if self.num_states > 0:
return wp.to_torch(self.states_buf)
else:
return self.zero_states_buf_torch
def set_up_scene(self, scene) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
"""
super().set_up_scene(scene)
self._env_pos = wp.from_torch(self._env_pos)
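# Warp kernels below are launched with dim=num_envs, so wp.tid() indexes an environment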
@wp.kernel
def increment_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = progress_buf[i] + 1
@wp.kernel
def reset_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = 1
| 17,406 | Python | 36.841304 | 118 | 0.618293 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: base class.
Inherits Gym's RLTask class and abstract base class. Inherited by environment classes. Not directly executed.
Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml.
"""
import carb
import hydra
import math
import numpy as np
import torch
from omni.isaac.core.objects import FixedCuboid
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.factory_franka import FactoryFranka
from pxr import PhysxSchema, UsdPhysics
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from omniisaacgymenvs.tasks.factory.factory_schema_config_base import (
FactorySchemaConfigBase,
)
class FactoryBase(RLTask, FactoryABCBase):
def __init__(self, name, sim_config, env) -> None:
"""Initialize instance variables. Initialize RLTask superclass."""
# Set instance variables from base YAML
self._get_base_yaml_params()
self._env_spacing = self.cfg_base.env.env_spacing
# Set instance variables from task and train YAMLs
self._sim_config = sim_config
self._cfg = sim_config.config # CL args, task config, and train config
self._task_cfg = sim_config.task_config # just task config
self._num_envs = sim_config.task_config["env"]["numEnvs"]
self._num_observations = sim_config.task_config["env"]["numObservations"]
self._num_actions = sim_config.task_config["env"]["numActions"]
super().__init__(name, env)
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
config_path = (
"task/FactoryBase.yaml" # relative to Gym's Hydra search path (cfg dir)
)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][
"tasks"
]["factory"][
"yaml"
] # strip superfluous nesting
def import_franka_assets(self, add_to_stage=True):
"""Set Franka and table asset options. Import assets."""
self._stage = get_current_stage()
if add_to_stage:
franka_translation = np.array([self.cfg_base.env.franka_depth, 0.0, 0.0])
franka_orientation = np.array([0.0, 0.0, 0.0, 1.0])
franka = FactoryFranka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
translation=franka_translation,
orientation=franka_orientation,
)
self._sim_config.apply_articulation_settings(
"franka",
get_prim_at_path(franka.prim_path),
self._sim_config.parse_actor_config("franka"),
)
for link_prim in franka.prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(
self._stage, link_prim.GetPrimPath()
)
rb.GetDisableGravityAttr().Set(True)
rb.GetRetainAccelerationsAttr().Set(False)
if self.cfg_base.sim.add_damping:
rb.GetLinearDampingAttr().Set(
1.0
) # default = 0.0; increased to improve stability
rb.GetMaxLinearVelocityAttr().Set(
1.0
) # default = 1000.0; reduced to prevent CUDA errors
rb.GetAngularDampingAttr().Set(
5.0
) # default = 0.5; increased to improve stability
rb.GetMaxAngularVelocityAttr().Set(
2 / math.pi * 180
) # default = 64.0; reduced to prevent CUDA errors
else:
rb.GetLinearDampingAttr().Set(0.0)
rb.GetMaxLinearVelocityAttr().Set(1000.0)
rb.GetAngularDampingAttr().Set(0.5)
rb.GetMaxAngularVelocityAttr().Set(64 / math.pi * 180)
table_translation = np.array(
[0.0, 0.0, self.cfg_base.env.table_height * 0.5]
)
table_orientation = np.array([1.0, 0.0, 0.0, 0.0])
table = FixedCuboid(
prim_path=self.default_zero_env_path + "/table",
name="table",
translation=table_translation,
orientation=table_orientation,
scale=np.array(
[
self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width,
self.cfg_base.env.table_height,
]
),
size=1.0,
color=np.array([0, 0, 0]),
)
self.parse_controller_spec(add_to_stage=add_to_stage)
def acquire_base_tensors(self):
"""Acquire tensors."""
self.num_dofs = 9
self.env_pos = self._env_pos
self.dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_torque = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_dof_pos = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.ctrl_target_gripper_dof_pos = torch.zeros(
(self.num_envs, 2), device=self.device
)
self.ctrl_target_fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.prev_actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def refresh_base_tensors(self):
"""Refresh tensors."""
if not self.world.is_playing():
return
self.dof_pos = self.frankas.get_joint_positions(clone=False)
self.dof_vel = self.frankas.get_joint_velocities(clone=False)
        # Jacobian shape: [num_envs, 11, 6, 9] (the root link has no Jacobian);
        # body indices 8, 9, and 10 correspond to the left finger, right finger, and fingertip-centered frames used below
self.franka_jacobian = self.frankas.get_jacobians()
self.franka_mass_matrix = self.frankas.get_mass_matrices(clone=False)
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_mass_matrix = self.franka_mass_matrix[
:, 0:7, 0:7
] # for Franka arm (not gripper)
self.hand_pos, self.hand_quat = self.frankas._hands.get_world_poses(clone=False)
self.hand_pos -= self.env_pos
hand_velocities = self.frankas._hands.get_velocities(clone=False)
self.hand_linvel = hand_velocities[:, 0:3]
self.hand_angvel = hand_velocities[:, 3:6]
(
self.left_finger_pos,
self.left_finger_quat,
) = self.frankas._lfingers.get_world_poses(clone=False)
self.left_finger_pos -= self.env_pos
left_finger_velocities = self.frankas._lfingers.get_velocities(clone=False)
self.left_finger_linvel = left_finger_velocities[:, 0:3]
self.left_finger_angvel = left_finger_velocities[:, 3:6]
self.left_finger_jacobian = self.franka_jacobian[:, 8, 0:6, 0:7]
left_finger_forces = self.frankas._lfingers.get_net_contact_forces(clone=False)
self.left_finger_force = left_finger_forces[:, 0:3]
(
self.right_finger_pos,
self.right_finger_quat,
) = self.frankas._rfingers.get_world_poses(clone=False)
self.right_finger_pos -= self.env_pos
right_finger_velocities = self.frankas._rfingers.get_velocities(clone=False)
self.right_finger_linvel = right_finger_velocities[:, 0:3]
self.right_finger_angvel = right_finger_velocities[:, 3:6]
self.right_finger_jacobian = self.franka_jacobian[:, 9, 0:6, 0:7]
right_finger_forces = self.frankas._rfingers.get_net_contact_forces(clone=False)
self.right_finger_force = right_finger_forces[:, 0:3]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
(
self.fingertip_centered_pos,
self.fingertip_centered_quat,
) = self.frankas._fingertip_centered.get_world_poses(clone=False)
self.fingertip_centered_pos -= self.env_pos
fingertip_centered_velocities = self.frankas._fingertip_centered.get_velocities(
clone=False
)
self.fingertip_centered_linvel = fingertip_centered_velocities[:, 0:3]
self.fingertip_centered_angvel = fingertip_centered_velocities[:, 3:6]
self.fingertip_centered_jacobian = self.franka_jacobian[:, 10, 0:6, 0:7]
self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) / 2
self.fingertip_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length,
device=self.device,
)
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
# TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf)
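        # Rigid-body velocity transport: v_midpoint = v_centered + omega x (r_midpoint - r_centered)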
self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(
self.fingertip_centered_angvel,
(self.fingertip_midpoint_pos - self.fingertip_centered_pos),
dim=1,
)
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (
self.left_finger_jacobian + self.right_finger_jacobian
) * 0.5
def parse_controller_spec(self, add_to_stage):
"""Parse controller specification into lower-level controller configuration."""
cfg_ctrl_keys = {
"num_envs",
"jacobian_type",
"gripper_prop_gains",
"gripper_deriv_gains",
"motor_ctrl_mode",
"gain_space",
"ik_method",
"joint_prop_gains",
"joint_deriv_gains",
"do_motion_ctrl",
"task_prop_gains",
"task_deriv_gains",
"do_inertial_comp",
"motion_ctrl_axes",
"do_force_ctrl",
"force_ctrl_method",
"wrench_prop_gains",
"force_ctrl_axes",
}
self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys}
self.cfg_ctrl["num_envs"] = self.num_envs
self.cfg_ctrl["jacobian_type"] = self.cfg_task.ctrl.all.jacobian_type
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
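        # Each controller type below fills only the cfg_ctrl entries it needs; unused entries remain None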
ctrl_type = self.cfg_task.ctrl.ctrl_type
if ctrl_type == "gym_default":
self.cfg_ctrl["motor_ctrl_mode"] = "gym"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.gym_default.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "joint_space_ik":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_ik.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
elif ctrl_type == "joint_space_id":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_id.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
elif ctrl_type == "task_space_impedance":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "operational_space_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "open_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "open"
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "closed_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "hybrid_force_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
if add_to_stage:
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "angular")
drive.GetStiffnessAttr().Set(
self.cfg_ctrl["joint_prop_gains"][0, i].item() * np.pi / 180
)
drive.GetDampingAttr().Set(
self.cfg_ctrl["joint_deriv_gains"][0, i].item() * np.pi / 180
)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "linear")
                    drive.GetStiffnessAttr().Set(
                        self.cfg_ctrl["gripper_prop_gains"][0, i].item()
                    )
drive.GetDampingAttr().Set(
self.cfg_ctrl["gripper_deriv_gains"][0, i].item()
)
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "angular")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl["jacobian_type"] == "geometric":
self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
elif self.cfg_ctrl["jacobian_type"] == "analytic":
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
                fingertip_quat=self.fingertip_midpoint_quat,
fingertip_jacobian=self.fingertip_midpoint_jacobian,
num_envs=self.num_envs,
device=self.device,
)
# Set PD joint pos target or joint torque
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
self._set_dof_pos_target()
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device,
)
self.frankas.set_joint_position_targets(positions=self.ctrl_target_dof_pos)
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device,
)
self.frankas.set_joint_efforts(efforts=self.dof_torque)
def enable_gravity(self, gravity_mag):
"""Enable gravity."""
gravity = [0.0, 0.0, -gravity_mag]
self.world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
def disable_gravity(self):
"""Disable gravity."""
gravity = [0.0, 0.0, 0.0]
self.world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
| 26,820 | Python | 45.88986 | 148 | 0.588479 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_config_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for task class configurations.
Used by Hydra. Defines template for task class YAML files. Not enforced.
"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Sim:
use_gpu_pipeline: bool # use GPU pipeline
dt: float # timestep size
gravity: list[float] # gravity vector
@dataclass
class Env:
numObservations: int # number of observations per env; camel case required by VecTask
numActions: int # number of actions per env; camel case required by VecTask
numEnvs: int # number of envs; camel case required by VecTask
@dataclass
class Randomize:
franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7)
@dataclass
class RL:
pos_action_scale: list[
float
] # scale on pos displacement targets (3), to convert [-1, 1] to +- x m
rot_action_scale: list[
float
] # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad
force_action_scale: list[
float
] # scale on force targets (3), to convert [-1, 1] to +- x N
torque_action_scale: list[
float
] # scale on torque targets (3), to convert [-1, 1] to +- x Nm
clamp_rot: bool # clamp small values of rotation actions to zero
clamp_rot_thresh: float # smallest acceptable value
max_episode_length: int # max number of timesteps in each episode
@dataclass
class All:
jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic}
gripper_prop_gains: list[
float
] # proportional gains on left and right Franka gripper finger DOF position (2)
gripper_deriv_gains: list[
float
] # derivative gains on left and right Franka gripper finger DOF position (2)
@dataclass
class GymDefault:
joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7)
joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7)
@dataclass
class JointSpaceIK:
ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd}
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class JointSpaceID:
ik_method: str
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class TaskSpaceImpedance:
motion_ctrl_axes: list[bool] # axes for which to enable motion control {0, 1} (6)
task_prop_gains: list[float] # proportional gains on Franka fingertip pose (6)
task_deriv_gains: list[float] # derivative gains on Franka fingertip pose (6)
@dataclass
class OperationalSpaceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
@dataclass
class OpenLoopForce:
force_ctrl_axes: list[bool] # axes for which to enable force control {0, 1} (6)
@dataclass
class ClosedLoopForce:
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float] # proportional gains on Franka finger force (6)
@dataclass
class HybridForceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float]
@dataclass
class Ctrl:
ctrl_type: str # {gym_default,
# joint_space_ik,
# joint_space_id,
# task_space_impedance,
# operational_space_motion,
# open_loop_force,
# closed_loop_force,
# hybrid_force_motion}
gym_default: GymDefault
joint_space_ik: JointSpaceIK
joint_space_id: JointSpaceID
task_space_impedance: TaskSpaceImpedance
operational_space_motion: OperationalSpaceMotion
open_loop_force: OpenLoopForce
closed_loop_force: ClosedLoopForce
hybrid_force_motion: HybridForceMotion
@dataclass
class FactorySchemaConfigTask:
name: str
physics_engine: str
sim: Sim
env: Env
rl: RL
ctrl: Ctrl
| 5,517 | Python | 30.895954 | 130 | 0.719413 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt place task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPlace
"""
import asyncio
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
import omni.isaac.core.utils.torch as torch_utils
from omni.isaac.core.utils.torch.transformations import tf_combine
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPlacePPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Nut-bolt tensors
self.nut_base_pos_local = self.bolt_head_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths
self.bolt_tip_pos_local = bolt_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
# Keypoint tensors
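        # Keypoints are evenly spaced points along the nut and bolt axes; the reward aligns the two sets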
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_nut = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device)
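        # Identity quaternion in (w, x, y, z) order, used as the keypoint local-frame rotation in tf_combine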
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
(self.nut_widths_max * 0.5)
* 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max * 0.5) * 1.1,
), # buffer on gripper DOF pos to prevent initial contact
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut within gripper
self.nut_pos[env_ids, 0] = 0.0
self.nut_pos[env_ids, 1] = 0.0
fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset
nut_base_pos_local = self.bolt_head_heights.squeeze(-1)
self.nut_pos[env_ids, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local
nut_noise_pos_in_gripper = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag(
torch.tensor(
self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device
)
)
self.nut_pos[env_ids, :] += nut_noise_pos_in_gripper[env_ids]
nut_rot_euler = torch.tensor(
[0.0, 0.0, math.pi * 0.5], device=self.device
).repeat(len(env_ids), 1)
nut_noise_rot_in_gripper = 2 * (
torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5
) # [-1, 1]
nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper
nut_rot_euler[:, 2] += nut_noise_rot_in_gripper
nut_rot_quat = torch_utils.quat_from_euler_xyz(
nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2]
)
self.nut_quat[env_ids, :] = nut_rot_quat
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.bolt_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
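        # Axis-angle: the norm of the action vector is the rotation angle and its direction is the axis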
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self.world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
# Compute pos of keypoints on gripper, nut, and bolt in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_nut[:, idx] = tf_combine(
self.nut_quat,
self.nut_pos,
self.identity_quat,
(keypoint_offset + self.nut_base_pos_local),
)[1]
self.keypoints_bolt[:, idx] = tf_combine(
self.bolt_quat,
self.bolt_pos,
self.identity_quat,
(keypoint_offset + self.bolt_tip_pos_local),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_pos,
self.nut_quat,
self.bolt_pos,
self.bolt_quat,
]
if self.cfg_task.rl.add_obs_bolt_tip_pos:
obs_tensors += [self.bolt_tip_pos_local]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)
        self.rew_buf[:] = (
            keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
            - action_penalty * self.cfg_task.rl.action_penalty_scale
        )
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is close enough to bolt
is_nut_close_to_bolt = self._check_nut_close_to_bolt()
self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(is_nut_close_to_bolt.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
return keypoint_offsets
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance between nut and bolt."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
SimulationContext.step(self.world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
SimulationContext.step(self.world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
SimulationContext.step(self.world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _close_gripper(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
SimulationContext.step(self.world, render=True)
async def _close_gripper_async(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_nut_close_to_bolt(self) -> torch.Tensor:
"""Check if nut is close to bolt."""
keypoint_dist = torch.norm(
self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1
)
is_nut_close_to_bolt = torch.where(
torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,
torch.ones_like(self.progress_buf),
torch.zeros_like(self.progress_buf),
)
return is_nut_close_to_bolt
| 28,968 | Python | 37.780455 | 131 | 0.594518 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_config_env.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for environment class configurations.
Used by Hydra. Defines template for environment class YAML files.
"""
from dataclasses import dataclass
@dataclass
class Sim:
disable_franka_collisions: bool # disable collisions between Franka and objects
@dataclass
class Env:
env_name: str # name of scene
@dataclass
class FactorySchemaConfigEnv:
sim: Sim
env: Env
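# Illustrative sketch (not upstream code): a YAML file matching this schema
# could look like the following; the values shown are assumed examples only.
#
#   sim:
#     disable_franka_collisions: False
#   env:
#     env_name: FactoryEnvNutBolt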
| 1,960 | Python | 36.711538 | 84 | 0.776531 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_class_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for task classes.
Inherits ABC class. Inherited by task classes. Defines template for task classes.
"""
from abc import ABC, abstractmethod
class FactoryABCTask(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize environment superclass."""
pass
@abstractmethod
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
@abstractmethod
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
@abstractmethod
def pre_physics_step(self):
"""Reset environments. Apply actions from policy as controller targets. Simulation step called after this method."""
pass
@abstractmethod
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
pass
@abstractmethod
def get_observations(self):
"""Compute observations."""
pass
@abstractmethod
def calculate_metrics(self):
"""Detect successes and failures. Update reward and reset buffers."""
pass
@abstractmethod
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
@abstractmethod
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
@abstractmethod
def reset_idx(self):
"""Reset specified environments."""
pass
@abstractmethod
def _reset_franka(self):
"""Reset DOF states and DOF targets of Franka."""
pass
@abstractmethod
def _reset_object(self):
"""Reset root state of object."""
pass
@abstractmethod
def _reset_buffers(self):
"""Reset buffers."""
pass
| 3,492 | Python | 31.342592 | 124 | 0.69559 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_class_env.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for environment classes.
Inherits ABC class. Inherited by environment classes. Defines template for environment classes.
"""
from abc import ABC, abstractmethod
class FactoryABCEnv(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize base superclass. Acquire tensors."""
pass
@abstractmethod
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def set_up_scene(self):
"""Set env options. Import assets. Create actors."""
pass
@abstractmethod
def _import_env_assets(self):
"""Set asset options. Import assets."""
pass
@abstractmethod
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
| 2,489 | Python | 37.906249 | 95 | 0.73644 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt screw task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltScrew
"""
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltScrewPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
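        # Illustrative note (assumption, not upstream code): because the asset info
        # file is addressed with a relative path ("../tasks/factory/yaml/..."),
        # hydra.compose returns a config nested under empty-string and directory
        # keys, roughly
        #   {"": {"": {"": {"tasks": {"factory": {"yaml": {<subassembly entries>}}}}}}}
        # which is why the chained [""][""][""] lookups above are needed to reach
        # the actual nut-bolt asset dictionary.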
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
self.reset_idx(indices)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
target_heights = (
self.cfg_base.env.table_height
+ self.bolt_head_heights
+ self.nut_heights * 0.5
)
self.target_pos = target_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
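        # Worked example (assumed values, for illustration only): with
        # table_height = 0.4 m, a bolt head height of 0.02 m, and a nut height
        # of 0.016 m, target_heights = 0.4 + 0.02 + 0.008 = 0.428 m, so each
        # env's target_pos is the point (0.0, 0.0, 0.428) above the env origin.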
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def reset_idx(self, env_ids) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
(self.nut_widths_max[env_ids] * 0.5)
* 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max[env_ids] * 0.5) * 1.1,
), # buffer on gripper DOF pos to prevent initial contact
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root state of nut."""
nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids]
self.nut_pos[env_ids, :] = nut_pos * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat(len(env_ids), 1)
nut_rot = (
self.cfg_task.randomize.nut_rot_initial
* torch.ones((len(env_ids), 1), device=self.device)
* math.pi
/ 180.0
)
self.nut_quat[env_ids, :] = torch.cat(
(
torch.cos(nut_rot * 0.5),
torch.zeros((len(env_ids), 1), device=self.device),
torch.zeros((len(env_ids), 1), device=self.device),
torch.sin(nut_rot * 0.5),
),
dim=-1,
)
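        # Illustrative note (assumption about conventions, not upstream code):
        # the tensor above is a (w, x, y, z) quaternion for a pure yaw of
        # nut_rot about the z-axis, i.e. q = (cos(theta/2), 0, 0, sin(theta/2)).
        # For example, nut_rot_initial = 30 deg gives roughly
        # (0.9659, 0.0, 0.0, 0.2588).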
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if self.cfg_task.rl.unidirectional_pos:
pos_actions[:, 2] = -(pos_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if self.cfg_task.rl.unidirectional_rot:
rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if self.cfg_task.rl.unidirectional_force:
force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
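        # Illustrative sketch (not upstream code): the clamp_rot branch above
        # replaces near-zero rotations with the identity quaternion so that the
        # 0/0 axis normalization cannot propagate NaNs. Assuming
        # clamp_rot_thresh = 1e-6, an action rot_actions = (0, 0, 0) yields
        # angle = 0 and an undefined axis, so the rotation increment falls back
        # to (1, 0, 0, 0) and the fingertip orientation target is unchanged.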
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self.world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
self.fingerpad_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length
- self.asset_info_franka_table.franka_fingerpad_length * 0.5,
device=self.device,
)
self.finger_nut_keypoint_dist = self._get_keypoint_dist(body="finger_nut")
self.nut_keypoint_dist = self._get_keypoint_dist(body="nut")
self.nut_dist_to_target = torch.norm(
self.target_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and target
self.nut_dist_to_fingerpads = torch.norm(
self.fingerpad_midpoint_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and midpoint between centers of fingerpads
self.was_success = torch.zeros_like(self.progress_buf, dtype=torch.bool)
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_com_pos,
self.nut_com_quat,
self.nut_com_linvel,
self.nut_com_angvel,
]
if self.cfg_task.rl.add_obs_finger_force:
obs_tensors += [self.left_finger_force, self.right_finger_force]
else:
obs_tensors += [
torch.zeros_like(self.left_finger_force),
torch.zeros_like(self.right_finger_force),
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
# Get successful and failed envs at current timestep
curr_successes = self._get_curr_successes()
curr_failures = self._get_curr_failures(curr_successes)
self._update_reset_buf(curr_successes, curr_failures)
self._update_rew_buf(curr_successes)
if torch.any(self.is_expired):
self.extras["successes"] = torch.mean(curr_successes.float())
def _update_reset_buf(self, curr_successes, curr_failures) -> None:
"""Assign environments for reset if successful or failed."""
self.reset_buf[:] = self.is_expired
def _update_rew_buf(self, curr_successes) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist)
action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
+ curr_successes * self.cfg_task.rl.success_bonus
)
def _get_keypoint_dist(self, body) -> torch.Tensor:
"""Get keypoint distance."""
axis_length = (
self.asset_info_franka_table.franka_hand_length
+ self.asset_info_franka_table.franka_finger_length
)
if body == "finger" or body == "nut":
# Keypoint distance between finger/nut and target
if body == "finger":
self.keypoint1 = self.fingertip_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
elif body == "nut":
self.keypoint1 = self.nut_com_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint1_targ = self.target_pos
self.keypoint2_targ = self.keypoint1_targ + torch.tensor(
[0.0, 0.0, axis_length], device=self.device
)
elif body == "finger_nut":
# Keypoint distance between finger and nut
self.keypoint1 = self.fingerpad_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
self.keypoint1_targ = self.nut_com_pos
self.keypoint2_targ = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0
self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0
self.keypoint3_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0
)
self.keypoint4_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0
)
keypoint_dist = (
torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1)
+ torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1)
+ torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1)
+ torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1)
)
return keypoint_dist
def _get_curr_successes(self) -> torch.Tensor:
"""Get success mask at current timestep."""
curr_successes = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If nut is close enough to target pos
is_close = torch.where(
self.nut_dist_to_target < self.thread_pitches.squeeze(-1) * 5,
torch.ones_like(curr_successes),
torch.zeros_like(curr_successes),
)
curr_successes = torch.logical_or(curr_successes, is_close)
return curr_successes
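    # Worked example (assumed values, for illustration only): for a nut with
    # thread pitch of roughly 0.002 m, the success radius above is
    # 5 * 0.002 = 0.01 m, i.e. the timestep counts as successful once the nut
    # COM is within about 1 cm of the target position near the base of the
    # bolt shank.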
def _get_curr_failures(self, curr_successes) -> torch.Tensor:
"""Get failure mask at current timestep."""
curr_failures = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If max episode length has been reached
self.is_expired = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut is too far from target pos
self.is_far = torch.where(
self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut has slipped (distance-based definition)
self.is_slipped = torch.where(
self.nut_dist_to_fingerpads
> self.asset_info_franka_table.franka_fingerpad_length * 0.5
+ self.nut_heights.squeeze(-1) * 0.5,
torch.ones_like(curr_failures),
curr_failures,
)
self.is_slipped = torch.logical_and(
self.is_slipped, torch.logical_not(curr_successes)
) # ignore slip if successful
# If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt)
self.is_fallen = torch.logical_and(
torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1)
> self.bolt_widths.squeeze(-1) * 0.5,
self.nut_com_pos[:, 2]
< self.cfg_base.env.table_height
+ self.bolt_head_heights.squeeze(-1)
+ self.bolt_shank_lengths.squeeze(-1)
+ self.nut_heights.squeeze(-1) * 0.5,
)
curr_failures = torch.logical_or(curr_failures, self.is_expired)
curr_failures = torch.logical_or(curr_failures, self.is_far)
curr_failures = torch.logical_or(curr_failures, self.is_slipped)
curr_failures = torch.logical_or(curr_failures, self.is_fallen)
return curr_failures
| 20,027 | Python | 37.367816 | 131 | 0.589105 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt pick task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPick
"""
import asyncio
import hydra
import omegaconf
import torch
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.torch.transformations import tf_combine
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPickPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Grasp pose tensors
nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM
self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
self.nut_grasp_quat_local = (
torch.tensor([0.0, 0.0, 1.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
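        # Descriptive note (assumption about conventions, not upstream code):
        # in (w, x, y, z) form, (0.0, 0.0, 1.0, 0.0) is a 180-degree rotation
        # about the local y-axis, so the grasp frame flips the z-axis and the
        # gripper approaches the nut from above.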
# Keypoint tensors
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_gripper = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_nut = torch.zeros_like(
self.keypoints_gripper, device=self.device
)
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self.world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut
nut_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_xy = nut_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.nut_pos_xy_noise, device=self.device)
)
self.nut_pos[env_ids, 0] = (
self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[env_ids, 0]
)
self.nut_pos[env_ids, 1] = (
self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[env_ids, 1]
)
        self.nut_pos[env_ids, 2] = (
            self.cfg_base.env.table_height
            - self.bolt_head_heights[env_ids].squeeze(-1)
        )
self.nut_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self.world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if self.cfg_task.env.close_and_lift:
self._close_gripper(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self._lift_gripper(
franka_gripper_width=0.0,
lift_distance=0.3,
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps,
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
async def post_physics_step_async(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self.world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if self.cfg_task.env.close_and_lift:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if is_last_step:
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
await self._lift_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of nut grasping frame
self.nut_grasp_quat, self.nut_grasp_pos = tf_combine(
self.nut_quat,
self.nut_pos,
self.nut_grasp_quat_local,
self.nut_grasp_pos_local,
)
# Compute pos of keypoints on gripper and nut in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gripper[:, idx] = tf_combine(
self.fingertip_midpoint_quat,
self.fingertip_midpoint_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_nut[:, idx] = tf_combine(
self.nut_grasp_quat,
self.nut_grasp_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_grasp_pos,
self.nut_grasp_quat,
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reward and reset buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
)
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is picked up and above table
lift_success = self._check_lift_success(height_multiple=3.0)
self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(lift_success.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
return keypoint_offsets
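    # Worked example (illustration only): with num_keypoints = 4 the z-offsets
    # above are linspace(0, 1, 4) - 0.5 = (-0.5, -1/6, 1/6, 0.5), i.e. four
    # points evenly spaced along a unit-length segment centered at the origin;
    # keypoint_scale then shrinks this segment to the working scale.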
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _close_gripper(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
SimulationContext.step(self.world, render=True)
def _lift_gripper(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
SimulationContext.step(self.world, render=True)
async def _close_gripper_async(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps=20
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
async def _lift_gripper_async(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_lift_success(self, height_multiple) -> torch.Tensor:
"""Check if nut is above table by more than specified multiple times height of nut."""
lift_success = torch.where(
self.nut_pos[:, 2]
> self.cfg_base.env.table_height
+ self.nut_heights.squeeze(-1) * height_multiple,
torch.ones((self.num_envs,), device=self.device),
torch.zeros((self.num_envs,), device=self.device),
)
return lift_success
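    # Worked example (assumed values, for illustration only): with
    # table_height = 0.4 m, a nut height of 0.016 m, and height_multiple = 3.0,
    # the check above counts an env as a successful lift once the nut z
    # position exceeds 0.4 + 3 * 0.016 = 0.448 m.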
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# step once to update physx with the newly set joint positions from reset_franka()
SimulationContext.step(self.world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
if not self.world.is_playing():
return
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
SimulationContext.step(self.world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# step once to update physx with the newly set joint velocities
SimulationContext.step(self.world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# step once to update physx with the newly set joint positions from reset_franka()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# step once to update physx with the newly set joint velocities
self.world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
| 31,484 | Python | 37.822441 | 131 | 0.589506 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_class_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for base class.
Inherits ABC class. Inherited by base class. Defines template for base class.
"""
from abc import ABC, abstractmethod
class FactoryABCBase(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize VecTask superclass."""
pass
@abstractmethod
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def import_franka_assets(self):
"""Set Franka and table asset options. Import assets."""
pass
@abstractmethod
def refresh_base_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
@abstractmethod
def parse_controller_spec(self):
"""Parse controller specification into lower-level controller configuration."""
pass
@abstractmethod
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
pass
@abstractmethod
def enable_gravity(self):
"""Enable gravity."""
pass
@abstractmethod
def disable_gravity(self):
"""Disable gravity."""
pass
| 2,843 | Python | 35 | 88 | 0.721069 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_schema_config_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for base class configuration.
Used by Hydra. Defines template for base class YAML file.
"""
from dataclasses import dataclass
@dataclass
class Mode:
export_scene: bool # export scene to USD
export_states: bool # export states to NPY
@dataclass
class Sim:
dt: float # timestep size (default = 1.0 / 60.0)
num_substeps: int # number of substeps (default = 2)
num_pos_iters: int # number of position iterations for PhysX TGS solver (default = 4)
num_vel_iters: int # number of velocity iterations for PhysX TGS solver (default = 1)
gravity_mag: float # magnitude of gravitational acceleration
add_damping: bool # add damping to stabilize gripper-object interactions
@dataclass
class Env:
env_spacing: float # lateral offset between envs
franka_depth: float # depth offset of Franka base relative to env origin
table_height: float # height of table
franka_friction: float # coefficient of friction associated with Franka
table_friction: float # coefficient of friction associated with table
@dataclass
class FactorySchemaConfigBase:
mode: Mode
sim: Sim
env: Env
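# Illustrative sketch (not upstream code): a base-class YAML matching this
# schema could look like the following; all values are assumed examples.
#
#   mode:
#     export_scene: False
#     export_states: False
#   sim:
#     dt: 0.016667
#     num_substeps: 2
#     num_pos_iters: 4
#     num_vel_iters: 1
#     gravity_mag: 9.81
#     add_damping: True
#   env:
#     env_spacing: 1.5
#     franka_depth: 0.5
#     table_height: 0.4
#     franka_friction: 1.0
#     table_friction: 0.3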
| 2,724 | Python | 39.073529 | 90 | 0.757342 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_env_nut_bolt.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for nut-bolt env.
Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed.
Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml.
"""
import hydra
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omni.physx.scripts import physicsUtils, utils
from omniisaacgymenvs.robots.articulations.views.factory_franka_view import (
FactoryFrankaView,
)
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_base import FactoryBase
from omniisaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from omniisaacgymenvs.tasks.factory.factory_schema_config_env import (
FactorySchemaConfigEnv,
)
class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv):
def __init__(self, name, sim_config, env) -> None:
"""Initialize base superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_env_yaml_params()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv)
config_path = (
"task/FactoryEnvNutBolt.yaml" # relative to Hydra search path (cfg dir)
)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml"
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._num_observations = self._task_cfg["env"]["numObservations"]
self._num_actions = self._task_cfg["env"]["numActions"]
self._env_spacing = self.cfg_base["env"]["env_spacing"]
self._get_env_yaml_params()
def set_up_scene(self, scene) -> None:
"""Import assets. Add to scene."""
# Increase buffer size to prevent overflow for Place and Screw tasks
physxSceneAPI = self.world.get_physics_context()._physx_scene_api
physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(256 * 1024 * 1024)
self.import_franka_assets(add_to_stage=True)
self.create_nut_bolt_material()
RLTask.set_up_scene(self, scene, replicate_physics=False)
self._import_env_assets(add_to_stage=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*",
name="nuts_view",
track_contact_forces=True,
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*",
name="bolts_view",
track_contact_forces=True,
)
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
return
def initialize_views(self, scene) -> None:
"""Initialize views for extension workflow."""
super().initialize_views(scene)
self.import_franka_assets(add_to_stage=False)
self._import_env_assets(add_to_stage=False)
if scene.object_exists("frankas_view"):
scene.remove_object("frankas_view", registry_only=True)
if scene.object_exists("nuts_view"):
scene.remove_object("nuts_view", registry_only=True)
if scene.object_exists("bolts_view"):
scene.remove_object("bolts_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("fingertips_view"):
scene.remove_object("fingertips_view", registry_only=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*", name="nuts_view"
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*", name="bolts_view"
)
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
def create_nut_bolt_material(self):
"""Define nut and bolt material."""
self.nutboltPhysicsMaterialPath = "/World/Physics_Materials/NutBoltMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.nutboltPhysicsMaterialPath,
density=self.cfg_env.env.nut_bolt_density,
staticFriction=self.cfg_env.env.nut_bolt_friction,
dynamicFriction=self.cfg_env.env.nut_bolt_friction,
restitution=0.0,
)
def _import_env_assets(self, add_to_stage=True):
"""Set nut and bolt asset options. Import assets."""
self.nut_heights = []
self.nut_widths_max = []
self.bolt_widths = []
self.bolt_head_heights = []
self.bolt_shank_lengths = []
self.thread_pitches = []
assets_root_path = get_assets_root_path()
for i in range(0, self._num_envs):
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_nut_bolt[subassembly])
nut_translation = torch.tensor(
[
0.0,
self.cfg_env.env.nut_lateral_offset,
self.cfg_base.env.table_height,
],
device=self._device,
)
nut_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
nut_height = self.asset_info_nut_bolt[subassembly][components[0]]["height"]
nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]][
"width_max"
]
self.nut_heights.append(nut_height)
self.nut_widths_max.append(nut_width_max)
nut_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[0]]["usd_path"]
)
if add_to_stage:
add_reference_to_stage(nut_file, f"/World/envs/env_{i}" + "/nut")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/nut",
translation=nut_translation,
orientation=nut_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/nut/factory_{components[0]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/nut/factory_{components[0]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"nut",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/nut"),
self._sim_config.parse_actor_config("nut"),
)
bolt_translation = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self._device
)
bolt_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]["width"]
bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]][
"head_height"
]
bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]][
"shank_length"
]
self.bolt_widths.append(bolt_width)
self.bolt_head_heights.append(bolt_head_height)
self.bolt_shank_lengths.append(bolt_shank_length)
if add_to_stage:
bolt_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[1]]["usd_path"]
)
add_reference_to_stage(bolt_file, f"/World/envs/env_{i}" + "/bolt")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/bolt",
translation=bolt_translation,
orientation=bolt_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/bolt/factory_{components[1]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/bolt/factory_{components[1]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"bolt",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/bolt"),
self._sim_config.parse_actor_config("bolt"),
)
thread_pitch = self.asset_info_nut_bolt[subassembly]["thread_pitch"]
self.thread_pitches.append(thread_pitch)
# For computing body COM pos
self.nut_heights = torch.tensor(
self.nut_heights, device=self._device
).unsqueeze(-1)
self.bolt_head_heights = torch.tensor(
self.bolt_head_heights, device=self._device
).unsqueeze(-1)
# For setting initial state
self.nut_widths_max = torch.tensor(
self.nut_widths_max, device=self._device
).unsqueeze(-1)
self.bolt_shank_lengths = torch.tensor(
self.bolt_shank_lengths, device=self._device
).unsqueeze(-1)
# For defining success or failure
self.bolt_widths = torch.tensor(
self.bolt_widths, device=self._device
).unsqueeze(-1)
self.thread_pitches = torch.tensor(
self.thread_pitches, device=self._device
).unsqueeze(-1)
def refresh_env_tensors(self):
"""Refresh tensors."""
# Nut tensors
self.nut_pos, self.nut_quat = self.nuts.get_world_poses(clone=False)
self.nut_pos -= self.env_pos
self.nut_com_pos = fc.translate_along_local_z(
pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device,
)
self.nut_com_quat = self.nut_quat # always equal
nut_velocities = self.nuts.get_velocities(clone=False)
self.nut_linvel = nut_velocities[:, 0:3]
self.nut_angvel = nut_velocities[:, 3:6]
self.nut_com_linvel = self.nut_linvel + torch.cross(
self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1
)
self.nut_com_angvel = self.nut_angvel # always equal
self.nut_force = self.nuts.get_net_contact_forces(clone=False)
# Bolt tensors
self.bolt_pos, self.bolt_quat = self.bolts.get_world_poses(clone=False)
self.bolt_pos -= self.env_pos
self.bolt_force = self.bolts.get_net_contact_forces(clone=False)
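# Illustrative sketch (a hypothetical helper, not part of the upstream API): the
# COM velocity computed in refresh_env_tensors uses the rigid-body relation
# v_com = v_origin + omega x (r_com - r_origin). The toy values below are
# assumptions chosen only to make the expected result easy to read.
def _example_com_velocity_sketch():
    omega = torch.tensor([[0.0, 0.0, 1.0]])  # 1 rad/s about +Z
    v_origin = torch.tensor([[0.0, 0.0, 0.0]])  # body origin at rest
    r = torch.tensor([[1.0, 0.0, 0.0]])  # COM offset of 1 m along +X
    v_com = v_origin + torch.cross(omega, r, dim=1)
    # Expected: [[0.0, 1.0, 0.0]] -- the COM sweeps in the +Y direction.
    return v_com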
| 14,703 | Python | 39.284931 | 110 | 0.603414 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/factory/factory_control.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: control module.
Imported by base, environment, and task classes. Not directly executed.
"""
import math
import omni.isaac.core.utils.torch as torch_utils
import torch
def compute_dof_pos_target(
cfg_ctrl,
arm_dof_pos,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
jacobian,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos,
device,
):
"""Compute Franka DOF position target to move fingertips towards target pose."""
ctrl_target_dof_pos = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos
ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints
return ctrl_target_dof_pos
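# Illustrative sketch (hypothetical helper, not part of the upstream API):
# compute_dof_pos_target is one resolved-rate IK step, q_target = q + J^+ * delta_x,
# plus a pass-through gripper target. The config dictionary below carries only the
# keys this function reads, with illustrative values; it assumes the omni.isaac
# torch utilities imported above are available.
def _example_dof_pos_target_sketch(device="cpu"):
    cfg_ctrl = {"num_envs": 1, "jacobian_type": "geometric", "ik_method": "pinv"}
    arm_dof_pos = torch.zeros((1, 7), device=device)
    pos = torch.zeros((1, 3), device=device)
    quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device)  # identity (w, x, y, z)
    jacobian = torch.randn((1, 6, 7), device=device)
    target_pos = pos + torch.tensor([[0.0, 0.0, 0.01]], device=device)  # 1 cm along +Z
    target = compute_dof_pos_target(
        cfg_ctrl=cfg_ctrl,
        arm_dof_pos=arm_dof_pos,
        fingertip_midpoint_pos=pos,
        fingertip_midpoint_quat=quat,
        jacobian=jacobian,
        ctrl_target_fingertip_midpoint_pos=target_pos,
        ctrl_target_fingertip_midpoint_quat=quat,
        ctrl_target_gripper_dof_pos=0.04,
        device=device,
    )
    # Expected shape (1, 9): seven arm joint targets plus two gripper targets of 0.04.
    return target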
def compute_dof_torque(
cfg_ctrl,
dof_pos,
dof_vel,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
left_finger_force,
right_finger_force,
jacobian,
arm_mass_matrix,
ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench,
device,
):
"""Compute Franka DOF torque to move fingertips towards target pose."""
# References:
# 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# 2) Modern Robotics
dof_torque = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
if cfg_ctrl["gain_space"] == "joint":
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
dof_torque[:, 0:7] = cfg_ctrl[
"joint_prop_gains"
] * delta_arm_dof_pos + cfg_ctrl["joint_deriv_gains"] * (0.0 - dof_vel[:, 0:7])
if cfg_ctrl["do_inertial_comp"]:
# Set tau = M * tau, where M is the joint-space mass matrix
arm_mass_matrix_joint = arm_mass_matrix
dof_torque[:, 0:7] = (
arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)
).squeeze(-1)
elif cfg_ctrl["gain_space"] == "task":
task_wrench = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
if cfg_ctrl["do_motion_ctrl"]:
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98)
task_wrench_motion = _apply_task_space_gains(
delta_fingertip_pose=delta_fingertip_pose,
fingertip_midpoint_linvel=fingertip_midpoint_linvel,
fingertip_midpoint_angvel=fingertip_midpoint_angvel,
task_prop_gains=cfg_ctrl["task_prop_gains"],
task_deriv_gains=cfg_ctrl["task_deriv_gains"],
)
if cfg_ctrl["do_inertial_comp"]:
# Set tau = Lambda * tau, where Lambda is the task-space mass matrix
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
arm_mass_matrix_task = torch.inverse(
jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T
) # ETH eq. 3.86; geometric Jacobian is assumed
task_wrench_motion = (
arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)
).squeeze(-1)
task_wrench = (
task_wrench + cfg_ctrl["motion_ctrl_axes"] * task_wrench_motion
)
if cfg_ctrl["do_force_ctrl"]:
# Set tau = tau + F_t, where F_t is the target contact wrench
task_wrench_force = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
task_wrench_force = (
task_wrench_force + ctrl_target_fingertip_contact_wrench
) # open-loop force control (building towards ETH eq. 3.96-3.98)
if cfg_ctrl["force_ctrl_method"] == "closed":
force_error, torque_error = _get_wrench_error(
left_finger_force=left_finger_force,
right_finger_force=right_finger_force,
ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench,
num_envs=cfg_ctrl["num_envs"],
device=device,
)
# Set tau = tau + k_p * contact_wrench_error
task_wrench_force = task_wrench_force + cfg_ctrl[
"wrench_prop_gains"
] * torch.cat(
(force_error, torque_error), dim=1
) # part of Modern Robotics eq. 11.61
task_wrench = (
task_wrench
+ torch.tensor(cfg_ctrl["force_ctrl_axes"], device=device).unsqueeze(0)
* task_wrench_force
)
# Set tau = J^T * tau, i.e., map tau into joint space as desired
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1)
dof_torque[:, 7:9] = cfg_ctrl["gripper_prop_gains"] * (
ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]
) + cfg_ctrl["gripper_deriv_gains"] * (
0.0 - dof_vel[:, 7:9]
) # gripper finger joints
dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0)
return dof_torque
def get_pose_error(
fingertip_midpoint_pos,
fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
jacobian_type,
rot_error_type,
):
"""Compute task-space error between target Franka fingertip pose and current pose."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# Compute pos error
pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos
# Compute rot error
if (
jacobian_type == "geometric"
): # See example 2.9.8; note use of J_g and transformation between rotation vectors
# Compute quat error (i.e., difference quat)
        # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
fingertip_midpoint_quat_norm = torch_utils.quat_mul(
fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat)
)[
:, 0
] # scalar component
fingertip_midpoint_quat_inv = torch_utils.quat_conjugate(
fingertip_midpoint_quat
) / fingertip_midpoint_quat_norm.unsqueeze(-1)
quat_error = torch_utils.quat_mul(
ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv
)
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
elif (
jacobian_type == "analytic"
): # See example 2.9.7; note use of J_a and difference of rotation vectors
# Compute axis-angle error
axis_angle_error = axis_angle_from_quat(
ctrl_target_fingertip_midpoint_quat
) - axis_angle_from_quat(fingertip_midpoint_quat)
if rot_error_type == "quat":
return pos_error, quat_error
elif rot_error_type == "axis_angle":
return pos_error, axis_angle_error
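# Illustrative sketch (hypothetical helper, not part of the upstream API): a minimal
# sanity check of get_pose_error. When the current and target fingertip poses
# coincide, both error terms should be (numerically) zero. Quaternions use the
# scalar-first (w, x, y, z) order assumed throughout this module; relies on the
# omni.isaac torch utilities imported above.
def _example_pose_error_sketch(device="cpu"):
    pos = torch.tensor([[0.1, 0.2, 0.3]], device=device)
    quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device)  # identity rotation
    pos_err, axis_angle_err = get_pose_error(
        fingertip_midpoint_pos=pos,
        fingertip_midpoint_quat=quat,
        ctrl_target_fingertip_midpoint_pos=pos.clone(),
        ctrl_target_fingertip_midpoint_quat=quat.clone(),
        jacobian_type="geometric",
        rot_error_type="axis_angle",
    )
    # Expected: pos_err ~ 0 and axis_angle_err ~ 0 (up to the eps guard in
    # axis_angle_from_quat).
    return pos_err, axis_angle_err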
def _get_wrench_error(
left_finger_force,
right_finger_force,
ctrl_target_fingertip_contact_wrench,
num_envs,
device,
):
"""Compute task-space error between target Franka fingertip contact wrench and current wrench."""
fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device)
fingertip_contact_wrench[:, 0:3] = (
left_finger_force + right_finger_force
) # net contact force on fingers
# Cols 3 to 6 are all zeros, as we do not have enough information
force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (
-fingertip_contact_wrench[:, 0:3]
)
torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (
-fingertip_contact_wrench[:, 3:6]
)
return force_error, torque_error
def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device):
"""Get delta Franka DOF position from delta pose using specified IK method."""
# References:
# 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
# 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47)
if ik_method == "pinv": # Jacobian pseudoinverse
k_val = 1.0
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "trans": # Jacobian transpose
k_val = 1.0
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "dls": # damped least squares (Levenberg-Marquardt)
lambda_val = 0.1
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(
n=jacobian.shape[1], device=device
)
delta_dof_pos = (
jacobian_T
@ torch.inverse(jacobian @ jacobian_T + lambda_matrix)
@ delta_pose.unsqueeze(-1)
)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "svd": # adaptive SVD
k_val = 1.0
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1.0 / S
min_singular_value = 1.0e-5
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = (
torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6]
@ torch.diag_embed(S_inv)
@ torch.transpose(U, dim0=1, dim1=2)
)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
return delta_dof_pos
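# Illustrative sketch (hypothetical helper, not part of the upstream API): the
# damped least-squares ("dls") branch on a toy batched Jacobian with the
# (num_envs, 6, 7) layout this function expects. A zero task-space error should
# map to a zero joint update regardless of the Jacobian.
def _example_dls_ik_sketch(device="cpu"):
    jacobian = torch.randn((1, 6, 7), device=device)
    delta_pose = torch.zeros((1, 6), device=device)
    delta_q = _get_delta_dof_pos(
        delta_pose=delta_pose, ik_method="dls", jacobian=jacobian, device=device
    )
    # Expected shape (1, 7); all zeros for a zero task-space error.
    return delta_q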
def _apply_task_space_gains(
delta_fingertip_pose,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
task_prop_gains,
task_deriv_gains,
):
"""Interpret PD gains as task-space gains. Apply to task-space error."""
task_wrench = torch.zeros_like(delta_fingertip_pose)
# Apply gains to lin error components
lin_error = delta_fingertip_pose[:, 0:3]
task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + task_deriv_gains[
:, 0:3
] * (0.0 - fingertip_midpoint_linvel)
# Apply gains to rot error components
rot_error = delta_fingertip_pose[:, 3:6]
task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + task_deriv_gains[
:, 3:6
] * (0.0 - fingertip_midpoint_angvel)
return task_wrench
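# Illustrative sketch (hypothetical helper, not part of the upstream API):
# _apply_task_space_gains is a plain task-space PD law, F = Kp * x_err + Kd * (0 - x_dot),
# applied separately to the linear and angular components. The gains below are
# illustrative values, not tuned controller gains.
def _example_task_space_pd_sketch(device="cpu"):
    delta_pose = torch.tensor([[0.01, 0.0, 0.0, 0.0, 0.0, 0.1]], device=device)
    linvel = torch.zeros((1, 3), device=device)
    angvel = torch.zeros((1, 3), device=device)
    prop_gains = torch.full((1, 6), 100.0, device=device)
    deriv_gains = torch.full((1, 6), 10.0, device=device)
    wrench = _apply_task_space_gains(
        delta_fingertip_pose=delta_pose,
        fingertip_midpoint_linvel=linvel,
        fingertip_midpoint_angvel=angvel,
        task_prop_gains=prop_gains,
        task_deriv_gains=deriv_gains,
    )
    # Expected: [[1.0, 0.0, 0.0, 0.0, 0.0, 10.0]] -- pure proportional term at zero velocity.
    return wrench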
def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device):
"""Convert geometric Jacobian to analytic Jacobian."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# NOTE: Gym returns world-space geometric Jacobians by default
batch = num_envs
# Overview:
# x = [x_p; x_r]
# From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot
# From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv)
# Eq. 2.12 gives an expression for E_p_inv
# Eq. 2.107 gives an expression for E_r_inv
# Compute E_inv_top (i.e., [E_p_inv, 0])
I = torch.eye(3, device=device)
E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3)
E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2)
# Compute E_inv_bottom (i.e., [0, E_r_inv])
fingertip_axis_angle = axis_angle_from_quat(fingertip_quat)
fingertip_axis_angle_cross = get_skew_symm_matrix(
fingertip_axis_angle, device=device
)
fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1)
factor_1 = 1 / (fingertip_angle**2)
factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (
1 - torch.cos(fingertip_angle)
)
factor_3 = factor_1 * factor_2
E_r_inv = (
I
- 1 * 0.5 * fingertip_axis_angle_cross
+ (fingertip_axis_angle_cross @ fingertip_axis_angle_cross)
* factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3))
)
E_inv_bottom = torch.cat(
(torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2
)
E_inv = torch.cat(
(E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1
).reshape((batch, 6, 6))
J_a = E_inv @ fingertip_jacobian
return J_a
def get_skew_symm_matrix(vec, device):
"""Convert vector to skew-symmetric matrix."""
# Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
batch = vec.shape[0]
I = torch.eye(3, device=device)
skew_symm = torch.transpose(
torch.cross(
vec.repeat((1, 3)).reshape((batch * 3, 3)), I.repeat((batch, 1))
).reshape(batch, 3, 3),
dim0=1,
dim1=2,
)
return skew_symm
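# Illustrative sketch (hypothetical helper, not part of the upstream API): the
# matrix returned by get_skew_symm_matrix should reproduce the cross product as a
# matrix multiplication, skew(v) @ w == v x w. A batch of two vectors is used so
# that the default dimension selection of torch.cross inside that function resolves
# to the per-row axis.
def _example_skew_symm_sketch(device="cpu"):
    v = torch.tensor([[1.0, 2.0, 3.0], [0.0, 0.0, 1.0]], device=device)
    w = torch.tensor([[4.0, 5.0, 6.0], [1.0, 0.0, 0.0]], device=device)
    skew_v = get_skew_symm_matrix(v, device=device)  # (2, 3, 3)
    via_matrix = (skew_v @ w.unsqueeze(-1)).squeeze(-1)  # (2, 3)
    via_cross = torch.cross(v, w, dim=1)
    # Expected: both equal [[-3.0, 6.0, -3.0], [0.0, 1.0, 0.0]].
    return via_matrix, via_cross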
def translate_along_local_z(pos, quat, offset, device):
"""Translate global body position along local Z-axis and express in global coordinates."""
num_vecs = pos.shape[0]
offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat(
(num_vecs, 1)
)
_, translated_pos = torch_utils.tf_combine(
q1=quat,
t1=pos,
q2=torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat((num_vecs, 1)),
t2=offset_vec,
)
return translated_pos
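# Illustrative sketch (hypothetical helper, not part of the upstream API): with an
# identity orientation, translating 0.1 m along the local Z-axis should simply add
# 0.1 m to the global Z coordinate. Relies on the omni.isaac torch utilities
# imported above; the numbers are illustrative.
def _example_translate_along_local_z_sketch(device="cpu"):
    pos = torch.tensor([[0.5, 0.0, 0.4]], device=device)
    quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device)  # identity (w, x, y, z)
    translated = translate_along_local_z(pos=pos, quat=quat, offset=0.1, device=device)
    # Expected: approximately [[0.5, 0.0, 0.5]].
    return translated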
def axis_angle_from_euler(euler):
"""Convert tensor of Euler angles to tensor of axis-angles."""
quat = torch_utils.quat_from_euler_xyz(
roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2]
)
quat = quat * torch.sign(quat[:, 0]).unsqueeze(-1) # smaller rotation
axis_angle = axis_angle_from_quat(quat)
return axis_angle
def axis_angle_from_quat(quat, eps=1.0e-6):
"""Convert tensor of quaternions to tensor of axis-angles."""
# Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544
mag = torch.linalg.norm(quat[:, 1:4], dim=1)
half_angle = torch.atan2(mag, quat[:, 0])
angle = 2.0 * half_angle
sin_half_angle_over_angle = torch.where(
torch.abs(angle) > eps, torch.sin(half_angle) / angle, 1 / 2 - angle**2.0 / 48
)
axis_angle = quat[:, 1:4] / sin_half_angle_over_angle.unsqueeze(-1)
return axis_angle
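# Illustrative sketch (hypothetical helper, not part of the upstream API): a
# 90-degree rotation about +Z, written in the scalar-first (w, x, y, z) order used
# throughout this module, should map to the axis-angle vector [0, 0, pi/2]; the
# identity quaternion should map to zero via the small-angle branch above.
def _example_axis_angle_sketch(device="cpu"):
    half = math.pi / 4.0
    quat = torch.tensor(
        [
            [math.cos(half), 0.0, 0.0, math.sin(half)],  # 90 deg about +Z
            [1.0, 0.0, 0.0, 0.0],  # identity
        ],
        device=device,
    )
    axis_angle = axis_angle_from_quat(quat)
    # Expected: approximately [[0, 0, pi/2], [0, 0, 0]].
    return axis_angle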
def axis_angle_from_quat_naive(quat):
"""Convert tensor of quaternions to tensor of axis-angles."""
    # Reference: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation
# NOTE: Susceptible to undesirable behavior due to divide-by-zero
mag = torch.linalg.vector_norm(quat[:, 1:4], dim=1) # zero when quat = [1, 0, 0, 0]
axis = quat[:, 1:4] / mag.unsqueeze(-1)
angle = 2.0 * torch.atan2(mag, quat[:, 0])
axis_angle = axis * angle.unsqueeze(-1)
return axis_angle
def get_rand_quat(num_quats, device):
"""Generate tensor of random quaternions."""
# Reference: http://planning.cs.uiuc.edu/node198.html
u = torch.rand((num_quats, 3), device=device)
quat = torch.zeros((num_quats, 4), device=device)
quat[:, 0] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
quat[:, 2] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
quat[:, 3] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
return quat
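# Illustrative sketch (hypothetical helper, not part of the upstream API): every
# quaternion sampled by get_rand_quat should have unit norm. This only checks the
# norm, not the uniformity of the distribution over SO(3).
def _example_rand_quat_sketch(device="cpu"):
    quat = get_rand_quat(num_quats=1024, device=device)
    norms = torch.linalg.vector_norm(quat, dim=1)
    # Expected: every entry is ~1.0.
    return norms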
def get_nonrand_quat(num_quats, rot_perturbation, device):
    """Generate tensor of bounded random quaternions by composing small random Euler rotations."""
quat = torch_utils.quat_from_euler_xyz(
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
)
return quat
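# Illustrative sketch (hypothetical helper, not part of the upstream API): with a
# small rot_perturbation, get_nonrand_quat should return near-identity quaternions
# (scalar part close to +/-1). Relies on the omni.isaac torch utilities imported
# above; the 0.05 rad bound is an illustrative value.
def _example_nonrand_quat_sketch(device="cpu"):
    quat = get_nonrand_quat(num_quats=8, rot_perturbation=0.05, device=device)
    # Expected shape (8, 4), each row close to an identity rotation.
    return quat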
| 19,859 | Python | 37.864971 | 163 | 0.627574 |