ft-lab/omniverse_sample_scripts/PLATEAU/calcLatLongToOmniverse.py

# ------------------------------------------------------------------.
# Convert latitude/longitude to Japan's plane rectangular coordinate
# system, then to Omniverse (USD) Y-Up/cm.
# Reference : https://vldb.gsi.go.jp/sokuchi/surveycalc/surveycalc/bl2xyf.html
#
# Note that this calculation is only valid for locations in Japan.
# ------------------------------------------------------------------.
import math
# --------------------------------------.
# Input Parameters.
# --------------------------------------.
# Latitude and longitude.
in_lat = 35.680908
in_longi = 139.767348
# ---------------------------------------------------------.
# Get the latitude and longitude of the origin of the
# plane rectangular coordinate system.
# Reference : https://www.gsi.go.jp/LAW/heimencho.html
# Specify 9 for Tokyo.
# ---------------------------------------------------------.
def getOriginLatAndLongi (index : int = 9):
latV0 = 0.0
longiV0 = 0.0
# I.
if index == 1:
latV0 = 33.0
longiV0 = 129.5
# II.
elif index == 2:
latV0 = 33.0
longiV0 = 131.0
# III.
elif index == 3:
latV0 = 36.0
        longiV0 = 132.16666666
# IV.
elif index == 4:
latV0 = 33.0
longiV0 = 133.5
# V.
elif index == 5:
latV0 = 36.0
longiV0 = 134.33333333
# VI.
elif index == 6:
latV0 = 36.0
longiV0 = 136.0
# VII.
elif index == 7:
latV0 = 36.0
longiV0 = 137.16666666
# VIII.
elif index == 8:
latV0 = 36.0
longiV0 = 138.5
    # IX. Tokyo (default).
elif index == 9:
latV0 = 36.0
longiV0 = 139.83333333
# X.
elif index == 10:
latV0 = 40.0
longiV0 = 140.83333333
# XI.
elif index == 11:
latV0 = 44.0
longiV0 = 140.25
# XII.
elif index == 12:
latV0 = 44.0
longiV0 = 142.25
# XIII.
elif index == 13:
latV0 = 44.0
longiV0 = 144.25
# XIV.
elif index == 14:
latV0 = 26.0
longiV0 = 142.0
# XV.
elif index == 15:
latV0 = 26.0
longiV0 = 127.5
# XVI.
elif index == 16:
latV0 = 26.0
longiV0 = 124.0
# XVII.
elif index == 17:
latV0 = 26.0
longiV0 = 131.0
# XVIII.
elif index == 18:
latV0 = 20.0
longiV0 = 136.0
# XIX.
elif index == 19:
latV0 = 26.0
longiV0 = 154.0
return latV0, longiV0
# ---------------------------------------------.
# Convert latitude/longitude to plane rectangular coordinates.
# @param[in] latV        Latitude (decimal degrees).
# @param[in] longiV      Longitude (decimal degrees).
# @param[in] originIndex Origin number of the plane rectangular coordinate system.
#                        https://www.gsi.go.jp/LAW/heimencho.html
# @return x, y (in meters)
# ---------------------------------------------.
def calcLatLongToHeimenChokaku (latV : float, longiV : float, originIndex : int = 9):
    # Equatorial radius (km) = semi-major axis of the ellipsoid.
R = 6378.137
    # Polar radius (km).
R2 = 6356.752
    # Inverse flattening.
F = 298.257222101
    # Scale factor on the X axis of the plane rectangular coordinate system.
m0 = 0.9999
    # Latitude and longitude of the origin of the plane rectangular coordinate system.
    # https://www.gsi.go.jp/LAW/heimencho.html
    # This depends on the region; for Tokyo, use origin IX (9).
latV0, longiV0 = getOriginLatAndLongi(originIndex)
    # Convert degrees to radians.
lat0R = latV0 * math.pi / 180.0
longi0R = longiV0 * math.pi / 180.0
latR = latV * math.pi / 180.0
longiR = longiV * math.pi / 180.0
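    # n is the third flattening of the ellipsoid: n = 1 / (2F - 1).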
n = 1.0 / (2.0 * F - 1.0)
A0 = 1.0 + (n**2) / 4.0 + (n**4) / 64.0
A1 = (-3.0 / 2.0) * (n - (n**3) / 8.0 - (n**5) / 64.0)
A2 = (15.0 / 16.0) * ((n**2) - (n**4) / 4.0)
A3 = (-35.0/ 48.0) * ((n**3) - (5.0 / 16.0) * (n**5))
A4 = (315.0 / 512.0) * (n**4)
A5 = (-693.0/1280.0) * (n**5)
A_Array = [A0, A1, A2, A3 , A4, A5]
a1 = (1.0 / 2.0) * n - (2.0 / 3.0) * (n**2) + (5.0 / 16.0) * (n**3) + (41.0 / 180.0) * (n**4) - (127.0 / 288.0) * (n**5)
a2 = (13.0 / 48.0) * (n**2) - (3.0 / 5.0) * (n**3) + (557.0 / 1440.0) * (n**4) + (281.0 / 630.0) * (n**5)
a3 = (61.0 / 240.0) * (n**3) - (103.0 / 140.0) * (n**4) + (15061.0 / 26880.0) * (n**5)
a4 = (49561.0 / 161280.0) * (n**4) - (179.0 / 168.0) * (n**5)
a5 = (34729.0 / 80640.0) * (n**5)
a_Array = [0.0, a1, a2, a3, a4, a5]
A_ = ((m0 * R) / (1.0 + n)) * A0
v = 0.0
for i in range(5):
v += A_Array[i + 1] * math.sin(2.0 * (float)(i + 1) * lat0R)
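    # S_ is the meridian arc length from the equator to the origin latitude, scaled by m0.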
S_ = ((m0 * R) / (1.0 + n)) * (A0 * lat0R + v)
lambdaC = math.cos(longiR - longi0R)
lambdaS = math.sin(longiR - longi0R)
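    # t = tan(conformal latitude), per the Gauss-Krueger (GSI) formulas.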
t = math.sinh(math.atanh(math.sin(latR)) - ((2.0 * math.sqrt(n)) / (1.0 + n)) * math.atanh(((2.0 * math.sqrt(n)) / (1.0 + n)) * math.sin(latR)))
t_ = math.sqrt(1.0 + t * t)
xi2 = math.atan(t / lambdaC)
eta2 = math.atanh(lambdaS / t_)
v = 0.0
for i in range(5):
v += a_Array[i + 1] * math.sin(2.0 * (float)(i + 1) * xi2) * math.cosh(2.0 * (float)(i + 1) * eta2)
x = A_ * (xi2 + v) - S_
v = 0.0
for i in range(5):
v += a_Array[i + 1] * math.cos(2.0 * (float)(i + 1) * xi2) * math.sinh(2.0 * (float)(i + 1) * eta2)
y = A_ * (eta2 + v)
    # Convert km to m and return.
return (x * 1000.0), (y * 1000.0)
# ----------------------------------------------------------.
# Convert latitude/longitude to plane rectangular coordinates (in meters).
originIndex = 9 # Tokyo.
x,y = calcLatLongToHeimenChokaku(in_lat, in_longi, originIndex)
print("Latitude = " + str(in_lat))
print("Longitude = " + str(in_longi))
print(" X = " + str(x) + " (m)")
print(" Y = " + str(y) + " (m)")
# Convert to Omniverse (USD) Y-up / right-handed / cm.
x2 = y * 100.0
z2 = -x * 100.0
print("[ Omniverse ] (Y-up/right hand/cm)")
print(" x = " + str(x2) + " (cm)")
print(" z = " + str(z2) + " (cm)")
ft-lab/omniverse_sample_scripts/PLATEAU/calcDistance.py

from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
# Get stage.
stage = omni.usd.get_context().get_stage()
# -------------------------------------------------.
# Calculate bounding box in world coordinates.
# -------------------------------------------------.
def _calcWorldBoundingBox (prim : Usd.Prim):
# Calc world boundingBox.
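    # BBoxCache computes bounds at the default TimeCode for prims with the "default" purpose.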
bboxCache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default"])
bboxD = bboxCache.ComputeWorldBound(prim).ComputeAlignedRange()
bb_min = Gf.Vec3f(bboxD.GetMin())
bb_max = Gf.Vec3f(bboxD.GetMax())
return bb_min, bb_max
# -------------------------------------------------.
# Calculate the distance between two selected shapes.
# -------------------------------------------------.
# Get selection.
selection = omni.usd.get_context().get_selection()
paths = selection.get_selected_prim_paths()
wPosList = []
for path in paths:
# Get prim.
prim = stage.GetPrimAtPath(path)
if prim.IsValid():
bbMin, bbMax = _calcWorldBoundingBox(prim)
wCenter = Gf.Vec3f((bbMax[0] + bbMin[0]) * 0.5, (bbMax[1] + bbMin[1]) * 0.5, (bbMax[2] + bbMin[2]) * 0.5)
wPosList.append(wCenter)
continue
if len(wPosList) == 2:
distV = (wPosList[1] - wPosList[0]).GetLength()
print("Distance : " + str(distV) + " cm ( " + str(distV * 0.01) + " m)")
ft-lab/omniverse_sample_scripts/Animation/readme.md

# Animation
Animation-related processing samples.
|File|Description|
|---|---|
|[GetTimeCode.py](./GetTimeCode.py)|Gets the start/end TimeCode of the current Stage and the TimeCodesPerSecond (frame rate).|
|[GetCurrentTimeCode.py](./GetCurrentTimeCode.py)|Gets the current time code (frame position).|
ft-lab/omniverse_sample_scripts/Animation/GetTimeCode.py

from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
# Get stage.
stage = omni.usd.get_context().get_stage()
# Get TimeCode.
print(f"Start TimeCode : {stage.GetStartTimeCode()}")
print(f"End TimeCode : {stage.GetEndTimeCode()}")
# Get frame rate.
print(f"TimeCodesPerSecond : {stage.GetTimeCodesPerSecond()}")
ft-lab/omniverse_sample_scripts/Animation/GetCurrentTimeCode.py

from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.timeline
# Get stage.
stage = omni.usd.get_context().get_stage()
# Get current timeCode.
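# get_current_time() returns seconds, so multiply by TimeCodesPerSecond to get the frame-based time code.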
time_code = omni.timeline.get_timeline_interface().get_current_time() * stage.GetTimeCodesPerSecond()
print(f"Current timeCode : {time_code}")
ft-lab/Omniverse_OmniGraph_ClockSample/readme.md

# ft_lab.OmniGraph.GetDateTime
This sample uses OmniGraph to reflect the current time on analog and digital clocks created as 3D models.

This is a sample project in which OmniGraph custom nodes are prepared with a Python Extension to control a pre-prepared 3D model.
## How to use
I have confirmed that it works with ~~Omniverse Create 2022.3.3~~ USD Composer 2023.2.2 (Kit 105.1.2).
Download and use this repository locally.
```
[extension]
[ft_lab.OmniGraph.GetDateTime] ... Extension(OmniGraph Nodes) used in this project
[usds] sample scene
[Clock]
[textures]
clock.usd
[ClockDigital]
[textures]
clock_digital.usd
clock_stage.usd ... Open and use this locally.
```
* Register and enable the Extension in Omniverse Create.
Copy "[ft_lab.OmniGraph.GetDateTime](./extension/ft_lab.OmniGraph.GetDateTime/)" to a folder where Omniverse can find it as an Extension.

* Open "[clock_stage.usd](./usds/clock_stage.usd)" in Omniverse Create.
It references two USD files, "[clock.usd](./usds/Clock/clock.usd)" and "[clock_digital.usd](./usds/ClockDigital/clock_digital.usd)".
You can now see the current time reflected in the analog and digital clocks.

## Documents
* [Description of OmniGraph nodes](./OmniGraphNodes.md)
## Documents for Development
* [Extension Structure](./docs/ExtensionStructure.md)
* [GetDateTime](./docs/node_GetDateTime.md)
* [RotationByTime](./docs/node_RotationByTime.md)
* [OutputToLCD](./docs/node_OutputToLCD.md)
* [3D Models](./docs/Modeling3D.md)
## Change Log
* [Change Log](./ChangeLog.md)
## License
This software is released under the MIT License, see [LICENSE.txt](./LICENSE.txt).
ft-lab/Omniverse_OmniGraph_ClockSample/ChangeLog.md

# Change Log
## December 22, 2023
Fixed in USD Composer 2023.2.2 (Kit.105.1.2)
### xxxxDatabase.py
The icons were not reflected until these two internal versions were updated.
* GENERATOR_VERSION : (1, 31, 1) -> (1, 41, 3)
* TARGET_VERSION : (2, 107, 4) -> (2, 139, 12)
## July 11, 2023
Updated for USD Composer 2023.1.0-beta (Kit.105), from Omniverse Create 2022.3.3 (Kit.104).
### [RotationByTime.ogn](extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/RotationByTime.ogn)
"type": "float3" -> "float[3]"
### xxxxDatabase.py
* GENERATOR_VERSION : (1, 17, 2) -> (1, 31, 1)
* TARGET_VERSION : (2, 65, 4) -> (2, 107, 4)
ft-lab/Omniverse_OmniGraph_ClockSample/OmniGraphNodes.md

# Description of OmniGraph nodes
This extension consists of three custom nodes.

The three nodes are added to the "Examples" category of the Graph.
They are evaluated as a Push Graph.

## Get DateTime
Get the current local date and time.

### Outputs
* Year (int)
* Month (int)
* Day (int)
* Hour (int)
* Minute (int)
* Second (int)
## Rotation By Time
Given an hour, minute, and second, returns the rotation XYZ (in degrees) for each clock hand.
Used in analog clock rotation.

### Inputs
* Default RotateXYZ : Default rotation value (float3)
* Rotation Axis : Rotation axis (0:X, 1:Y, 2:Z)
* Hour (int)
* Minute (int)
* Second (int)
### Outputs
* Hour RotateXYZ : Hour rotation value (float3)
* Minute RotateXYZ : Minute rotation value (float3)
* Second RotateXYZ : Second rotation value (float3)
Connect the Output value of the Get DateTime node to the Hour/Minute/Second of Inputs.
The analog clock "[clock.usd](./usds/Clock/clock.usd)" referenced in this stage has a default rotation of Rotate(90, 0, 0).
It also rotates the hands of the clock around the Y axis.
This is the same for Hour/Minute/Second hands.

In Inputs, set "Default RotateXYZ" to (90, 0, 0) and "Rotation Axis" to 1 (Y).
This input returns the calculated rotation values for "Hour RotateXYZ", "Minute RotateXYZ", and "Second RotateXYZ".
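As a plain-Python illustration of this calculation (mirroring nodes/RotationByTime.py, which is included later in this repository):

```python
def clock_rotations(hour, minute, second, default_xyz=(90.0, 0.0, 0.0), axis=1):
    """Rotation values produced by the Rotation By Time node (axis=1 is Y)."""
    hour_v, minute_v, second_v = list(default_xyz), list(default_xyz), list(default_xyz)
    second_v[axis] = (second / 60.0) * 360.0                               # 6 deg per second
    minute_v[axis] = ((minute * 60.0 + second) / 3600.0) * 360.0           # one full turn per hour
    hour_v[axis] = ((hour * 60.0 + minute) / (60.0 * 24.0)) * 360.0 * 2.0  # two turns per day
    return hour_v, minute_v, second_v

# At 3:30:00 the hour hand sits halfway between 3 and 4 o'clock:
# clock_rotations(3, 30, 0) -> ([90.0, 105.0, 0.0], [90.0, 180.0, 0.0], [90.0, 0.0, 0.0])
```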
The clock hand prims are added to the Graph as "Write Prim Attribute" nodes.

In this case, select "xformOp:rotateXYZ" for the "Attribute Name".

Connect "Hour RotateXYZ", "Minute RotateXYZ", and "Second RotateXYZ" of "Rotation By Time" to the Value of this node.
This is all that is required to move the hands of an analog clock.
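For reference, the same wiring can also be built from Python with the omni.graph.core Controller API. The sketch below is illustrative and untested: the graph path and hand prim path are placeholders, and the input names of the built-in "Write Prim Attribute" node (omni.graph.nodes.WritePrimAttribute) can differ between Kit versions, so verify them in your environment. The custom node attribute names match the Database files shown later in this repository.

```python
import omni.graph.core as og

# Hypothetical paths -- replace "/World/ClockGraph" and "/World/Clock/hour_hand"
# with the graph/prim locations in your own stage.
og.Controller.edit("/World/ClockGraph", {
    og.Controller.Keys.CREATE_NODES: [
        ("GetDateTime", "ft_lab.OmniGraph.GetDateTime.GetDateTime"),
        ("RotationByTime", "ft_lab.OmniGraph.GetDateTime.RotationByTime"),
        ("WriteHour", "omni.graph.nodes.WritePrimAttribute"),
    ],
    og.Controller.Keys.SET_VALUES: [
        ("RotationByTime.inputs:a1_defaultRotateXYZ", [90.0, 0.0, 0.0]),
        ("RotationByTime.inputs:a2_rotationAxis", 1),   # 1 = Y axis
        ("WriteHour.inputs:primPath", "/World/Clock/hour_hand"),  # hypothetical prim path
        ("WriteHour.inputs:usePath", True),
        ("WriteHour.inputs:name", "xformOp:rotateXYZ"),
    ],
    og.Controller.Keys.CONNECT: [
        ("GetDateTime.outputs:b1_hour", "RotationByTime.inputs:b1_hour"),
        ("GetDateTime.outputs:b2_minute", "RotationByTime.inputs:b2_minute"),
        ("GetDateTime.outputs:b3_second", "RotationByTime.inputs:b3_second"),
        ("RotationByTime.outputs:a1_hourRotateXYZ", "WriteHour.inputs:value"),
    ],
})
```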
## Time Output To LCD
This node controls a virtual 7-segment LED LCD screen.
Show/Hide the Prim specified in Input to display the digital clock.

### Inputs
* HourNum10 Prim : Specify the tens-digit Prim of the hour (token)
* HourNum1 Prim : Specify the ones-digit Prim of the hour (token)
* MinuteNum10 Prim : Specify the tens-digit Prim of the minute (token)
* MinuteNum1 Prim : Specify the ones-digit Prim of the minute (token)
* AM Prim : Specify the prim to display "AM" (token)
* PM Prim : Specify the prim to display "PM" (token)
* Hour (int)
* Minute (int)
* Second (int)
The digital clock is controlled by showing/hiding the respective parts of the virtual LCD screen.

"AM" and "PM" are each a single prim (mesh).
Hours and minutes use two-digit, seven-segment LEDs.
Each digit consists of seven Prims (meshes) named A, B, C, D, E, F, and G.

By showing/hiding this 7-segment LED component, a numerical value from 0-9 is represented.
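The mapping from digit to visible segments is a simple 7-bit mask (bit order xABCDEFG, with the top bit unused), as used in the node implementation (nodes/OutputToLCD.py, shown later in this repository). A minimal standalone sketch of that lookup:

```python
# Masks taken from OutputToLCD.py: e.g. 0x7e = 0b01111110 lights segments A-F to draw "0".
SEGMENT_NAMES = ["A", "B", "C", "D", "E", "F", "G"]
DIGIT_MASKS = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]

def lit_segments(digit: int):
    """Return the names of the segment prims that must be visible for `digit`."""
    mask = DIGIT_MASKS[digit % 10]
    return [name for i, name in enumerate(SEGMENT_NAMES) if mask & (0x40 >> i)]

print(lit_segments(2))  # ['A', 'B', 'D', 'E', 'G']
```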
The Hour, Minute, and Second inputs to the "Time Output To LCD" node are connected from the output of "Get DateTime".
Each input to the "Time Output To LCD" node uses the "Source Prim Path" of the Read Bundle.

The AM, PM, and the four LED digit Prims are connected.
This allows the digital clock to reflect the current time.
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/config/extension.toml

[package]
# Semantic Versioning is used: https://semver.org/
version = "0.0.1"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["ft-lab"]
# The title and description fields are primarily for displaying extension info in UI
title = "OmniGraph : Get DateTime"
description="OmniGraph sample node. Get datetime."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example", "omnigraph"]
# Location of change log file in target (final) folder of extension, relative to the root. Can also be just a content
# of it instead of file path. More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.jpg"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["*Database.py","*/ogn*"]
# OmniGraph dependencies:
[dependencies]
"omni.graph" = {}
"omni.graph.nodes" = {}
"omni.graph.tools" = {}
# Main python module this extension provides.
[[python.module]]
name = "ft_lab.OmniGraph.GetDateTime"
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/extension.py

import omni.ext
import importlib
import os
from .ogn import *
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class SimpleNodeExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[ft_lab.OmniGraph.GetDateTime] startup")
def on_shutdown(self):
print("[ft_lab.OmniGraph.GetDateTime] shutdown")
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/GetDateTimeDatabase.py

import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb
class GetDateTimeDatabase(og.Database):
"""Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.GetDateTime
Class Members:
node: Node being evaluated
Attribute Value Properties:
Inputs:
Outputs:
outputs.a1_year
outputs.a2_month
outputs.a3_day
outputs.b1_hour
outputs.b2_minute
outputs.b3_second
"""
# Omniverse Create 2022.3.3 (Kit.104)
#GENERATOR_VERSION = (1, 17, 2)
#TARGET_VERSION = (2, 65, 4)
# Imprint the generator and target ABI versions in the file for JIT generation
# USD Composer 2023.2.2 (Kit.105.1.2)
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
# This is an internal object that provides per-class storage of a per-node data dictionary
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('outputs:a1_year', 'int', 0, 'Year', 'output year', {ogn.MetadataKeys.DEFAULT: '2000'}, True, 0, False, ''),
('outputs:a2_month', 'int', 0, 'Month', 'output month', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:a3_day', 'int', 0, 'Day', 'output day', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:b1_hour', 'int', 0, 'Hour', 'output hour', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:b2_minute', 'int', 0, 'Minute', 'output minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('outputs:b3_second', 'int', 0, 'Second', 'output second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
])
# ----------------------------------------------------.
# Processing Output Parameter.
# ----------------------------------------------------.
class ValuesForOutputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_month", "b3_second" }
"""Helper class that creates natural hierarchical access to output attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedWriteValues = { }
@property
def a1_year(self):
value = self._batchedWriteValues.get(self._attributes.a1_year)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_year)
return data_view.get()
@a1_year.setter
def a1_year(self, value):
self._batchedWriteValues[self._attributes.a1_year] = value
@property
def a2_month(self):
value = self._batchedWriteValues.get(self._attributes.a2_month)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a2_month)
return data_view.get()
@a2_month.setter
def a2_month(self, value):
self._batchedWriteValues[self._attributes.a2_month] = value
@property
def a3_day(self):
value = self._batchedWriteValues.get(self._attributes.a3_day)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a3_day)
return data_view.get()
@a3_day.setter
def a3_day(self, value):
self._batchedWriteValues[self._attributes.a3_day] = value
@property
def b1_hour(self):
value = self._batchedWriteValues.get(self._attributes.b1_hour)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b1_hour)
return data_view.get()
@b1_hour.setter
def b1_hour(self, value):
self._batchedWriteValues[self._attributes.b1_hour] = value
@property
def b2_minute(self):
value = self._batchedWriteValues.get(self._attributes.b2_minute)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b2_minute)
return data_view.get()
@b2_minute.setter
def b2_minute(self, value):
self._batchedWriteValues[self._attributes.b2_minute] = value
@property
def b3_second(self):
value = self._batchedWriteValues.get(self._attributes.b3_second)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b3_second)
return data_view.get()
@b3_second.setter
def b3_second(self, value):
self._batchedWriteValues[self._attributes.b3_second] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _commit(self):
_og._commit_output_attributes_data(self._batchedWriteValues)
self._batchedWriteValues = { }
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
self.outputs = GetDateTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = GetDateTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
# ----------------------------------------------------.
# Class defining the ABI interface for the node type.
# ----------------------------------------------------.
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.GetDateTime'
@staticmethod
def compute(context, node):
def database_valid():
return True
try:
per_node_data = GetDateTimeDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = GetDateTimeDatabase(node)
per_node_data['_db'] = db
if not database_valid():
per_node_data['_db'] = None
return False
except:
db = GetDateTimeDatabase(node)
try:
compute_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
with og.in_compute():
return GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.outputs._commit()
return False
@staticmethod
def initialize(context, node):
GetDateTimeDatabase._initialize_per_node_data(node)
initialize_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
GetDateTimeDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Get DateTime")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Get current date and time")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.icon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type)
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
NODE_TYPE_CLASS = None
@staticmethod
def register(node_type_class):
GetDateTimeDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(GetDateTimeDatabase.abi, 1)
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.GetDateTime")
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/OutputToLCDDatabase.py

import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb
from typing import Any
class OutputToLCDDatabase(og.Database):
"""Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.OutputToDatabaseDatabase
Class Members:
node: Node being evaluated
Attribute Value Properties:
Inputs:
inputs.a1_hourNum10Prim
inputs.a2_hourNum1Prim
inputs.b1_minuteNum10Prim
inputs.b2_minuteNum1Prim
inputs.c1_amPrim
inputs.c2_pmPrim
inputs.d1_hour
inputs.d2_minute
inputs.d3_second
Outputs:
"""
# Omniverse Create 2022.3.3 (Kit.104)
#GENERATOR_VERSION = (1, 17, 2)
#TARGET_VERSION = (2, 65, 4)
# Imprint the generator and target ABI versions in the file for JIT generation
# USD Composer 2023.2.2 (Kit.105.1.2)
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
# This is an internal object that provides per-class storage of a per-node data dictionary
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('inputs:a1_hourNum10Prim', 'token', 0, 'HourNum10 Prim', 'HourNum10 Prim', {}, True, None, False, ''),
('inputs:a2_hourNum1Prim', 'token', 0, 'HourNum1 Prim', 'HourNum1 Prim', {}, True, None, False, ''),
('inputs:b1_minuteNum10Prim', 'token', 0, 'MinuteNum10 Prim', 'MinuteNum10 Prim', {}, True, None, False, ''),
('inputs:b2_minuteNum1Prim', 'token', 0, 'MinuteNum1 Prim', 'MinuteNum1 Prim', {}, True, None, False, ''),
('inputs:c1_amPrim', 'token', 0, 'AM Prim', 'AM Prim', {}, True, None, False, ''),
('inputs:c2_pmPrim', 'token', 0, 'PM Prim', 'PM Prim', {}, True, None, False, ''),
('inputs:d1_hour', 'int', 0, 'Hour', 'Hour', {}, True, 0, False, ''),
('inputs:d2_minute', 'int', 0, 'Minute', 'Minute', {}, True, 0, False, ''),
('inputs:d3_second', 'int', 0, 'Second', 'Second', {}, True, 0, False, ''),
])
# ----------------------------------------------------.
# Processing Input Parameters.
# ----------------------------------------------------.
class ValuesForInputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"}
"""Helper class that creates natural hierarchical access to input attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second]
self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0]
@property
def a1_hourNum10Prim(self):
return self._batchedReadValues[0]
@a1_hourNum10Prim.setter
def a1_hourNum10Prim(self, value):
self._batchedReadValues[0] = value
@property
def a2_hourNum1Prim(self):
return self._batchedReadValues[1]
@a2_hourNum1Prim.setter
def a2_hourNum1Prim(self, value):
self._batchedReadValues[1] = value
@property
def b1_minuteNum10Prim(self):
return self._batchedReadValues[2]
@b1_minuteNum10Prim.setter
def b1_minuteNum10Prim(self, value):
self._batchedReadValues[2] = value
@property
def b2_minuteNum1Prim(self):
return self._batchedReadValues[3]
@b2_minuteNum1Prim.setter
def b2_minuteNum1Prim(self, value):
self._batchedReadValues[3] = value
@property
def c1_amPrim(self):
return self._batchedReadValues[4]
@c1_amPrim.setter
def c1_amPrim(self, value):
self._batchedReadValues[4] = value
@property
def c2_pmPrim(self):
return self._batchedReadValues[5]
@c2_pmPrim.setter
def c2_pmPrim(self, value):
self._batchedReadValues[5] = value
@property
def d1_hour(self):
return self._batchedReadValues[6]
@d1_hour.setter
def d1_hour(self, value):
self._batchedReadValues[6] = value
@property
def d2_minute(self):
return self._batchedReadValues[7]
@d2_minute.setter
def d2_minute(self, value):
self._batchedReadValues[7] = value
@property
def d3_second(self):
return self._batchedReadValues[8]
@d3_second.setter
def d3_second(self, value):
self._batchedReadValues[8] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _prefetch(self):
readAttributes = self._batchedReadAttributes
newValues = _og._prefetch_input_attributes_data(readAttributes)
if len(readAttributes) == len(newValues):
self._batchedReadValues = newValues
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
self.inputs = OutputToLCDDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = OutputToLCDDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
# ----------------------------------------------------.
# Class defining the ABI interface for the node type.
# ----------------------------------------------------.
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.OutputToLCD'
@staticmethod
def compute(context, node):
def database_valid():
return True
try:
per_node_data = OutputToLCDDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = OutputToLCDDatabase(node)
per_node_data['_db'] = db
if not database_valid():
per_node_data['_db'] = None
return False
except:
db = OutputToLCDDatabase(node)
try:
compute_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
db.inputs._prefetch()
db.inputs._setting_locked = True
with og.in_compute():
return OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.inputs._setting_locked = False
#db.outputs._commit()
return False
@staticmethod
def initialize(context, node):
OutputToLCDDatabase._initialize_per_node_data(node)
initialize_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
OutputToLCDDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Time output to LCD")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Time output to LCD")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.outputToLCD.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type)
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
NODE_TYPE_CLASS = None
@staticmethod
def register(node_type_class):
OutputToLCDDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(OutputToLCDDatabase.abi, 1)
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.OutputToLCD")
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/RotationByTimeDatabase.py

import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb
class RotationByTimeDatabase(og.Database):
"""Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.RotationByTime
Class Members:
node: Node being evaluated
Attribute Value Properties:
Inputs:
inputs.a1_defaultRotateXYZ
inputs.a2_rotationAxis
inputs.b1_hour
inputs.b2_minute
inputs.b3_second
Outputs:
outputs.a1_hourRotateXYZ
outputs.a2_minuteRotateXYZ
outputs.a3_secondRotateXYZ
"""
# Omniverse Create 2022.3.3 (Kit.104)
#GENERATOR_VERSION = (1, 17, 2)
#TARGET_VERSION = (2, 65, 4)
# Imprint the generator and target ABI versions in the file for JIT generation
# USD Composer 2023.2.2 (Kit.105.1.2)
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
# This is an internal object that provides per-class storage of a per-node data dictionary
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('inputs:a1_defaultRotateXYZ', 'float[3]', 0, 'Default RotateXYZ', 'Default rotateXYZ', {}, True, None, False, ''),
('inputs:a2_rotationAxis', 'int', 0, 'Rotation Axis', 'Rotation axis (0:X, 1:Y, 2:Z)', {}, True, None, False, ''),
('inputs:b1_hour', 'int', 0, 'Hour', 'Hour', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('inputs:b2_minute', 'int', 0, 'Minute', 'Minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('inputs:b3_second', 'int', 0, 'Second', 'Second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('outputs:a1_hourRotateXYZ', 'float[3]', 0, 'Hour RotateXYZ', 'Hour RotateXYZ', {}, True, None, False, ''),
('outputs:a2_minuteRotateXYZ', 'float[3]', 0, 'Minute RotateXYZ', 'Minute RotateXYZ', {}, True, None, False, ''),
('outputs:a3_secondRotateXYZ', 'float[3]', 0, 'Second RotateXYZ', 'Second RotateXYZ', {}, True, None, False, ''),
])
# ----------------------------------------------------.
# Processing Input Parameters.
# ----------------------------------------------------.
class ValuesForInputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"}
"""Helper class that creates natural hierarchical access to input attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second]
self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0]
@property
def a1_defaultRotateXYZ(self):
return self._batchedReadValues[0]
@a1_defaultRotateXYZ.setter
def a1_defaultRotateXYZ(self, value):
self._batchedReadValues[0] = value
@property
def a2_rotationAxis(self):
return self._batchedReadValues[1]
@a2_rotationAxis.setter
def a2_rotationAxis(self, value):
self._batchedReadValues[1] = value
@property
def b1_hour(self):
return self._batchedReadValues[2]
@b1_hour.setter
def b1_hour(self, value):
self._batchedReadValues[2] = value
@property
def b2_minute(self):
return self._batchedReadValues[3]
@b2_minute.setter
def b2_minute(self, value):
self._batchedReadValues[3] = value
@property
def b3_second(self):
return self._batchedReadValues[4]
@b3_second.setter
def b3_second(self, value):
self._batchedReadValues[4] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _prefetch(self):
readAttributes = self._batchedReadAttributes
newValues = _og._prefetch_input_attributes_data(readAttributes)
if len(readAttributes) == len(newValues):
self._batchedReadValues = newValues
# ----------------------------------------------------.
# Processing Output Parameter.
# ----------------------------------------------------.
class ValuesForOutputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuiteRotateXYZ", "a3_secondRotateXYZ" }
"""Helper class that creates natural hierarchical access to output attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedWriteValues = { }
@property
def a1_hourRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
return data_view.get()
@a1_hourRotateXYZ.setter
def a1_hourRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value
@property
def a2_minuteRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a2_minuteRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a2_minuteRotateXYZ)
return data_view.get()
@a2_minuteRotateXYZ.setter
def a2_minuteRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a2_minuteRotateXYZ] = value
@property
def a3_secondRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a3_secondRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a3_secondRotateXYZ)
return data_view.get()
@a3_secondRotateXYZ.setter
def a3_secondRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a3_secondRotateXYZ] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _commit(self):
_og._commit_output_attributes_data(self._batchedWriteValues)
self._batchedWriteValues = { }
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
self.inputs = RotationByTimeDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
self.outputs = RotationByTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = RotationByTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
# ----------------------------------------------------.
# Class defining the ABI interface for the node type.
# ----------------------------------------------------.
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.RotationByTime'
@staticmethod
def compute(context, node):
def database_valid():
return True
try:
per_node_data = RotationByTimeDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = RotationByTimeDatabase(node)
per_node_data['_db'] = db
if not database_valid():
per_node_data['_db'] = None
return False
except:
db = RotationByTimeDatabase(node)
try:
compute_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
db.inputs._prefetch()
db.inputs._setting_locked = True
with og.in_compute():
return RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.inputs._setting_locked = False
db.outputs._commit()
return False
@staticmethod
def initialize(context, node):
RotationByTimeDatabase._initialize_per_node_data(node)
initialize_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
RotationByTimeDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Rotation By Time")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Rotation By Time")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.rotationByTimeIcon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type)
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
NODE_TYPE_CLASS = None
@staticmethod
def register(node_type_class):
RotationByTimeDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(RotationByTimeDatabase.abi, 1)
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.RotationByTime")
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/GetDateTime.py

"""
Get date time.
"""
import numpy as np
import omni.ext
import datetime
class GetDateTime:
@staticmethod
def compute(db) -> bool:
try:
# Get current date and time.
now = datetime.datetime.now()
db.outputs.a1_year = now.year
db.outputs.a2_month = now.month
db.outputs.a3_day = now.day
db.outputs.b1_hour = now.hour
db.outputs.b2_minute = now.minute
db.outputs.b3_second = now.second
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/OutputToLCD.py

"""
Time output to LCD (hh:mm).
"""
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import numpy as np
import omni.ext
class OutputToLCD:
@staticmethod
def compute(db) -> bool:
try:
hour = db.inputs.d1_hour
minute = db.inputs.d2_minute
second = db.inputs.d3_second
# xABCDEFG => 0b01111110 = 0x7e = '0'
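            # Each 7-bit mask lists which segments A-G are lit (MSB-first after the unused top bit) for the digits 0-9.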
nameList = ["A", "B", "C", "D", "E", "F", "G"]
numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]
# Get stage.
stage = omni.usd.get_context().get_stage()
# Show/hide "AM"
if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c1_amPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible')
# Show/hide "PM"
if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible')
            # Hour : tens digit.
hour12 = hour if (hour < 12) else (hour - 12)
if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "":
basePrimPath = db.inputs.a1_hourNum10Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12 / 10) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Hour : ones digit.
if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "":
basePrimPath = db.inputs.a2_hourNum1Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Minute : tens digit.
if db.inputs.b1_minuteNum10Prim != None and db.inputs.b1_minuteNum10Prim != "":
basePrimPath = db.inputs.b1_minuteNum10Prim
shiftV = 0x40
maskV = numMaskList[(int)(minute / 10) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Minute : ones digit.
if db.inputs.b2_minuteNum1Prim != None and db.inputs.b2_minuteNum1Prim != "":
basePrimPath = db.inputs.b2_minuteNum1Prim
shiftV = 0x40
maskV = numMaskList[(int)(minute) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/RotationByTime.py

"""
Rotation by time.
"""
import numpy as np
import omni.ext
class RotationByTime:
@staticmethod
def compute(db) -> bool:
try:
# Calculate clock rotation from seconds.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a3_secondRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b3_second) / 60.0) * 360.0
# Calculate clock rotation from minutes.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a2_minuteRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b2_minute * 60.0 + db.inputs.b3_second) / (60.0 * 60.0)) * 360.0
# Calculate clock rotation from hours.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a1_hourRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b1_hour * 60.0 + db.inputs.b2_minute) / (60.0 * 24.0)) * 360.0 * 2.0
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/docs/CHANGELOG.md

# CHANGELOG
This document records all notable changes to the ``ft_lab.OmniGraph.GetDateTime`` extension.
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/docs/README.md

# GetDateTime [ft_lab.OmniGraph.GetDateTime]
This sample uses OmniGraph to reflect the current time on analog and digital clocks created as 3D models.
This extension consists of three custom nodes.
## Get DateTime
Get the current local date and time.
### Output
* Year (int)
* Month (int)
* Day (int)
* Hour (int)
* Minute (int)
* Second (int)
## Rotation By Time
Given an hour, minute, and second, returns the rotation XYZ (in degrees) for each clock hand.
Used in analog clock rotation.
### Input
* Default RotateXYZ : Default rotation value (float3)
* Rotation Axis : Rotation axis (0:X, 1:Y, 2:Z)
* Hour (int)
* Minute (int)
* Second (int)
### Output
* Hour RotateXYZ : Hour rotation value (float3)
* Minute RotateXYZ : Minute rotation value (float3)
* Second RotateXYZ : Second rotation value (float3)
## Time Output To LCD
This node controls a virtual 7-segment LED LCD screen.
Show/Hide the Prim specified in Input to display the digital clock.
### Input
* HourNum10 Prim : Specify the tens-digit Prim of the hour (token)
* HourNum1 Prim : Specify the ones-digit Prim of the hour (token)
* MinuteNum10 Prim : Specify the tens-digit Prim of the minute (token)
* MinuteNum1 Prim : Specify the ones-digit Prim of the minute (token)
* AM Prim : Specify the prim to display "AM" (token)
* PM Prim : Specify the prim to display "PM" (token)
* Hour (int)
* Minute (int)
* Second (int)
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_GetDateTime.md

# GetDateTime
Get the current local date and time.

## GetDateTime.json
```json
{
"GetDateTime": {
"version": 1,
"categories": "examples",
"description": "Get datetime node.",
"language": "Python",
"metadata": {
"uiName": "Get DateTime"
},
"inputs": {
},
"outputs": {
"a1_year": {
"type": "int",
"description": "year",
"default": 2000,
"metadata": {
"uiName": "Year"
}
},
"a2_month": {
"type": "int",
"description": "month",
"default": 1,
"metadata": {
"uiName": "Month"
}
},
"a3_day": {
"type": "int",
"description": "day",
"default": 1,
"metadata": {
"uiName": "Day"
}
},
"b1_hour": {
"type": "int",
"description": "hour",
"default": 1,
"metadata": {
"uiName": "Hour"
}
},
"b2_minute": {
"type": "int",
"description": "minute",
"default": 1,
"metadata": {
"uiName": "Minute"
}
},
"b3_second": {
"type": "int",
"description": "second",
"default": 1,
"metadata": {
"uiName": "Second"
}
}
}
}
}
```

No inputs are provided, since the node only outputs the current time.
The date and time are output as int values.
### Outputs
|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_year|int|Year|year|
|a2_month|int|Month|month|
|a3_day|int|Day|day|
|b1_hour|int|Hour|hour|
|b2_minute|int|Minute|minute|
|b3_second|int|Second|second|
The "a1_" or "b1_" at the beginning of the attribute name is used to display the data in ascending order when it is displayed in a graph.
This is done to prevent the node inputs/outputs from being sorted in ascending order as ASCII code strings when displaying the inputs/outputs of the node in the UI.
The order is ascending by attribute name, and the display name is the UI name.
## GetDateTime.py
"GetDateTime.py" defines what the node actually does.
```python
import numpy as np
import omni.ext
import datetime
class GetDateTime:
@staticmethod
def compute(db) -> bool:
try:
# Get current date and time.
now = datetime.datetime.now()
db.outputs.a1_year = now.year
db.outputs.a2_month = now.month
db.outputs.a3_day = now.day
db.outputs.b1_hour = now.hour
db.outputs.b2_minute = now.minute
db.outputs.b3_second = now.second
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
```
Get the date and time and store them in the outputs.
Data is set to "db.outputs.[Attribute name]".
## GetDateTimeDatabase.py
This file registers the OmniGraph node with the Extension.
The code is mostly boilerplate, so once written it can largely be reused.
In "GetDateTimeDatabase.py", define the class "GetDateTimeDatabase(og.Database)".
```python
import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb
class GetDateTimeDatabase(og.Database):
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('outputs:a1_year', 'int', 0, 'Year', 'output year', {ogn.MetadataKeys.DEFAULT: '2000'}, True, 0, False, ''),
('outputs:a2_month', 'int', 0, 'Month', 'output month', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:a3_day', 'int', 0, 'Day', 'output day', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:b1_hour', 'int', 0, 'Hour', 'output hour', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
('outputs:b2_minute', 'int', 0, 'Minute', 'output minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('outputs:b3_second', 'int', 0, 'Second', 'output second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
])
```
"INTERFACE" enumerates attribute data.
The input and output data, in turn, will include the following.
* Attribute name
* Type (To allow more than one, separate them with a comma)
* Type index (specify 0 for a single type, or 1 when multiple types are specified)
* Display name in UI
* Description
* Meta data
* Necessary or not (True, False)
* Default value
* Deprecated (True, False)
* Message when deprecated
The attribute names and types must match those specified in the ogn file.
For an OmniGraph node provided by an Extension, this table (rather than the ogn file) appears to be what is actually referenced.
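As an illustration, here is the first INTERFACE entry above broken down field by field (the annotations are added for this guide and are not part of the generated code):

```python
('outputs:a1_year',                   # attribute name
 'int',                               # type
 0,                                   # type index (0: single type)
 'Year',                              # UI name
 'output year',                       # description
 {ogn.MetadataKeys.DEFAULT: '2000'},  # metadata
 True,                                # required
 0,                                   # default value
 False,                               # deprecated
 '')                                  # message when deprecated
```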
### ValuesForOutputs
The outputs designation is described in the "ValuesForOutputs" class.
```python
class ValuesForOutputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_month", "b3_second" }
"""Helper class that creates natural hierarchical access to output attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedWriteValues = { }
@property
def a1_year(self):
value = self._batchedWriteValues.get(self._attributes.a1_year)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_year)
return data_view.get()
@a1_year.setter
def a1_year(self, value):
self._batchedWriteValues[self._attributes.a1_year] = value
@property
def a2_month(self):
value = self._batchedWriteValues.get(self._attributes.a2_month)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a2_month)
return data_view.get()
@a2_month.setter
def a2_month(self, value):
self._batchedWriteValues[self._attributes.a2_month] = value
@property
def a3_day(self):
value = self._batchedWriteValues.get(self._attributes.a3_day)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a3_day)
return data_view.get()
@a3_day.setter
def a3_day(self, value):
self._batchedWriteValues[self._attributes.a3_day] = value
@property
def b1_hour(self):
value = self._batchedWriteValues.get(self._attributes.b1_hour)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b1_hour)
return data_view.get()
@b1_hour.setter
def b1_hour(self, value):
self._batchedWriteValues[self._attributes.b1_hour] = value
@property
def b2_minute(self):
value = self._batchedWriteValues.get(self._attributes.b2_minute)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b2_minute)
return data_view.get()
@b2_minute.setter
def b2_minute(self, value):
self._batchedWriteValues[self._attributes.b2_minute] = value
@property
def b3_second(self):
value = self._batchedWriteValues.get(self._attributes.b3_second)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.b3_second)
return data_view.get()
@b3_second.setter
def b3_second(self, value):
self._batchedWriteValues[self._attributes.b3_second] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _commit(self):
_og._commit_output_attributes_data(self._batchedWriteValues)
self._batchedWriteValues = { }
```
Specify the attribute names to be used in order in "LOCAL_PROPERTY_NAMES".
```python
LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_month", "b3_second" }
```
Specify getter/setter for each attribute.
If the attribute type is fixed, simply change the attribute name.
```python
@property
def a1_year(self):
value = self._batchedWriteValues.get(self._attributes.a1_year)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_year)
return data_view.get()
@a1_year.setter
def a1_year(self, value):
self._batchedWriteValues[self._attributes.a1_year] = value
```
"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_commit" can be copied and pasted as is.
### ValuesForState(og.DynamicAttributeAccess)
The ValuesForState class in "GetDateTimeDatabase" can be reused by copying and pasting it as-is; only the enclosing class name needs to match.
```python
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
```
### \_\_init\_\_
In "\_\_init\_\_", outputs and state classes are created.
```python
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
self.outputs = GetDateTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = GetDateTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```
This GetDateTimeDatabase class has no inputs, so no inputs accessor is created.
### class abi
Define the connections for the OmniGraph node.
Think of the abi class as the standard flow every node follows.
Basically, the designation to the ABI interface is a canned statement.
```python
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.GetDateTime'
```
Since this Extension is named "ft_lab.OmniGraph.GetDateTime" and this node is "GetDateTime", "ft_lab.OmniGraph.GetDateTime.GetDateTime" is specified as the return value.
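As a side note, this registered type name is what you would pass when creating the node from a script; a minimal sketch (the graph path "/World/ActionGraph" is just an example):
```python
import omni.graph.core as og

# Create an instance of the custom node in a graph by its registered type name.
(graph, nodes, _, _) = og.Controller.edit(
    "/World/ActionGraph",
    {og.Controller.Keys.CREATE_NODES: [
        ("getDateTime", "ft_lab.OmniGraph.GetDateTime.GetDateTime"),
    ]},
)
```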
The compute method is called when this node is executed.
This, too, is almost a canned statement.
```python
@staticmethod
def compute(context, node):
try:
per_node_data = GetDateTimeDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = GetDateTimeDatabase(node)
per_node_data['_db'] = db
except:
db = GetDateTimeDatabase(node)
try:
compute_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
with og.in_compute():
return GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.outputs._commit()
return False
```
The compute method of GetDateTime.py is called from "GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db)".
initialize, release, and update_node_version are written as-is; only the class names need to match.
This is also a canned statement.
```python
@staticmethod
def initialize(context, node):
GetDateTimeDatabase._initialize_per_node_data(node)
initialize_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
GetDateTimeDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
```
The initialize_type method specifies information about the OmniGraph node.
```python
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Get DateTime")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Get current date and time")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/icon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type)
```
The information is set as metadata by using "node_type.set_metadata".
|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of node|Get DateTime|
|ogn.MetadataKeys.CATEGORIES|Categories name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Get current date and time|
|ogn.MetadataKeys.LANGUAGE|language used|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/icon.svg|
See below for available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html
The icon path is obtained from the Extension path as follows, with "data/icons/icon.svg" appended.
```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
                icon_path = icon_path + '/' + "data/icons/icon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```
Finally, register the "node_type" to which the metadata is assigned.
```python
GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type)
```
The on_connection_type_resolve method is a canned statement.
```python
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
```
### Specify version
After describing the abi class, add the following lines as-is.
These values are for USD Composer 2023.2.2 (Kit 105.1.2).
```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```
This seemed to need to be updated when the Kit version was upgraded.
Otherwise, problems occurred, such as icons not being displayed.
### register method
The register method is a canned statement.
```python
@staticmethod
def register(node_type_class):
GetDateTimeDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(GetDateTimeDatabase.abi, 1)
```
### deregister method
The deregister method specifies "[Extension name].[class name of this node]".
```python
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.GetDateTime")
```
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_RotationByTime.md
# RotationByTime
Given an hour, minute, and second, returns the XYZ of each rotation (in degrees).

## RotationByTime.json
```json
{
"RotationByTime": {
"version": 1,
"categories": "examples",
"description": "Rotation mechanism by time.",
"language": "Python",
"metadata": {
"uiName": "Rotation By Time"
},
"inputs": {
"a1_defaultRotateXYZ": {
"type": "float[3]",
"description": "Default rotateXYZ",
"default": [0.0, 0.0, 0.0],
"metadata": {
"uiName": "Default rotateXYZ"
}
},
"a2_rotationAxis": {
"type": "int",
"description": "Rotation axis (0:X, 0:Y, 0:Z)",
"default": 0,
"metadata": {
"uiName": "Rotation axis"
}
},
"b1_hour": {
"type": "int",
"description": "Hour",
"default": 0,
"metadata": {
"uiName": "Hour"
}
},
"b2_minute": {
"type": "int",
"description": "Minute",
"default": 0,
"metadata": {
"uiName": "Minute"
}
},
"b3_second": {
"type": "int",
"description": "Second",
"default": 0,
"metadata": {
"uiName": "Second"
}
}
},
"outputs": {
"a1_hourRotateXYZ": {
"type": "float[3]",
"description": "Hour rotateXYZ",
"default": [0.0, 0.0, 0.0],
"metadata": {
"uiName": "Hour RotateXYZ"
}
},
"a2_minuteRotateXYZ": {
"type": "float[3]",
"description": "Minute rotateXYZ",
"default": [0.0, 0.0, 0.0],
"metadata": {
"uiName": "Minute RotateXYZ"
}
},
"a3_secondRotateXYZ": {
"type": "float[3]",
"description": "Second rotateXYZ",
"default": [0.0, 0.0, 0.0],
"metadata": {
"uiName": "Second RotateXYZ"
}
}
}
}
}
```

### Inputs
|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_defaultRotateXYZ|float3|Default rotateXYZ|Default rotateXYZ|
|a2_rotationAxis|int|Rotation axis|Rotation axis (0:X, 1:Y, 2:Z)|
|b1_hour|int|Hour|Hour|
|b2_minute|int|Minute|Minute|
|b3_second|int|Second|Second|
The "a1_" or "b1_" at the beginning of the attribute name is used to display the data in ascending order when it is displayed in a graph.
"a1_defaultRotateXYZ" is the initial rotation value of the clock hands provided in the 3D model.

"a2_rotationAxis" is the axis of rotation (0:X, 1:Y, 2:Z).
In the case of the image above, it rotates around the Y axis. In this case, specify 1.
b1_hour, b2_minute, and b3_second are entered as hours, minutes, and seconds.
### Outputs
|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_hourRotateXYZ|float3|Hour rotateXYZ|Hour rotateXYZ|
|a2_minuteRotateXYZ|float3|Minute rotateXYZ|Minute rotateXYZ|
|a3_secondRotateXYZ|float3|Second rotateXYZ|Second rotateXYZ|
Returns the rotational value of an analog clock corresponding to the input hour, minute, and second.
The XYZ of the rotation returned here is assigned to the rotation of the clock hands in the 3D model.
## RotationByTime.py
The rotation of the hands of a clock is calculated.
```python
import numpy as np
import omni.ext
class RotationByTime:
@staticmethod
def compute(db) -> bool:
try:
# Calculate clock rotation from seconds.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a3_secondRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b3_second) / 60.0) * 360.0
# Calculate clock rotation from minutes.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a2_minuteRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b2_minute * 60.0 + db.inputs.b3_second) / (60.0 * 60.0)) * 360.0
# Calculate clock rotation from hours.
if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
v = db.outputs.a1_hourRotateXYZ
v[0] = db.inputs.a1_defaultRotateXYZ[0]
v[1] = db.inputs.a1_defaultRotateXYZ[1]
v[2] = db.inputs.a1_defaultRotateXYZ[2]
v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b1_hour * 60.0 + db.inputs.b2_minute) / (60.0 * 24.0)) * 360.0 * 2.0
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
```
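As a quick sanity check of these formulas (my own example, not project code): at 15:30:00, with a zero default rotation, the hands come out as expected.
```python
hour, minute, second = 15, 30, 0

# Second hand: one revolution per minute.
second_deg = (second / 60.0) * 360.0                                # 0.0
# Minute hand: one revolution per hour.
minute_deg = ((minute * 60.0 + second) / (60.0 * 60.0)) * 360.0     # 180.0
# Hour hand: two revolutions per day, hence the final * 2.0.
hour_deg = ((hour * 60.0 + minute) / (60.0 * 24.0)) * 360.0 * 2.0   # 465.0

print(hour_deg % 360.0, minute_deg, second_deg)  # 105.0 180.0 0.0 (3:30 pm)
```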
## RotationByTimeDatabase.py
For the most part, the process is the same as for "[GetDateTimeDatabase.py](./node_GetDateTime.md)".
"INTERFACE" enumerates attribute data.
```python
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('inputs:a1_defaultRotateXYZ', 'float[3]', 0, 'Default RotateXYZ', 'Default rotateXYZ', {}, True, None, False, ''),
('inputs:a2_rotationAxis', 'int', 0, 'Rotation Axis', 'Rotation axis (0:X, 1:Y, 2:Z)', {}, True, None, False, ''),
('inputs:b1_hour', 'int', 0, 'Hour', 'Hour', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('inputs:b2_minute', 'int', 0, 'Minute', 'Minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('inputs:b3_second', 'int', 0, 'Second', 'Second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
('outputs:a1_hourRotateXYZ', 'float[3]', 0, 'Hour RotateXYZ', 'Hour RotateXYZ', {}, True, None, False, ''),
('outputs:a2_minuteRotateXYZ', 'float[3]', 0, 'Minute RotateXYZ', 'Minute RotateXYZ', {}, True, None, False, ''),
('outputs:a3_secondRotateXYZ', 'float[3]', 0, 'Second RotateXYZ', 'Second RotateXYZ', {}, True, None, False, ''),
])
```
"RotationByTimeDatabase.py" specifies both inputs and outputs.
Note that the attribute type specified as "float3" in the ogn file becomes "float[3]".
### ValuesForInputs
The inputs designation is described in the "ValuesForInputs" class.
```python
class ValuesForInputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"}
"""Helper class that creates natural hierarchical access to input attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second]
self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0]
@property
def a1_defaultRotateXYZ(self):
return self._batchedReadValues[0]
@a1_defaultRotateXYZ.setter
def a1_defaultRotateXYZ(self, value):
self._batchedReadValues[0] = value
@property
def a2_rotationAxis(self):
return self._batchedReadValues[1]
@a2_rotationAxis.setter
def a2_rotationAxis(self, value):
self._batchedReadValues[1] = value
@property
def b1_hour(self):
return self._batchedReadValues[2]
@b1_hour.setter
def b1_hour(self, value):
self._batchedReadValues[2] = value
@property
def b2_minute(self):
return self._batchedReadValues[3]
@b2_minute.setter
def b2_minute(self, value):
self._batchedReadValues[3] = value
@property
def b3_second(self):
return self._batchedReadValues[4]
@b3_second.setter
def b3_second(self, value):
self._batchedReadValues[4] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _prefetch(self):
readAttributes = self._batchedReadAttributes
newValues = _og._prefetch_input_attributes_data(readAttributes)
if len(readAttributes) == len(newValues):
self._batchedReadValues = newValues
```
Specify the attribute names to be used in order in "LOCAL_PROPERTY_NAMES".
```python
LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"}
```
In "\_\_init\_\_", specify "self._attributes.[Attribute name]" as an array.
```python
self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second]
```
Also, put initial values in self._batchedReadValues.
```python
self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0]
```
"a1_defaultRotateXYZ" is a float[3] value, all other values are of type int.
The property getter/setter is specified as follows.
If the attribute type is fixed, simply change the attribute name.
```python
@property
def a1_defaultRotateXYZ(self):
return self._batchedReadValues[0]
@a1_defaultRotateXYZ.setter
def a1_defaultRotateXYZ(self, value):
self._batchedReadValues[0] = value
```
The index of "self.\_batchedReadValues" is a number starting from 0 specified in "self.\_batchedReadAttributes[]".
"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_prefetch" can be copied and pasted as is.
### ValuesForOutputs
The outputs designation is described in the "ValuesForOutputs" class.
```python
class ValuesForOutputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuiteRotateXYZ", "a3_secondRotateXYZ" }
"""Helper class that creates natural hierarchical access to output attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedWriteValues = { }
@property
def a1_hourRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
return data_view.get()
@a1_hourRotateXYZ.setter
def a1_hourRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value
@property
def a2_minuteRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a2_minuteRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a2_minuteRotateXYZ)
return data_view.get()
@a2_minuteRotateXYZ.setter
def a2_minuteRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a2_minuteRotateXYZ] = value
@property
def a3_secondRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a3_secondRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a3_secondRotateXYZ)
return data_view.get()
@a3_secondRotateXYZ.setter
def a3_secondRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a3_secondRotateXYZ] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _commit(self):
_og._commit_output_attributes_data(self._batchedWriteValues)
self._batchedWriteValues = { }
```
Specify the attribute names to be used in order in "LOCAL_PROPERTY_NAMES".
```python
LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuiteRotateXYZ", "a3_secondRotateXYZ" }
```
Specify getter/setter for each attribute.
If the attribute type is fixed, simply change the attribute name.
```python
@property
def a1_hourRotateXYZ(self):
value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
if value:
return value
else:
data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
return data_view.get()
@a1_hourRotateXYZ.setter
def a1_hourRotateXYZ(self, value):
self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value
```
"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_commit" can be copied and pasted as is.
### ValuesForState(og.DynamicAttributeAccess)
The ValuesForState class in "RotationByTimeDatabase" can be reused by copying and pasting it as-is; only the enclosing class name needs to match.
```python
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
```
### \_\_init\_\_
In "\_\_init\_\_", inputs, outputs and state classes are created.
```python
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
self.inputs = RotationByTimeDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
self.outputs = RotationByTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = RotationByTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```
### class abi
Define the connections for the OmniGraph node.
Think of the abi class as the standard flow every node follows.
Basically, the designation to the ABI interface is a canned statement.
```python
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.RotationByTime'
```
Since this Extension is named "ft_lab.OmniGraph.GetDateTime" and this node is "RotationByTime", "ft_lab.OmniGraph.GetDateTime.RotationByTime" is specified as the return value.
The compute method is called when this node is executed.
This, too, is almost a canned statement.
```python
@staticmethod
def compute(context, node):
try:
per_node_data = RotationByTimeDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = RotationByTimeDatabase(node)
per_node_data['_db'] = db
except:
db = RotationByTimeDatabase(node)
try:
compute_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
db.inputs._prefetch()
db.inputs._setting_locked = True
with og.in_compute():
return RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.inputs._setting_locked = False
db.outputs._commit()
return False
```
The compute method of RotationByTime.py is called from "RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db)".
initialize, release, and update_node_version are written as-is; only the class names need to match.
This is also a canned statement.
```python
@staticmethod
def initialize(context, node):
RotationByTimeDatabase._initialize_per_node_data(node)
initialize_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
RotationByTimeDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
```
The initialize_type method specifies information about the OmniGraph node.
```python
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Rotation By Time")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Rotation By Time")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/rotationByTimeIcon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type)
```
The information is set as metadata by using "node_type.set_metadata".
|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of node|Rotation By Time|
|ogn.MetadataKeys.CATEGORIES|Categories name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Rotation By Time|
|ogn.MetadataKeys.LANGUAGE|language used|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/rotationByTimeIcon.svg|
See below for available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html
The icon path is obtained from the Extension path as follows, with "data/icons/rotationByTimeIcon.svg" appended.
```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
                icon_path = icon_path + '/' + "data/icons/rotationByTimeIcon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```
Finally, register the "node_type" to which the metadata is assigned.
```python
RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type)
```
The on_connection_type_resolve method is a canned statement.
```python
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
```
### Specify version
After describing the abi class, add the following lines as-is.
These values are for USD Composer 2023.2.2 (Kit 105.1.2).
```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```
This seemed to need to be updated when the Kit version was upgraded.
Otherwise, problems occurred, such as icons not being displayed.
### register method
The register method is a canned statement.
```python
@staticmethod
def register(node_type_class):
RotationByTimeDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(RotationByTimeDatabase.abi, 1)
```
### deregister method
The deregister method specifies "[Extension name].[class name of this node]".
```python
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.RotationByTime")
```
ft-lab/Omniverse_OmniGraph_ClockSample/docs/Modeling3D.md
# 3D models
I modeled 3D models of analog and digital clocks in Blender.
I used the Blender 3.6 alpha USD branch which can be launched from the Omniverse Launcher.
This is because I want to export the correct USD from Blender.
I exported the modeled shapes in Blender in fbx format and textured them in Substance 3D Painter.
I also imported Blender exported USD files into Omniverse Create to edit the hierarchy and reassign materials.
## Clock

Analog clocks use hour, minute, and second hands.
To organize this part of the model, I imported it once into Omniverse Create and cleaned up the hierarchy.

The final usd file is placed at "[usds/Clock](../usds/Clock)".
Check which Prims correspond to the hour, minute, and second hands.
## Digital Clock

For the digital clock, note the AM/PM indicators and the 7-segment LEDs on the LCD.
These are driven by showing/hiding each of their meshes.
For AM/PM, a material with an Opacity texture is assigned to a quad mesh.
To organize this, I imported it into Omniverse Create and edited it.

"SevenSegmentLED1", "SevenSegmentLED2", "SevenSegmentLED3", "SevenSegmentLED4", and a mesh of parts A through G as children.
The Mesh of the letters on this LCD was placed with a slight float in the normal direction.
The final usd file is placed at "[usds/ClockDigital](../usds/ClockDigital)".
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_OutputToLCD.md
# OutputToLCD
This node controls a virtual 7-segment LED LCD screen.

## OutputToLCD.ogn
```json
{
"OutputToLCD": {
"version": 1,
"categories": "examples",
"description": "Time output to LCD (hh:mm).",
"language": "Python",
"metadata": {
"uiName": "Time output to LCD (hh:mm)"
},
"inputs": {
"a1_hourNum10Prim": {
"type": "token",
"description": "Tenth digit of the hour Prim",
"metadata": {
"uiName": "HourNum10 Prim"
}
},
"a2_hourNum1Prim": {
"type": "token",
"description": "First digit of the hour Prim",
"metadata": {
"uiName": "HourNum1 Prim"
}
},
"b1_minuteNum10Prim": {
"type": "token",
"description": "Tenth digit of the minute Prim",
"metadata": {
"uiName": "MinuteNum10 Prim"
}
},
"b2_minuteNum1Prim": {
"type": "token",
"description": "First digit of the minute Prim",
"metadata": {
"uiName": "MinuteNum1 Prim"
}
},
"c1_amPrim": {
"type": "token",
"description": "AM Prim",
"metadata": {
"uiName": "AM Prim"
}
},
"c2_pmPrim": {
"type": "token",
"description": "PM Prim",
"metadata": {
"uiName": "PM Prim"
}
},
"d1_hour": {
"type": "int",
"description": "Hour",
"default": 0,
"metadata": {
"uiName": "Hour"
}
},
"d2_minute": {
"type": "int",
"description": "Minute",
"default": 0,
"metadata": {
"uiName": "Minute"
}
},
"d3_second": {
"type": "int",
"description": "Second",
"default": 0,
"metadata": {
"uiName": "Second"
}
}
},
"outputs": {
}
}
}
```

### Inputs
|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_hourNum10Prim|token|HourNum10 Prim|Tenth digit of the hour Prim|
|a2_hourNum1Prim|token|HourNum1 Prim|First digit of the hour Prim|
|b1_minuteNum10Prim|token|MinuteNum10 Prim|Tenth digit of the minute Prim|
|b2_minuteNum1Prim|token|MinuteNum1 Prim|First digit of the minute Prim|
|c1_amPrim|token|AM Prim|AM Prim|
|c2_pmPrim|token|PM Prim|PM Prim|
|d1_hour|int|Hour|Hour|
|d2_minute|int|Minute|Minute|
|d3_second|int|Second|Second|
The "a1_" or "b1_" at the beginning of the attribute name is used to display the data in ascending order when it is displayed in a graph.
Those that specify a "token" type will be connected to the Prim path.
In total, 6 Prims will be connected to this node.

Four prims that imitate "7-segment LEDs" are placed as numerical components.
One of the "7-segment LEDs" consists of four components, A, B, C, D, E, F, and G, as shown below.

The child Prim names use the same letters A through G.
These are shown or hidden to display a numeral.
The numbers were expressed in 8 bits as follows.
The lower 7 bits are assigned to ABCDEFG respectively.
|Image|Bit value|Hexadecimal|
|---|---|---|
|<img src="./images/num_0.jpg" height=40 />|01111110|0x7e|
|<img src="./images/num_1.jpg" height=40 />|00110000|0x30|
|<img src="./images/num_2.jpg" height=40 />|01101101|0x6d|
|<img src="./images/num_3.jpg" height=40 />|01111001|0x79|
|<img src="./images/num_4.jpg" height=40 />|00110011|0x33|
|<img src="./images/num_5.jpg" height=40 />|01011011|0x5b|
|<img src="./images/num_6.jpg" height=40 />|01011111|0x5f|
|<img src="./images/num_7.jpg" height=40 />|01110000|0x70|
|<img src="./images/num_8.jpg" height=40 />|01111111|0x7f|
|<img src="./images/num_9.jpg" height=40 />|01111011|0x7b|
d1_hour, d2_minute, and d3_second are entered as hours, minutes, and seconds.
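A small sketch (mine, not project code) of how such a mask decodes into per-segment visibility, using the A-G naming above:
```python
name_list = ["A", "B", "C", "D", "E", "F", "G"]
num_mask_list = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]

def segments_for_digit(digit: int):
    """Return the segment names that should be visible for a digit (0-9)."""
    mask = num_mask_list[digit % 10]
    return [name for i, name in enumerate(name_list) if mask & (0x40 >> i)]

print(segments_for_digit(2))  # ['A', 'B', 'D', 'E', 'G']
```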
## OutputToLCD.py
Controls the display/non-display of the AM and PM panels and the 2-digit 7-segment LED for the hour and minute, respectively.
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import numpy as np
import omni.ext
class OutputToLCD:
@staticmethod
def compute(db) -> bool:
try:
hour = db.inputs.d1_hour
minute = db.inputs.d2_minute
second = db.inputs.d3_second
# xABCDEFG => 0b01111110 = 0x7e = '0'
nameList = ["A", "B", "C", "D", "E", "F", "G"]
numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]
# Get stage.
stage = omni.usd.get_context().get_stage()
# Show/hide "AM"
if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c1_amPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible')
# Show/hide "PM"
if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible')
# Hour : 10th digit.
hour12 = hour if (hour < 12) else (hour - 12)
if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "":
basePrimPath = db.inputs.a1_hourNum10Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12 / 10) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Hour : 1st digit.
if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "":
basePrimPath = db.inputs.a2_hourNum1Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
# Minute : 10th digit.
if db.inputs.b1_minuteNum10Prim != None and db.inputs.b1_minuteNum10Prim != "":
basePrimPath = db.inputs.b1_minuteNum10Prim
shiftV = 0x40
maskV = numMaskList[(int)(minute / 10) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Minute : 1st digit.
if db.inputs.b2_minuteNum1Prim != None and db.inputs.b2_minuteNum1Prim != "":
basePrimPath = db.inputs.b2_minuteNum1Prim
shiftV = 0x40
maskV = numMaskList[(int)(minute) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
except TypeError as error:
db.log_error(f"Processing failed : {error}")
return False
return True
```
The following retrieves hours, minutes, and seconds.
```python
hour = db.inputs.d1_hour
minute = db.inputs.d2_minute
second = db.inputs.d3_second
```
### AM/PM
The Prim paths specified as "token" in the ogn file are received as strings.
The following shows/hides the AM prim.
Its Prim path is in "db.inputs.c1_amPrim"; use "db.inputs.c2_pmPrim" for the PM prim path.
```python
# Get stage.
stage = omni.usd.get_context().get_stage()
# Show/hide "AM"
if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c1_amPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible')
# Show/hide "PM"
if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "":
prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible')
```
"stage.GetPrimAtPath" is used to obtain Prim.
"prim.IsValid()" is True, the prim exists.
For AM, the time is before 12, so it will be displayed then.
In Visibility, specify "inherited" to show or "invisible" to hide.
PM is the reverse of AM.
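The hour-to-visibility mapping can be tabulated quickly (a check of the logic above, not project code):
```python
for hour in (0, 9, 12, 23):
    am_vis = 'inherited' if hour < 12 else 'invisible'
    pm_vis = 'inherited' if hour >= 12 else 'invisible'
    print(hour, am_vis, pm_vis)
# 0 inherited invisible
# 9 inherited invisible
# 12 invisible inherited
# 23 invisible inherited
```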
### Display 2-digit numbers
Hour (db.inputs.d1_hour) is a number from 0-23.
nameList is an array of the letters 'A' through 'G'.
numMaskList holds the show/hide bit mask for those seven segments, one mask per numeral.
Together they display the digits 0-9.
```python
nameList = ["A", "B", "C", "D", "E", "F", "G"]
numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]
```
The hour is split into its tens and ones digits, and each of the 'A' through 'G' children of the target Prim is shown or hidden accordingly.
```python
# Hour : 10th digit.
hour12 = hour if (hour < 12) else (hour - 12)
if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "":
basePrimPath = db.inputs.a1_hourNum10Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12 / 10) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
            # Hour : 1st digit.
if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "":
basePrimPath = db.inputs.a2_hourNum1Prim
shiftV = 0x40
maskV = numMaskList[(int)(hour12) % 10]
for i in range(7):
primPath = f"{basePrimPath}/{nameList[i]}"
prim = stage.GetPrimAtPath(primPath)
if prim.IsValid():
primImageable = UsdGeom.Imageable(prim)
primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
shiftV >>= 1
```
The same process is applied to the minute.
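Since the four digit blocks differ only in their base prim path and digit value, they could be factored into a helper like this (a sketch under that assumption, not the project's actual code):
```python
from pxr import UsdGeom

def set_seven_segment_digit(stage, base_prim_path: str, digit: int):
    """Show/hide the A-G child prims under base_prim_path to display a digit."""
    name_list = ["A", "B", "C", "D", "E", "F", "G"]
    num_mask_list = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]
    if not base_prim_path:
        return
    mask = num_mask_list[digit % 10]
    shift = 0x40
    for name in name_list:
        prim = stage.GetPrimAtPath(f"{base_prim_path}/{name}")
        if prim.IsValid():
            imageable = UsdGeom.Imageable(prim)
            imageable.GetVisibilityAttr().Set(
                'inherited' if (mask & shift) != 0 else 'invisible')
        shift >>= 1

# Usage inside compute(), with hour12 and minute as above:
# set_seven_segment_digit(stage, db.inputs.a1_hourNum10Prim, (hour12 // 10) % 10)
# set_seven_segment_digit(stage, db.inputs.a2_hourNum1Prim, hour12 % 10)
```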
## OutputToLCDDatabase.py
For the most part, the process is the same as for "[GetDateTimeDatabase.py](./node_GetDateTime.md)".
"INTERFACE" enumerates attribute data.
```python
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
('inputs:a1_hourNum10Prim', 'token', 0, 'HourNum10 Prim', 'HourNum10 Prim', {}, True, None, False, ''),
('inputs:a2_hourNum1Prim', 'token', 0, 'HourNum1 Prim', 'HourNum1 Prim', {}, True, None, False, ''),
('inputs:b1_minuteNum10Prim', 'token', 0, 'MinuteNum10 Prim', 'MinuteNum10 Prim', {}, True, None, False, ''),
('inputs:b2_minuteNum1Prim', 'token', 0, 'MinuteNum1 Prim', 'MinuteNum1 Prim', {}, True, None, False, ''),
('inputs:c1_amPrim', 'token', 0, 'AM Prim', 'AM Prim', {}, True, None, False, ''),
('inputs:c2_pmPrim', 'token', 0, 'PM Prim', 'PM Prim', {}, True, None, False, ''),
('inputs:d1_hour', 'int', 0, 'Hour', 'Hour', {}, True, 0, False, ''),
('inputs:d2_minute', 'int', 0, 'Minute', 'Minute', {}, True, 0, False, ''),
('inputs:d3_second', 'int', 0, 'Second', 'Second', {}, True, 0, False, ''),
])
```
'inputs:a1_hourNum10Prim', 'inputs:a2_hourNum1Prim', 'inputs:b1_minuteNum10Prim', 'inputs:b2_minuteNum1Prim', 'inputs:c1_amPrim', and 'inputs:c2_pmPrim' accept Prim paths, so their type is token.
### ValuesForInputs
The inputs designation is described in the "ValuesForInputs" class.
```python
class ValuesForInputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"}
"""Helper class that creates natural hierarchical access to input attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second]
self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0]
@property
def a1_hourNum10Prim(self):
return self._batchedReadValues[0]
@a1_hourNum10Prim.setter
def a1_hourNum10Prim(self, value):
self._batchedReadValues[0] = value
@property
def a2_hourNum1Prim(self):
return self._batchedReadValues[1]
@a2_hourNum1Prim.setter
def a2_hourNum1Prim(self, value):
self._batchedReadValues[1] = value
@property
def b1_minuteNum10Prim(self):
return self._batchedReadValues[2]
@b1_minuteNum10Prim.setter
def b1_minuteNum10Prim(self, value):
self._batchedReadValues[2] = value
@property
def b2_minuteNum1Prim(self):
return self._batchedReadValues[3]
@b2_minuteNum1Prim.setter
def b2_minuteNum1Prim(self, value):
self._batchedReadValues[3] = value
@property
def c1_amPrim(self):
return self._batchedReadValues[4]
@c1_amPrim.setter
def c1_amPrim(self, value):
self._batchedReadValues[4] = value
@property
def c2_pmPrim(self):
return self._batchedReadValues[5]
@c2_pmPrim.setter
def c2_pmPrim(self, value):
self._batchedReadValues[5] = value
@property
def d1_hour(self):
return self._batchedReadValues[6]
@d1_hour.setter
def d1_hour(self, value):
self._batchedReadValues[6] = value
@property
def d2_minute(self):
return self._batchedReadValues[7]
@d2_minute.setter
def d2_minute(self, value):
self._batchedReadValues[7] = value
@property
def d3_second(self):
return self._batchedReadValues[8]
@d3_second.setter
def d3_second(self, value):
self._batchedReadValues[8] = value
def __getattr__(self, item: str):
if item in self.LOCAL_PROPERTY_NAMES:
return object.__getattribute__(self, item)
else:
return super().__getattr__(item)
def __setattr__(self, item: str, new_value):
if item in self.LOCAL_PROPERTY_NAMES:
object.__setattr__(self, item, new_value)
else:
super().__setattr__(item, new_value)
def _prefetch(self):
readAttributes = self._batchedReadAttributes
newValues = _og._prefetch_input_attributes_data(readAttributes)
if len(readAttributes) == len(newValues):
self._batchedReadValues = newValues
```
Specify the attribute names to be used in order in "LOCAL_PROPERTY_NAMES".
```python
LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"}
```
In "\_\_init\_\_", specify "self._attributes.[Attribute name]" as an array.
```python
self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second]
```
Also, put initial values in self._batchedReadValues.
```python
self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0]
```
Specify "" for token. All other values are of type int.
The property getter/setter is specified as follows.
If the attribute type is fixed, simply change the attribute name.
```python
@property
def a1_hourNum10Prim(self):
return self._batchedReadValues[0]
@a1_hourNum10Prim.setter
def a1_hourNum10Prim(self, value):
self._batchedReadValues[0] = value
```
The index of "self.\_batchedReadValues" is a number starting from 0 specified in "self.\_batchedReadAttributes[]".
"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_prefetch" can be copied and pasted as is.
### ValuesForState(og.DynamicAttributeAccess)
The ValuesForState class in "OutputToLCDDatabase" can be reused by copying and pasting it as-is; only the enclosing class name needs to match.
```python
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
```
### \_\_init\_\_
In "\_\_init\_\_", inputs, outputs and state classes are created.
```python
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
self.inputs = OutputToLCDDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = OutputToLCDDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```
This OutputToLCDDatabase class has no outputs, so no outputs accessor is created.
### class abi
Define the connections for the OmniGraph node.
Think of the abi class as the standard flow every node follows.
Basically, the designation to the ABI interface is a canned statement.
```python
class abi:
@staticmethod
def get_node_type():
get_node_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
if callable(get_node_type_function):
return get_node_type_function()
return 'ft_lab.OmniGraph.GetDateTime.OutputToLCD'
```
Since this Extension is named "ft_lab.OmniGraph.GetDateTime" and this node is "OutputToLCD", "ft_lab.OmniGraph.GetDateTime.OutputToLCD" is specified as the return value.
The compute method is called when this node is executed.
This, too, is almost a canned statement.
```python
@staticmethod
def compute(context, node):
try:
per_node_data = OutputToLCDDatabase.PER_NODE_DATA[node.node_id()]
db = per_node_data.get('_db')
if db is None:
db = OutputToLCDDatabase(node)
per_node_data['_db'] = db
except:
db = OutputToLCDDatabase(node)
try:
compute_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'compute', None)
if callable(compute_function) and compute_function.__code__.co_argcount > 1:
return compute_function(context, node)
db.inputs._prefetch()
db.inputs._setting_locked = True
with og.in_compute():
return OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db)
except Exception as error:
stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
finally:
db.inputs._setting_locked = False
#db.outputs._commit()
return False
```
The compute method of OutputToLCD.py is called from "OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db)".
initialize, release, and update_node_version are written as-is; only the class names need to match.
This is also a canned statement.
```python
@staticmethod
def initialize(context, node):
OutputToLCDDatabase._initialize_per_node_data(node)
initialize_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize', None)
if callable(initialize_function):
initialize_function(context, node)
@staticmethod
def release(node):
release_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'release', None)
if callable(release_function):
release_function(node)
OutputToLCDDatabase._release_per_node_data(node)
@staticmethod
def update_node_version(context, node, old_version, new_version):
update_node_version_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
if callable(update_node_version_function):
return update_node_version_function(context, node, old_version, new_version)
return False
```
The initialize_type method specifies information about the OmniGraph node.
```python
@staticmethod
def initialize_type(node_type):
initialize_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
needs_initializing = True
if callable(initialize_type_function):
needs_initializing = initialize_type_function(node_type)
if needs_initializing:
node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Time output to LCD")
node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Time output to LCD")
node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")
# Set Icon(svg).
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/outputToLCD.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type)
```
The information is set as metadata by using "node_type.set_metadata".
|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of node|Time output to LCD|
|ogn.MetadataKeys.CATEGORIES|Categories name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Time output to LCD|
|ogn.MetadataKeys.LANGUAGE|language used|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/outputToLCD.svg|
See below for available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html
The icon path is obtained from the Extension path as follows, with "data/icons/outputToLCD.svg" appended.
```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
                icon_path = icon_path + '/' + "data/icons/outputToLCD.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```
Finally, register the "node_type" to which the metadata is assigned.
```python
OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type)
```
The on_connection_type_resolve method is a canned statement.
```python
@staticmethod
def on_connection_type_resolve(node):
on_connection_type_resolve_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
if callable(on_connection_type_resolve_function):
on_connection_type_resolve_function(node)
```
### Specify version
After describing the abi class, add the following lines as-is.
These values are for USD Composer 2023.2.2 (Kit 105.1.2).
```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```
This seemed to need to be updated when the Kit version was upgraded.
Otherwise, problems occurred, such as icons not being displayed.
### register method
The register method is a canned statement.
```python
@staticmethod
def register(node_type_class):
OutputToLCDDatabase.NODE_TYPE_CLASS = node_type_class
og.register_node_type(OutputToLCDDatabase.abi, 1)
```
### deregister method
The deregister method specifies "[Extension name].[class name of this node]".
```python
@staticmethod
def deregister():
og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.OutputToLCD")
```
ft-lab/Omniverse_OmniGraph_ClockSample/docs/ExtensionStructure.md
# Extension Structure
The extension has the following structure.
Extension name is "ft_lab.OmniGraph.GetDateTime".
```
[ft_lab.OmniGraph.GetDateTime]
[config]
extension.toml
[data]
[icons]
icon.svg
outputToLCD.svg
rotationByTimeIcon.svg
icon.png
preview.jpg
[docs]
CHANGELOG.md
index.rst
README.md
[ft_lab]
[OmniGraph]
[GetDateTime]
[nodes]
GetDateTime.ogn
GetDateTime.py
OutputToLCD.ogn
OutputToLCD.py
RotationByTime.ogn
RotationByTime.py
[ogn]
__init__.py
GetDateTimeDatabase.py
OutputToLCDDatabase.py
RotationByTimeDatabase.py
__init__.py
extension.py
```
The Extension configuration file is "extension.toml".
This section covers only the parts related to creating custom OmniGraph nodes in an Extension.
## Files per node
The data for OmniGraph nodes uses files with the extension ogn.
If there is an ogn file called "GetDateTime.ogn", the node name is "GetDateTime".
One node consists of three files.
```
[nodes]
GetDateTime.ogn
GetDateTime.py
[ogn]
GetDateTimeDatabase.py
```
|File|Description|
|---|---|
|GetDateTime.ogn|Node configuration in json format|
|GetDateTime.py|Describes the implementation part of the node|
|GetDateTimeDatabase.py|Describe the implementation as a custom node.<br>It is almost always a canned statement.|
The database file is named "[node name]Database.py"; here, "GetDateTimeDatabase.py".
## extension.toml
```
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["*Database.py","*/ogn*"]
# We only depend on testing framework currently:
[dependencies]
"omni.graph" = {}
"omni.graph.nodes" = {}
"omni.graph.tools" = {}
```
In [fswatcher.patterns], add the file patterns used by the OmniGraph nodes.
The above can be copied and pasted as-is.
In [dependencies], specify the other Extensions used with OmniGraph.
These are enabled automatically if they are disabled when this Extension is loaded.
## Icons used in graph
In "data/icons", icons used in nodes are stored as SVG files.
```
[data]
[icons]
ft_lab.OmniGraph.GetDateTime.icon.svg
ft_lab.OmniGraph.GetDateTime.outputToLCD.svg
ft_lab.OmniGraph.GetDateTime.rotationByTimeIcon.svg
```
Icon names have been standardized with the following designations.
```
[Project name].[Icon name].svg
```
These icons are used in the node graph in Omniverse Create at the following locations.

I created the svg files in Affinity Designer (https://affinity.serif.com/).
## Nodes
The following three nodes exist.
Please also see "[Description of OmniGraph nodes](../OmniGraphNodes.md)" for node descriptions.
|Node name|Description|
|---|---|
|[GetDateTime](./node_GetDateTime.md)|Get the current local date and time.|
|[RotationByTime](./node_RotationByTime.md)|Given an hour, minute, and second, returns the XYZ of each rotation (in degrees).|
|[OutputToLCD](./node_OutputToLCD.md)|This node controls a virtual 7-segment LED LCD screen.|
ft-lab/Omniverse_extension_SetOrigin/update_log.md
# Update log
## Set Origin v.0.0.1 [08/11/2022]
* Adjustments for Extension Manager
## Set Origin v.0.0.1 [04/28/2022]
* First version.
ft-lab/Omniverse_extension_SetOrigin/readme.md
# Omniverse Extension : "Set Origin"
[Japanese readme](./readme_jp.md)
Changes the center position of the rotation or scale for the selected Mesh or Xform.

## Operating Environment
* Windows 10/Ubuntu 20.04
* Omniverse Create 2022.1.1 (Omniverse Kit 103)
* Omniverse Code 2022.1.0
## Usage
1. Copy "ft_lab.Tools.SetOrigin" to the exts folder in Omniverse.
(ov/pkg/create-2022.1.1/exts , etc.)
2. Run Omniverse Create.
3. Activate "ft_lab.Tools.SetOrigin" in the Extension window.

4. Select Mesh or Xform.
5. Select "Tools"-"Set Origin"-"Center of Geometry" from the menu to move the center of the manipulator to the center of the geometry.
6. Select "Tools"-"Set Origin"-"Lower center of Geometry" from the menu to move the center of the manipulator to the lower center of the geometry.

## Additional command in Python
This Set Origin function adjusts the Translate and Pivot of the Prim.
Add "ToolSetOrigin" to omni.kit.commands.
The argument "prim" specifies Usd.Prim.
The argument "center_position" specifies the center position in world coordinates.
```python
import omni.kit.commands
from pxr import Usd, Gf
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute('ToolSetOrigin',
prim=stage.GetPrimAtPath("/World/xxx"),
center_position=Gf.Vec3f(50.0, -50.0, 0.0))
```
## Script reference in Omniverse Extension
[https://github.com/ft-lab/omniverse_sample_scripts](https://github.com/ft-lab/omniverse_sample_scripts)
## Update log
[Update log](./update_log.md)
| 1,731 | Markdown | 29.385964 | 146 | 0.720971 |
ft-lab/Omniverse_extension_SetOrigin/readme_jp.md | # Omniverse Extension : "Set Origin"
[English readme](./readme.md)
Changes the center position of the rotation or scale for the selected Mesh or Xform.

## Operating Environment
* Windows 10/Ubuntu 20.04
* Omniverse Create 2022.1.1 (Omniverse Kit 103)
* Omniverse Code 2022.1.0
## Usage
1. Copy "ft_lab.Tools.SetOrigin" to the exts folder in Omniverse.
(ov/pkg/create-2022.1.1/exts, etc.)
2. Run Omniverse Create.
3. Activate "ft_lab.Tools.SetOrigin" in the Extension window.

4. Select Mesh or Xform.
5. Select "Tools"-"Set Origin"-"Center of Geometry" from the menu to move the center of the manipulator to the center of the geometry.
6. Select "Tools"-"Set Origin"-"Lower center of Geometry" from the menu to move the center of the manipulator to the lower center of the geometry.

## Additional command in Python
The Set Origin function adjusts the Translate and Pivot of the Prim.
It adds "ToolSetOrigin" to omni.kit.commands.
The argument "prim" specifies Usd.Prim.
The argument "center_position" specifies the center position in world coordinates.
```python
import omni.kit.commands
from pxr import Usd, Gf
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute('ToolSetOrigin',
prim=stage.GetPrimAtPath("/World/xxx"),
center_position=Gf.Vec3f(50.0, -50.0, 0.0))
```
## Script reference in Omniverse Extension
[https://github.com/ft-lab/omniverse_sample_scripts](https://github.com/ft-lab/omniverse_sample_scripts)
## Update log
[Update log](./update_log.md)
| 1,440 | Markdown | 25.685185 | 108 | 0.728472 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/extension.py | from pxr import Usd, UsdGeom, UsdSkel, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.ext
import omni.usd
import omni.kit.menu.utils
import omni.kit.undo
import omni.kit.commands
from omni.kit.menu.utils import MenuItemDescription
import asyncio
from .scripts.SetOrigin import SetOrigin
# ----------------------------------------------------.
class SetOriginExtension (omni.ext.IExt):
# Menu list.
_menu_list = None
_sub_menu_list = None
# Menu name.
_menu_name = "Tools"
# ------------------------------------------.
# Initialize menu.
# ------------------------------------------.
def init_menu (self):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
def menu_select (mode):
if mode == 0:
setOrigin = SetOrigin()
setOrigin.doCenterOfGeometry()
if mode == 1:
setOrigin = SetOrigin()
setOrigin.doLowerCenterOfGeometry()
self._sub_menu_list = [
MenuItemDescription(name="Center of Geometry", onclick_fn=lambda: menu_select(0)),
MenuItemDescription(name="Lower center of Geometry", onclick_fn=lambda: menu_select(1)),
]
self._menu_list = [
MenuItemDescription(name="Set Origin", sub_menu=self._sub_menu_list),
]
# Rebuild with additional menu items.
omni.kit.menu.utils.add_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
# ------------------------------------------.
# Term menu.
# It seems that the additional items in the top menu will not be removed.
# ------------------------------------------.
def term_menu (self):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
# Remove and rebuild the added menu items.
omni.kit.menu.utils.remove_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
# ------------------------------------------.
# ------------------------------------------.
# Extension startup.
# ------------------------------------------.
def on_startup (self, ext_id):
# Initialize menu.
self.init_menu()
# ------------------------------------------.
# Extension shutdown.
# ------------------------------------------.
def on_shutdown(self):
# Term menu.
self.term_menu()
| 2,607 | Python | 31.6 | 100 | 0.498274 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/TransformUtil.py | from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.kit.commands
# ---------------------------.
# Set Translate.
# ---------------------------.
def TUtil_SetTranslate (prim : Usd.Prim, tV : Gf.Vec3f):
trans = prim.GetAttribute("xformOp:translate").Get()
if trans != None:
# Specify a value for each type.
if type(trans) == Gf.Vec3f:
prim.GetAttribute("xformOp:translate").Set(Gf.Vec3f(tV))
elif type(trans) == Gf.Vec3d:
prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(tV))
else:
# xformOpOrder is also updated.
xformAPI = UsdGeom.XformCommonAPI(prim)
xformAPI.SetTranslate(Gf.Vec3d(tV))
# ---------------------------.
# Set Scale.
# ---------------------------.
def TUtil_SetScale (prim : Usd.Prim, sV : Gf.Vec3f):
scale = prim.GetAttribute("xformOp:scale").Get()
if scale != None:
# Specify a value for each type.
if type(scale) == Gf.Vec3f:
prim.GetAttribute("xformOp:scale").Set(Gf.Vec3f(sV))
elif type(scale) == Gf.Vec3d:
prim.GetAttribute("xformOp:scale").Set(Gf.Vec3d(sV))
else:
# xformOpOrder is also updated.
xformAPI = UsdGeom.XformCommonAPI(prim)
xformAPI.SetScale(Gf.Vec3f(sV))
# ---------------------------.
# Set Rotate.
# ---------------------------.
def TUtil_SetRotate (prim : Usd.Prim, rV : Gf.Vec3f):
# Get rotOrder.
# If rotation does not exist, rotOrder = UsdGeom.XformCommonAPI.RotationOrderXYZ.
xformAPI = UsdGeom.XformCommonAPI(prim)
time_code = Usd.TimeCode.Default()
translation, rotation, scale, pivot, rotOrder = xformAPI.GetXformVectors(time_code)
# Convert rotOrder to "xformOp:rotateXYZ" etc.
t = xformAPI.ConvertRotationOrderToOpType(rotOrder)
rotateAttrName = "xformOp:" + UsdGeom.XformOp.GetOpTypeToken(t)
# Set rotate.
rotate = prim.GetAttribute(rotateAttrName).Get()
if rotate != None:
# Specify a value for each type.
if type(rotate) == Gf.Vec3f:
prim.GetAttribute(rotateAttrName).Set(Gf.Vec3f(rV))
elif type(rotate) == Gf.Vec3d:
prim.GetAttribute(rotateAttrName).Set(Gf.Vec3d(rV))
else:
# xformOpOrder is also updated.
xformAPI.SetRotate(Gf.Vec3f(rV), rotOrder)
# ---------------------------.
# Set Pivot.
# ---------------------------.
def TUtil_SetPivot (prim : Usd.Prim, pV : Gf.Vec3f):
pivot = prim.GetAttribute("xformOp:translate:pivot").Get()
if pivot != None:
# Specify a value for each type.
if type(pivot) == Gf.Vec3f:
prim.GetAttribute("xformOp:translate:pivot").Set(Gf.Vec3f(pV))
elif type(pivot) == Gf.Vec3d:
prim.GetAttribute("xformOp:translate:pivot").Set(Gf.Vec3d(pV))
else:
# xformOpOrder is also updated.
# ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"]
# The following do not work correctly?
#xformAPI = UsdGeom.XformCommonAPI(prim)
#xformAPI.SetPivot(Gf.Vec3f(pV))
prim.CreateAttribute("xformOp:translate:pivot", Sdf.ValueTypeNames.Float3, False).Set(Gf.Vec3f(pV))
# ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"]
transformOrder = prim.GetAttribute("xformOpOrder").Get()
orderList = []
for sV in transformOrder:
orderList.append(sV)
orderList.append("xformOp:translate:pivot")
orderList.append("!invert!xformOp:translate:pivot")
prim.GetAttribute("xformOpOrder").Set(orderList)
# -------------------------------------------.
# Check the order of Pivot in OpOrder
# @return -1 ... unknown
# 0 ... No pivot.
# 1 ... ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"]
# 2 ... ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"]
# -------------------------------------------.
def TUtil_ChkOrderOfPivot (prim : Usd.Prim):
if prim == None:
        return -1
transformOrder = prim.GetAttribute("xformOpOrder").Get()
orderList = []
for sV in transformOrder:
orderList.append(sV)
orderLen = len(orderList)
pos1 = -1
pos2 = -1
for i in range(orderLen):
if orderList[i] == "xformOp:translate:pivot":
pos1 = i
elif orderList[i] == "!invert!xformOp:translate:pivot":
pos2 = i
if pos1 < 0 or pos2 < 0:
return 0
# ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"]
if pos1 == 1 and pos2 == orderLen - 1:
return 1
# ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"]
if pos1 == orderLen - 2 and pos2 == orderLen - 1:
return 2
return -1
# -------------------------------------------.
# Delete Pivot.
# -------------------------------------------.
def TUtil_DeletePivot (prim : Usd.Prim):
if prim == None:
return
path = prim.GetPath().pathString + ".xformOp:translate:pivot"
omni.kit.commands.execute('RemoveProperty', prop_path=path)
transformOrder = prim.GetAttribute("xformOpOrder").Get()
if transformOrder != None:
orderList = []
for sV in transformOrder:
if sV == "xformOp:translate:pivot" or sV == "!invert!xformOp:translate:pivot":
continue
orderList.append(sV)
prim.GetAttribute("xformOpOrder").Set(orderList)
| 5,746 | Python | 36.318182 | 138 | 0.59102 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/MathUtil.py | # -----------------------------------------------------.
# Math functions.
# -----------------------------------------------------.
from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf
# Get local matrix.
def GetLocalMatrix (prim : Usd.Prim):
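    # local(prim) = world(prim) * inverse(world(parent))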
xformCache = UsdGeom.XformCache()
curM = xformCache.GetLocalToWorldTransform(prim)
parentPrim = prim.GetParent()
matrix = curM * xformCache.GetLocalToWorldTransform(parentPrim).GetInverse()
return matrix
# Get world matrix.
def GetWorldMatrix (prim : Usd.Prim):
xformCache = UsdGeom.XformCache()
return xformCache.GetLocalToWorldTransform(prim)
| 617 | Python | 33.333331 | 80 | 0.606159 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/CalcWorldBoundingBox.py | # -----------------------------------------------------.
# Calculate bounding box in world coordinates.
# -----------------------------------------------------.
from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf
def CalcWorldBoundingBox (prim : Usd.Prim):
# Calc world boundingBox.
bboxCache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default"])
bboxD = bboxCache.ComputeWorldBound(prim).ComputeAlignedRange()
bb_min = Gf.Vec3f(bboxD.GetMin())
bb_max = Gf.Vec3f(bboxD.GetMax())
return bb_min, bb_max
| 537 | Python | 34.866664 | 70 | 0.55121 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/SetOrigin.py | # -----------------------------------------------------.
# Change the center.
# -----------------------------------------------------.
from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.kit.commands
import omni.kit.undo
from .CalcWorldBoundingBox import *
from .MathUtil import *
from .TransformUtil import *
# Check if Prim can handle.
def _checkPrim (prim : Usd.Prim):
if prim == None:
return False
if prim.IsA(UsdGeom.Mesh) == False and prim.IsA(UsdGeom.Xform) == False:
return False
# Skip for reference.
#if prim.HasAuthoredReferences():
# return False
return True
# ------------------------------------------------------------------------.
# Change Mesh Center
# ------------------------------------------------------------------------.
class ToolSetOrigin (omni.kit.commands.Command):
_prim = None
_centerWPos = None
_targetCenterWPos = None
_prevTranslate = None
_prevPivot = None
# prim : Target prim.
# center_position : Position of the center in world coordinates.
def __init__ (self, prim : Usd.Prim, center_position : Gf.Vec3f):
self._prim = prim
self._targetCenterWPos = center_position
# Calculate world center from bounding box.
bbMin, bbMax = CalcWorldBoundingBox(prim)
self._centerWPos = (bbMin + bbMax) * 0.5
# Execute process.
def do (self):
if _checkPrim(self._prim) == False:
return
self._prevTranslate = self._prim.GetAttribute("xformOp:translate").Get()
if self._prevTranslate == None:
self._prevTranslate = Gf.Vec3f(0, 0, 0)
self._prevPivot = self._prim.GetAttribute("xformOp:translate:pivot").Get()
localM = GetWorldMatrix(self._prim).GetInverse()
centerPosL = localM.Transform(self._targetCenterWPos)
TUtil_SetPivot(self._prim, Gf.Vec3f(centerPosL))
# Calculate world center from bounding box.
bbMin, bbMax = CalcWorldBoundingBox(self._prim)
bbCenter = (bbMin + bbMax) * 0.5
# Recalculate the center position in world coordinates and correct for any misalignment.
ddV = Gf.Vec3f(bbCenter - self._centerWPos)
fMin = 1e-6
if abs(ddV[0]) > fMin or abs(ddV[1]) > fMin or abs(ddV[2]) > fMin:
parentLocalM = GetWorldMatrix(self._prim.GetParent()).GetInverse()
p1 = parentLocalM.Transform(self._centerWPos)
p2 = parentLocalM.Transform(bbCenter)
transV = self._prim.GetAttribute("xformOp:translate").Get()
if transV == None:
transV = Gf.Vec3f(0, 0, 0)
transV = Gf.Vec3f(transV) + (p1 - p2)
TUtil_SetTranslate(self._prim, Gf.Vec3f(transV))
# Undo process.
def undo (self):
if _checkPrim(self._prim) == False:
return
TUtil_SetTranslate(self._prim, Gf.Vec3f(self._prevTranslate))
if self._prevPivot != None:
TUtil_SetPivot(self._prim, Gf.Vec3f(self._prevPivot))
else:
TUtil_DeletePivot(self._prim)
# ------------------------------------------------------------------------.
class SetOrigin:
def __init__(self):
pass
# Get selected Prim.
def _getSelectedPrim (self):
# Get stage.
stage = omni.usd.get_context().get_stage()
# Get selection.
selection = omni.usd.get_context().get_selection()
paths = selection.get_selected_prim_paths()
prim = None
for path in paths:
prim = stage.GetPrimAtPath(path)
break
return prim
def doCenterOfGeometry (self):
prim = self._getSelectedPrim()
if _checkPrim(prim) == False:
return
# Calculate world center from bounding box.
bbMin, bbMax = CalcWorldBoundingBox(prim)
bbCenter = (bbMin + bbMax) * 0.5
# Register a Class and run it.
omni.kit.commands.register(ToolSetOrigin)
omni.kit.commands.execute("ToolSetOrigin", prim=prim, center_position=bbCenter)
def doLowerCenterOfGeometry (self):
prim = self._getSelectedPrim()
if _checkPrim(prim) == False:
return
# Calculate world lower center from bounding box.
bbMin, bbMax = CalcWorldBoundingBox(prim)
bbCenter = Gf.Vec3f((bbMin[0] + bbMax[0]) * 0.5, bbMin[1], (bbMin[2] + bbMax[2]) * 0.5)
# Register a Class and run it.
omni.kit.commands.register(ToolSetOrigin)
omni.kit.commands.execute("ToolSetOrigin", prim=prim, center_position=bbCenter)
| 4,640 | Python | 32.388489 | 96 | 0.567026 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/docs/CHANGELOG.md | # CHANGELOG
## Ver.0.0.1 (08/11/2022)
* Adjustments for Extension Manager
## Ver.0.0.1 (04/28/2022)
* First Version
| 123 | Markdown | 8.538461 | 35 | 0.642276 |
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/docs/README.md | # Set Origin [ft_lab.Tools.SetOrigin]
Changes the center position of the rotation or scale for the selected Mesh or Xform.
https://github.com/ft-lab/Omniverse_extension_SetOrigin
## Usage
1. Activate "ft_lab.Tools.SetOrigin" in the Extension window.
2. Select Mesh or Xform.
3. Select "Tools"-"Set Origin"-"Center of Geometry" from the menu to move the center of the manipulator to the center of the geometry.
4. Select "Tools"-"Set Origin"-"Lower center of Geometry" from the menu to move the center of the manipulator to the lower center of the geometry.
## Operation Description
This Set Origin function adjusts the Translate and Pivot of the Prim.
Add "ToolSetOrigin" to omni.kit.commands.
The argument "prim" specifies Usd.Prim.
The argument "center_position" specifies the center position in world coordinates.
| 850 | Markdown | 37.681816 | 146 | 0.752941 |
omnioverflow/kit-extension-path-tracking/README.md | # Vehicle Path Tracking Extension
## 1. About
Omniverse Vehicle Path tracking extension allows a physics-enabled vehicle created
with a PhysX Vehicle extension (omni.physx.vehicle) to move and automatically track a user-defined path.
A user-defined path is represented by an instance of USD BasisCurves, and the path tracking algorithm
is inspired by the classic Pure Pursuit algorithm [3].

Figure 1. Preview of Vehicle Path Tracking Extension
### System Requirements:
- `Code 2022.1.3+` or `Create 2022.1.5+` (support for Create 2022.3.0 is in progress)
- `Python 3.7+`, `numpy` (this requirement should be satisfied when using Omniverse Kit's embedded `CPython 3.7`)
### Limitations
For the moment, the extension is simple: a number of shortcuts have been taken
and a few simplifications applied, including the following:
* The Pure Pursuit tracking algorithm is kinematics-based, so several vehicle dynamics
properties, such as tire slip, are not considered when computing the wheel steering angle.
* A vehicle might go off the track if given an input path with a physically "impossible" trajectory, or during a high-speed turn.
* Limited unit test coverage; occasional bugs might exist.
### Future Work
* Implement automatic computation of vehicle path which satisfies certain constraints (waypoints, collision free path etc.).
* Add support for different vehicle controller algorithms, including more sophisticated ones (e.g., PID controller).
* Getting rid of limitations, bugfix.
## 2. Installing Extension
### Add a path to a local clone to Omniverse extension search path
1. `git clone -b main https://github.com/omnioverflow/kit-extension-path-tracking.git $PATH_TO_DIR`
2. `Window` -> `Extension Manager` -> ⚙️ `Gear Icon` -> `Extension Search Path`
3. Add a path to just cloned extension as an extension search path: `$PATH_TO_DIR/exts`
### Omniverse Community Tab
Extension is also available in the community tab in the Extension Manager: just search for path.tracking in the search field.
### Activate extension
When extension search path configuration is done, start the extension:
1. `Window` -> `Extension Manager`
2. Find Vehicle path tracking extension in the list and enable it (Figure 2)
<img src="exts/ext.path.tracking/data/img/figures/figure_01.png" alt="activating extension" style="height:400px;"/></br>
Figure 2. Activating path tracking extension in extension manager.</br>
---
## 3. Getting Started
### 3.1. Evaluate vehicle path tracking on a preset configuration
The fastest way to evaluate how the vehicle path tracking extension works is to use a preset vehicle and curve (consider it a `HelloWorld` before importing your own physx-vehicle and custom paths).
To get started with the preset configuration please proceed as follows (Figure 3):
1. Click `Load a preset scene` button
2. Click `Start scenario` button
<img src="exts/ext.path.tracking/data/img/figures/figure_02.png" style="width:600px" alt="extension preview"><br/>
Figure 3. Getting started with a preset scene.
The extension also allows a quick way to load a ground plane, a sample physics vehicle, and a sample basis curve. See Figure 4.
<img src="exts/ext.path.tracking/data/img/figures/figure_03.png" style="width:600px" alt="extension controls"/><br/>
Figure 4. Other extension controls.
---
### 3.2. Create your custom vehicle-to-curve attachment setup
The extension supports path tracking for any Omniverse PhysX Vehicle.
One could load a template vehicle using the extension UI or via the conventional method: `Create`->`Physics`->`Vehicle`.
It is also straightforward to add a custom mesh and materials to a physics vehicle [2].
You can create a curve for vehicle path tracking using either of the following methods (Figure 5):
- `Create`->`BasisCurves`->`From Bezier`
- `Create`->`BasisCurves`->`From Pencil`
<img src="exts/ext.path.tracking/data/img/figures/figure_04.png" style="height:500px"/> | <img src="exts/ext.path.tracking/data/img/figures/figure_05.png" style="height:500px"/><br/>
Figure 5. Create a custom path to track via USD BasisCurves.
---
Once a physics vehicle and a path to be tracked (a USD BasisCurves prim) are created, select the WizardVehicle and the BasisCurves prims in the stage (via Ctrl-click)
and click the `Attach Selected` button. Note that it is very important to select specifically the `WizardVehicle` prim in the scene,
not `WizardVehicle/Vehicle` for instance.
Please see Figure 6 for the illustration.
<img src="exts/ext.path.tracking/data/img/figures/figure_06.png" style="width:1100px"/><br/>
Figure 6. Attachment of a path (USD BasisCurves) to a physics-enabled vehicle.
If the vehicle-to-curve attachment was successful, it is reflected in the
extension UI (Figure 7).
<img src="exts/ext.path.tracking/data/img/figures/figure_07.png" style="width:600px"/><br/>
Figure 7. Successful vehicle-to-curve attachment is shown on the right side.
When vehicle-to-curve attachment(s) are created, proceed by clicking the `Start scenario` button.
If you want to remove all existing vehicle-to-curve attachments, click `Clear All Attachments` (Figure 8).
It is very important to clear vehicle-to-curve attachments when changing vehicles and their corresponding tracked paths.
<img src="exts/ext.path.tracking/data/img/figures/figure_08.png" style="width:600px"/><br/>
Figure 8. Removing existing vehicle-to-curve attachments.
### 3.3. Multiple Vehicles
The extension supports multiple vehicle-to-curve attachments.
Note that for an attachment to work, each pair of `WizardVehicle` and
`BasisCurves` objects should be selected and attached consecutively.
Results of path tracking with multiple vehicles are shown in Figure 9.
<img src="exts/ext.path.tracking/data/img/figures/figure_09_01.png" style="height:300px"/> <img src="exts/ext.path.tracking/data/img/figures/figure_09_02.png" style="height:300px"/> <img src="exts/ext.path.tracking/data/img/figures/figure_09_03.png" style="height:300px"/><br/>
Figure 9. Support of multiple vehicle-to-curve attachments.
### Troubleshooting
Note that the extension is in Beta. The following items might help if you run into issues:
- It always takes a few seconds between clicking the 'Start scenario' button and the actual start of the simulation, so please be patient.
- On a fresh install, some PhysX warnings/errors might occasionally be reported to the console log; they should not prevent the extension from producing the expected results, though.
- If path tracking is not working on a custom vehicle and path, please verify that exactly `WizardVehicle1` from omni.physx.vehicle is selected (not a child prim 'WizardVehicle1/Vehicle' or some parent prim) along with a prim of type `BasisCurves` (which is to be tracked) before clicking 'Attach Selected'.
- Use 'Clear All Attachments' if there are issues.
---
## 4. Results
1. [youtube video] [Vehicle Path Tracking Extension Overview](https://youtu.be/tv-_xrqjzm4)
2. [youtube video] [Vehicle Dynamics and Vehicle Path Tracking: Forklift Usecase](https://youtu.be/SRibExkL4aE)
2. [youtube video] [OmniPhysX & Vehicle Dynamics Showcase](https://youtu.be/C8tjZWtU6w8)
## 5. References
1. [Omniverse Developer Contest] https://www.nvidia.com/en-us/omniverse/apps/code/developer-contest/
2. [Omniverse Vehicle Dynamics] https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_vehicle-dynamics.html
3. [Coulter 1992] Coulter, R. Craig. Implementation of the pure pursuit path tracking algorithm. Carnegie-Mellon UNIV Pittsburgh PA Robotics INST, 1992. (https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf)
4. Credits for a forklift model: https://sketchfab.com/3d-models/forklift-73d21c990e634589b0c130777751be28 (license: [Creative Commons Attribution](https://creativecommons.org/licenses/by/4.0/))
5. Credits for a Dodge Challenger car model: https://sketchfab.com/3d-models/dodge-challenger-ef40662c84eb4beb85acdfce5ac4f40e (license: [Creative Commons Attribution NonCommercial](https://creativecommons.org/licenses/by-nc/4.0/))
6. Credits for a monster truck (used in the result video): https://sketchfab.com/3d-models/hcr2-monster-truck-811bd567566b497a8cbbb06fd5a267b6 (license: [Creative Commons Attribution](https://creativecommons.org/licenses/by/4.0/))
7. Credits for a race track model (used in the result video): https://sketchfab.com/3d-models/track-5f5e9454fd59436e8d0dd38df9ec83c4 (license: [Creative Commons Attribution NonCommercial](https://creativecommons.org/licenses/by-nc/4.0/))
| 8,547 | Markdown | 55.609271 | 307 | 0.776881 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/__init__.py | from .scripts.debug_draw import *
from .scripts.extension import *
from .scripts.model import *
from .scripts.path_tracker import *
from .scripts.ui import *
from .scripts.utils import *
from .scripts.vehicle import * | 253 | Python | 30.749996 | 35 | 0.770751 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/vehicle.py | import omni.usd
from enum import IntEnum
from pxr import Gf, Usd, UsdGeom, PhysxSchema
import numpy as np
# ======================================================================================================================
# Vehicle
# ======================================================================================================================
class Axle(IntEnum):
FRONT = 0,
REAR = 1
class Wheel(IntEnum):
FRONT_LEFT = 0,
FRONT_RIGHT = 1,
REAR_LEFT = 2,
REAR_RIGHT = 3
# ======================================================================================================================
class Vehicle():
"""
    A wrapper created to help manipulate the state of a vehicle prim and its
    dynamic properties, such as acceleration, deceleration, steering etc.
"""
def __init__(self, vehicle_prim, max_steer_angle_radians, rear_steering=True):
self._prim = vehicle_prim
self._path = self._prim.GetPath()
self._steer_delta = 0.01
self._stage = omni.usd.get_context().get_stage()
        self._rear_steering = rear_steering
self._wheel_prims = {
Wheel.FRONT_LEFT:
self._stage.GetPrimAtPath(f"{self._path}/LeftWheel1References"),
Wheel.FRONT_RIGHT:
self._stage.GetPrimAtPath(f"{self._path}/RightWheel1References"),
Wheel.REAR_LEFT:
self._stage.GetPrimAtPath(f"{self._path}/LeftWheel2References"),
Wheel.REAR_RIGHT:
self._stage.GetPrimAtPath(f"{self._path}/RightWheel2References")
}
steering_wheels = [Wheel.FRONT_LEFT, Wheel.FRONT_RIGHT]
non_steering_wheels = [Wheel.REAR_LEFT, Wheel.REAR_RIGHT]
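        # With rear steering enabled, the rear axle becomes the steering axle;
        # the other axle is locked by setting its max steer angle to zero below.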
        if self._rear_steering:
steering_wheels, non_steering_wheels = non_steering_wheels, steering_wheels
for wheel_prim_key in steering_wheels:
self._set_max_steer_angle(self._wheel_prims[wheel_prim_key], max_steer_angle_radians)
for wheel_prim_key in non_steering_wheels:
self._set_max_steer_angle(self._wheel_prims[wheel_prim_key], 0.0)
p = self._prim.GetAttribute("xformOp:translate").Get()
self._p = Gf.Vec4f(p[0], p[1], p[2], 1.0)
def _set_max_steer_angle(self, wheel_prim, max_steer_angle_radians):
physx_wheel = PhysxSchema.PhysxVehicleWheelAPI(wheel_prim)
physx_wheel.GetMaxSteerAngleAttr().Set(max_steer_angle_radians)
def get_bbox_size(self):
"""Computes size of vehicle's oriented bounding box."""
purposes = [UsdGeom.Tokens.default_]
bbox_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
return bbox_cache.ComputeWorldBound(self._prim).ComputeAlignedRange().GetSize()
def steer_left(self, value):
        if self._rear_steering:
self._steer_right_priv(value)
else:
self._steer_left_priv(value)
def steer_right(self, value):
        if self._rear_steering:
self._steer_left_priv(value)
else:
self._steer_right_priv(value)
def _steer_left_priv(self, value):
self._prim.GetAttribute("physxVehicleController:steerLeft").Set(value)
self._prim.GetAttribute("physxVehicleController:steerRight").Set(0.0)
def _steer_right_priv(self, value):
self._prim.GetAttribute("physxVehicleController:steerLeft").Set(0.0)
self._prim.GetAttribute("physxVehicleController:steerRight").Set(value)
def accelerate(self, value):
self._vehicle().GetAttribute("physxVehicleController:accelerator").Set(value)
def brake(self, value):
self._prim.GetAttribute("physxVehicleController:brake").Set(value)
def get_velocity(self):
return self._prim.GetAttribute("physics:velocity").Get()
def get_speed(self):
return np.linalg.norm(self.get_velocity())
def curr_position(self):
prim = self._vehicle()
cache = UsdGeom.XformCache()
T = cache.GetLocalToWorldTransform(prim)
p = self._p * T
return Gf.Vec3f(p[0], p[1], p[2])
def axle_front(self):
return self.axle_position(Axle.FRONT)
def axle_rear(self):
return self.axle_position(Axle.REAR)
def axle_position(self, type):
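        """Returns the world-space midpoint between the two wheels of the given axle (local height zeroed first)."""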
cache = UsdGeom.XformCache()
T = cache.GetLocalToWorldTransform(self._vehicle())
if type == Axle.FRONT:
wheel_fl = self._wheel_prims[Wheel.FRONT_LEFT].GetAttribute("xformOp:translate").Get()
wheel_fr = self._wheel_prims[Wheel.FRONT_RIGHT].GetAttribute("xformOp:translate").Get()
wheel_fl[1] = 0.0
wheel_fr[1] = 0.0
wheel_fl = Gf.Vec4f(wheel_fl[0], wheel_fl[1], wheel_fl[2], 1.0) * T
wheel_fr = Gf.Vec4f(wheel_fr[0], wheel_fr[1], wheel_fr[2], 1.0) * T
wheel_fl = Gf.Vec3f(wheel_fl[0], wheel_fl[1], wheel_fl[2])
wheel_fr = Gf.Vec3f(wheel_fr[0], wheel_fr[1], wheel_fr[2])
return (wheel_fl + wheel_fr) / 2
elif type == Axle.REAR:
wheel_rl = self._wheel_prims[Wheel.REAR_LEFT].GetAttribute("xformOp:translate").Get()
wheel_rr = self._wheel_prims[Wheel.REAR_RIGHT].GetAttribute("xformOp:translate").Get()
wheel_rl[1] = 0.0
wheel_rr[1] = 0.0
wheel_rl = Gf.Vec4f(wheel_rl[0], wheel_rl[1], wheel_rl[2], 1.0) * T
wheel_rr = Gf.Vec4f(wheel_rr[0], wheel_rr[1], wheel_rr[2], 1.0) * T
wheel_rl = Gf.Vec3f(wheel_rl[0], wheel_rl[1], wheel_rl[2])
wheel_rr = Gf.Vec3f(wheel_rr[0], wheel_rr[1], wheel_rr[2])
return (wheel_rl + wheel_rr) / 2
else:
return None
def _wheel_pos(self, type):
R = self.rotation_matrix()
wheel_pos = self._wheel_prims[type].GetAttribute("xformOp:translate").Get()
wheel_pos = Gf.Vec4f(wheel_pos[0], wheel_pos[1], wheel_pos[2], 1.0) * R
return Gf.Vec3f(wheel_pos[0], wheel_pos[1], wheel_pos[2]) + self.curr_position()
def wheel_pos_front_left(self):
return self._wheel_pos(Wheel.FRONT_LEFT)
def wheel_pos_front_right(self):
return self._wheel_pos(Wheel.FRONT_RIGHT)
def wheel_pos_rear_left(self):
return self._wheel_pos(Wheel.REAR_LEFT)
def wheel_pos_rear_right(self):
return self._wheel_pos(Wheel.REAR_RIGHT)
def rotation_matrix(self):
"""
Produces vehicle's local-to-world rotation transform.
"""
cache = UsdGeom.XformCache()
T = cache.GetLocalToWorldTransform(self._vehicle())
return Gf.Matrix4d(T.ExtractRotationMatrix(), Gf.Vec3d())
def forward(self):
R = self.rotation_matrix()
f = self._forward_local()
return Gf.Vec4f(f[0], f[1], f[2], 1.0) * R
def up(self):
R = self.rotation_matrix()
u = self._up_local()
return Gf.Vec4f(u[0], u[1], u[2], 1.0) * R
def _forward_local(self):
return Gf.Vec3f(0.0, 0.0, 1.0)
def _up_local(self):
return Gf.Vec3f(0.0, 1.0, 0.0)
def _vehicle(self):
return self._stage.GetPrimAtPath(self._path)
def is_close_to(self, point, lookahead_distance):
if not point:
raise Exception("[Vehicle] Point is None")
curr_vehicle_pos = self.curr_position()
if not curr_vehicle_pos:
raise Exception("[Vechicle] Current position is None")
distance = np.linalg.norm(curr_vehicle_pos - point)
return tuple([distance, distance < lookahead_distance])
| 7,517 | Python | 36.402985 | 120 | 0.579753 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/stepper.py | import omni.kit
import omni.physx
import omni.usd
import omni.timeline
from omni.physx.bindings._physx import SimulationEvent
import math
import threading
"""
Based on Nvidia's sample from omni.physx.vehicle Physics extension.
"""
# ======================================================================================================================
#
# Scenario
#
# ======================================================================================================================
class Scenario:
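    """
    Base class for a time-bounded simulation scenario.
    Subclasses override on_start/on_end/on_step; SimStepTracker drives
    on_step for get_iteration_count() physics steps.
    """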
def __init__(self, secondsToRun, timeStep=1.0 / 60.0):
self._targetIterationCount = math.ceil(secondsToRun / timeStep)
def get_iteration_count(self):
return self._targetIterationCount
# override in subclass as needed
def on_start(self):
pass
def on_end(self):
pass
def on_step(self, deltaTime, totalTime):
pass
# ======================================================================================================================
#
# SimStepTracker
#
# ======================================================================================================================
class SimStepTracker:
def __init__(self, scenario, scenarioDoneSignal):
self._scenario = scenario
self._targetIterationCount = scenario.get_iteration_count()
self._scenarioDoneSignal = scenarioDoneSignal
self._physx = omni.physx.get_physx_interface()
self._physxSimEventSubscription = self._physx.get_simulation_event_stream_v2().create_subscription_to_pop(
self._on_simulation_event
)
self._hasStarted = False
self._resetOnNextResume = False
def abort(self):
if self._hasStarted:
self._on_stop()
self._physxSimEventSubscription = None
self._physx = (
None
) # should release automatically (note: explicit release call results in double release being reported)
self._scenarioDoneSignal.set()
def stop(self):
self._scenario.on_end()
self._scenarioDoneSignal.set()
def reset_on_next_resume(self):
self._resetOnNextResume = True
def _on_stop(self):
self._hasStarted = False
self._physxStepEventSubscription = None # should unsubscribe automatically
self._scenario.on_end()
def _on_simulation_event(self, event):
if event.type == int(SimulationEvent.RESUMED):
if not self._hasStarted:
self._scenario.on_start()
self._iterationCount = 0
self._totalTime = 0
self._physxStepEventSubscription = self._physx.subscribe_physics_step_events(self._on_physics_step)
self._hasStarted = True
elif self._resetOnNextResume:
self._resetOnNextResume = False
# the simulation step callback is still registered and should remain so, thus no unsubscribe
self._hasStarted = False
self._scenario.on_end()
self._scenario.on_start()
self._iterationCount = 0
self._totalTime = 0
self._hasStarted = True
# elif event.type == int(SimulationEvent.PAUSED):
# self._on_pause()
elif event.type == int(SimulationEvent.STOPPED):
self._on_stop()
    def _on_physics_step(self, dt):
        if not self._hasStarted:
            return
        if self._iterationCount < self._targetIterationCount:
            self._scenario.on_step(dt, self._totalTime)
            self._iterationCount += 1
            self._totalTime += dt
        else:
            self._scenarioDoneSignal.set()
# ======================================================================================================================
#
# StageEventListener
#
# ======================================================================================================================
class StageEventListener:
def __init__(self, simStepTracker):
self._simStepTracker = simStepTracker
self._stageEventSubscription = (
omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self._on_stage_event)
)
self._stageIsClosing = False
self.restart_after_stop = False
def cleanup(self):
self._stageEventSubscription = None
def is_stage_closing(self):
return self._stageIsClosing
def _on_stage_event(self, event):
# Check out omni.usd docs for more information regarding
# omni.usd.StageEventType in particular.
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.usd/docs/index.html
if event.type == int(omni.usd.StageEventType.CLOSING):
self._stop(stageIsClosing=True)
elif event.type == int(omni.usd.StageEventType.SIMULATION_STOP_PLAY):
if self.restart_after_stop:
omni.timeline.get_timeline_interface().play()
elif event.type == int(omni.usd.StageEventType.SIMULATION_START_PLAY):
self.restart_after_stop = False
elif event.type == int(omni.usd.StageEventType.ANIMATION_STOP_PLAY):
pass
def _stop(self, stageIsClosing=False):
self._stageIsClosing = stageIsClosing
self._simStepTracker.stop()
# ======================================================================================================================
#
# ScenarioManager
#
# ======================================================================================================================
class ScenarioManager:
def __init__(self, scenario):
self._scenario = scenario
self._setup(scenario)
def _setup(self, scenario):
self._init_done = False
scenarioDoneSignal = threading.Event()
self._simStepTracker = SimStepTracker(scenario, scenarioDoneSignal)
self._stageEventListener = StageEventListener(self._simStepTracker)
def stop_scenario(self):
self._stageEventListener._stop()
def cleanup(self):
self._stageEventListener.cleanup()
self._simStepTracker.abort()
@property
def scenario(self):
return self._scenario
@scenario.setter
    def scenario(self, scenario):
self.stop_scenario()
self._setup(scenario)
| 6,373 | Python | 32.197916 | 120 | 0.530206 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/path_tracker.py | import omni.usd
from pxr import Gf, UsdGeom
import math
import numpy as np
from .debug_draw import DebugRenderer
from .stepper import Scenario
from .vehicle import Axle, Vehicle
# ======================================================================================================================
#
# PurePursuitScenario
#
# ======================================================================================================================
class PurePursuitScenario(Scenario):
def __init__(self, lookahead_distance, vehicle_path, trajectory_prim_path, meters_per_unit,
close_loop_flag, enable_rear_steering):
super().__init__(secondsToRun=10000.0, timeStep=1.0/25.0)
self._MAX_STEER_ANGLE_RADIANS = math.pi / 3
self._lookahead_distance = lookahead_distance
self._METERS_PER_UNIT = meters_per_unit
self._max_speed = 250.0
self._stage = omni.usd.get_context().get_stage()
self._vehicle = Vehicle(
self._stage.GetPrimAtPath(vehicle_path),
self._MAX_STEER_ANGLE_RADIANS,
enable_rear_steering
)
self._debug_render = DebugRenderer(self._vehicle.get_bbox_size())
self._path_tracker = PurePursuitPathTracker(math.pi/4)
self._dest = None
self._trajectory_prim_path = trajectory_prim_path
self._trajectory = Trajectory(trajectory_prim_path, close_loop=close_loop_flag)
self._stopped = False
self.draw_track = False
self._close_loop = close_loop_flag
def on_start(self):
self._vehicle.accelerate(1.0)
def on_end(self):
self._trajectory.reset()
def _process(self, forward, up, dest_position, distance=None, is_close_to_dest=False):
"""
        Steering/acceleration vehicle control heuristic.
"""
if (distance is None):
distance, is_close_to_dest = self._vehicle.is_close_to(dest_position, self._lookahead_distance)
curr_vehicle_pos = self._vehicle.curr_position()
self._debug_render.update_vehicle(self._vehicle)
self._debug_render.update_path_to_dest(curr_vehicle_pos, dest_position)
        # FIXME: currently the extension expects Y-up axis, which is not flexible.
# Project onto XZ plane
curr_vehicle_pos[1] = 0.0
forward[1] = 0.0
dest_position[1] = 0.0
speed = self._vehicle.get_speed() * self._METERS_PER_UNIT
axle_front = Gf.Vec3f(self._vehicle.axle_position(Axle.FRONT))
axle_rear = Gf.Vec3f(self._vehicle.axle_position(Axle.REAR))
axle_front[1] = 0.0
axle_rear[1] = 0.0
# self._debug_render.update_path_tracking(axle_front, axle_rear, forward, dest_position)
steer_angle = self._path_tracker.on_step(
axle_front,
axle_rear,
forward,
dest_position,
curr_vehicle_pos
)
if steer_angle < 0:
self._vehicle.steer_left(abs(steer_angle))
else:
self._vehicle.steer_right(steer_angle)
        # Accelerate/brake control heuristic
if abs(steer_angle) > 0.1 and speed > 5.0:
self._vehicle.brake(1.0)
self._vehicle.accelerate(0.0)
else:
if (speed >= self._max_speed):
self._vehicle.brake(0.8)
self._vehicle.accelerate(0.0)
else:
self._vehicle.brake(0.0)
self._vehicle.accelerate(0.7)
def _full_stop(self):
self._vehicle.accelerate(0.0)
self._vehicle.brake(1.0)
def set_meters_per_unit(self, value):
self._METERS_PER_UNIT = value
def teardown(self):
super().abort()
self._dest.teardown()
self._dest = None
self._stage = None
self._vehicle = None
self._debug_render = None
self._path_tracker = None
def enable_debug(self, flag):
self._debug_render.enable(flag)
def on_step(self, deltaTime, totalTime):
"""
Updates vehicle control on sim update callback in order to stay on tracked path.
"""
forward = self._vehicle.forward()
up = self._vehicle.up()
if self._trajectory and self.draw_track:
self._trajectory.draw()
dest_position = self._trajectory.point()
is_end_point = self._trajectory.is_at_end_point()
# Run vehicle control unless reached the destination
if dest_position:
distance, is_close_to_dest = self._vehicle.is_close_to(dest_position, self._lookahead_distance)
if (is_close_to_dest):
dest_position = self._trajectory.next_point()
else:
# Compute vehicle steering and acceleration
self._process(forward, up, dest_position, distance, is_close_to_dest)
else:
self._stopped = True
self._full_stop()
def recompute_trajectory(self):
self._trajectory = Trajectory(self._trajectory_prim_path, self._close_loop)
def set_lookahead_distance(self, distance):
self._lookahead_distance = distance
def set_close_trajectory_loop(self, flag):
self._close_loop = flag
self._trajectory.set_close_loop(flag)
# ======================================================================================================================
#
# PurePursuitPathTracker
#
# ======================================================================================================================
class PurePursuitPathTracker():
"""
    Implements path tracking in the spirit of the Pure Pursuit algorithm.
References
* Implementation of the Pure Pursuit Path tracking Algorithm, RC Conlter:
https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf
* https://dingyan89.medium.com/three-methods-of-vehicle-lateral-control-pure-pursuit-stanley-and-mpc-db8cc1d32081
"""
def __init__(self, max_steer_angle_radians):
self._max_steer_angle_radians = max_steer_angle_radians
self._debug_enabled = False
def _steer_value_from_angle(self, angle):
"""
Computes vehicle's steering wheel angle in expected range [-1, 1].
"""
return np.clip(angle / self._max_steer_angle_radians, -1.0, 1.0)
def on_step(self, front_axle_pos, rear_axle_pos, forward, dest_pos, curr_pos):
"""
Recomputes vehicle's steering angle on a simulation step.
"""
front_axle_pos, rear_axle_pos = rear_axle_pos, front_axle_pos
# Lookahead points to the next destination point
lookahead = dest_pos - rear_axle_pos
        # Forward vector corresponds to an axis segment front-to-rear
forward = front_axle_pos - rear_axle_pos
lookahead_dist = np.linalg.norm(lookahead)
forward_dist = np.linalg.norm(forward)
if self._debug_enabled:
if lookahead_dist == 0.0 or forward_dist == 0.0:
raise Exception("Pure pursuit aglorithm: invalid state")
lookahead.Normalize()
forward.Normalize()
# Compute a signed angle alpha between lookahead and forward vectors,
# /!\ left-handed rotation assumed.
dot = lookahead[0] * forward[0] + lookahead[2] * forward[2]
cross = lookahead[0] * forward[2] - lookahead[2] * forward[0]
alpha = math.atan2(cross, dot)
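        # Pure pursuit steering law: curvature k = 2 * sin(alpha) / lookahead_dist;
        # treating the front-to-rear segment length (forward_dist) as the wheelbase L,
        # the steering angle is theta = atan(L * k), computed below.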
theta = math.atan(2.0 * forward_dist * math.sin(alpha) / lookahead_dist)
steer_angle = self._steer_value_from_angle(theta)
return steer_angle
# ======================================================================================================================
#
# Trajectory
#
# ======================================================================================================================
class Trajectory():
"""
    A helper class to access coordinates of points that form a BasisCurves prim.
"""
def __init__(self, prim_path, close_loop=True):
stage = omni.usd.get_context().get_stage()
basis_curves = UsdGeom.BasisCurves.Get(stage, prim_path)
if (basis_curves and basis_curves is not None):
curve_prim = stage.GetPrimAtPath(prim_path)
self._points = basis_curves.GetPointsAttr().Get()
self._num_points = len(self._points)
cache = UsdGeom.XformCache()
T = cache.GetLocalToWorldTransform(curve_prim)
for i in range(self._num_points):
p = Gf.Vec4d(self._points[i][0], self._points[i][1], self._points[i][2], 1.0)
p_ = p * T
self._points[i] = Gf.Vec3f(p_[0], p_[1], p_[2])
else:
self._points = None
self._num_points = 0
self._pointer = 0
self._close_loop = close_loop
def point(self):
"""
Returns current point.
"""
return self._points[self._pointer] if self._pointer < len(self._points) else None
def next_point(self):
"""
Next point on the curve.
"""
if (self._pointer < self._num_points):
self._pointer = self._pointer + 1
if self._pointer >= self._num_points and self._close_loop:
self._pointer = 0
return self.point()
return None
def is_at_end_point(self):
"""
Checks if the current point is the last one.
"""
return self._pointer == (self._num_points - 1)
def reset(self):
"""
Resets current point to the first one.
"""
self._pointer = 0
def set_close_loop(self, flag):
self._close_loop = flag
| 9,688 | Python | 34.752767 | 120 | 0.55192 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/extension.py | import omni.ext
import omni.kit
import omni.usd
import carb
import asyncio
from .model import ExtensionModel
from .ui import ExtensionUI
# ======================================================================================================================
#
# PathTrackingExtension
#
# ======================================================================================================================
class PathTrackingExtension(omni.ext.IExt):
def __init__(self):
self._DEFAULT_LOOKAHEAD = 550.0
# Any user-defined changes to the lookahead parameter will be clamped:
self._MIN_LOOKAHEAD = 400.0
self._MAX_LOOKAHEAD = 2000.0
def on_startup(self, ext_id):
if omni.usd.get_context().get_stage() is None:
# Workaround for running within test environment.
omni.usd.get_context().new_stage()
# Usd listener could be used in the future if we could be interested
# in recomputing changes in the vehicle planned trajectory "on the fly".
# self._usd_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_usd_change, None)
self._stage_event_sub = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(
self._on_stage_event, name="Stage Open/Closing Listening"
)
self._model = ExtensionModel(
ext_id,
default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
max_lookahed_distance=self._MAX_LOOKAHEAD,
min_lookahed_distance=self._MIN_LOOKAHEAD
)
self._ui = ExtensionUI(self)
self._ui.build_ui(self._model.get_lookahead_distance(), attachments=[])
def on_shutdown(self):
timeline = omni.timeline.get_timeline_interface()
if timeline.is_playing():
timeline.stop()
self._clear_attachments()
self._usd_listener = None
self._stage_event_sub = None
self._ui.teardown()
self._ui = None
self._model.teardown()
self._model = None
def _update_ui(self):
self._ui.update_attachment_info(self._model._vehicle_to_curve_attachments.keys())
# ======================================================================================================================
# Callbacks
# ======================================================================================================================
def _on_click_start_scenario(self):
async def start_scenario(model):
timeline = omni.timeline.get_timeline_interface()
if timeline.is_playing():
timeline.stop()
await omni.kit.app.get_app().next_update_async()
lookahead_distance = self._ui.get_lookahead_distance()
model.load_simulation(lookahead_distance)
omni.timeline.get_timeline_interface().play()
run_loop = asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(start_scenario(self._model), loop=run_loop)
def _on_click_stop_scenario(self):
async def stop_scenario():
timeline = omni.timeline.get_timeline_interface()
if timeline.is_playing():
timeline.stop()
await omni.kit.app.get_app().next_update_async()
run_loop = asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(stop_scenario(), loop=run_loop)
def _on_click_load_sample_vehicle(self):
self._model.load_sample_vehicle()
def _on_click_load_ground_plane(self):
self._model.load_ground_plane()
def _on_click_load_basis_curve(self):
self._model.load_sample_track()
def _on_click_load_forklift(self):
self._model.load_forklift_rig()
def _on_click_attach_selected(self):
selected_prim_paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
self._model.attach_selected_prims(selected_prim_paths)
self._update_ui()
def _clear_attachments(self):
async def stop_scenario():
timeline = omni.timeline.get_timeline_interface()
if timeline.is_playing():
timeline.stop()
await omni.kit.app.get_app().next_update_async()
run_loop = asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(stop_scenario(), loop=run_loop)
self._model.clear_attachments()
self._update_ui()
def _on_click_clear_attachments(self):
self._clear_attachments()
def _on_click_load_preset_scene(self):
self._model.load_preset_scene()
self._update_ui()
def _on_stage_event(self, event: carb.events.IEvent):
"""Called on USD Context event"""
if event.type == int(omni.usd.StageEventType.CLOSING):
self._model.clear_attachments()
self._update_ui()
def _on_usd_change(self, objects_changed, stage):
carb.log_info("_on_usd_change")
for resync_path in objects_changed.GetResyncedPaths():
carb.log_info(resync_path)
def _changed_enable_debug(self, model):
self._model.set_enable_debug(model.as_bool)
def _on_lookahead_distance_changed(self, distance):
# self._clear_attachments()
clamped_lookahead_distance = self._model.update_lookahead_distance(distance)
self._ui.set_lookahead_distance(clamped_lookahead_distance)
def _on_trajectory_loop_value_changed(self, widget_model):
self._model.set_close_trajectory_loop(widget_model.as_bool)
def _on_steering_changed(self, model):
# First we have to stop current simulation.
self._on_click_stop_scenario()
self._model.set_enable_rear_steering(model.as_bool)
| 5,705 | Python | 35.576923 | 120 | 0.57844 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/utils.py | import omni.usd
from pxr import UsdGeom, Sdf, Gf, UsdPhysics, PhysxSchema
class Utils:
@staticmethod
def create_mesh_square_axis(stage, path, axis, halfSize):
if axis == "X":
points = [
Gf.Vec3f(0.0, -halfSize, -halfSize),
Gf.Vec3f(0.0, halfSize, -halfSize),
Gf.Vec3f(0.0, halfSize, halfSize),
Gf.Vec3f(0.0, -halfSize, halfSize),
]
normals = [Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0)]
indices = [0, 1, 2, 3]
vertexCounts = [4]
# Create the mesh
return Utils.create_mesh(stage, path, points, normals, indices, vertexCounts)
elif axis == "Y":
points = [
Gf.Vec3f(-halfSize, 0.0, -halfSize),
Gf.Vec3f(halfSize, 0.0, -halfSize),
Gf.Vec3f(halfSize, 0.0, halfSize),
Gf.Vec3f(-halfSize, 0.0, halfSize),
]
normals = [Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0)]
indices = [0, 1, 2, 3]
vertexCounts = [4]
# Create the mesh
return Utils.create_mesh(stage, path, points, normals, indices, vertexCounts)
points = [
Gf.Vec3f(-halfSize, -halfSize, 0.0),
Gf.Vec3f(halfSize, -halfSize, 0.0),
Gf.Vec3f(halfSize, halfSize, 0.0),
Gf.Vec3f(-halfSize, halfSize, 0.0),
]
normals = [Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1)]
indices = [0, 1, 2, 3]
vertexCounts = [4]
# Create the mesh
mesh = Utils.create_mesh(stage, path, points, normals, indices, vertexCounts)
        # Texture coordinates
texCoords = mesh.CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying)
texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)])
return mesh
@staticmethod
def create_mesh(stage, path, points, normals, indices, vertexCounts):
mesh = UsdGeom.Mesh.Define(stage, path)
# Fill in VtArrays
mesh.CreateFaceVertexCountsAttr().Set(vertexCounts)
mesh.CreateFaceVertexIndicesAttr().Set(indices)
mesh.CreatePointsAttr().Set(points)
mesh.CreateDoubleSidedAttr().Set(False)
mesh.CreateNormalsAttr().Set(normals)
return mesh
@staticmethod
def add_ground_plane(stage, planePath, axis,
size=3000.0, position=Gf.Vec3f(0.0), color=Gf.Vec3f(0.2, 0.25, 0.25)):
# plane xform, so that we dont nest geom prims
planePath = omni.usd.get_stage_next_free_path(stage, planePath, True)
planeXform = UsdGeom.Xform.Define(stage, planePath)
planeXform.AddTranslateOp().Set(position)
planeXform.AddOrientOp().Set(Gf.Quatf(1.0))
planeXform.AddScaleOp().Set(Gf.Vec3f(1.0))
# (Graphics) Plane mesh
geomPlanePath = planePath + "/CollisionMesh"
entityPlane = Utils.create_mesh_square_axis(stage, geomPlanePath, axis, size)
entityPlane.CreateDisplayColorAttr().Set([color])
# (Collision) Plane
colPlanePath = planePath + "/CollisionPlane"
planeGeom = PhysxSchema.Plane.Define(stage, colPlanePath)
planeGeom.CreatePurposeAttr().Set("guide")
planeGeom.CreateAxisAttr().Set(axis)
prim = stage.GetPrimAtPath(colPlanePath)
UsdPhysics.CollisionAPI.Apply(prim)
return planePath
| 3,519 | Python | 38.111111 | 104 | 0.577721 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/model.py | import omni
from pxr import UsdGeom
import omni.kit.commands
from omni.physxvehicle.scripts.wizards import physxVehicleWizard as VehicleWizard
from omni.physxvehicle.scripts.helpers.UnitScale import UnitScale
from omni.physxvehicle.scripts.commands import PhysXVehicleWizardCreateCommand
from .stepper import ScenarioManager
from .path_tracker import PurePursuitScenario
from .utils import Utils
from pxr import UsdPhysics
# ======================================================================================================================
#
# ExtensionModel
#
# ======================================================================================================================
class ExtensionModel:
ROOT_PATH = "/World"
def __init__(self, extension_id, default_lookahead_distance, max_lookahed_distance, min_lookahed_distance):
self._ext_id = extension_id
self._METADATA_KEY = f"{extension_id.split('-')[0]}.metadata"
self._lookahead_distance = default_lookahead_distance
self._min_lookahead_distance = min_lookahed_distance
self._max_lookahead_distance = max_lookahed_distance
self.METERS_PER_UNIT = 0.01
UsdGeom.SetStageMetersPerUnit(omni.usd.get_context().get_stage(), self.METERS_PER_UNIT)
# Currently the extension expects Y-axis to be up-axis.
# Conventionally Y-up is often used in graphics, including Kit-apps.
# TODO: refactor impl to avoid breaking things when changing up-axis settings.
self._up_axis = "Y"
self._vehicle_to_curve_attachments = {}
self._scenario_managers = []
self._dirty = False
# Enables debug overlay with additional info regarding current vehicle state.
self._enable_debug = False
# Closed trajectory loop
self._closed_trajectory_loop = False
self._rear_steering = False
def teardown(self):
self.stop_scenarios()
self._scenario_managers = None
def attach_vehicle_to_curve(self, wizard_vehicle_path, curve_path):
"""
Links a vehicle prim (must be WizardVehicle Xform) to the path (BasisCurve)
        to be tracked by the vehicle.
Currently we expect two prims to be selected:
- WizardVehicle
- BasisCurve (corresponding curve/trajectory the vehicle must track)
"""
stage = omni.usd.get_context().get_stage()
prim0 = stage.GetPrimAtPath(wizard_vehicle_path)
prim1 = stage.GetPrimAtPath(curve_path)
if prim0.IsA(UsdGeom.BasisCurves):
# Fix order of selected prims: WizardVehicle should be first
prim0, prim1 = prim1, prim0
wizard_vehicle_path, curve_path = curve_path, wizard_vehicle_path
if prim0.IsA(UsdGeom.Xformable):
key = wizard_vehicle_path + "/Vehicle"
self._vehicle_to_curve_attachments[key] = curve_path
self._dirty = True
def attach_selected_prims(self, selected_prim_paths):
"""
        Attaches selected prim paths from a stage to be considered as a
        vehicle and a path to be tracked, correspondingly.
        The selected prim paths should include a WizardVehicle Xform that
        represents the vehicle, and a BasisCurves prim that represents the tracked path.
"""
if len(selected_prim_paths) == 2:
self.attach_vehicle_to_curve(
wizard_vehicle_path=selected_prim_paths[0],
curve_path=selected_prim_paths[1]
)
def attach_preset_metadata(self, metadata):
"""
Does vehicle-to-curve attachment from the metadata dictionary directly.
"""
self.attach_vehicle_to_curve(
wizard_vehicle_path=metadata["WizardVehicle"],
curve_path=metadata["BasisCurve"]
)
def _cleanup_scenario_managers(self):
"""Cleans up scenario managers. Often useful when tracked data becomes obsolete."""
self.stop_scenarios()
for manager in self._scenario_managers:
manager.cleanup()
self._scenario_managers.clear()
self._dirty = True
def clear_attachments(self):
"""
Removes previously added path tracking attachments.
"""
self._cleanup_scenario_managers()
self._vehicle_to_curve_attachments.clear()
def stop_scenarios(self):
"""
Stops path tracking scenarios.
"""
for manager in self._scenario_managers:
manager.stop_scenario()
def load_simulation(self, lookahead_distance):
"""
Load scenarios with vehicle-to-curve attachments.
Note that multiple vehicles could run at the same time.
"""
if self._dirty:
self._cleanup_scenario_managers()
for vehicle_path in self._vehicle_to_curve_attachments:
scenario = PurePursuitScenario(
lookahead_distance,
vehicle_path,
self._vehicle_to_curve_attachments[vehicle_path],
self.METERS_PER_UNIT,
self._closed_trajectory_loop,
self._rear_steering
)
scenario.enable_debug(self._enable_debug)
scenario_manager = ScenarioManager(scenario)
self._scenario_managers.append(scenario_manager)
self._dirty = False
self.recompute_trajectories()
def recompute_trajectories(self):
"""
        Update tracked trajectories. Often needed when the BasisCurves prim
        defining a trajectory in the scene was updated by a user.
        """
        for manager in self._scenario_managers:
            manager.scenario.recompute_trajectory()
def set_enable_debug(self, flag):
"""
Enables/disables debug overlay.
"""
self._enable_debug = flag
for manager in self._scenario_managers:
manager.scenario.enable_debug(flag)
def set_close_trajectory_loop(self, flag):
"""
Enables closed loop path tracking.
"""
self._closed_trajectory_loop = flag
for manager in self._scenario_managers:
manager.scenario.set_close_trajectory_loop(flag)
def set_enable_rear_steering(self, flag):
"""
Enables rear steering for the vehicle.
"""
self._rear_steering = flag
# Mark simulation config as dirty in order to re-create vehicle object.
self._dirty = True
def load_ground_plane(self):
"""
Helper to quickly load a preset ground plane prim.
"""
stage = omni.usd.get_context().get_stage()
path = omni.usd.get_stage_next_free_path(stage, "/GroundPlane", False)
Utils.add_ground_plane(stage, path, self._up_axis)
def get_unit_scale(self, stage):
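        # With the stage configured for centimeters (metersPerUnit = 0.01, as set
        # in __init__), lengthScale works out to 100 length units per meter;
        # massScale is derived the same way from kilogramsPerUnit.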
metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
lengthScale = 1.0 / metersPerUnit
kilogramsPerUnit = UsdPhysics.GetStageKilogramsPerUnit(stage)
massScale = 1.0 / kilogramsPerUnit
return UnitScale(lengthScale, massScale)
def load_sample_vehicle(self):
"""
        Load a preset vehicle from a USD data provider shipped with the extension.
"""
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
vehicleData = VehicleWizard.VehicleData(self.get_unit_scale(stage),
VehicleWizard.VehicleData.AXIS_Y, VehicleWizard.VehicleData.AXIS_Z)
root_vehicle_path = self.ROOT_PATH + VehicleWizard.VEHICLE_ROOT_BASE_PATH
root_vehicle_path = omni.usd.get_stage_next_free_path(stage, root_vehicle_path, True)
root_shared_path = self.ROOT_PATH + VehicleWizard.SHARED_DATA_ROOT_BASE_PATH
        root_shared_path = omni.usd.get_stage_next_free_path(stage, root_shared_path, True)
vehicleData.rootVehiclePath = root_vehicle_path
vehicleData.rootSharedPath = root_shared_path
(success, (messageList, scenePath)) = PhysXVehicleWizardCreateCommand.execute(vehicleData)
        assert success
        assert not messageList
        assert scenePath
return root_vehicle_path
def load_sample_track(self):
"""
        Load a sample BasisCurves prim serialized in USD.
"""
usd_context = omni.usd.get_context()
ext_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id)
basis_curve_prim_path = "/BasisCurves"
basis_curve_prim_path = omni.usd.get_stage_next_free_path(
usd_context.get_stage(),
basis_curve_prim_path,
True
)
basis_curve_usd_path = f"{ext_path}/data/usd/curve.usd"
omni.kit.commands.execute(
"CreateReferenceCommand",
path_to=basis_curve_prim_path,
asset_path=basis_curve_usd_path,
usd_context=usd_context,
)
def load_forklift_rig(self):
"""Load a forklift model from USD with already exisitng physx vehicle rig."""
usd_context = omni.usd.get_context()
ext_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id)
forklift_prim_path = "/ForkliftRig"
forklift_prim_path = omni.usd.get_stage_next_free_path(
usd_context.get_stage(),
forklift_prim_path,
True
)
vehicle_usd_path = f"{ext_path}/data/usd/forklift/forklift_rig.usd"
omni.kit.commands.execute(
"CreateReferenceCommand",
path_to=forklift_prim_path,
asset_path=vehicle_usd_path,
usd_context=usd_context,
)
return forklift_prim_path
def load_preset_scene(self):
"""
Loads a preset scene with vehicle template and predefined curve for
path tracking.
"""
default_prim_path = self.ROOT_PATH
stage = omni.usd.get_context().get_stage()
if not stage.GetPrimAtPath(default_prim_path):
omni.kit.commands.execute(
"CreatePrim", prim_path=default_prim_path,
prim_type="Xform", select_new_prim=True, attributes={}
)
stage.SetDefaultPrim(stage.GetPrimAtPath(default_prim_path))
self.load_ground_plane()
vehicle_prim_path = self.load_sample_vehicle()
self.load_sample_track()
metadata_vehicle_to_curve = self.get_attachment_presets(vehicle_prim_path)
self.attach_preset_metadata(metadata_vehicle_to_curve)
def get_attachment_presets(self, vehicle_path):
"""
        Returns the prim paths used by the preset scene for the vehicle-to-curve
        attachment.
"""
stage = omni.usd.get_context().get_stage()
vehicle_prim = stage.GetPrimAtPath(vehicle_path)
metadata = vehicle_prim.GetCustomData()
# Vehicle-to-Curve attachment of the preset is stored in the metadata.
attachment_preset = metadata.get(self._METADATA_KEY)
        if not attachment_preset:
# Fallback to defaults
attachment_preset = {
"WizardVehicle": vehicle_path,
"BasisCurve": "/World/BasisCurves/BasisCurves"
}
return attachment_preset
def get_lookahead_distance(self):
return self._lookahead_distance
def update_lookahead_distance(self, distance):
"""Updates the lookahead distance parameter for pure pursuit"""
clamped_distance = max(
self._min_lookahead_distance,
min(self._max_lookahead_distance, distance)
)
for scenario_manager in self._scenario_managers:
scenario_manager.scenario.set_lookahead_distance(clamped_distance)
return clamped_distance
| 11,903 | Python | 38.287129 | 120 | 0.614971 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/debug_draw.py | import carb
from omni.debugdraw import get_debug_draw_interface
"""
Note: DebugRenderer relies on `omni.debugdraw` utility to optionally provide
a debug overlay with additional info regarding current state of vehicle,
path tracking destination etc.
Using omni.ui.scene would be more future proof as it will break
dependency on `omni.debugdraw` which may change or not guaranteed to be
kept in the future in Kit-based apps.
"""
class DebugRenderer():
def __init__(self, vehicle_bbox_size):
self._debug_draw = get_debug_draw_interface()
self._curr_time = 0.0
self._color = 0x60FF0000
self._line_thickness = 2.0
self._size = max(vehicle_bbox_size)
self._enabled = True
# update_stream = omni.kit.app.get_app().get_update_event_stream()
# self._update_sub = update_stream.create_subscription_to_pop(self._on_update, name="omni.physx update")
def _draw_segment(self, start, end, color, thickness):
self._debug_draw.draw_line(
carb.Float3(start[0], start[1], start[2]),
color, thickness,
carb.Float3(end[0], end[1], end[2]),
color, thickness
)
def update_path_tracking(self, front_axle_pos, rear_axle_pos, forward, dest_pos):
if not self._enabled:
return
color = 0xFF222222
thickness = 10.0
self._draw_segment(rear_axle_pos, dest_pos, color, thickness)
color = 0xFF00FA9A
self._draw_segment(rear_axle_pos, front_axle_pos, color, thickness)
def update_vehicle(self, vehicle):
if not self._enabled:
return
curr_vehicle_pos = vehicle.curr_position()
forward = vehicle.forward()
up = vehicle.up()
t = self._line_thickness * 2
x = curr_vehicle_pos[0]
y = curr_vehicle_pos[1]
z = curr_vehicle_pos[2]
s = self._size / 2
# Draw forward
self._debug_draw.draw_line(
carb.Float3(x, y, z),
0xFF0000FF, t,
carb.Float3(x + s * forward[0], y + s * forward[1], z + s * forward[2]),
0xFF0000FF, t
)
# Draw up
self._debug_draw.draw_line(
carb.Float3(x, y, z),
0xFF00FF00, t,
carb.Float3(x + s * up[0], y + s * up[1], z + s * up[2]),
0xFF00FF00, t
)
# /!\ Uncomment additional debug overlay drawing below if needed
# Draw axle axis connecting front to rear
# af = vehicle.axle_front()
# ar = vehicle.axle_rear()
# axle_color = 0xFF8A2BE2
# self._debug_draw.draw_line(
# carb.Float3(af[0], af[1], af[2]),
# axle_color, t*4,
# carb.Float3(ar[0], ar[1], ar[2]),
# axle_color, t*4
# )
# Draw front axle
# fl = vehicle.wheel_pos_front_left()
# fr = vehicle.wheel_pos_front_right()
# front_axle_color = 0xFFFF0000
# self._debug_draw.draw_line(
# carb.Float3(fl[0], fl[1], fl[2]),
# front_axle_color, t*2,
# carb.Float3(fr[0], fr[1], fr[2]),
# front_axle_color, t*2
# )
# Draw rear axle
# rl = vehicle.wheel_pos_rear_left()
# rr = vehicle.wheel_pos_rear_right()
# rear_axle_color = 0xFFAAAAAA
# self._debug_draw.draw_line(
# carb.Float3(rl[0], rl[1], rl[2]),
# rear_axle_color, t*2,
# carb.Float3(rr[0], rr[1], rr[2]),
# rear_axle_color, t*2
# )
def update_path_to_dest(self, vehicle_pos, dest_pos):
if not self._enabled:
return
if dest_pos:
self._debug_draw.draw_line(
carb.Float3(vehicle_pos[0], vehicle_pos[1], vehicle_pos[2]), self._color, self._line_thickness,
carb.Float3(dest_pos[0], dest_pos[1], dest_pos[2]), self._color, self._line_thickness
)
def enable(self, value):
self._enabled = value
| 4,039 | Python | 32.666666 | 112 | 0.549641 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/ui.py | import omni.ui as ui
from typing import List
DEFAULT_BTN_HEIGHT = 22
COLLAPSABLE_FRAME_HEIGHT = 32
LINE_HEIGHT = 32
LABEL_WIDTH = 150
LABEL_INNER_WIDTH = 70
ELEM_MARGIN = 4
BTN_WIDTH = 32
VSPACING = ELEM_MARGIN * 2
BORDER_RADIUS = 4
CollapsableFrameStyle = {
"CollapsableFrame": {
"background_color": 0xFF333333,
"secondary_color": 0xFF333333,
"color": 0xFF00b976,
"border_radius": BORDER_RADIUS,
"border_color": 0x0,
"border_width": 0,
"font_size": 14,
"padding": ELEM_MARGIN * 2,
"margin_width": ELEM_MARGIN,
"margin_height": ELEM_MARGIN,
},
"CollapsableFrame:hovered": {"secondary_color": 0xFF3C3C3C},
"CollapsableFrame:pressed": {"secondary_color": 0xFF333333},
"Button": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
"Button:selected": {"background_color": 0xFF666666},
"Button.Label:disabled": {"color": 0xFF888888},
"Slider": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
"Slider:disabled": {"color": 0xFF888888},
"ComboBox": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
"Label": {"margin_height": 0, "margin_width": ELEM_MARGIN},
"Label:disabled": {"color": 0xFF888888},
}
TREE_VIEW_STYLE = {
"TreeView:selected": {"background_color": 0x66FFFFFF},
"TreeView.Item": {"color": 0xFFCCCCCC},
"TreeView.Item:selected": {"color": 0xFFCCCCCC},
"TreeView.Header": {"background_color": 0xFF000000},
}
IMPORTANT_BUTTON_STYLE = {
"Button": {
"background_color": 0x7000b976
}
}
class AttachedItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
class AttachmentModel(ui.AbstractItemModel):
"""
    Represents the list of active vehicle-to-curve attachments.
It is used to make a single level tree appear like a simple list.
"""
def __init__(self, items: List[object]):
super().__init__()
self.attachments_changed(items)
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
            # Since we are building a flat list, only the root has children.
            # For any non-root item we return an empty list.
return []
return self._attachments
def get_item_value_model_count(self, item):
"""The number of columns"""
return 1
def get_item_value_model(self, item, column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel.
"""
if item and isinstance(item, AttachedItem):
return item.name_model
def attachments_changed(self, attachments):
self._attachments = []
        for i, attachment in enumerate(attachments, start=1):
            self._attachments.append(AttachedItem(f"[{i}] {attachment}"))
self._item_changed(None)
class ExtensionUI():
def __init__(self, controller):
self._controller = controller
def build_ui(self, lookahead_distance, attachments):
self._window = ui.Window("Vehicle Path Tracking Extension (Beta)", width=300, height=300)
with self._window.frame:
with ui.HStack():
# Column #1
with ui.VStack():
self._settings_frame = ui.CollapsableFrame(
"SETTINGS", collapsed=False,
height=COLLAPSABLE_FRAME_HEIGHT,
style=CollapsableFrameStyle
)
with self._settings_frame:
with ui.VStack():
width = 64
height = 16
with ui.HStack(width=width, height=height):
ui.Label("Enable debug: ")
enable_debug_checkbox = ui.CheckBox()
enable_debug_checkbox.model.add_value_changed_fn(
self._controller._changed_enable_debug
)
ui.Spacer(height=LINE_HEIGHT/4)
ui.Label("REFERENCE COORDINATE SYSTEM: Up-axis: Y-axis (fixed)")
ui.Spacer(height=LINE_HEIGHT/4)
with ui.HStack(width=width, height=height):
ui.Label("Pure Pursuit look ahead distance: ")
self._lookahead_field = ui.FloatField(width=64.0)
self._lookahead_field.model.set_value(lookahead_distance)
self._lookahead_field.model.add_end_edit_fn(self._notify_lookahead_distance_changed)
with ui.HStack(width=width, height=height):
ui.Label("Trajectory Loop:")
self._checkbox_trajectory_loop = ui.CheckBox(name="TracjectoryLoop")
self._checkbox_trajectory_loop.model.set_value(False)
self._checkbox_trajectory_loop.model.add_value_changed_fn(
self._controller._on_trajectory_loop_value_changed
)
# FIXME: Fix regression in rear steering behaviour.
# (Issue #13)
# with ui.HStack(width=width, height=height):
# ui.Label("Enable rear steering:")
# self._checkbox_rear_steering = ui.CheckBox(name="RearSteering")
# self._checkbox_rear_steering.model.set_value(False)
# self._checkbox_rear_steering.model.add_value_changed_fn(
# self._controller._on_steering_changed
# )
self._controls_frame = ui.CollapsableFrame("CONTROLS",
collapsed=False,
height=COLLAPSABLE_FRAME_HEIGHT,
style=CollapsableFrameStyle
)
with self._controls_frame:
with ui.HStack():
with ui.VStack():
ui.Button(
"Start Scenario",
clicked_fn=self._controller._on_click_start_scenario,
height=DEFAULT_BTN_HEIGHT,
style=IMPORTANT_BUTTON_STYLE
)
ui.Spacer(height=LINE_HEIGHT/8)
ui.Button(
"Stop Scenario",
clicked_fn=self._controller._on_click_stop_scenario,
height=DEFAULT_BTN_HEIGHT,
style=IMPORTANT_BUTTON_STYLE
)
ui.Line(height=LINE_HEIGHT/2)
ui.Button(
"Load a preset scene",
clicked_fn=self._controller._on_click_load_preset_scene,
height=DEFAULT_BTN_HEIGHT
)
ui.Line(height=LINE_HEIGHT/2)
ui.Button(
"Load a ground plane",
clicked_fn=self._controller._on_click_load_ground_plane,
height=DEFAULT_BTN_HEIGHT
)
ui.Spacer(height=LINE_HEIGHT/8)
ui.Button(
"Load a sample vehicle template",
clicked_fn=self._controller._on_click_load_sample_vehicle,
height=DEFAULT_BTN_HEIGHT
)
ui.Spacer(height=LINE_HEIGHT/8)
ui.Button(
"Load a sample BasisCurve",
clicked_fn=self._controller._on_click_load_basis_curve,
height=DEFAULT_BTN_HEIGHT
)
# FIXME: re-enable Forklift once the new updated
# meta-data for it will be provided.
# ui.Spacer(height=LINE_HEIGHT/8)
# ui.Button(
# "Load a Forklift",
# clicked_fn=self._controller._on_click_load_forklift,
# height=DEFAULT_BTN_HEIGHT
# )
self._atachments_frame = ui.CollapsableFrame(
"VEHICLE-TO-CURVE ATTACHMENTS",
collapsed=False, height=COLLAPSABLE_FRAME_HEIGHT,
style=CollapsableFrameStyle
)
with self._atachments_frame:
with ui.VStack():
ui.Label(
"(1) Select WizardVehicle Xform and corresponding BasisCurve;\n(2) Click 'Attach Selected'",
width=32
)
ui.Spacer(height=LINE_HEIGHT/8)
ui.Button(
"Attach Selected",
clicked_fn=self._controller._on_click_attach_selected,
height=DEFAULT_BTN_HEIGHT,
style=IMPORTANT_BUTTON_STYLE
)
ui.Spacer(height=LINE_HEIGHT/8)
ui.Button(
"Clear All Attachments",
clicked_fn=self._controller._on_click_clear_attachments
)
# Column #2
self._attachments_frame = ui.CollapsableFrame(
"VEHICLE-TO-CURVE attachments", collapsed=False,
height=COLLAPSABLE_FRAME_HEIGHT,
style=CollapsableFrameStyle
)
with self._attachments_frame:
with ui.VStack(direction=ui.Direction.TOP_TO_BOTTOM, height=20, style=CollapsableFrameStyle):
if attachments is not None and len(attachments) > 0:
self._attachment_label = ui.Label(
"Active vehicle-to-curve attachments:",
alignment=ui.Alignment.TOP
)
else:
self._attachment_label = ui.Label("No active vehicle-to-curve attachments")
self._attachment_model = AttachmentModel(attachments)
tree_view = ui.TreeView(
self._attachment_model, root_visible=False,
header_visible=False,
style={"TreeView.Item": {"margin": 4}}
)
# viewport = ui.Workspace.get_window("Viewport")
# self._window.dock_in(viewport, ui.DockPosition.BOTTOM)
# Dock extension window alongside 'Property' extension.
self._window.deferred_dock_in("Property")
        # dock_in_window is unfortunately deprecated
# self._window.dock_in_window("Viewport", ui.DockPosition.RIGHT, ratio=0.1)
def teardown(self):
self._controller = None
self._settings_frame = None
self._controls_frame = None
self._atachments_frame = None
self._window = None
def get_lookahead_distance(self):
return self._lookahead_field.model.as_float
def set_lookahead_distance(self, distance):
self._lookahead_field.model.set_value(distance)
def _notify_lookahead_distance_changed(self, model):
self._controller._on_lookahead_distance_changed(model.as_float)
def update_attachment_info(self, attachments):
self._attachment_model.attachments_changed(attachments)
if len(attachments) > 0:
self._attachment_label.text = "Active vehicle-to-curve attachments:"
else:
self._attachment_label.text = "No active vehicle-to-curve attachments"
| 13,127 | Python | 45.553191 | 124 | 0.475585 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/tests/test_extension_model.py | import omni.kit.app
import omni.kit.commands
import omni.usd
from omni.kit.test import AsyncTestCaseFailOnLogError
# from omni.kit.test_suite.helpers import wait_stage_loading
from ..scripts.model import ExtensionModel
# ======================================================================================================================
class TestExtensionModel(AsyncTestCaseFailOnLogError):
async def setUp(self):
usd_context = omni.usd.get_context()
await usd_context.new_stage_async()
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_manager.get_enabled_extension_id("ext.path.tracking")
self._DEFAULT_LOOKAHEAD = 550.0
self._MAX_LOOKAHEAD = 1200.0
self._MIN_LOOKAHEAD = 300.0
async def tearDown(self):
self._ext_id = None
async def test_load_preset(self):
ext_model = ExtensionModel(self._ext_id,
default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
max_lookahed_distance=self._MAX_LOOKAHEAD,
min_lookahed_distance=self._MIN_LOOKAHEAD
)
ext_model.load_preset_scene()
stage = omni.usd.get_context().get_stage()
ground_plane = stage.GetPrimAtPath("/World/GroundPlane")
vehicle_template = stage.GetPrimAtPath("/World/VehicleTemplate")
curve = stage.GetPrimAtPath("/World/BasisCurves")
        # GetPrimAtPath never returns None; check prim validity instead.
        self.assertTrue(ground_plane.IsValid())
        self.assertTrue(vehicle_template.IsValid())
        self.assertTrue(curve.IsValid())
async def test_hello(self):
ext_model = ExtensionModel(self._ext_id,
default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
max_lookahed_distance=self._MAX_LOOKAHEAD,
min_lookahed_distance=self._MIN_LOOKAHEAD
)
async def test_attachments_preset(self):
# TODO: provide impl
self.assertTrue(True) | 2,139 | Python | 37.90909 | 120 | 0.57223 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/tests/__init__.py | try:
from .test_extension_model import *
except ImportError:
import carb
carb.log_error("No tests for this module, check extension settings")
| 142 | Python | 22.83333 | 72 | 0.711268 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/config/extension.toml | [package]
version = "1.0.2-beta"
title = "Vehicle Path Tracking Extension"
description="Allows omni.physxvehicle to move along a user-defined trajectory via path tracking 'pure pursuit' inspired algorithm."
readme = "docs/index.rst"
changelog="docs/CHANGELOG.md"
repository = ""
icon = "data/icon.png"
preview_image="data/preview.png"
keywords = ["kit", "omni.physxvehicle", "animation", "path", "tracking", "vehicle"]
[dependencies]
"omni.usd" = {}
"omni.kit.uiapp" = {}
"omni.physx" = {}
"omni.physx.ui" = {}
"omni.physx.vehicle" = {}
"omni.usdphysics" = {}
"omni.physx.commands" = {}
"omni.kit.test_suite.helpers" = {}
[[python.module]]
name = "ext.path.tracking"
[[test]]
args = [
"--/renderer/enabled=pxr",
"--/renderer/active=pxr",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--no-window"
]
dependencies = [
"omni.hydra.pxr",
"omni.kit.mainwindow",
"omni.kit.widget.stage",
"omni.kit.window.viewport",
"omni.kit.window.stage",
"omni.kit.window.console",
"omni.kit.test_suite.helpers",
] | 1,079 | TOML | 24.714285 | 131 | 0.658017 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/docs/CHANGELOG.md | # Changelog
## [1.0.2-beta] - 2023-01-29
### Changes
- Fixed regression in preset vehicle scene after Kit 104 updates;
- Temporarily removed the forklift model from simulation templates (Kit 104 regression);
- Temporarily removed the UI control for selecting the rear steering option (Kit 104 regression).
## [1.0.0] - 2022-08-18
### Changes
- Created initial vehicle path tracking extension for Nvidia Omniverse Developer Contest
| 428 | Markdown | 34.749997 | 96 | 0.759346 |
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/docs/index.rst | omni.path.tracking
########################
The Omniverse vehicle path tracking extension allows a physics-enabled vehicle created
with the PhysX Vehicle extension (omni.physx.vehicle) to move along and automatically track a user-defined path.
The user-defined path is represented by an instance of USD BasisCurves, and the path tracking
algorithm is inspired by the classic Pure Pursuit algorithm.
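
A minimal sketch of a pure-pursuit steering update is shown below (illustrative only;
the extension's actual implementation lives in ``PurePursuitScenario`` and may differ
in details such as the reference point and sign conventions):

.. code-block:: python

    import math

    def pure_pursuit_steering_angle(rear_axle, lookahead_point, heading, wheel_base):
        """Steering angle that drives the rear axle toward a lookahead point."""
        dx = lookahead_point[0] - rear_axle[0]
        dy = lookahead_point[1] - rear_axle[1]
        # Angle between the vehicle heading and the direction to the lookahead point.
        alpha = math.atan2(dy, dx) - heading
        lookahead_distance = math.hypot(dx, dy)
        # Classic pure-pursuit curvature-to-steering-angle relation.
        return math.atan2(2.0 * wheel_base * math.sin(alpha), lookahead_distance)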
The fastest way to evaluate how the vehicle path tracking extension works is to use a preset vehicle and curve.
In order to get started with the preset configuration, please proceed as follows:
1. Click `Load a preset scene` button
2. Click `Start scenario` button
---
Extension supports path tracking for any Omniverse PhysX Vehicle.
One could load a template vehicle using the extension ui, or using a conventional method via `Create`->`Physics`->`Vehicle`.
It is also straightforward to add a custom mesh and materials to a physics vehicle.
You can create a curve for vehicle path tracking using either of the following methods:
- `Create`->`BasisCurves`->`From Bezier`
- `Create`->`BasisCurves`->`From Pencil`
---
Once a physics vehicle and a path to track (defined by a USD BasisCurves prim) have been created, select the WizardVehicle and the BasisCurves prims in the stage (via Ctrl-click)
and click the `Attach Selected` button. Note that it is very important to select specifically the `WizardVehicle` prim in the scene,
not `WizardVehicle/Vehicle`, for instance.
If the vehicle-to-curve attachment was successful, it will be reflected in the
extension UI.
Once vehicle-to-curve attachment(s) are created, proceed by clicking the `Start Scenario` button.
To remove all existing vehicle-to-curve attachments, click `Clear All Attachments`. | 1,731 | reStructuredText | 47.11111 | 167 | 0.783362 |
ericcraft-mh/omniverse-resources/README.md | ## USD Resources
###### Pixar
[USD](https://graphics.pixar.com/usd/release/index.html)</br>
[Universal Scene Description (USD) API](https://graphics.pixar.com/usd/release/api/index.html)
###### NVIDIA Developer
[USD](https://developer.nvidia.com/usd)</br>
[Working with USD Python Libraries](https://developer.nvidia.com/usd/tutorials)</br>
[USD Python API Notes](https://developer.nvidia.com/usd/apinotes)
## Omniverse Resources
###### NVIDIA
[Omniverse Documentation Site](https://docs.omniverse.nvidia.com/)</br>
[Omniverse Utilities](https://docs.omniverse.nvidia.com/prod_utilities/prod_utilities/overview.html) Helpful utilities in the Omniverse.</br>
[Omniverse Workflows](https://docs.omniverse.nvidia.com/prod_workflows/prod_workflows/overview.html) Objective based tutorials using Omniverse.</br>
[Omniverse Kit API](https://docs.omniverse.nvidia.com/py/kit/index.html)</br>
[Frequently Used Python Snippets](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/reference_python_snippets.html)</br>
NVIDIA On-Demand: [Omniverse Video Lists](https://docs.omniverse.nvidia.com/plat_omniverse/common/video-list.html) [^1]</br>
[Omniverse Forums](https://forums.developer.nvidia.com/c/omniverse/300)
###### Third Party
[Official Omniverse Channel](https://discord.com/invite/nvidiaomniverse) (Discord)</br>
[omniverse-kit-extension](https://github.com/topics/omniverse-kit-extension) (GitHub)</br>
[**PHYSICALLY**BASED](https://physicallybased.info/) A database of physically based values for CG artists [^2]</br>
[NVIDIA Omniverse Channel](https://www.youtube.com/c/NVIDIAOmniverse) (YouTube)</br>
NVIDIA Studio: [Omniverse Search](https://www.youtube.com/channel/UCDeQdW6Lt6nhq3mLM4oLGWw/search?query=Omniverse) (YouTube)</br>
NVIDIA: [Omniverse Search](https://www.youtube.com/c/NVIDIA/search?query=Omniverse) (YouTube)</br>
[PathCopyCopy](https://pathcopycopy.github.io/) [^3]
## Visual Studio Code
[Visual Studio Code](https://code.visualstudio.com/)
###### Visual Studio Code Extensions
Fully-featured TOML support: [Even Better TOML](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml)</br>
Pixar USD Language Extension by Animal Logic: [USD Language](https://marketplace.visualstudio.com/items?itemName=AnimalLogic.vscode-usda-syntax)</br>
Material Definition Language by NVIDIA: [vscode-mdl-language](https://marketplace.visualstudio.com/items?itemName=OmerShapira.mdl)</br>
[^1]: NVIDIA Account may be required to access content.
[^2]: Includes Omniverse Engine values.
[^3]: Provides a way to copy Omniverse compliant UNIX paths.
| 2,580 | Markdown | 72.742855 | 149 | 0.770155 |
terrylincn/omniverse-tutorials/README.md | # omniverse-tutorials</br>
animatedTop: Pixar's spinning top example program</br>
code_demo_mesh100: script demonstrating code-driven control of 100 spheres</br>
kaolin_data_generator_patch: kaolin 2021.2.0 bug fix for dirb_tutorials
| 163 | Markdown | 31.799994 | 70 | 0.803681 |
terrylincn/omniverse-tutorials/animatedTop/generate_examples.py | # This is an example script from the USD tutorial,
# "Transformations, Time-sampled Animation, and Layer Offsets".
#
# When run, it will generate a series of usda files in the current
# directory that illustrate each of the steps in the tutorial.
#
from pxr import Usd, UsdGeom, Gf, Sdf
def MakeInitialStage(path):
stage = Usd.Stage.CreateNew(path)
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
stage.SetStartTimeCode(0)
stage.SetEndTimeCode(192)
return stage
def Step1():
stage = MakeInitialStage('Step1.usda')
stage.SetMetadata('comment', 'Step 1: Start and end time codes')
stage.Save()
def AddReferenceToGeometry(stage, path):
geom = UsdGeom.Xform.Define(stage, path)
geom.GetPrim().GetReferences().AddReference('./top.geom.usd')
return geom
def Step2():
stage = MakeInitialStage('Step2.usda')
stage.SetMetadata('comment', 'Step 2: Geometry reference')
top = AddReferenceToGeometry(stage, '/Top')
stage.Save()
def AddSpin(top):
spin = top.AddRotateZOp(opSuffix='spin')
spin.Set(time=0, value=0)
spin.Set(time=192, value=1440)
def Step3():
stage = MakeInitialStage('Step3.usda')
stage.SetMetadata('comment', 'Step 3: Adding spin animation')
top = AddReferenceToGeometry(stage, '/Top')
AddSpin(top)
stage.Save()
def AddTilt(top):
tilt = top.AddRotateXOp(opSuffix='tilt')
tilt.Set(value=12)
def Step4():
stage = MakeInitialStage('Step4.usda')
stage.SetMetadata('comment', 'Step 4: Adding tilt')
top = AddReferenceToGeometry(stage, '/Top')
AddTilt(top)
AddSpin(top)
stage.Save()
def Step4A():
stage = MakeInitialStage('Step4A.usda')
stage.SetMetadata('comment', 'Step 4A: Adding spin and tilt')
top = AddReferenceToGeometry(stage, '/Top')
AddSpin(top)
AddTilt(top)
stage.Save()
def AddOffset(top):
top.AddTranslateOp(opSuffix='offset').Set(value=(0, 0.1, 0))
def AddPrecession(top):
precess = top.AddRotateZOp(opSuffix='precess')
precess.Set(time=0, value=0)
precess.Set(time=192, value=360)
def Step5():
stage = MakeInitialStage('Step5.usda')
stage.SetMetadata('comment', 'Step 5: Adding precession and offset')
top = AddReferenceToGeometry(stage, '/Top')
AddPrecession(top)
AddOffset(top)
AddTilt(top)
AddSpin(top)
stage.Save()
def Step6():
# Use animated layer from Step5
anim_layer_path = './Step5.usda'
stage = MakeInitialStage('Step6.usda')
stage.SetMetadata('comment', 'Step 6: Layer offsets and animation')
left = UsdGeom.Xform.Define(stage, '/Left')
left_top = UsdGeom.Xform.Define(stage, '/Left/Top')
left_top.GetPrim().GetReferences().AddReference(
assetPath = anim_layer_path,
primPath = '/Top')
middle = UsdGeom.Xform.Define(stage, '/Middle')
middle.AddTranslateOp().Set(value=(2, 0, 0))
middle_top = UsdGeom.Xform.Define(stage, '/Middle/Top')
middle_top.GetPrim().GetReferences().AddReference(
assetPath = anim_layer_path,
primPath = '/Top',
layerOffset = Sdf.LayerOffset(offset=96))
right = UsdGeom.Xform.Define(stage, '/Right')
right.AddTranslateOp().Set(value=(4, 0, 0))
right_top = UsdGeom.Xform.Define(stage, '/Right/Top')
right_top.GetPrim().GetReferences().AddReference(
assetPath = anim_layer_path,
primPath = '/Top',
layerOffset = Sdf.LayerOffset(scale=0.25))
stage.Save()
if __name__ == '__main__':
Step1()
Step2()
Step3()
Step4()
Step4A()
Step5()
Step6()
| 3,547 | Python | 29.852174 | 72 | 0.666479 |
terrylincn/omniverse-tutorials/kaolin_data_generator_patch/extension.py | import os
import re
import json
import random
import asyncio
import posixpath
import threading
import webbrowser
from queue import Queue
import glob
from functools import partial
import pathlib
import carb
import omni.ext
import omni.syntheticdata as sd
from omni import ui
from carb import settings
from pxr import Usd, UsdGeom, UsdShade, UsdLux, Vt, Gf, Sdf, Tf, Semantics
import numpy as np
import omni.syntheticdata as sd
from omni.kit.pointcloud_generator import PointCloudGenerator
from kaolin_app.research import utils
from .utils import (
delete_sublayer,
omni_shader,
bottom_to_elevation,
save_to_log,
save_numpy_array,
save_image,
save_pointcloud,
wait_for_loaded,
)
from .sensors import _build_ui_sensor_selection
from .ui import build_component_frame
from .dr_components import sample_component
_extension_instance = None
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
CACHE = os.path.join(FILE_DIR, ".cache")
EXTENSION_NAME = "Data Generator"
SCENE_PATH = "/World/visualize"
NUM_WORKERS = 10
VALID_EXTENSIONS = ["*.usd", "*.usda", "*.usdc"]
RENDERERS = ["RaytracedLighting", "PathTracing"]
CAMERAS = ["UniformSampling", "Trajectory"]
TRAJ_OPTIONS = ["Spiral", "CustomJson"]
DEMO_URL = "https://docs.omniverse.nvidia.com/app_kaolin/app_kaolin/user_manual.html#data-generator"
MAX_RESOLUTION = {"width": 7680, "height": 4320}
MIN_RESOLUTION = {"width": 1024, "height": 1024}
DR_COMPONENTS = [
"LightComponent",
"MovementComponent",
"RotationComponent",
"ColorComponent",
"TextureComponent",
"MaterialComponent",
"VisibilityComponent",
]
class KaolinDataGeneratorError(Exception):
pass
class IOWorkerPool:
def __init__(self, num_workers: int):
self.save_queue = Queue()
for _ in range(num_workers):
t = threading.Thread(target=self._do_work)
t.start()
def add_to_queue(self, data: object):
self.save_queue.put(data)
def _do_work(self):
while True:
fn = self.save_queue.get(block=True)
fn()
self.save_queue.task_done()
class Extension(omni.ext.IExt):
def __init__(self):
self.root_dir = None
self._ref_idx = 0
self._filepicker = None
self._outpicker = None
self._configpicker = None
self._jsonpicker = None
self.camera = None
self._preset_layer = None
self.dr_components = {}
self.asset_list = None
self._progress_tup = None
self.option_frame = None
self.config = {}
self.start_config = {}
def get_name(self):
return EXTENSION_NAME
def on_startup(self, ext_id: str):
global _extension_instance
_extension_instance = self
self._settings = carb.settings.get_settings()
self.progress = None
self._context = omni.usd.get_context()
self._window = ui.Window(EXTENSION_NAME, width=500, height=500)
self._menu_entry = omni.kit.ui.get_editor_menu().add_item(
f"Window/Kaolin/{EXTENSION_NAME}", self._toggle_menu, toggle=True, value=True
)
self._preview_window = ui.Window("Preview", width=500, height=500)
self._preview_window.deferred_dock_in("Property")
self._preview_window.visible = False
self._filepicker = omni.kit.window.filepicker.FilePickerDialog(
"Select Asset(s)",
click_apply_handler=lambda f, d: self._on_filepick(f, d),
apply_button_label="Open",
item_filter_options=["usd", "usda", "usdc"],
)
self._filepicker.hide()
self._outpicker = omni.kit.window.filepicker.FilePickerDialog(
"Select Output Directory",
click_apply_handler=lambda _, x: self._on_outpick(x),
apply_button_label="Select",
enable_filename_input=False,
)
self._outpicker.hide()
self._configpicker = omni.kit.window.filepicker.FilePickerDialog(
"Import Preset",
click_apply_handler=self._on_load_config,
apply_button_label="Open",
item_filter_options=["usda"],
)
self._configpicker.hide()
self._jsonpicker = omni.kit.window.filepicker.FilePickerDialog(
"Import Json trajectory file",
click_apply_handler=lambda f, d: asyncio.ensure_future(
self._import_trajectory_from_json(posixpath.join(d, f))
),
apply_button_label="Open",
item_filter_fn=self._on_filter_json,
)
self._jsonpicker.hide()
self._configsaver = omni.kit.window.filepicker.FilePickerDialog(
"Save Preset As...",
click_apply_handler=self._on_save_config,
apply_button_label="Save",
item_filter_options=["usda"],
)
cache = {}
if not os.path.exists(CACHE):
os.makedirs(CACHE, exist_ok=True)
if posixpath.exists(os.path.join(CACHE, ".log")):
with open(os.path.join(CACHE, ".log"), "r") as f:
cache = json.load(f)
self._cache = cache
self._hide_filepickers()
self.start_config = self._set_start_config()
self.presets = [str(pathlib.Path(p).as_posix()) for p in glob.glob(posixpath.join(FILE_DIR, "presets/*.usda"))]
self.stage_events_sub = self._context.get_stage_event_stream().create_subscription_to_pop(self._on_stage_event)
self.sdv = sd.Extension.get_instance()
self._vp_iface = omni.kit.viewport.get_viewport_interface()
self.timeline = omni.timeline.get_timeline_interface()
self._build_ui()
def on_shutdown(self):
global _extension_instance
_extension_instance = None
if self._preset_layer:
delete_sublayer(self._preset_layer)
self.progress = None
if self._window:
del self._window
if self._filepicker:
self._filepicker = None
if self._outpicker:
self._outpicker = None
if self._configpicker:
self._configpicker = None
if self._jsonpicker:
self._jsonpicker = None
def _toggle_menu(self, *args):
self._window.visible = not self._window.visible
def clear(self):
if self._preset_layer:
delete_sublayer(self._preset_layer)
# reset resolution
self._settings.set("/app/renderer/resolution/width", self.start_config["width"])
self._settings.set("/app/renderer/resolution/height", self.start_config["height"])
# reset rendering mode
self._settings.set("/rtx/rendermode", self.start_config["renderer"])
self._settings.set("/rtx-defaults/pathtracing/clampSpp", self.start_config["clampSpp"])
self._settings.set("/rtx-defaults/pathtracing/totalSpp", self.start_config["totalSpp"])
self._settings.set("/rtx/post/aa/op", self.start_config["aa"])
def _on_stage_event(self, e):
pass
def _reset(self):
self._ref_idx = 0
self.asset_list = None
def _show_filepicker(self, filepicker, default_dir: str = "", default_file: str = ""):
cur_dir = filepicker.get_current_directory()
show_dir = cur_dir if cur_dir else default_dir
filepicker.show(show_dir)
filepicker.set_filename(default_file)
def _hide_filepickers(self):
        # Hide all filepickers; register hide() as each dialog's cancel handler.
        self._filepicker.hide()
        self._outpicker.hide()
        self._jsonpicker.hide()
        self._configpicker.hide()
        self._configsaver.hide()
        self._filepicker._click_cancel_handler = self._filepicker.hide
        self._outpicker._click_cancel_handler = self._outpicker.hide
        self._jsonpicker._click_cancel_handler = self._jsonpicker.hide
        self._configpicker._click_cancel_handler = self._configpicker.hide
        self._configsaver._click_cancel_handler = self._configsaver.hide
def _set_start_config(self):
return {
"width": self._settings.get("/app/renderer/resolution/width"),
"height": self._settings.get("/app/renderer/resolution/height"),
"renderer": self._settings.get("/rtx/rendermode"),
"clampSpp": self._settings.get("/rtx-defaults/pathtracing/clampSpp"),
"totalSpp": self._settings.get("/rtx/pathtracing/totalSpp"),
"aa": self._settings.get("/rtx/post/aa/op"),
}
def _on_filter_json(self, item: omni.kit.widget.filebrowser.filesystem_model.FileSystemItem):
file_exts = ["json", "JSON"]
for fex in file_exts:
if item.name.endswith(fex) or item.is_folder:
return True
async def _import_trajectory_from_json(self, path: str):
""" Import a trajectory from a JSON file in a predefined format. """
trajectory = self._on_load_json(path)
self.config["jsonpath"] = path
assert isinstance(trajectory, list)
assert len(trajectory) > 0
# add trajectory prim
stage = omni.usd.get_context().get_stage()
timestamp_prim = stage.DefinePrim(f"{SCENE_PATH}/timestamp", "Xform")
trajectory_rig = stage.DefinePrim(f"{timestamp_prim.GetPath()}/rig", "Xform")
UsdGeom.Xformable(trajectory_rig).ClearXformOpOrder()
UsdGeom.Xformable(trajectory_rig).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble)
UsdGeom.Xformable(trajectory_rig).AddOrientOp()
# Set translation and orientation according to trajectory
origins, scales, orientations = [], [], []
for idx, entry in enumerate(trajectory):
# Set camera based on time, translation, quaternion in the json file.
trans, quaternion, time = entry["t"], entry["q"], entry["time"]
# The JSON format has different camera coordinate system conventions:
# +X points right, +Y points down, camera faces in +Z.
# Compared to Kit's conventions:
# +X points right, -Y points down, camera faces in -Z.
# So the Y and Z axes need to be flipped, and orientations need to be
# rotated around X by 180 degrees for the coordinate systems to match.
trans[1] = -trans[1] # Flip Y
trans[2] = -trans[2] # Flip Z
# Set translation and orientations according to time.
trajectory_rig.GetAttribute("xformOp:translate").Set(Gf.Vec3d(trans), time=time)
# Both the JSON format and Gf.Quatd use a "scalar first" ordering.
# Flip Y and Z axes.
quaternion[2] = -quaternion[2]
quaternion[3] = -quaternion[3]
trajectory_rig.GetAttribute("xformOp:orient").Set(Gf.Quatf(*quaternion), time=time)
            # Accumulate poses to visualize the trajectory with a PointInstancer
orientation = Gf.Quath(*quaternion).GetNormalized()
orientations.append(orientation)
origins.append(Gf.Vec3d(trans))
scales.append([1.0, 1.0, 1.0])
# Define prim for visualization, each component will be a cone (like 3d vector)
cone_height = 0.03
proto_prim = stage.DefinePrim(f"{SCENE_PATH}/proto", "Xform")
proto_prim.GetAttribute("visibility").Set("invisible")
cone_rig = stage.DefinePrim(f"{proto_prim.GetPath()}/cone", "Xform")
cone = UsdGeom.Cone.Define(stage, (f"{cone_rig.GetPath()}/cone"))
cone.GetRadiusAttr().Set(0.01)
cone.GetHeightAttr().Set(cone_height)
cone.GetAxisAttr().Set("Z")
# cone rig
UsdGeom.Xformable(cone_rig).ClearXformOpOrder()
UsdGeom.Xformable(cone_rig).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble).Set((0.0, cone_height / 2, 0.0))
# Setup point instancer
instancer_prim = stage.DefinePrim(f"{SCENE_PATH}/Viz", "PointInstancer")
instancer = UsdGeom.PointInstancer(instancer_prim)
assert instancer
instancer.CreatePrototypesRel().SetTargets([cone_rig.GetPath()])
# Populate point instancer with the calculated scales, positions, and orientations
instancer.GetPositionsAttr().Set(origins)
instancer.GetScalesAttr().Set(scales)
indices = [0] * len(origins)
instancer.GetProtoIndicesAttr().Set(indices)
instancer.GetOrientationsAttr().Set(orientations)
await self._preview_trajectory()
def _move_camera(self, centre: Gf.Vec3d, azimuth: float, elevation: float, distance: float):
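        # Camera rig layout: the rig translates to the orbit centre and rotates
        # about Y (azimuth), the boom rotates about X (elevation), and the
        # camera itself is pushed back along Z by `distance`.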
stage = omni.usd.get_context().get_stage()
rig = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig")
boom = stage.GetPrimAtPath(f"{rig.GetPath()}/Boom")
camera = stage.GetPrimAtPath(f"{boom.GetPath()}/Camera")
UsdGeom.Xformable(rig).ClearXformOpOrder()
centre_op = UsdGeom.Xformable(rig).AddTranslateOp()
centre_op.Set(tuple(centre))
rig_rotate_op = UsdGeom.Xformable(rig).AddRotateXYZOp()
rig_rotate_op.Set((0.0, azimuth, 0.0))
UsdGeom.Xformable(boom).ClearXformOpOrder()
boom_rotate_op = UsdGeom.Xformable(boom).AddRotateXYZOp()
boom_rotate_op.Set((-elevation, 0.0, 0.0))
# Reset camera
UsdGeom.Xformable(camera).ClearXformOpOrder()
distance_op = UsdGeom.Xformable(camera).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble)
distance_op.Set((0.0, 0.0, distance))
UsdGeom.Xformable(camera).ComputeLocalToWorldTransform(0)
def _get_value(self, option, default=None):
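        # Config options handled here are dicts of the form
        # {"mode": 0 or 1, "fixed": value, "random": (min, max)}: mode 0 returns
        # the fixed value, mode 1 samples uniformly in [min, max]
        # (component-wise when min/max are lists).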
if option not in self.config:
self.config[option] = default
if self.config[option]["mode"] == 0:
return self.config[option]["fixed"]
else:
v_min, v_max = self.config[option]["random"]
if isinstance(v_min, list):
return [random.random() * (v_max_el - v_min_el) + v_min_el for v_min_el, v_max_el in zip(v_min, v_max)]
else:
return random.random() * (v_max - v_min) + v_min
def _set_trajectory_camera_pose(self, cur_frame: int, num_frames: int):
"""
        Calculate the camera pose from the trajectory, given the current frame and the total number of frames to generate.
"""
stage = omni.usd.get_context().get_stage()
viz_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Viz")
# Match transform of visualization prim
tf = UsdGeom.Xformable(viz_prim).ComputeLocalToWorldTransform(0.0) # .GetInverse()
camera_rig = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig")
UsdGeom.Xformable(camera_rig).ClearXformOpOrder()
UsdGeom.Xformable(camera_rig).AddTransformOp().Set(tf)
trajectory_rig = stage.GetPrimAtPath(f"{SCENE_PATH}/timestamp/rig")
translations = trajectory_rig.GetAttribute("xformOp:translate")
time_samples = translations.GetTimeSamples()
if num_frames <= 1:
cur_time = (time_samples[-1] - time_samples[0]) / 2.0
else:
cur_time = (time_samples[-1] - time_samples[0]) / (num_frames - 1) * cur_frame
translate = trajectory_rig.GetAttribute("xformOp:translate").Get(time=cur_time)
orientation = trajectory_rig.GetAttribute("xformOp:orient").Get(time=cur_time)
UsdGeom.Xformable(self.camera).ClearXformOpOrder()
UsdGeom.Xformable(self.camera).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble).Set(translate)
UsdGeom.Xformable(self.camera).AddOrientOp().Set(orientation)
def _get_spiral_camera_pose(self, frame, total_frames):
"""
        Calculate the camera azimuth and elevation for the current frame of the
        sampling sequence.
"""
distance = self._get_value("distance")
min_ele, max_ele = tuple(self.config["elevation"]["random"])
numrot = self.config["num_rotations"]
if total_frames > 1:
az_step = 360 * numrot / (total_frames - 1)
ele_step = (max_ele - min_ele) / (total_frames - 1)
else:
az_step = 0
ele_step = 0
az = frame * az_step
ele = min_ele + frame * ele_step
return az, ele, distance
def _normalize(self, prim: Usd.Prim):
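        # Uniformly scale the prim so its largest bounding-box dimension becomes
        # 1, then translate so the scaled bounds are centred at the origin.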
prim_range = UsdGeom.Imageable(prim).ComputeLocalBound(0, "default").GetRange()
range_min = prim_range.GetMin()
range_max = prim_range.GetMax()
size = prim_range.GetSize()
sf = 1.0 / max(size)
offset = (range_max + range_min) / 2 * sf
UsdGeom.Xformable(prim).AddTranslateOp().Set(-offset)
UsdGeom.Xformable(prim).AddScaleOp().Set((sf, sf, sf))
def _change_up_axis(self, model):
# TODO type
self.config["up_axis"] = model.as_int
def add_semantics(self, prim: Usd.Prim, semantic_label: str):
if not prim.HasAPI(Semantics.SemanticsAPI):
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
def create_asset_prim(self):
stage = omni.usd.get_context().get_stage()
asset_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset")
if not asset_prim:
asset_prim = stage.DefinePrim(f"{SCENE_PATH}/Asset", "Xform")
rig_prim = stage.GetPrimAtPath(f"{asset_prim.GetPath()}/Rig")
if not rig_prim:
rig_prim = stage.DefinePrim(f"{asset_prim.GetPath()}/Rig", "Xform")
UsdGeom.Xformable(rig_prim).AddTranslateOp()
UsdGeom.Xformable(rig_prim).AddRotateXOp()
translate_op = rig_prim.GetAttribute("xformOp:translate")
if not translate_op:
translate_op = UsdGeom.Xformable(rig_prim).AddTranslateOp()
translate_op.Set((0.0, 0.0, 0.0))
rotatex_op = rig_prim.GetAttribute("xformOp:rotateX")
if not rotatex_op:
UsdGeom.Xformable(rig_prim).AddRotateXOp()
ref_prim = stage.DefinePrim(f"{SCENE_PATH}/Asset/Rig/Preview")
self.add_semantics(ref_prim, "asset")
return asset_prim
async def _run(self):
i = 0
while i < len(self.asset_list):
self.progress["bar1"].set_value(i / len(self.asset_list))
if self.progress["stop_signal"]:
break
load_success = False
# If asset fails to load, remove from list and try the next one
while not load_success and i < len(self.asset_list):
carb.log_info(f"[kaolin_app.research.data_generator] Loading asset {self.asset_list[i]}...")
load_success = await self.load_asset(self.asset_list[i], use_cache=True)
if not load_success:
self.asset_list.pop(i)
if self.progress["stop_signal"]:
break
for j in range(self.config["renders_per_asset"]):
self.progress["bar2"].set_value(j / self.config["renders_per_asset"])
if self.progress["stop_signal"]:
break
app = omni.kit.app.get_app_interface()
await app.next_update_async()
await self.render_asset(j, self.config["renders_per_asset"])
self._preview_window.visible = False
await self._save_gt(i * self.config["renders_per_asset"] + j)
i += 1
self._ref_idx += 1
async def run(self):
root_layer = omni.usd.get_context().get_stage().GetRootLayer()
if len(root_layer.subLayerPaths) == 0 or self._preset_layer != Sdf.Find(root_layer.subLayerPaths[-1]):
self._on_preset_changed(self.presets[self._preset_model.get_item_value_model().as_int], update_config=False)
if not self.config["out_dir"]:
m = self._ui_modal("Output Dir Not Specified", "Please specify an output directory.")
# TODO Notification
return
is_custom_json_mode = (
self.config["cameramode"] == "Trajectory" and self.config["trajectorymode"] == "CustomJson"
)
if is_custom_json_mode and not os.path.exists(self.config.get("jsonpath", "")):
if not self.config.get("jsonpath"):
title = "JSON Path Not Specified"
else:
title = "Invalid JSON Path Specified"
m = self._ui_modal(title, "Please specify a valid path to a trajectory JSON file.")
# TODO Notification
return
# Set small camera near plane
cur_clipping_range = self.camera.GetAttribute("clippingRange").Get()
self.camera.GetAttribute("clippingRange").Set((0.01, cur_clipping_range[1]))
# Hide path visualization if exists
if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"):
self._set_visible(f"{SCENE_PATH}/Viz", False)
# Set SPP per config
self._settings.set("/rtx/pathtracing/spp", self.config["spp"])
# Capture scene state
cur_sel = omni.usd.get_context().get_selection().get_selected_prim_paths()
display_mode = self._settings.get("/persistent/app/viewport/displayOptions")
# Clear scene state
omni.usd.get_context().get_selection().clear_selected_prim_paths()
self._settings.set("/persistent/app/viewport/displayOptions", 0)
if self.asset_list is None:
self.asset_list = await utils.path.get_usd_files_async(self.root_dir)
self._ui_toggle_visible([self.option_frame, self.progress["block"]])
# Reset Camera
if not self.camera.GetAttribute("xformOp:translate"):
UsdGeom.Xformable(self.camera).AddTranslateOp()
self.camera.GetAttribute("xformOp:translate").Set((0, 0, 0))
if not self.camera.GetAttribute("xformOp:rotateXYZ"):
UsdGeom.Xformable(self.camera).AddRotateXYZOp()
self.camera.GetAttribute("xformOp:rotateXYZ").Set((0, 0, 0))
try:
await self._run()
except Exception as e:
raise e
finally:
self.progress["stop_signal"] = False
self._ui_toggle_visible([self.option_frame, self.progress["block"]])
# Re-apply scene state
omni.usd.get_context().get_selection().set_selected_prim_paths(cur_sel, True)
self._settings.set("/persistent/app/viewport/displayOptions", display_mode)
self._settings.set("/rtx/pathtracing/spp", 1)
self.camera.GetAttribute("clippingRange").Set((1.0, cur_clipping_range[1]))
if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"):
self._set_visible(f"{SCENE_PATH}/Viz", True)
async def preview(self):
root_layer = omni.usd.get_context().get_stage().GetRootLayer()
if len(root_layer.subLayerPaths) == 0 or self._preset_layer != Sdf.Find(root_layer.subLayerPaths[-1]):
self._on_preset_changed(self.presets[self._preset_model.get_item_value_model().as_int], update_config=False)
if self.asset_list is None:
self.asset_list = await utils.path.get_usd_files_async(self.root_dir)
# Hide path visualization if exists
if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"):
self._set_visible(f"{SCENE_PATH}/Viz", False)
success = False
# draw assets at random. Remove invalid assets if detected.
while not success and len(self.asset_list) > 0:
sel = random.randrange(len(self.asset_list))
success = await self.load_asset(self.asset_list[sel], use_cache=False)
if not success:
self.asset_list.pop(sel)
await self.render_asset(random.randrange(100), 100)
# ensure material is loaded
await wait_for_loaded()
self.sdv.build_visualization_ui(self._preview_window, "Viewport")
self._preview_window.visible = True
# Set camera target to facilitate camera control
viewport = omni.kit.viewport.get_viewport_interface().get_viewport_window()
viewport.set_camera_target(str(self.camera.GetPath()), 0.0, 0.0, 0.0, True)
def _add_ref(self, ref_prim, file):
# Check if file has a default prim - if not, use the first prim
ref_prim.GetReferences().ClearReferences()
file_stage = Usd.Stage.Open(file)
if file_stage.HasDefaultPrim():
ref_prim.GetPrim().GetReferences().AddReference(file)
else:
top_level_prims = file_stage.GetPseudoRoot().GetChildren()
if len(top_level_prims) == 0:
raise KaolinDataGeneratorError(f"Asset at {file} appears to be empty")
root_prim = top_level_prims[0]
ref_prim.GetPrim().GetReferences().AddReference(file, str(root_prim.GetPath()))
return True
async def load_asset(self, path: str, use_cache: bool = False):
# TODO docstring
stage = omni.usd.get_context().get_stage()
ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
if not ref_prim:
self.create_asset_prim()
ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
self._set_visible(str(ref_prim.GetPath()), True)
try:
self._add_ref(ref_prim, path)
except Tf.ErrorException:
carb.log_warn(f"Error opening {path}.")
return False
except KaolinDataGeneratorError as e:
carb.log_warn(e.args[0])
return False
# set transforms
UsdGeom.Xformable(ref_prim).ClearXformOpOrder()
if self.config.get("up_axis", 0):
UsdGeom.Xformable(ref_prim).AddRotateXOp().Set(-90.0) # If Z up, rotate about X axis
if self.config.get("asset_normalize"):
self._normalize(ref_prim)
if self.config["asset_override_bottom_elev"]:
bottom_to_elevation(ref_prim.GetParent(), 0.0)
else:
ref_prim.GetParent().GetAttribute("xformOp:translate").Set((0.0, 0.0, 0.0))
# ensure material is loaded
await asyncio.sleep(1)
await wait_for_loaded()
asset_size = UsdGeom.Imageable(ref_prim).ComputeLocalBound(0, "default").GetRange().GetSize()
if all([s < 1e-10 for s in asset_size]):
# Stage is empty, skip asset
carb.log_warn(f"Asset at {path} appears to be empty.")
print(
asset_size,
ref_prim,
ref_prim.GetAttribute("visibility").Get(),
ref_prim.GetMetadata("references").GetAddedOrExplicitItems()[0].assetPath,
)
return False
return True
async def render_asset(self, cur_frame: int = 0, num_frames: int = 0) -> None:
# TODO docstring
self._settings.set("/app/hydraEngine/waitIdle", True) # Necessary, waitIdle resets itself to false
stage = omni.usd.get_context().get_stage()
if not self.camera:
rig = stage.DefinePrim(f"{SCENE_PATH}/CameraRig", "Xform")
boom = stage.DefinePrim(f"{rig.GetPath()}/Boom", "Xform")
self.camera = stage.DefinePrim(f"{boom.GetPath()}/Camera", "Camera")
self.camera.GetAttribute("clippingRange").Set((1.0, 1000000))
self._vp_iface.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
if self.config.get("cameramode") == "Trajectory":
if self.config["trajectorymode"] == "Spiral":
centre = self._get_value("centre")
azimuth, elevation, distance = self._get_spiral_camera_pose(cur_frame, num_frames)
self._move_camera(centre, azimuth, elevation, distance)
elif self.config["trajectorymode"] == "CustomJson":
self._move_camera((0, 0, 0), 0, 0, 0)
self._set_trajectory_camera_pose(cur_frame, num_frames)
else:
centre = self._get_value("centre")
azimuth = self._get_value("azimuth")
elevation = self._get_value("elevation")
distance = self._get_value("distance")
self._move_camera(centre, azimuth, elevation, distance)
# Set focal length
focal_length_defaults = {"fixed": 24.0, "mode": 0, "random": Gf.Vec2f([1.0, 120.0])}
focal_length = self._get_value("camera_focal_length", focal_length_defaults)
self.camera.GetAttribute("focalLength").Set(focal_length)
self.move_asset()
self.sample_components()
app = omni.kit.app.get_app_interface()
await app.next_update_async() # This next frame await is needed to avoid camera transform remaining in place
def _get_camera_properties(self):
width = self._settings.get("/app/renderer/resolution/width")
height = self._settings.get("/app/renderer/resolution/height")
tf_mat = np.array(UsdGeom.Xformable(self.camera).ComputeLocalToWorldTransform(0.0).GetInverse()).tolist()
tf_mat[-1][2] *= 100
clippingrange = self.camera.GetAttribute("clippingRange").Get()
clippingrange[0] = 1
cam_props = {
"resolution": {"width": width, "height": height},
"clipping_range": tuple(clippingrange),#tuple(self.camera.GetAttribute("clippingRange").Get()),
"horizontal_aperture": self.camera.GetAttribute("horizontalAperture").Get(),
"focal_length": self.camera.GetAttribute("focalLength").Get(),
"tf_mat": tf_mat,#np.array(UsdGeom.Xformable(self.camera).ComputeLocalToWorldTransform(0.0).GetInverse()).tolist(),
}
return cam_props
def _get_filepath_from_primpath(self, prim_path):
""" Called to get file path from a prim object. """
if not prim_path:
return ""
prim = omni.usd.get_context().get_stage().GetPrimAtPath(prim_path)
if prim:
metadata = prim.GetMetadata("references")
if prim and metadata:
return metadata.GetAddedOrExplicitItems()[0].assetPath
return ""
def _get_frame_metadata(
self, bbox_2d_tight: np.ndarray = None, bbox_2d_loose: np.ndarray = None, bbox_3d: np.ndarray = None
):
frame = {"camera_properties": self._get_camera_properties()}
if bbox_2d_tight is not None:
frame["bbox_2d_tight"] = self._get_bbox_2d_data(bbox_2d_tight)
if bbox_2d_loose is not None:
frame["bbox_2d_loose"] = self._get_bbox_2d_data(bbox_2d_loose)
if bbox_3d is not None:
frame["bbox_3d"] = self._get_bbox_3d_data(bbox_3d)
ref_prim_path = f"{SCENE_PATH}/Asset/Rig/Preview"
stage = omni.usd.get_context().get_stage()
ref_prim = stage.GetPrimAtPath(ref_prim_path)
tf = np.array(UsdGeom.Xformable(ref_prim).ComputeLocalToWorldTransform(0.0)).tolist()
ref = self._get_filepath_from_primpath(ref_prim_path)
if os.path.isfile(self.root_dir):
rel_ref = os.path.basename(ref)
else:
rel_ref = posixpath.relpath(ref, self.root_dir)
frame["asset_transforms"] = [(rel_ref, tf)]
json_buffer = bytes(json.dumps(frame, indent=4), encoding="utf-8")
return json_buffer
def _get_bbox_2d_data(self, bboxes):
# TODO type
bbox_2d_list = []
for bb_data in bboxes:
ref = self._get_filepath_from_primpath(bb_data["name"])
rel_ref = posixpath.relpath(ref, self.root_dir) if ref else ""
bb_dict = {
"file": rel_ref,
"class": bb_data["semanticLabel"],
"bbox": {a: bb_data[a].item() for a in ["x_min", "y_min", "x_max", "y_max"]},
}
bbox_2d_list.append(bb_dict)
return bbox_2d_list
def _get_bbox_3d_data(self, bboxes):
# TODO type
bbox_3d_list = []
for bb_data in bboxes:
ref = self._get_filepath_from_primpath(bb_data["name"])
rel_ref = posixpath.relpath(ref, self.root_dir) if ref else ""
bb_dict = {
"file": rel_ref,
"class": bb_data["semanticLabel"],
"bbox": {a: bb_data[a].item() for a in ["x_min", "y_min", "x_max", "y_max", "z_min", "z_max"]},
}
bb_dict["transform"] = bb_data["transform"].tolist()
bbox_3d_list.append(bb_dict)
return bbox_3d_list
def move_asset(self):
stage = omni.usd.get_context().get_stage()
if self.config["asset_override_bottom_elev"]:
ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
bottom_to_elevation(ref_prim.GetParent(), self.config["asset_bottom_elev"])
async def _save_gt(self, idx: int):
vp = self._vp_iface.get_viewport_window()
self._sensors = self.sdv._sensors["Viewport"]
await sd.sensors.initialize_async(
vp, [st for _, s in self._sensors.items() if s["enabled"] for st in s["sensors"]]
)
io_tasks = []
img_funcs = {"rgb": partial(sd.sensors.get_rgb, vp), "normals": partial(sd.visualize.get_normals, vp)}
np_funcs = {
"depth": partial(sd.sensors.get_depth_linear, vp),
"instance": partial(sd.sensors.get_instance_segmentation, vp, parsed=(self._sensors["instance"]["mode"])),
"semantic": partial(sd.sensors.get_semantic_segmentation, vp),
}
for sensor, write_fn in img_funcs.items():
if self._sensors[sensor]["enabled"]:
filepath = posixpath.join(self.config["out_dir"], f"{idx}_{sensor}.png")
data = write_fn()
io_tasks.append(save_image(filepath, data))
carb.log_info(f"[kaolin.data_generator] Saving {sensor} to {filepath}")
for sensor, write_fn in np_funcs.items():
if self._sensors[sensor]["enabled"]:
filepath = posixpath.join(self.config["out_dir"], f"{idx}_{sensor}.npy")
data = write_fn()
io_tasks.append(save_numpy_array(filepath, data))
carb.log_info(f"[kaolin.data_generator] Saving {sensor} to {filepath}")
bbox_2d_tight, bbox_2d_loose, bbox_3d = None, None, None
if self._sensors["bbox_2d_tight"]["enabled"]:
bbox_2d_tight = sd.sensors.get_bounding_box_2d_tight(vp)
if self._sensors["bbox_2d_loose"]["enabled"]:
bbox_2d_loose = sd.sensors.get_bounding_box_2d_loose(vp)
if self._sensors["bbox_3d"]["enabled"]:
bbox_3d = sd.sensors.get_bounding_box_3d(vp, parsed=self._sensors["bbox_3d"]["mode"])
if self._sensors["pointcloud"]["enabled"]:
pc_gen = PointCloudGenerator()
pc_gen.stage = omni.usd.get_context().get_stage()
pc_gen.ref = pc_gen.stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig")
pc_gen.height_resolution = self._sensors["pointcloud"]["sampling_resolution"]
pc_gen.width_resolution = self._sensors["pointcloud"]["sampling_resolution"]
pointcloud = await pc_gen.generate_pointcloud()
filepath = posixpath.join(self.config["out_dir"], f"{idx}_pointcloud.usd")
up_axis = ["Y", "Z"][self.config.get("up_axis", 0)]
io_tasks.append(save_pointcloud(filepath, pointcloud, up_axis))
filepath = posixpath.join(self.config["out_dir"], f"{idx}_metadata.json")
frame = self._get_frame_metadata(bbox_2d_tight, bbox_2d_loose, bbox_3d) # TODO: fix and remove this
io_tasks.append(omni.client.write_file_async(filepath, frame))
await asyncio.gather(*io_tasks)
def sample_components(self):
# TODO docstring
for _, components in self.dr_components.items():
for component in components:
sample_component(component)
def _set_visible(self, path: str, value: bool):
opts = ["invisible", "inherited"]
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
if prim and prim.GetAttribute("visibility"):
prim.GetAttribute("visibility").Set(opts[value])
def _on_value_changed(self, option, value, idx: int = None, idx_opt=None):
# TODO type
has_mode = isinstance(self.config[option], dict)
if has_mode:
mode = ["fixed", "random"][self.config[option]["mode"]]
if idx is not None and idx_opt is not None:
self.config[option][mode][idx_opt][idx] = value
elif idx is not None:
self.config[option][mode][idx] = value
else:
self.config[option][mode] = value
else:
if idx is not None and idx_opt is not None:
self.config[option][idx_opt][idx] = value
elif idx is not None:
self.config[option][idx] = value
else:
self.config[option] = value
def _on_mode_changed(self, option, model):
# TODO type
idx = model.get_item_value_model().get_value_as_int()
self.config[option]["mode"] = idx
self._build_ui()
def _on_filepick(self, filename: str, dirpath: str):
if dirpath:
path = posixpath.join(dirpath, filename)
if utils.path.exists(path):
self._filepicker.hide()
save_to_log(CACHE, {"root_dir": dirpath, "root_file": filename})
self._ui_root_dir.set_value(path)
def _on_outpick(self, path: str):
self._outpicker.hide()
save_to_log(CACHE, {"out_dir": path})
self._ui_out_dir.set_value(path)
def _on_load_config(self, filename: str, dirpath: str):
self._configpicker.hide()
path = posixpath.join(dirpath, filename)
assert re.search("^.*\.(usd|usda|usdc|USD|USDA|USDC)$", path) # Confirm path is a valid USD
assert utils.path.exists(path) # Ensure path exists
save_to_log(CACHE, {"config_dir": dirpath})
if path not in self.presets:
self.presets.append(path)
self._preset_model.append_child_item(None, ui.SimpleStringModel(posixpath.splitext(filename)[0]))
self._preset_model.get_item_value_model().set_value(self.presets.index(path))
def _on_load_json(self, path: str):
self._jsonpicker.hide()
assert re.search("^.*\.(json)$", path) # Confirm path is a valid json file
assert utils.path.exists(path) # Ensure path exists
save_to_log(CACHE, {"json_dir": posixpath.dirname(path)})
with open(path, "r") as f:
data = json.load(f)
return data
async def _on_root_dir_changed(self, path: str):
"""
root usd directory changed
"""
if utils.path.exists(path):
self._settings.set("/kaolin/mode", 2) # Set app in data generation mode
self._reset()
self._settings.set("/app/asyncRendering", False) # Necessary to ensure correct GT output
self._settings.set("/app/hydraEngine/waitIdle", True) # Necessary to ensure correct GT output
omni.usd.get_context().new_stage()
stage = omni.usd.get_context().get_stage()
vis_prim = stage.GetPrimAtPath(SCENE_PATH)
if vis_prim and self._preset_layer is None:
omni.kit.commands.execute("DeletePrimsCommand", paths=[vis_prim.GetPath()])
elif vis_prim and stage.GetPrimAtPath(f"{vis_prim.GetPath()}/Asset/Rig"):
rig = stage.GetPrimAtPath(f"{vis_prim.GetPath()}/Asset/Rig")
for child in rig.GetChildren():
self._set_visible(str(child.GetPath()), False)
self.root_dir = path
self.asset_list = await utils.path.get_usd_files_async(self.root_dir)
if not self.option_frame:
self._build_ui()
if self.option_frame:
self.option_frame.visible = True
await self.preview()
self._preview_window.visible = False
else:
carb.log_error(f"[kaolin_app.research.data_generator] Directory not found: '{path}'")
def _set_settings(self, width: int, height: int, renderer: str, **kwargs):
self._settings.set("/app/renderer/resolution/width", width)
self._settings.set("/app/renderer/resolution/height", height)
self._settings.set("/rtx/rendermode", renderer)
self._settings.set("/app/viewport/grid/enabled", False)
self._settings.set("/app/viewport/grid/showOrigin", False)
def _on_save_config(self, filename: str, dirname: str):
assert utils.path.exists(dirname)
self._configsaver.hide()
# add sensor config to main config
self.config["sensors"] = {s: True for s, v in self.sdv._sensors["Viewport"].items() if v["enabled"]}
save_to_log(CACHE, {"config_dir": dirname})
if self._preset_layer is None:
raise ValueError("Something went wrong, Unable to save config.")
# Create new layer
filename = f"{posixpath.splitext(filename)[0]}.usda"
new_path = posixpath.join(dirname, filename)
if Sdf.Find(new_path) == self._preset_layer:
new_layer = self._preset_layer
else:
# Transfer layer content over to new layer
new_layer = Sdf.Layer.CreateNew(new_path)
new_layer.TransferContent(self._preset_layer)
new_layer.customLayerData = {"DataGenerator": self.config}
new_layer.Save()
self._on_load_config(filename, dirname)
def _on_resolution_changed(self, model, option):
# TODO type
value = model.as_int
self.config.update({option: value})
self._settings.set(f"/app/renderer/resolution/{option}", value)
model.set_value(value)
def _on_preset_changed(self, path: str, update_config: bool = True) -> None:
stage = omni.usd.get_context().get_stage()
root_layer = stage.GetRootLayer()
if self._preset_layer is not None:
delete_sublayer(self._preset_layer)
vis_prim = stage.GetPrimAtPath(SCENE_PATH)
if vis_prim:
omni.kit.commands.execute("DeletePrimsCommand", paths=[vis_prim.GetPath()])
omni.kit.commands.execute(
"CreateSublayerCommand",
layer_identifier=root_layer.identifier,
sublayer_position=-1,
new_layer_path=path,
transfer_root_content=False,
create_or_insert=False,
)
self._preset_layer = Sdf.Find(root_layer.subLayerPaths[-1])
if update_config:
config = self._preset_layer.customLayerData.get("DataGenerator")
if config:
self.config = config
if "sensors" in self.config:
# Enable sensors
for s in self.config["sensors"]:
self.sdv._sensors["Viewport"][s]["enabled"] = True
# Set preset as authoring layer
edit_target = Usd.EditTarget(self._preset_layer)
stage = omni.usd.get_context().get_stage()
if not stage.IsLayerMuted(self._preset_layer.identifier):
stage.SetEditTarget(edit_target)
self.dr_components = {}
for prim in stage.Traverse():
if str(prim.GetTypeName()) in DR_COMPONENTS:
key = prim.GetParent().GetName()
self.dr_components.setdefault(key, []).append(prim)
self.camera = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig/Boom/Camera")
self.create_asset_prim()
self.option_frame.clear()
with self.option_frame:
self._build_ui_options()
async def _preview_trajectory(self):
stage = omni.usd.get_context().get_stage()
trajectory_viz = stage.GetPrimAtPath(f"{SCENE_PATH}/Viz")
if not trajectory_viz:
carb.log_warn("Unable to preview trajectory, no trajectory detected.")
return
trajectory_viz.GetAttribute("visibility").Set("inherited")
viewport = omni.kit.viewport.get_viewport_interface()
omni.usd.get_context().get_selection().set_selected_prim_paths([f"{SCENE_PATH}/Viz"], True)
await omni.kit.app.get_app_interface().next_update_async()
viewport.get_viewport_window().focus_on_selected()
omni.usd.get_context().get_selection().clear_selected_prim_paths()
def _set_trajectory_preview_visibility(self):
show_preview = (
self.config.get("cameramode") == "Trajectory" and self.config.get("trajectory_mode") == "CustomJson"
)
self._set_visible(f"{SCENE_PATH}/Viz", show_preview)
def _on_trajectory_mode_changed(self, trajectory_mode_model):
trajectory_mode = TRAJ_OPTIONS[trajectory_mode_model.get_item_value_model().as_int]
self.config.update({"trajectorymode": trajectory_mode})
self._set_trajectory_preview_visibility()
def _ui_modal(self, title: str, text: str, no_close: bool = False, ok_btn: bool = True):
""" Create a modal window. """
window_flags = ui.WINDOW_FLAGS_NO_RESIZE
window_flags |= ui.WINDOW_FLAGS_NO_SCROLLBAR
window_flags |= ui.WINDOW_FLAGS_MODAL
if no_close:
window_flags |= ui.WINDOW_FLAGS_NO_CLOSE
modal = ui.Window(title, width=400, height=100, flags=window_flags)
with modal.frame:
with ui.VStack(spacing=5):
text = ui.Label(text, word_wrap=True, style={"alignment": ui.Alignment.CENTER})
if ok_btn:
btn = ui.Button("OK")
btn.set_clicked_fn(lambda: self._ui_toggle_visible([modal]))
return modal
def _ui_create_xyz(self, option, value=(0, 0, 0), idx=None, dtype=float):
# TODO type
colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
with ui.HStack():
for i, (label, colour) in enumerate(colors.items()):
if i != 0:
ui.Spacer(width=4)
with ui.ZStack(height=14):
with ui.ZStack(width=16):
ui.Rectangle(name="vector_label", style={"background_color": colour, "border_radius": 3})
ui.Label(label, alignment=ui.Alignment.CENTER)
with ui.HStack():
ui.Spacer(width=14)
self._ui_create_value(option, value[i], idx_opt=idx, idx=i, dtype=dtype)
ui.Spacer(width=4)
def _ui_create_value(self, option, value=0.0, idx=None, idx_opt=None, dtype=float):
# TODO type
if dtype == int:
widget = ui.IntDrag(min=0, max=int(1e6))
elif dtype == float:
widget = ui.FloatDrag(min=-1e6, max=1e6, step=0.1, style={"border_radius": 1})
elif dtype == bool:
widget = ui.CheckBox()
else:
raise NotImplementedError
widget.model.set_value(value)
widget.model.add_value_changed_fn(
lambda m: self._on_value_changed(option, m.get_value_as_float(), idx=idx, idx_opt=idx_opt)
)
return widget
def _ui_simple_block(self, label, option, is_xyz=False, dtype=float):
# TODO type
ui_fn = self._ui_create_xyz if is_xyz else self._ui_create_value
with ui.HStack(spacing=5):
ui.Label(label, width=120, height=10)
ui_fn(option, value=self.config[option], dtype=dtype)
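# _ui_option_block renders a Fixed/Random combo box that toggles between a
# single-value widget and a Min/Max pair, mirroring the option's "mode" field.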
def _ui_option_block(self, label, option, is_xyz=False, dtype=float):
"""
Create option block on the UI
"""
if option not in self.config:
return None
ui_fn = self._ui_create_xyz if is_xyz else self._ui_create_value
option_block = ui.HStack(spacing=5)
with option_block:
ui.Label(label, width=120, height=10)
model = ui.ComboBox(self.config[option]["mode"], "Fixed", "Random", width=80).model
# create option based on "fixed" or "random"
option_0 = ui.HStack(spacing=5) # fixed
option_1 = ui.VStack(spacing=5) # random
with option_0:
ui_fn(option, value=self.config[option]["fixed"], dtype=dtype)
with option_1:
for i, m in enumerate(["Min", "Max"]):
with ui.HStack(spacing=5):
ui.Label(m, width=30)
ui_fn(option, value=self.config[option]["random"][i], idx=i, dtype=dtype)
if self.config[option]["mode"] == 0:
option_1.visible = False
else:
option_0.visible = False
model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([option_0, option_1]))
model.add_item_changed_fn(
lambda m, i: self.config[option].update({"mode": m.get_item_value_model().as_int})
)
return option_block
def _ui_toggle_visible(self, ui_elements):
# TODO type
for ui_el in ui_elements:
ui_el.visible = not ui_el.visible
def _build_run_ui(self):
with self._window.frame:
pass
def _ui_up_axis(self):
collection = ui.RadioCollection()
with ui.HStack():
ui.Label("Up Axis", width=120)
with ui.HStack():
ui.RadioButton(text="Y", radio_collection=collection, height=30)
ui.RadioButton(text="Z", radio_collection=collection, height=30)
collection.model.add_value_changed_fn(self._change_up_axis)
collection.model.set_value(self.config.get("up_axis", 0))
def _build_ui(self):
with self._window.frame:
with ui.ScrollingFrame():
with ui.VStack(spacing=5):
with ui.HStack(spacing=5, height=15):
ui.Label("Root Dir", width=55)
self._ui_root_dir = ui.StringField().model
if self.root_dir:
self._ui_root_dir.set_value(self.root_dir)
self._ui_root_dir.add_value_changed_fn(
lambda m: asyncio.ensure_future(self._on_root_dir_changed(m.as_string))
)
browse = ui.Button(
image_url="resources/icons/folder.png",
width=30,
height=25,
style={"Button": {"margin": 0, "padding": 5, "alignment": ui.Alignment.CENTER}},
)
browse.set_clicked_fn(
lambda f=self._filepicker: self._show_filepicker(f, self._cache.get("root_dir", ""))
)
if self.root_dir:
with ui.HStack(height=0):
ui.Label("Presets", width=60)
self._preset_model = ui.ComboBox(
0, *[posixpath.splitext(posixpath.basename(p))[0] for p in self.presets]
).model
config_dir = self._cache.get("config_dir", "")
config_file = self._cache.get("config_file", "")
ui.Button(
"Save As...",
clicked_fn=lambda f=self._configsaver: self._show_filepicker(
f, config_dir, config_file
),
)
ui.Button(
"Import",
clicked_fn=lambda f=self._configpicker: self._show_filepicker(
f, config_dir, config_file
),
)
self.option_frame = ui.VStack(spacing=5)
self.option_frame.visible = False
self._preset_model.add_item_changed_fn(
lambda m, i: self._on_preset_changed(self.presets[m.get_item_value_model().as_int])
)
if self.presets and not self._preset_layer:
self._on_preset_changed(self.presets[0])
self._build_progress_ui()
ui.Spacer()
ui.Button("Demo", clicked_fn=lambda: webbrowser.open(DEMO_URL), height=60)
def _build_ui_options(self):
# Output
with ui.CollapsableFrame(title="Output", height=10):
with ui.VStack(spacing=5):
with ui.HStack(spacing=5, height=10):
ui.Label(
"Output Dir",
width=120,
height=10,
tooltip="Select directory to save output to. Existing files of the same name will be overwritten.",
)
self._ui_out_dir = ui.StringField().model
self._ui_out_dir.set_value(self.config["out_dir"])
self._ui_out_dir.add_value_changed_fn(lambda m: self.config.update({"out_dir": m.as_string}))
browse = ui.Button(
image_url="resources/icons/folder.png",
width=30,
height=25,
style={"Button": {"margin": 0, "padding": 5, "alignment": ui.Alignment.CENTER}},
)
browse.set_clicked_fn(
lambda f=self._outpicker: self._show_filepicker(f, self._cache.get("out_dir", ""))
)
with ui.HStack(spacing=5, height=10):
ui.Label(
"Renders per Scene",
width=120,
height=10,
tooltip="Number of randomized scenes to be captured before re-sampling a new scene.",
)
model = ui.IntDrag(min=1, max=int(1e6)).model
model.set_value(self.config["renders_per_asset"])
model.add_value_changed_fn(
lambda m: self.config.update({"renders_per_asset": m.get_value_as_int()})
)
_build_ui_sensor_selection("Viewport")
# Assets
with ui.CollapsableFrame(title="Assets", height=10):
with ui.VStack(spacing=5):
self._ui_simple_block("Fix Bottom Elevation", "asset_override_bottom_elev", dtype=bool)
self._ui_simple_block("Normalize", "asset_normalize", dtype=bool)
self._ui_up_axis()
ui.Spacer()
# Camera
with ui.CollapsableFrame(title="Camera", height=10):
with ui.VStack(spacing=5):
with ui.HStack(spacing=5):
ui.Label(
"Camera Mode",
width=120,
height=10,
tooltip="Select random camera poses or follow a trajectory.",
)
cur_camera_idx = CAMERAS.index(self.config.get("cameramode", "UniformSampling"))
camera_mode_model = ui.ComboBox(cur_camera_idx, *CAMERAS, width=150).model
camera_mode_model.add_item_changed_fn(
lambda m, i: self.config.update({"cameramode": CAMERAS[m.get_item_value_model().as_int]})
)
if "camera_focal_length" not in self.config:
self.config["camera_focal_length"] = {"fixed": 24.0, "mode": 0, "random": Gf.Vec2f([1.0, 120.0])}
uniform_options = [
self._ui_option_block("Focal Length", "camera_focal_length"),
self._ui_option_block("Look-at Position", "centre", is_xyz=True),
self._ui_option_block("Distance", "distance"),
self._ui_option_block("Elevation", "elevation"),
self._ui_option_block("Azimuth", "azimuth"),
]
if cur_camera_idx == 1:
self._ui_toggle_visible(uniform_options)
camera_mode_model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible(uniform_options))
camera_mode_model.add_item_changed_fn(lambda *_: self._set_trajectory_preview_visibility())
# Trajectory options, shown only when the trajectory camera mode is selected
traject_block = ui.VStack(spacing=5)
with traject_block:
with ui.HStack(spacing=5):
ui.Label("Trajectory Mode", width=120, height=10, tooltip="Trajectory mode")
if "trajectorymode" not in self.config:
self.config["trajectorymode"] = "Spiral"
cur_traj_idx = TRAJ_OPTIONS.index(self.config.get("trajectorymode", "Spiral"))
trajmodel = ui.ComboBox(cur_traj_idx, *TRAJ_OPTIONS, width=150).model
trajmodel.add_item_changed_fn(lambda m, _: self._on_trajectory_mode_changed(m))
# spiral option
spiral_block = ui.VStack(spacing=5)
with spiral_block:
self._ui_option_block("Distance", "distance") # distance block
with ui.HStack(spacing=5): # elevation range block
ui.Label("Elevation Range", width=120, height=10, tooltip="Elevation range two numbers")
ui.Spacer(width=10)
for i, m in enumerate(["Min", "Max"]):
with ui.HStack(spacing=5):
ui.Label(m, width=30)
val = self.config["elevation"]["random"]
self._ui_create_value("elevation", value=val[i], idx=i, dtype=float)
with ui.HStack(spacing=5): # rotation block
ui.Label("Number of Rotations", width=120, height=10)
self.config["num_rotations"] = 3
n_rot = self.config.get("num_rotations")
self._ui_create_value("num_rotations", value=n_rot, dtype=int)
ui.Spacer()
spiral_block.visible = cur_traj_idx == 0
trajmodel.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([spiral_block]))
# JSON option
json_block = ui.VStack(spacing=5)
with json_block:
with ui.HStack(spacing=5, height=15):
ui.Label("Json path", width=55)
ui.Button(
"Json File",
clicked_fn=lambda f=self._jsonpicker: self._show_filepicker(
f, self._cache.get("json_dir", "")
),
)
if self.config.get("jsonpath") and os.path.exists(self.config["jsonpath"]):
asyncio.ensure_future(self._import_trajectory_from_json(self.config["jsonpath"]))
ui.Button(
"View Trajectory", clicked_fn=lambda: asyncio.ensure_future(self._preview_trajectory())
)
ui.Spacer()
json_block.visible = cur_traj_idx == 1
trajmodel.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([json_block]))
traject_block.visible = cur_camera_idx == 1
camera_mode_model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([traject_block]))
ui.Spacer()
ui.Spacer()
# Create UI elements for DR Components
for title, components in self.dr_components.items():
build_component_frame(title, components)
# Render
with ui.CollapsableFrame(title="Render Settings", height=10):
self._settings.set("/rtx/rendermode", self.config["renderer"])
self._settings.set("/rtx/pathtracing/totalSpp", self.config["spp"])
self._settings.set("/rtx/pathtracing/optixDenoiser/enabled", self.config["denoiser"])
self._settings.set("/rtx/pathtracing/clampSpp", 0) # Disable spp clamping
self._settings.set("/rtx/post/aa/op", 2)
with ui.VStack(spacing=5):
with ui.HStack(spacing=5):
ui.Label("Resolution", width=120)
ui.Label("Width", width=40, tooltip="Rendered resolution width, in pixels.")
width = ui.IntDrag(min=MIN_RESOLUTION["width"], max=MAX_RESOLUTION["width"]).model
width.add_value_changed_fn(lambda m: self._on_resolution_changed(m, "width"))
ui.Spacer(width=10)
ui.Label("Height", width=40, tooltip="Rendered resolution height, in pixels.")
height = ui.IntDrag(min=MIN_RESOLUTION["height"], max=MAX_RESOLUTION["height"]).model
height.add_value_changed_fn(lambda m: self._on_resolution_changed(m, "height"))
width.set_value(self.config.get("width", self._settings.get("/app/renderer/resolution/width")))
height.set_value(self.config.get("height", self._settings.get("/app/renderer/resolution/height")))
with ui.HStack(spacing=5):
ui.Label("Renderer", width=120, tooltip="Render Mode")
cur_renderer_idx = RENDERERS.index(self.config["renderer"])
model = ui.ComboBox(cur_renderer_idx, *RENDERERS, width=200).model
model.add_item_changed_fn(
lambda m, i: self.config.update({"renderer": RENDERERS[m.get_item_value_model().as_int]})
)
model.add_item_changed_fn(
lambda m, i: self._settings.set("/rtx/rendermode", RENDERERS[m.get_item_value_model().as_int])
)
pt_block = ui.VStack(spacing=5)
with pt_block:
with ui.HStack(spacing=5):
ui.Label(
"Samples Per Pixel", width=120, tooltip="Number of samples taken at each pixel, per frame."
)
spp = ui.IntDrag().model
spp.set_value(self.config["spp"])
spp.add_value_changed_fn(
lambda m: self.config.update({"spp": m.as_int})
) # Only change SPP during run
spp.add_value_changed_fn(
lambda m: self._settings.set("/rtx/pathtracing/totalSpp", m.as_int)
) # SPP Max
with ui.HStack(spacing=5):
ui.Label("Denoiser", width=120, tooltip="Toggle denoiser")
denoiser = ui.CheckBox().model
denoiser.set_value(self.config["denoiser"])
denoiser.add_value_changed_fn(lambda m: self.config.update({"denoiser": m.as_bool}))
denoiser.add_value_changed_fn(
lambda m: self._settings.set("/rtx/pathtracing/optixDenoiser/enabled", m.as_bool)
)
ui.Spacer()
pt_block.visible = bool(cur_renderer_idx)
model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([pt_block]))
with ui.HStack():
ui.Label("Subdiv", width=120, tooltip="Subdivision Global Refinement Level")
with ui.HStack():
ui.Label("Refinement Level", width=100, tooltip="Subdivision Global Refinement Level")
subdiv = ui.IntDrag(min=0, max=2).model
subdiv.add_value_changed_fn(lambda m: self.config.update({"subdiv": m.as_int}))
subdiv.add_value_changed_fn(
lambda m: self._settings.set("/rtx/hydra/subdivision/refinementLevel", m.as_int)
)
ui.Spacer()
with ui.HStack(spacing=5):
btn = ui.Button("Preview", height=40, tooltip="Render a preview with the current settings.")
btn.set_clicked_fn(lambda: asyncio.ensure_future(self.preview()))
btn = ui.Button("Run", height=40, tooltip="Generate and save groundtruth with the current settings.")
btn.set_clicked_fn(lambda: asyncio.ensure_future(self.run()))
def _build_progress_ui(self):
self.progress = {"block": ui.VStack(spacing=5), "stop_signal": False}
self.progress["block"].visible = False
with self.progress["block"]:
with ui.HStack(height=0):
ui.Label(
"TOTAL",
width=80,
style={"font_size": 20.0},
tooltip="Render progress of all scenes to be rendered.",
)
self.progress["bar1"] = ui.ProgressBar(height=40, style={"font_size": 20.0}).model
with ui.HStack(height=0):
ui.Label(
"Per Scene",
width=80,
style={"font_size": 16.0},
tooltip="Render progress of the total number of renders for this scenes",
)
self.progress["bar2"] = ui.ProgressBar(height=20, style={"font_size": 16.0}).model
btn = ui.Button("Cancel", height=60)
btn.set_clicked_fn(lambda: self.progress.update({"stop_signal": True}))
@staticmethod
def get_instance():
return _extension_instance
| 66,049 | Python | 45.612562 | 127 | 0.570062 |
terrylincn/omniverse-tutorials/kaolin_data_generator_patch/README.md | 1. Download the Pixar Kitchen Set models from http://graphics.pixar.com/usd/downloads.html and unzip them.
2. Follow this link to install Kaolin: https://kaolin.readthedocs.io/en/latest/notes/installation.html
3. Install the Kaolin app from the Omniverse Launcher.
4. Copy extension.py to kaolin_app.research.data_generator/kaolin_app/research/data_generator/ | 376 | Markdown | 93.249977 | 119 | 0.781915 |
terrylincn/omniverse-tutorials/code_demo_mesh100/demo.py | import omni
from pxr import Usd, UsdLux, UsdGeom, UsdShade, Sdf, Gf, Vt, UsdPhysics, PhysxSchema
from omni.physx import get_physx_interface
from omni.physx.bindings._physx import SimulationEvent
from omni.physx.scripts.physicsUtils import *
import random
stage = omni.usd.get_context().get_stage()
# set up axis to z
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(stage, 0.01)
defaultPrimPath = str(stage.GetDefaultPrim().GetPath())
# light
sphereLight = UsdLux.SphereLight.Define(stage, defaultPrimPath + "/SphereLight")
sphereLight.CreateRadiusAttr(150)
sphereLight.CreateIntensityAttr(30000)
sphereLight.AddTranslateOp().Set(Gf.Vec3f(650.0, 0.0, 1150.0))
# Physics scene
UsdPhysics.Scene.Define(stage, defaultPrimPath + "/physicsScene")
rows = 10
cols = 10
sphereCount = rows*cols
_colors = []
material_scope_path = defaultPrimPath + "/Looks"
UsdGeom.Scope.Define(stage, material_scope_path)
# Trianglemesh materials
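# One OmniPBR material is created per grid cell, each with a random diffuse
# tint (recorded in _colors for the contact callback below) and a restitution
# value that increases with the cell index.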
for i in range(rows):
for j in range(cols):
mtl_path = material_scope_path + "/OmniPBR" + str(i*cols+j)
mat_prim = stage.DefinePrim(mtl_path, "Material")
material_prim = UsdShade.Material.Get(stage, mat_prim.GetPath())
material = UsdPhysics.MaterialAPI.Apply(material_prim.GetPrim())
restitution = 0.0 + ((i * cols + j) % sphereCount) * 0.01
material.CreateRestitutionAttr().Set(restitution)
if material_prim:
shader_mtl_path = stage.DefinePrim("{}/Shader".format(mtl_path), "Shader")
shader_prim = UsdShade.Shader.Get(stage, shader_mtl_path.GetPath())
if shader_prim:
shader_out = shader_prim.CreateOutput("out", Sdf.ValueTypeNames.Token)
material_prim.CreateSurfaceOutput("mdl").ConnectToSource(shader_out)
material_prim.CreateVolumeOutput("mdl").ConnectToSource(shader_out)
material_prim.CreateDisplacementOutput("mdl").ConnectToSource(shader_out)
shader_prim.GetImplementationSourceAttr().Set(UsdShade.Tokens.sourceAsset)
shader_prim.SetSourceAsset(Sdf.AssetPath("OmniPBR.mdl"), "mdl")
shader_prim.SetSourceAssetSubIdentifier("OmniPBR", "mdl")
color = Gf.Vec3f(random.random(), random.random(), random.random())
shader_prim.GetPrim().CreateAttribute("inputs:diffuse_tint", Sdf.ValueTypeNames.Color3f).Set(color)
_colors.append(color)
# Triangle mesh with multiple materials
path = defaultPrimPath + "/triangleMesh"
_mesh_path = path
mesh = UsdGeom.Mesh.Define(stage, path)
# Fill in VtArrays
stripSize = 100.0 # quad edge length; also the sphere grid spacing (must be defined before first use below)
points = []
normals = []
indices = []
vertexCounts = []
for i in range(rows):
for j in range(cols):
subset = UsdGeom.Subset.Define(stage, path + "/subset" + str(i*cols+j))
subset.CreateElementTypeAttr().Set("face")
subset_indices = [i*cols+j]
rel = subset.GetPrim().CreateRelationship("material:binding", False)
rel.SetTargets([Sdf.Path(material_scope_path + "/OmniPBR" + str(i*cols+j))])
points.append(Gf.Vec3f(-stripSize/2 + stripSize * i, -stripSize/2 + stripSize * j, 0.0))
points.append(Gf.Vec3f(-stripSize/2 + stripSize * (i + 1), -stripSize/2 + stripSize * j, 0.0))
points.append(Gf.Vec3f(-stripSize/2 + stripSize * (i + 1), -stripSize/2 + stripSize * (j + 1), 0.0))
points.append(Gf.Vec3f(-stripSize/2 + stripSize * i,-stripSize/2 + stripSize * (j + 1), 0.0))
for k in range(4):
normals.append(Gf.Vec3f(0, 0, 1))
indices.append(k + (i * cols + j) * 4)
subset.CreateIndicesAttr().Set(subset_indices)
vertexCounts.append(4)
mesh.CreateFaceVertexCountsAttr().Set(vertexCounts)
mesh.CreateFaceVertexIndicesAttr().Set(indices)
mesh.CreatePointsAttr().Set(points)
mesh.CreateDoubleSidedAttr().Set(False)
mesh.CreateNormalsAttr().Set(normals)
UsdPhysics.CollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI.CreateApproximationAttr().Set("none")
# Sphere material
sphereMaterialpath = defaultPrimPath + "/sphereMaterial"
UsdShade.Material.Define(stage, sphereMaterialpath)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(sphereMaterialpath))
material.CreateRestitutionAttr().Set(0.9)
# Spheres
for i in range(rows):
for j in range(cols):
spherePath = "/sphere" + str(i)
size = 25.0
position = Gf.Vec3f(i * stripSize, j * stripSize, 250.0)
sphere_prim = add_rigid_sphere(stage, spherePath, size, position)
# Add material
add_physics_material_to_prim(stage, sphere_prim, Sdf.Path(sphereMaterialpath))
# apply contact report
contactReportAPI = PhysxSchema.PhysxContactReportAPI.Apply(sphere_prim)
contactReportAPI.CreateThresholdAttr().Set(200000)
collider0 = None
collider1 = None
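# Contact-report callback: CONTACT_FOUND / CONTACT_PERSISTS record which pair
# of colliders is touching; the subsequent CONTACT_DATA event carries the mesh
# face index, which is used to tint the contacting sphere with that face's
# material color.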
def _on_simulation_event(event):
global collider0, collider1, _mesh_path, stage, _colors
if event.type == int(SimulationEvent.CONTACT_DATA):
if collider1 == _mesh_path:
usdGeom = UsdGeom.Mesh.Get(stage, collider0)
color = Vt.Vec3fArray([_colors[event.payload['faceIndex1']]])
usdGeom.GetDisplayColorAttr().Set(color)
if event.type == int(SimulationEvent.CONTACT_FOUND):
contactDict = resolveContactEventPaths(event)
collider0 = contactDict["collider0"]
collider1 = contactDict["collider1"]
if event.type == int(SimulationEvent.CONTACT_PERSISTS):
contactDict = resolveContactEventPaths(event)
collider0 = contactDict["collider0"]
collider1 = contactDict["collider1"]
events = get_physx_interface().get_simulation_event_stream()
_simulation_event_sub = events.create_subscription_to_pop(_on_simulation_event)
| 5,922 | Python | 40.41958 | 117 | 0.681189 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/README.md | # Dofbot Reacher Reinforcement Learning Sim2Real Environment for Omniverse Isaac Gym/Sim
This repository adds a DofbotReacher environment based on [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) (commit [cc1aab0](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/tree/cc1aab0f904ade860fc0761d62edb6e706ab89ec)), and includes Sim2Real code to control a real-world [Dofbot](https://category.yahboom.net/collections/r-robotics-arm/products/dofbot-jetson_nano) with the policy learned by reinforcement learning in Omniverse Isaac Gym/Sim.
- We suggest using [the isaac-sim-2022.1.1 branch](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/tree/isaac-sim-2022.1.1) to prevent any potential issues. The RL code is tested on both Windows and Linux, while the Sim2Real code is tested on Linux and a real Dofbot using Isaac Sim 2022.1.1 and ROS Melodic.
- **WARNING**: The RL code in this branch is only tested on Linux using Isaac Sim 2023.1.0. The Sim2Real code isn't fully tested yet.
This repo is compatible with the following repositories:
- [OmniIsaacGymEnvs-DofbotReacher](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher)
- [OmniIsaacGymEnvs-UR10Reacher](https://github.com/j3soon/OmniIsaacGymEnvs-UR10Reacher)
- [OmniIsaacGymEnvs-KukaReacher](https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher)
- [OmniIsaacGymEnvs-HiwinReacher](https://github.com/j3soon/OmniIsaacGymEnvs-HiwinReacher)
## Preview


## Installation
Prerequisites:
- Before starting, please make sure your hardware and software meet the [system requirements](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html#system-requirements).
- [Install Omniverse Isaac Sim 2023.1.0](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) (Must setup Cache and Nucleus)
- You may try out newer versions of Isaac Sim along with [their corresponding patch](https://github.com/j3soon/isaac-extended#conda-issue-on-linux), but it is not guaranteed to work.
- Double check that Nucleus is correctly installed by [following these steps](https://github.com/j3soon/isaac-extended#nucleus).
- Your computer & GPU should be able to run the Cartpole example in [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs)
- (Optional) [Set up a Dofbot with Jetson Nano](http://www.yahboom.net/study/Dofbot-Jetson_nano) in the real world
Make sure to install Isaac Sim in the default directory and clone this repository to the home directory; otherwise, you will need to modify the commands below accordingly.
We will use Anaconda to manage our virtual environment:
1. Clone this repository and the patches repo:
- Linux
```sh
cd ~
git clone https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher.git
git clone https://github.com/j3soon/isaac-extended.git
```
- Windows
```sh
cd %USERPROFILE%
git clone https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher.git
git clone https://github.com/j3soon/isaac-extended.git
```
2. Generate [instanceable](https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html) Dofbot assets for training:
[Launch the Script Editor](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gui_interactive_scripting.html#script-editor) in Isaac Sim. Copy the content in `omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot.py` and execute it inside the Script Editor window. Wait until you see the text `Done!`.
3. [Download and Install Anaconda](https://www.anaconda.com/products/distribution#Downloads).
```sh
# For 64-bit Linux (x86_64/x64/amd64/intel64)
wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh
bash Anaconda3-2022.10-Linux-x86_64.sh
```
For Windows users, make sure to use `Anaconda Prompt` instead of `Anaconda Powershell Prompt`, `Command Prompt`, or `Powershell` for the following commands.
4. Patch Isaac Sim 2023.1.0
- Linux
```sh
export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
cp $ISAAC_SIM/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh.bak
cp ~/isaac-extended/isaac_sim-2023.1.0-patch/linux/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh
```
- Windows
> (To be updated)
5. [Set up conda environment for Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html#advanced-running-with-anaconda)
- Linux
```sh
# conda remove --name isaac-sim --all
export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
cd $ISAAC_SIM
conda env create -f environment.yml
conda activate isaac-sim
cd ~/OmniIsaacGymEnvs-DofbotReacher
pip install -e .
```
- Windows
> (To be updated)
6. Activate conda environment
- Linux
```sh
export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
cd $ISAAC_SIM
conda activate isaac-sim
source setup_conda_env.sh
```
- Windows
```sh
set ISAAC_SIM="%LOCALAPPDATA%\ov\pkg\isaac_sim-2023.1.0"
cd %ISAAC_SIM%
conda activate isaac-sim
call setup_conda_env.bat
```
Please note that you should execute the commands in Step 6 for every new shell.
For Windows users, replace `~` to `%USERPROFILE%` for all the following commands.
## Dummy Policy
This is a sample to make sure you have set up the environment correctly. You should see a single Dofbot in Isaac Sim.
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/dummy_dofbot_policy.py task=DofbotReacher test=True num_envs=1
```
Alternatively, you can replace the dummy policy with a random policy with `omniisaacgymenvs/scripts/random_policy.py`.
## Training
You can launch the training in `headless` mode as follows:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True
```
The number of environments is set to 2048 by default. If your GPU has limited memory, you can decrease the number of environments by changing the `num_envs` argument as below:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True num_envs=2048
```
You can also skip training by downloading the pre-trained model checkpoint by:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
wget https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/releases/download/v1.1.0/runs.zip
unzip runs.zip
```
The learning curve of the pre-trained model:

## Testing
Make sure you have stored the model checkpoints at `~/OmniIsaacGymEnvs-DofbotReacher/runs`, you can check it with the following command:
```sh
ls ~/OmniIsaacGymEnvs-DofbotReacher/runs/DofbotReacher/nn/
```
In order to achieve the highest rewards, you may not want to use the latest checkpoint `./runs/DofbotReacher/nn/DofbotReacher.pth`. Instead, use the checkpoint with highest rewards such as `./runs/DofbotReacher/nn/last_DofbotReacher_ep_1000_rew_XXX.pth`. You can replace `DofbotReacher.pth` with the latest checkpoint before following the steps below, or simply modify the commands below to use the latest checkpoint.
You can visualize the learned policy by the following command:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=512 checkpoint=./runs/DofbotReacher/nn/DofbotReacher.pth
```
Likewise, you can decrease the number of environments by modifying the parameter `num_envs=512`.
## Using the Official URDF File
The official URDF file in `/thirdparty/dofbot_info` is provided by Yahboom. The details on how to download this file can be found in the commit message of [e866618](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/commit/e86661813cd941133b4dc68da4c20a21efa00a0b).
The only additional step is to generate [instanceable](https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html) Dofbot assets based on the official URDF file:
[Launch the Script Editor](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gui_interactive_scripting.html#script-editor) in Isaac Sim. Copy the content in `omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot_from_urdf.py` and execute it inside the Script Editor window. Wait until you see the text `Done!`.
You can now use the official URDF file by appending the `use_urdf=True` flag to any command above. For example:
- Try out the dummy policy script with the official URDF file:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/dummy_dofbot_policy.py task=DofbotReacher test=True num_envs=1 use_urdf=True
```
- Or download the pre-trained model checkpoint and run it:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
wget https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/releases/download/v1.2.0/runs_urdf.zip
unzip runs_urdf.zip
```
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=512 checkpoint=./runs_urdf/DofbotReacher/nn/DofbotReacher.pth use_urdf=True
```
Please note that the model trained with the USD file provided by Isaac Sim is not compatible with the official URDF file. Fortunately, we also provide a pre-trained checkpoint for the official URDF file.
The learning curve of the pre-trained model:

## Sim2Real
The learned policy has a very conservative constraint on the joint limits. Therefore, the gripper would not hit the ground under such limits. However, you should still make sure there are no other obstacles within Dofbot's workspace (reachable area). That being said, if things go wrong, press `Ctrl+C` twice in the terminal to kill the process.
> It would be possible to remove the conservative joint limit constraints by utilizing self-collision detection in Isaac Sim. We are currently investigating this feature.
For simplicity, we'll use TCP instead of ROS to control the real-world Dofbot. Copy the server notebook file (`omniisaacgymenvs/sim2real/dofbot-server.ipynb`) to the Jetson Nano on your Dofbot. Launch a Jupyter Notebook on the Jetson Nano and execute the server notebook file.
You should be able to reset the Dofbot's joints by the following script:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/sim2real/dofbot.py
```
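Under the hood, the client streams joint targets to the server over a plain TCP socket. The snippet below is only a minimal sketch of the idea, assuming a hypothetical server that accepts a JSON-encoded list of six joint angles; the actual message format is defined by `omniisaacgymenvs/sim2real/dofbot-server.ipynb` and `omniisaacgymenvs/sim2real/dofbot.py`:
```python
import json
import socket

# Hypothetical sketch only: see dofbot-server.ipynb / dofbot.py for the real protocol.
DOFBOT_IP = "192.168.0.10"  # placeholder; use your Dofbot's IP
DOFBOT_PORT = 65432         # default port in DofbotReacher.yaml

joint_angles_deg = [90, 90, 90, 90, 90, 90]  # one target angle per joint
with socket.create_connection((DOFBOT_IP, DOFBOT_PORT)) as sock:
    sock.sendall(json.dumps(joint_angles_deg).encode("utf-8"))
```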
Edit `omniisaacgymenvs/cfg/task/DofbotReacher.yaml`. Set `sim2real.enabled` to `True`, and set `sim2real.ip` to the IP of your Dofbot:
```yaml
sim2real:
enabled: True
fail_quietely: False
verbose: False
ip: <IP_OF_YOUR_DOFBOT>
port: 65432
```
Now you can control the real-world Dofbot in real-time by the following command:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=1 checkpoint=./runs/DofbotReacher/nn/DofbotReacher.pth
```
## Demo
We provide an interactable demo based on the `DofbotReacher` RL example. In this demo, you can click on any of
the Dofbots in the scene to manually control the robot with your keyboard as follows:
- `Q`/`A`: Control Joint 0.
- `W`/`S`: Control Joint 1.
- `E`/`D`: Control Joint 2.
- `R`/`F`: Control Joint 3.
- `T`/`G`: Control Joint 4.
- `Y`/`H`: Control Joint 5.
- `ESC`: Unselect the selected Dofbot and relinquish manual control
Launch this demo with the following command. Note that this demo limits the maximum number of Dofbots in the scene to 128.
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_demo.py task=DofbotReacher num_envs=64
```
## Running in Docker
If you have an [NVIDIA Enterprise subscription](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/planning.html), you can run all services with Docker Compose.
For users without a subscription, you can pull the [Isaac Docker image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim), but you should still install Omniverse Nucleus beforehand (only Isaac Sim itself is dockerized).
Follow [this tutorial](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_container.html#isaac-sim-setup-remote-headless-container) to generate your NGC API Key.
Please note that you should clone this repositories in your home directory and generate instanceable assets beforehand as mentioned in the [Installation](#installation) section.
We will now set up the docker environment.
1. Build the docker image
```sh
docker pull nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1
docker build . -t j3soon/isaac-sim
```
2. Launch an Isaac Container in Headless mode:
```sh
scripts/run_docker_headless.sh
./runheadless.native.sh
```
Alternatively, launch an Isaac Container with GUI (The host machine should include a desktop environment):
```sh
scripts/run_docker.sh
./runapp.sh
```
3. Install this repository
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
pip install -e .
```
4. Run any command in the docker container
> Make sure to add `headless=True` if the container is launched in headless mode.
For example, to run the training script:
```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True num_envs=2048
```
You can watch the training progress with:
```sh
docker exec -it isaac-sim /bin/bash
cd ~/OmniIsaacGymEnvs-DofbotReacher
tensorboard --logdir=./runs
```
## Acknowledgement
This project has been made possible through the support of [ElsaLab][elsalab] and [NVIDIA AI Technology Center (NVAITC)][nvaitc].
For a complete list of contributors to the code of this repository, please visit the [contributor list](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/graphs/contributors).
[][elsalab]
[][nvaitc]
[elsalab]: https://github.com/elsa-lab
[nvaitc]: https://github.com/NVAITC
Disclaimer: this is not an official NVIDIA product.
> **Note**: below are the original README of [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs).
# Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim
## About this repository
This repository contains Reinforcement Learning examples that can be run with the latest release of [Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html). RL examples are trained using PPO from [rl_games](https://github.com/Denys88/rl_games) library and examples are built on top of Isaac Sim's `omni.isaac.core` and `omni.isaac.gym` frameworks.
Please see [release notes](docs/release_notes.md) for the latest updates.
<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="614" height="307"/>
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release.
*Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2023.1.0, to ensure examples work as expected.*
Note that the 2022.2.1 OmniIsaacGymEnvs release will no longer work with the latest Isaac Sim 2023.1.0 release. Due to a change in USD APIs, line 138 in rl_task.py is no longer valid. To run the previous OIGE release with the latest Isaac Sim release, please comment out lines 137 and 138 in rl_task.py or set `add_distant_light` to `False` in the task config file. No changes are required if running with the latest release of OmniIsaacGymEnvs.
Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim.
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path.
```
For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh
For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $*
For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh
```
Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`:
```bash
PYTHON_PATH -m pip install -e .
```
The following error may appear during the initial installation. This error is harmless and can be ignored.
```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
```
### Running the examples
*Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.*
To train your first policy, run:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```
An Isaac Sim app window should be launched. Once Isaac Sim initialization completes, the Cartpole scene will be constructed and simulation will start running automatically. The process will terminate once training finishes.
Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar.
To achieve maximum performance, launch training in `headless` mode as follows:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True
```
#### A Note on the Startup Time of the Simulation
Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually
be optimized in future releases.
### Extension Workflow
The extension workflow provides a simple user interface for creating and launching RL tasks. To launch Isaac Sim for the extension workflow, run:
```bash
./<isaac_sim_root>/isaac-sim.gym.sh --ext-folder </parent/directory/to/OIGE>
```
Note: `isaac_sim_root` should be located in the same directory as `python.sh`.
The UI window can be activated from `Isaac Examples > RL Examples` by navigating the top menu bar.
For more details on the extension workflow, please refer to the [documentation](docs/extension_workflow.md).
### Loading trained models // Checkpoints
Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME`
defaults to the task name, but can also be overridden via the `experiment` argument.
To load a trained checkpoint and continue training, use the `checkpoint` argument:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth
```
To load a trained checkpoint and only perform inference (no training), pass `test=True`
as an argument, along with the checkpoint name. To avoid rendering overhead, you may
also want to run with fewer environments using `num_envs=64`:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64
```
Note that if there are special characters such as `[` or `=` in the checkpoint names,
you will need to escape them and put quotes around the string. For example,
`checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`
We provide pre-trained checkpoints on the [Nucleus](https://docs.omniverse.nvidia.com/nucleus/latest/index.html) server under `Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints`. Run the following command
to launch inference with pre-trained checkpoint:
Localhost (To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html)):
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```
Production server:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```
When running with a pre-trained checkpoint for the first time, we will automatically download the checkpoint file to `omniisaacgymenvs/checkpoints`. For subsequent runs, we will re-use the file that has already been downloaded, and will not overwrite existing checkpoints with the same name in the `checkpoints` folder.
## Running from Docker
Latest Isaac Sim Docker image can be found on [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim). A utility script is provided at `docker/run_docker.sh` to help initialize this repository and launch the Isaac Sim docker container. The script can be run with:
```bash
./docker/run_docker.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py headless=True task=Ant
```
To run the Isaac Sim docker with UI, use the following script:
```bash
./docker/run_docker_viewer.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant
```
To avoid re-installing OIGE each time a container is launched, we also provide a dockerfile that can be used to build an image with OIGE installed. To build the image, run:
```bash
docker build -t isaac-sim-oige -f docker/dockerfile .
```
Then, start a container with the built image:
```bash
./docker/run_dockerfile.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant headless=True
```
## Livestream
OmniIsaacGymEnvs supports livestream through the [Omniverse Streaming Client](https://docs.omniverse.nvidia.com/app_streaming-client/app_streaming-client/overview.html). To enable this feature, add the commandline argument `enable_livestream=True`:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True enable_livestream=True
```
Connect from the Omniverse Streaming Client once the SimulationApp has been created. Note that enabling livestream is equivalent to training with the viewer enabled, thus the speed of training/inferencing will decrease compared to running in headless mode.
## Training Scripts
All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`.
To test out a task without RL in the loop, run the random policy script with:
```bash
PYTHON_PATH scripts/random_policy.py task=Cartpole
```
This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated.
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```
This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations have been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI.
### Configuration and command line arguments
We use [Hydra](https://hydra.cc/docs/intro/) to manage the config.
Common arguments for the training scripts are:
* `task=TASK` - Selects which task to use. Any of `AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `CartpoleCamera`, `Crazyflie`, `FactoryTaskNutBoltPick`, `FactoryTaskNutBoltPlace`, `FactoryTaskNutBoltScrew`, `FrankaCabinet`, `FrankaDeformable`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM` (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task`)
* `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU.
* `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax.
* `multi_gpu=MULTI_GPU` - Whether to train using multiple GPUs. Defaults to `False`. Note that this option is only available with `rlgames_train.py`.
* `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `enable_livestream=ENABLE_LIVESTREAM` - Whether to enable Omniverse streaming.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
* `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation).
* `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used.
Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`.
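For instance, several overrides can be combined in one invocation. The values below are purely illustrative, not recommended settings:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant num_envs=2048 headless=True \
    train.params.config.minibatch_size=64 task.env.episodeLength=100
```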
#### Hydra Notes
Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file.
The `task` and `train` portions of the config work through the use of config groups.
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/).
The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`.
In some places in the config you will find other variables referenced (for example,
`num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy.
This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).
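As a hypothetical sketch of how such an interpolation appears inside a train config (the surrounding keys are illustrative):
```yaml
params:
  config:
    # resolves to task.env.numEnvs, four levels up in the merged config tree
    num_actors: ${....task.env.numEnvs}
```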
### Tensorboard
Tensorboard can be launched during training via the following command:
```bash
PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries
```
## WandB support
You can run [WandB](https://wandb.ai/) with OmniIsaacGymEnvs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating.
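For example, a training run with logging enabled might look like the following, where the entity, project, group, and name values are placeholders to replace with your own:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole headless=True wandb_activate=True \
    wandb_entity=<your_entity> wandb_project=<your_project> wandb_group=Cartpole wandb_name=first_run
```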
## Training with Multiple GPUs
To train with multiple GPUs, use the following command, where `--nproc_per_node` represents the number of available GPUs:
```bash
PYTHON_PATH -m torch.distributed.run --nnodes=1 --nproc_per_node=2 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
## Multi-Node Training
To train across multiple nodes/machines, a separate process must be launched on each node.
For the master node, use the following command, where `--nproc_per_node` represents the number of available GPUs, and `--nnodes` represents the number of nodes:
```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
Note that the port (`5555`) can be replaced with any other available port.
For non-master nodes, use the following command, replacing `--node_rank` with the index of each machine:
```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
For more details on multi-node training with PyTorch, please visit [here](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, it is possible multi-node training will perform worse than running on a single node instance.
## Tasks
Source code for tasks can be found in `omniisaacgymenvs/tasks`.
Each task follows the frameworks provided in `omni.isaac.core` and `omni.isaac.gym` in Isaac Sim.
Refer to [docs/framework.md](docs/framework.md) for how to create your own tasks.
Full details on each of the tasks available can be found in the [RL examples documentation](docs/rl_examples.md).
## Demo
We provide an interactive demo based on the `AnymalTerrain` RL example. In this demo, you can click on any of
the ANYmals in the scene to go into third-person mode and manually control the robot with your keyboard as follows:
- `Up Arrow`: Forward linear velocity command
- `Down Arrow`: Backward linear velocity command
- `Left Arrow`: Leftward linear velocity command
- `Right Arrow`: Rightward linear velocity command
- `Z`: Counterclockwise yaw angular velocity command
- `X`: Clockwise yaw angular velocity command
- `C`: Toggles camera view between third-person and scene view while maintaining manual control
- `ESC`: Unselects the selected ANYmal and yields manual control
Launch this demo with the following command. Note that this demo limits the maximum number of ANYmals in the scene to 128.
```bash
PYTHON_PATH scripts/rlgames_demo.py task=AnymalTerrain num_envs=64 checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth
```
<img src="https://user-images.githubusercontent.com/34286328/184688654-6e7899b2-5847-4184-8944-2a96b129b1ff.gif" width="600" height="300"/>
| 32,868 | Markdown | 50.843849 | 473 | 0.771967 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/config/extension.toml | [gym]
reloadable = true
[package]
version = "0.0.0"
category = "Simulation"
title = "Isaac Gym Envs"
description = "RL environments"
authors = ["Isaac Sim Team"]
repository = "https://gitlab-master.nvidia.com/carbon-gym/omniisaacgymenvs"
keywords = ["isaac"]
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
icon = "data/icon.png"
writeTarget.kit = true
[dependencies]
"omni.isaac.gym" = {}
"omni.isaac.core" = {}
"omni.isaac.cloner" = {}
"omni.isaac.ml_archive" = {} # torch
[[python.module]]
name = "omniisaacgymenvs"
| 532 | TOML | 20.319999 | 75 | 0.693609 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/extension.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import inspect
import os
import traceback
import weakref
from abc import abstractmethod
import hydra
import omni.ext
import omni.timeline
import omni.ui as ui
import omni.usd
from hydra import compose, initialize
from omegaconf import OmegaConf
from omni.isaac.cloner import GridCloner
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
from omni.isaac.core.utils.torch.maths import set_seed
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_train_mt import RLGTrainer, Trainer
from omniisaacgymenvs.utils.task_util import import_tasks, initialize_task
from omni.isaac.ui.callbacks import on_open_folder_clicked, on_open_IDE_clicked
from omni.isaac.ui.menu import make_menu_item_description
from omni.isaac.ui.ui_utils import (
btn_builder,
dropdown_builder,
get_style,
int_builder,
multi_btn_builder,
multi_cb_builder,
scrolling_frame_builder,
setup_ui_headers,
str_builder,
)
from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items
from omni.kit.viewport.utility import get_active_viewport, get_viewport_from_window_name
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from pxr import Gf
ext_instance = None
class RLExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
self._render_modes = ["Full render", "UI only", "None"]
self._env = None
self._task = None
self._ext_id = ext_id
ext_manager = omni.kit.app.get_app().get_extension_manager()
extension_path = ext_manager.get_extension_path(ext_id)
self._ext_path = os.path.dirname(extension_path) if os.path.isfile(extension_path) else extension_path
self._ext_file_path = os.path.abspath(__file__)
self._initialize_task_list()
self.start_extension(
"",
"",
"RL Examples",
"RL Examples",
"",
"A set of reinforcement learning examples.",
self._ext_file_path,
)
self._task_initialized = False
self._task_changed = False
self._is_training = False
self._render = True
self._resume = False
self._test = False
self._evaluate = False
self._checkpoint_path = ""
self._timeline = omni.timeline.get_timeline_interface()
self._viewport = get_active_viewport()
self._viewport.updates_enabled = True
global ext_instance
ext_instance = self
def _initialize_task_list(self):
self._task_map, _ = import_tasks()
self._task_list = list(self._task_map.keys())
self._task_list.sort()
self._task_list.remove("CartpoleCamera") # we cannot run camera-based training from extension workflow for now. it requires a specialized app file.
self._task_name = self._task_list[0]
self._parse_config(self._task_name)
self._update_task_file_paths(self._task_name)
def _update_task_file_paths(self, task):
self._task_file_path = os.path.abspath(inspect.getfile(self._task_map[task]))
self._task_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/task/{task}.yaml")
self._train_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/train/{task}PPO.yaml")
def _parse_config(self, task, num_envs=None, overrides=None):
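# Hydra keeps global state; clear and re-initialize it so the config can be
# re-composed whenever the selected task or overrides change.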
hydra.core.global_hydra.GlobalHydra.instance().clear()
initialize(version_base=None, config_path="cfg")
overrides_list = [f"task={task}"]
if overrides is not None:
overrides_list += overrides
if num_envs is None:
self._cfg = compose(config_name="config", overrides=overrides_list)
else:
self._cfg = compose(config_name="config", overrides=overrides_list + [f"num_envs={num_envs}"])
self._cfg_dict = omegaconf_to_dict(self._cfg)
self._sim_config = SimConfig(self._cfg_dict)
def start_extension(
self,
menu_name: str,
submenu_name: str,
name: str,
title: str,
doc_link: str,
overview: str,
file_path: str,
number_of_extra_frames=1,
window_width=550,
keep_window_open=False,
):
window = ui.Workspace.get_window("Property")
if window:
window.visible = False
window = ui.Workspace.get_window("Render Settings")
if window:
window.visible = False
menu_items = [make_menu_item_description(self._ext_id, name, lambda a=weakref.proxy(self): a._menu_callback())]
if menu_name == "" or menu_name is None:
self._menu_items = menu_items
elif submenu_name == "" or submenu_name is None:
self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)]
else:
self._menu_items = [
MenuItemDescription(
name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)]
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._task_dropdown = None
self._cbs = None
self._build_ui(
name=name,
title=title,
doc_link=doc_link,
overview=overview,
file_path=file_path,
number_of_extra_frames=number_of_extra_frames,
window_width=window_width,
keep_window_open=keep_window_open,
)
return
def _build_ui(
self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width, keep_window_open
):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=keep_window_open, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
self._main_stack = ui.VStack(spacing=5, height=0)
with self._main_stack:
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="World Controls",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
with ui.HStack(style=get_style()):
with ui.VStack(style=get_style(), width=ui.Fraction(20)):
kwargs = {
"label": "Select Task",
"type": "dropdown",
"default_val": 0,
"items": self._task_list,
"tooltip": "Select a task",
"on_clicked_fn": self._on_task_select,
}
self._task_dropdown = dropdown_builder(**kwargs)
with ui.Frame(tooltip="Open Source Code"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
with ui.Frame(tooltip="Open Task Config"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_cfg_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
with ui.Frame(tooltip="Open Training Config"):
ui.Button(
name="IconButton",
width=20,
height=20,
clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._train_cfg_file_path),
style=get_style()["IconButton.Image::OpenConfig"],
alignment=ui.Alignment.LEFT_CENTER,
tooltip="Open in IDE",
)
kwargs = {
"label": "Number of environments",
"tooltip": "Enter the number of environments to construct",
"min": 0,
"max": 8192,
"default_val": self._cfg.task.env.numEnvs,
}
self._num_envs_int = int_builder(**kwargs)
kwargs = {
"label": "Load Environment",
"type": "button",
"text": "Load",
"tooltip": "Load Environment and Task",
"on_clicked_fn": self._on_load_world,
}
self._load_env_button = btn_builder(**kwargs)
kwargs = {
"label": "Rendering Mode",
"type": "dropdown",
"default_val": 0,
"items": self._render_modes,
"tooltip": "Select a rendering mode",
"on_clicked_fn": self._on_render_mode_select,
}
self._render_dropdown = dropdown_builder(**kwargs)
kwargs = {
"label": "Configure Training",
"count": 3,
"text": ["Resume from Checkpoint", "Test", "Evaluate"],
"default_val": [False, False, False],
"tooltip": [
"",
"Resume training from checkpoint",
"Play a trained policy",
"Evaluate a policy during training",
],
"on_clicked_fn": [
self._on_resume_cb_update,
self._on_test_cb_update,
self._on_evaluate_cb_update,
],
}
self._cbs = multi_cb_builder(**kwargs)
kwargs = {
"label": "Load Checkpoint",
"tooltip": "Enter path to checkpoint file",
"on_clicked_fn": self._on_checkpoint_update,
}
self._checkpoint_str = str_builder(**kwargs)
kwargs = {
"label": "Train/Test",
"count": 2,
"text": ["Start", "Stop"],
"tooltip": [
"",
"Launch new training/inference run",
"Terminate current training/inference run",
],
"on_clicked_fn": [self._on_train, self._on_train_stop],
}
self._buttons = multi_btn_builder(**kwargs)
return
def create_task(self):
headless = self._cfg.headless
enable_viewport = "enable_cameras" in self._cfg.task.sim and self._cfg.task.sim.enable_cameras
self._env = VecEnvRLGamesMT(
headless=headless,
sim_device=self._cfg.device_id,
enable_livestream=self._cfg.enable_livestream,
enable_viewport=enable_viewport,
launch_simulation_app=False,
)
self._task = initialize_task(self._cfg_dict, self._env, init_sim=False)
self._task_initialized = True
def _on_task_select(self, value):
if self._task_initialized and value != self._task_name:
self._task_changed = True
self._task_initialized = False
self._task_name = value
self._parse_config(self._task_name)
self._num_envs_int.set_value(self._cfg.task.env.numEnvs)
self._update_task_file_paths(self._task_name)
def _on_render_mode_select(self, value):
if value == self._render_modes[0]:
self._viewport.updates_enabled = True
window = ui.Workspace.get_window("Viewport")
window.visible = True
if self._env:
self._env._update_viewport = True
self._env._render_mode = 0
elif value == self._render_modes[1]:
self._viewport.updates_enabled = False
window = ui.Workspace.get_window("Viewport")
window.visible = False
if self._env:
self._env._update_viewport = False
self._env._render_mode = 1
elif value == self._render_modes[2]:
self._viewport.updates_enabled = False
window = ui.Workspace.get_window("Viewport")
window.visible = False
if self._env:
self._env._update_viewport = False
self._env._render_mode = 2
def _on_render_cb_update(self, value):
self._render = value
print("updates enabled", value)
self._viewport.updates_enabled = value
if self._env:
self._env._update_viewport = value
if value:
window = ui.Workspace.get_window("Viewport")
window.visible = True
else:
window = ui.Workspace.get_window("Viewport")
window.visible = False
def _on_single_env_cb_update(self, value):
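# When enabled, hide every cloned environment except env_0 and frame the camera
# on it; when disabled, restore visibility and the default scene view.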
visibility = "invisible" if value else "inherited"
stage = omni.usd.get_context().get_stage()
env_root = stage.GetPrimAtPath("/World/envs")
if env_root.IsValid():
for i, p in enumerate(env_root.GetChildren()):
p.GetAttribute("visibility").Set(visibility)
if value:
stage.GetPrimAtPath("/World/envs/env_0").GetAttribute("visibility").Set("inherited")
env_pos = self._task._env_pos[0].cpu().numpy().tolist()
camera_pos = [env_pos[0] + 10, env_pos[1] + 10, 3]
camera_target = [env_pos[0], env_pos[1], env_pos[2]]
else:
camera_pos = [10, 10, 3]
camera_target = [0, 0, 0]
camera_state = ViewportCameraState("/OmniverseKit_Persp", get_active_viewport())
camera_state.set_position_world(Gf.Vec3d(*camera_pos), True)
camera_state.set_target_world(Gf.Vec3d(*camera_target), True)
def _on_test_cb_update(self, value):
self._test = value
if value is True and self._checkpoint_path.strip() == "":
self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth")
def _on_resume_cb_update(self, value):
self._resume = value
if value is True and self._checkpoint_path.strip() == "":
self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth")
def _on_evaluate_cb_update(self, value):
self._evaluate = value
def _on_checkpoint_update(self, value):
self._checkpoint_path = value.get_value_as_string()
async def _on_load_world_async(self, use_existing_stage):
# initialize task if not initialized
if not self._task_initialized or not omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid():
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int())
self.create_task()
else:
# update config
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int())
self._task.update_config(self._sim_config)
# clear scene
# self._env._world.scene.clear()
self._env._world._sim_params = self._sim_config.get_physics_params()
await self._env._world.initialize_simulation_context_async()
set_camera_view(eye=[10, 10, 3], target=[0, 0, 0], camera_prim_path="/OmniverseKit_Persp")
if not use_existing_stage:
# clear scene
self._env._world.scene.clear()
# clear environments added to world
omni.usd.get_context().get_stage().RemovePrim("/World/collisions")
omni.usd.get_context().get_stage().RemovePrim("/World/envs")
# create scene
await self._env._world.reset_async_set_up_scene()
# update num_envs in envs
self._env.update_task_params()
else:
self._task.initialize_views(self._env._world.scene)
def _on_load_world(self):
# stop simulation before updating stage
self._timeline.stop()
asyncio.ensure_future(self._on_load_world_async(use_existing_stage=False))
def _on_train_stop(self):
if self._task_initialized:
asyncio.ensure_future(self._env._world.stop_async())
async def _on_train_async(self, overrides=None):
try:
# initialize task if not initialized
print("task initialized:", self._task_initialized)
if not self._task_initialized:
# if this is the first launch of the extension, we do not want to re-create stage if stage already exists
use_existing_stage = False
if omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid():
use_existing_stage = True
print("use existing stage:", use_existing_stage)
await self._on_load_world_async(use_existing_stage)
# update config
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int(), overrides=overrides)
sim_config = SimConfig(self._cfg_dict)
self._task.update_config(sim_config)
cfg_dict = omegaconf_to_dict(self._cfg)
# sets seed. if seed is -1 will pick a random one
self._cfg.seed = set_seed(self._cfg.seed, torch_deterministic=self._cfg.torch_deterministic)
cfg_dict["seed"] = self._cfg.seed
self._checkpoint_path = self._checkpoint_str.get_value_as_string()
if self._resume or self._test:
self._cfg.checkpoint = self._checkpoint_path
self._cfg.test = self._test
self._cfg.evaluation = self._evaluate
cfg_dict["checkpoint"] = self._cfg.checkpoint
cfg_dict["test"] = self._cfg.test
cfg_dict["evaluation"] = self._cfg.evaluation
rlg_trainer = RLGTrainer(self._cfg, cfg_dict)
if not rlg_trainer._bad_checkpoint:
trainer = Trainer(rlg_trainer, self._env)
await self._env._world.reset_async_no_set_up_scene()
self._env._render_mode = self._render_dropdown.get_item_value_model().as_int
await self._env.run(trainer)
await omni.kit.app.get_app().next_update_async()
except Exception as e:
print(traceback.format_exc())
finally:
self._is_training = False
def _on_train(self):
# stop simulation if still running
self._timeline.stop()
self._on_render_mode_select(self._render_modes[self._render_dropdown.get_item_value_model().as_int])
if not self._is_training:
self._is_training = True
asyncio.ensure_future(self._on_train_async())
return
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def _on_window(self, status):
return
def on_shutdown(self):
self._extra_frames = []
if self._menu_items is not None:
self._sample_window_cleanup()
self.shutdown_cleanup()
global ext_instance
ext_instance = None
return
def shutdown_cleanup(self):
return
def _sample_window_cleanup(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
self._menu_items = None
self._buttons = None
self._load_env_button = None
self._task_dropdown = None
self._cbs = None
self._checkpoint_str = None
return
def get_instance():
return ext_instance
| 22,236 | Python | 42.262646 | 155 | 0.533189 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/envs/vec_env_rlgames_mt.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.gym.vec_env import TaskStopException, VecEnvMT
from .vec_env_rlgames import VecEnvRLGames
# VecEnv Wrapper for RL training
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
def _parse_data(self, data):
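# Unpack the payload produced by the simulation thread, clamping states to the
# configured observation range and moving buffers onto the RL device.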
self._obs = data["obs"]
self._rew = data["rew"].to(self._task.rl_device)
self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
self._resets = data["reset"].to(self._task.rl_device)
self._extras = data["extras"]
def step(self, actions):
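# A pending stop request set from the main thread unwinds the trainer loop by raising an exception.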
if self._stop:
raise TaskStopException()
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(
actions=actions, reset_buf=self._task.reset_buf
)
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)
self.send_actions(actions)
data = self.get_data()
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf
)
self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
obs_dict = {}
obs_dict["obs"] = self._obs
obs_dict["states"] = self._states
return obs_dict, self._rew, self._resets, self._extras
| 3,109 | Python | 42.194444 | 118 | 0.705693 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/envs/vec_env_rlgames.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import numpy as np
import torch
from omni.isaac.gym.vec_env import VecEnvBase
# VecEnv Wrapper for RL training
class VecEnvRLGames(VecEnvBase):
def _process_data(self):
self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
self._rew = self._rew.to(self._task.rl_device)
self._states = torch.clamp(self._states, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
self._resets = self._resets.to(self._task.rl_device)
self._extras = self._extras
def set_task(self, task, backend="numpy", sim_params=None, init_sim=True, rendering_dt=1.0 / 60.0) -> None:
super().set_task(task, backend, sim_params, init_sim, rendering_dt)
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(
actions=actions, reset_buf=self._task.reset_buf
)
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)
self._task.pre_physics_step(actions)
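# Step physics control_frequency_inv times per RL step, rendering only when the
# accumulated frame count lands on the configured rendering interval.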
if (self.sim_frame_count + self._task.control_frequency_inv) % self._task.rendering_interval == 0:
for _ in range(self._task.control_frequency_inv - 1):
self._world.step(render=False)
self.sim_frame_count += 1
self._world.step(render=self._render)
self.sim_frame_count += 1
else:
for _ in range(self._task.control_frequency_inv):
self._world.step(render=False)
self.sim_frame_count += 1
self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(device=self._task.rl_device), reset_buf=self._task.reset_buf
)
self._states = self._task.get_states()
self._process_data()
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
def reset(self, seed=None, options=None):
"""Resets the task and applies default zero actions to recompute observations and states."""
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}] Running RL reset")
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.rl_device)
obs_dict, _, _, _ = self.step(actions)
return obs_dict
| 4,328 | Python | 43.628866 | 116 | 0.677218 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/allegro_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.allegro_hand import AllegroHand
from omniisaacgymenvs.robots.articulations.views.allegro_hand_view import AllegroHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class AllegroHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
if not (self.obs_type in ["full_no_vel", "full"]):
raise Exception("Unknown type of observations!\nobservationType should be one of: [full_no_vel, full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full_no_vel": 50,
"full": 72,
}
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 16
self._num_states = 0
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782], device=self.device)
self.pose_dy, self.pose_dz = -0.2, 0.06
def get_hand(self):
allegro_hand = AllegroHand(
prim_path=self.default_zero_env_path + "/allegro_hand",
name="allegro_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"allegro_hand",
get_prim_at_path(allegro_hand.prim_path),
self._sim_config.parse_actor_config("allegro_hand"),
)
allegro_hand_prim = self._stage.GetPrimAtPath(allegro_hand.prim_path)
allegro_hand.set_allegro_hand_properties(stage=self._stage, allegro_hand_prim=allegro_hand_prim)
allegro_hand.set_motor_control_mode(
stage=self._stage, allegro_hand_path=self.default_zero_env_path + "/allegro_hand"
)
def get_hand_view(self, scene):
return AllegroHandView(prim_paths_expr="/World/envs/.*/allegro_hand", name="allegro_hand_view")
def get_observations(self):
self.get_object_goal_observations()
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_full_observations(self, no_vel=False):
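# Buffer layout: scaled joint positions first, then object and goal poses, the
# object-to-goal relative rotation, and the previous actions; the "full" variant
# additionally packs joint velocities and object linear/angular velocities.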
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, 16:19] = self.object_pos
self.obs_buf[:, 19:23] = self.object_rot
self.obs_buf[:, 23:26] = self.goal_pos
self.obs_buf[:, 26:30] = self.goal_rot
self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 34:50] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 32:35] = self.object_pos
self.obs_buf[:, 35:39] = self.object_rot
self.obs_buf[:, 39:42] = self.object_linvel
self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 45:48] = self.goal_pos
self.obs_buf[:, 48:52] = self.goal_rot
self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 56:72] = self.actions
| 6,329 | Python | 42.655172 | 115 | 0.658872 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ball_balance.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.maths import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.balance_bot import BalanceBot
from pxr import PhysxSchema
class BallBalanceTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 12 + 12
self._num_actions = 3
self.anchored = False
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = self._task_cfg["sim"]["dt"]
self._table_position = torch.tensor([0, 0, 0.56])
self._ball_position = torch.tensor([0.0, 0.0, 1.0])
self._ball_radius = 0.1
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
def set_up_scene(self, scene) -> None:
self.get_balance_table()
self.add_ball()
super().set_up_scene(scene, replicate_physics=False)
self.set_up_table_anchors()
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("balance_bot_view"):
scene.remove_object("balance_bot_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
def get_balance_table(self):
balance_table = BalanceBot(
prim_path=self.default_zero_env_path + "/BalanceBot", name="BalanceBot", translation=self._table_position
)
self._sim_config.apply_articulation_settings(
"table", get_prim_at_path(balance_table.prim_path), self._sim_config.parse_actor_config("table")
)
def add_ball(self):
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball/ball",
translation=self._ball_position,
name="ball_0",
radius=self._ball_radius,
color=torch.tensor([0.9, 0.6, 0.2]),
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
def set_up_table_anchors(self):
from pxr import Gf
height = 0.08
stage = get_current_stage()
for i in range(self._num_envs):
base_path = f"{self.default_base_env_path}/env_{i}/BalanceBot"
for j, leg_offset in enumerate([(0.4, 0, height), (-0.2, 0.34641, 0), (-0.2, -0.34641, 0)]):
# fix the legs to ground
leg_path = f"{base_path}/lower_leg{j}"
ground_joint_path = leg_path + "_ground"
env_pos = stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}").GetAttribute("xformOp:translate").Get()
anchor_pos = env_pos + Gf.Vec3d(*leg_offset)
self.fix_to_ground(stage, ground_joint_path, leg_path, anchor_pos)
def fix_to_ground(self, stage, joint_path, prim_path, anchor_pos):
from pxr import UsdPhysics, Gf
# D6 fixed joint
d6FixedJoint = UsdPhysics.Joint.Define(stage, joint_path)
d6FixedJoint.CreateBody0Rel().SetTargets(["/World/defaultGroundPlane"])
d6FixedJoint.CreateBody1Rel().SetTargets([prim_path])
d6FixedJoint.CreateLocalPos0Attr().Set(anchor_pos)
d6FixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
d6FixedJoint.CreateLocalPos1Attr().Set(Gf.Vec3f(0, 0, 0.18))
d6FixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
# lock all DOF (lock - low is greater than high)
d6Prim = stage.GetPrimAtPath(joint_path)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
def get_observations(self) -> dict:
ball_positions, ball_orientations = self._balls.get_world_poses(clone=False)
ball_positions = ball_positions[:, 0:3] - self._env_pos
ball_velocities = self._balls.get_velocities(clone=False)
ball_linvels = ball_velocities[:, 0:3]
ball_angvels = ball_velocities[:, 3:6]
dof_pos = self._balance_bots.get_joint_positions(clone=False)
dof_vel = self._balance_bots.get_joint_velocities(clone=False)
sensor_force_torques = self._balance_bots.get_measured_joint_forces(joint_indices=self._sensor_indices) # (num_envs, num_sensors, 6)
self.obs_buf[..., 0:3] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 3:6] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 6:9] = ball_positions
self.obs_buf[..., 9:12] = ball_linvels
self.obs_buf[..., 12:15] = sensor_force_torques[..., 0] / 20.0
self.obs_buf[..., 15:18] = sensor_force_torques[..., 3] / 20.0
self.obs_buf[..., 18:21] = sensor_force_torques[..., 4] / 20.0
self.obs_buf[..., 21:24] = sensor_force_torques[..., 5] / 20.0
self.ball_positions = ball_positions
self.ball_linvels = ball_linvels
observations = {"ball_balance": {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += (
self._dt * self._action_speed_scale * actions.to(self.device)
)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits
)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = 0
self._balance_bots.set_joint_position_targets(self.dof_position_targets) # .clone())
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.001 # min horizontal dist from origin
max_d = 0.4 # max horizontal dist from origin
min_height = 1.0
max_height = 2.0
min_horizontal_speed = 0
max_horizontal_speed = 2
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
speedscales = (dists - min_d) / (max_d - min_d)
hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device)
hvels = -speedscales * hspeeds * dirs
vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze()
ball_pos = self.initial_ball_pos.clone()
ball_rot = self.initial_ball_rot.clone()
# position
ball_pos[env_ids_64, 0:2] += hpos[..., 0:2]
ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
ball_rot[env_ids_64, 0] = 1
ball_rot[env_ids_64, 1:] = 0
ball_velocities = self.initial_ball_velocities.clone()
# linear
ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2]
ball_velocities[env_ids_64, 2] = vspeeds
# angular
ball_velocities[env_ids_64, 3:6] = 0
# reset root state for bbots and balls in selected envs
self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32)
self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32)
# reset root pose and velocity
self._balance_bots.set_world_poses(
self.initial_bot_pos[env_ids_64].clone(), self.initial_bot_rot[env_ids_64].clone(), indices=env_ids_32
)
self._balance_bots.set_velocities(self.initial_bot_velocities[env_ids_64].clone(), indices=env_ids_32)
# reset DOF states for bbots in selected envs
self._balance_bots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
dof_limits = self._balance_bots.get_dof_limits()
self.bbot_dof_lower_limits, self.bbot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
self.initial_dof_positions = self._balance_bots.get_joint_positions()
self.initial_bot_pos, self.initial_bot_rot = self._balance_bots.get_world_poses()
# self.initial_bot_pos[..., 2] = 0.559 # tray_height
self.initial_bot_velocities = self._balance_bots.get_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_ball_velocities = self._balls.get_velocities()
self.dof_position_targets = torch.zeros(
(self.num_envs, self._balance_bots.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
actuated_joints = ["lower_leg0", "lower_leg1", "lower_leg2"]
self.actuated_dof_indices = torch.tensor(
[self._balance_bots._dof_indices[j] for j in actuated_joints], device=self._device, dtype=torch.long
)
force_links = ["upper_leg0", "upper_leg1", "upper_leg2"]
self._sensor_indices = torch.tensor(
[self._balance_bots._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
def calculate_metrics(self) -> None:
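# Reward shaping: measure the ball's distance from a point above the tray center
# (z = 0.7) and its speed, map each through 1 / (1 + x), and multiply the two.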
ball_dist = torch.sqrt(
self.ball_positions[..., 0] * self.ball_positions[..., 0]
+ (self.ball_positions[..., 2] - 0.7) * (self.ball_positions[..., 2] - 0.7)
+ (self.ball_positions[..., 1]) * self.ball_positions[..., 1]
)
ball_speed = torch.sqrt(
self.ball_linvels[..., 0] * self.ball_linvels[..., 0]
+ self.ball_linvels[..., 1] * self.ball_linvels[..., 1]
+ self.ball_linvels[..., 2] * self.ball_linvels[..., 2]
)
pos_reward = 1.0 / (1.0 + ball_dist)
speed_reward = 1.0 / (1.0 + ball_speed)
self.rew_buf[:] = pos_reward * speed_reward
def is_done(self) -> None:
reset = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
reset = torch.where(
self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset
)
self.reset_buf[:] = reset
| 13,958 | Python | 44.174757 | 140 | 0.630391 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole_camera.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gym import spaces
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleCameraTask(CartpoleTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
# use multi-dimensional observation for camera RGB
self.observation_space = spaces.Box(
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * -np.Inf,
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * np.Inf)
RLTask.__init__(self, name, env)
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self.camera_type = self._task_cfg["env"].get("cameraType", 'rgb')
self.camera_width = self._task_cfg["env"]["cameraWidth"]
self.camera_height = self._task_cfg["env"]["cameraHeight"]
self.camera_channels = 3
self._export_images = self._task_cfg["env"]["exportImages"]
def cleanup(self) -> None:
# initialize remaining buffers
RLTask.cleanup(self)
# override observation buffer for camera data
self.obs_buf = torch.zeros(
(self.num_envs, self.camera_width, self.camera_height, 3), device=self.device, dtype=torch.float)
def set_up_scene(self, scene) -> None:
self.get_cartpole()
RLTask.set_up_scene(self, scene)
# start replicator to capture image data
self.rep.orchestrator._orchestrator._is_started = True
# set up cameras
self.render_products = []
env_pos = self._env_pos.cpu()
for i in range(self._num_envs):
camera = self.rep.create.camera(
position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55))
render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height))
self.render_products.append(render_product)
# initialize pytorch writer for vectorized collection
self.pytorch_listener = self.PytorchListener()
self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
self.pytorch_writer.attach(self.render_products)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
# retrieve RGB data from all render products
images = self.pytorch_listener.get_rgb_data()
if images is not None:
if self._export_images:
from torchvision.utils import save_image, make_grid
img = images/255
save_image(make_grid(img, nrow=2), 'cartpole_export.png')
self.obs_buf = torch.swapaxes(images, 1, 3).clone().float()/255.0
else:
print("Image tensor is NONE!")
return self.obs_buf
| 5,824 | Python | 41.518248 | 119 | 0.67342 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import *
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
from pxr import UsdLux, UsdPhysics
class AnymalTerrainTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.height_samples = None
self.custom_origins = False
self.init_done = False
self._env_spacing = 0.0
self._num_observations = 188
self._num_actions = 12
self.update_config(sim_config)
RLTask.__init__(self, name, env)
self.height_points = self.init_height_points()
self.measured_heights = None
# joint positions offsets
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
# reward episode sums
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"lin_vel_xy": torch_zeros(),
"lin_vel_z": torch_zeros(),
"ang_vel_z": torch_zeros(),
"ang_vel_xy": torch_zeros(),
"orient": torch_zeros(),
"torques": torch_zeros(),
"joint_acc": torch_zeros(),
"base_height": torch_zeros(),
"air_time": torch_zeros(),
"collision": torch_zeros(),
"stumble": torch_zeros(),
"action_rate": torch_zeros(),
"hip": torch_zeros(),
}
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.height_meas_scale = self._task_cfg["env"]["learn"]["heightMeasurementScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["termination"] = self._task_cfg["env"]["learn"]["terminalReward"]
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["ang_vel_xy"] = self._task_cfg["env"]["learn"]["angularVelocityXYRewardScale"]
self.rew_scales["orient"] = self._task_cfg["env"]["learn"]["orientationRewardScale"]
self.rew_scales["torque"] = self._task_cfg["env"]["learn"]["torqueRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["base_height"] = self._task_cfg["env"]["learn"]["baseHeightRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["hip"] = self._task_cfg["env"]["learn"]["hipRewardScale"]
self.rew_scales["fallen_over"] = self._task_cfg["env"]["learn"]["fallenOverRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
self.base_init_state = pos + rot + v_lin + v_ang
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.decimation = self._task_cfg["env"]["control"]["decimation"]
self.dt = self.decimation * self._task_cfg["sim"]["dt"]
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.push_interval = int(self._task_cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
self.curriculum = self._task_cfg["env"]["terrain"]["curriculum"]
self.base_threshold = 0.2
self.knee_threshold = 0.1
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._task_cfg["sim"]["default_physics_material"]["static_friction"] = self._task_cfg["env"]["terrain"][
"staticFriction"
]
self._task_cfg["sim"]["default_physics_material"]["dynamic_friction"] = self._task_cfg["env"]["terrain"][
"dynamicFriction"
]
self._task_cfg["sim"]["default_physics_material"]["restitution"] = self._task_cfg["env"]["terrain"][
"restitution"
]
self._task_cfg["sim"]["add_ground_plane"] = False
def _get_noise_scale_vec(self, cfg):
noise_vec = torch.zeros_like(self.obs_buf[0])
self.add_noise = self._task_cfg["env"]["learn"]["addNoise"]
noise_level = self._task_cfg["env"]["learn"]["noiseLevel"]
noise_vec[:3] = self._task_cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale
noise_vec[3:6] = self._task_cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale
noise_vec[6:9] = self._task_cfg["env"]["learn"]["gravityNoise"] * noise_level
noise_vec[9:12] = 0.0 # commands
noise_vec[12:24] = self._task_cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale
noise_vec[24:36] = self._task_cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale
noise_vec[36:176] = (
self._task_cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale
)
noise_vec[176:188] = 0.0 # previous actions
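        # Editor's note: the slices above tile the full 188-dim observation:
        # 3 (lin vel) + 3 (ang vel) + 3 (gravity) + 3 (commands) + 12 (dof pos)
        # + 12 (dof vel) + 140 (height samples) + 12 (previous actions) = 188.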
return noise_vec
def init_height_points(self):
        # 1m x 1.6m rectangle (without center line)
y = 0.1 * torch.tensor(
[-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False
) # 10-50cm on each side
x = 0.1 * torch.tensor(
[-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False
) # 20-80cm on each side
grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
self.num_height_points = grid_x.numel()
points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False)
points[:, :, 0] = grid_x.flatten()
points[:, :, 1] = grid_y.flatten()
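        # Editor's note: 14 x-offsets times 10 y-offsets give 140 sample
        # points, matching the 140 height-measurement slots (noise_vec[36:176])
        # in the observation.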
return points
def _create_trimesh(self, create_mesh=True):
self.terrain = Terrain(self._task_cfg["env"]["terrain"], num_robots=self.num_envs)
vertices = self.terrain.vertices
triangles = self.terrain.triangles
position = torch.tensor([-self.terrain.border_size, -self.terrain.border_size, 0.0])
if create_mesh:
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position)
self.height_samples = (
torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device)
)
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
self.get_terrain()
self.get_anymal()
super().set_up_scene(scene, collision_filter_global_paths=["/World/terrain"])
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def initialize_views(self, scene):
# initialize terrain variables even if we do not need to re-create the terrain mesh
self.get_terrain(create_mesh=False)
super().initialize_views(scene)
if scene.object_exists("anymal_view"):
scene.remove_object("anymal_view", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_terrain(self, create_mesh=True):
self.env_origins = torch.zeros((self.num_envs, 3), device=self.device, requires_grad=False)
if not self.curriculum:
self._task_cfg["env"]["terrain"]["maxInitMapLevel"] = self._task_cfg["env"]["terrain"]["numLevels"] - 1
self.terrain_levels = torch.randint(
0, self._task_cfg["env"]["terrain"]["maxInitMapLevel"] + 1, (self.num_envs,), device=self.device
)
self.terrain_types = torch.randint(
0, self._task_cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device
)
self._create_trimesh(create_mesh=create_mesh)
self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float)
def get_anymal(self):
anymal_translation = torch.tensor([0.0, 0.0, 0.66])
anymal_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0])
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal",
name="anymal",
translation=anymal_translation,
orientation=anymal_orientation,
)
self._sim_config.apply_articulation_settings(
"anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("anymal")
)
anymal.set_anymal_properties(self._stage, anymal.prim)
anymal.prepare_contacts(self._stage, anymal.prim)
self.dof_names = anymal.dof_names
for i in range(self.num_actions):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
def post_reset(self):
self.base_init_state = torch.tensor(
self.base_init_state, dtype=torch.float, device=self.device, requires_grad=False
)
self.timeout_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# initialize some data used later on
self.up_axis_idx = 2
self.common_step_counter = 0
self.extras = {}
self.noise_scale_vec = self._get_noise_scale_vec(self._task_cfg)
self.commands = torch.zeros(
self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False
) # x vel, y vel, yaw vel, heading
self.commands_scale = torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
device=self.device,
requires_grad=False,
)
self.gravity_vec = torch.tensor(
get_axis_params(-1.0, self.up_axis_idx), dtype=torch.float, device=self.device
).repeat((self.num_envs, 1))
self.forward_vec = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float, device=self.device).repeat(
(self.num_envs, 1)
)
self.torques = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.last_actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)
self.last_dof_vel = torch.zeros((self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.num_envs):
self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]]
self.num_dof = self._anymals.num_dof
self.dof_pos = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.base_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device)
self.base_quat = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device)
self.base_velocities = torch.zeros((self.num_envs, 6), dtype=torch.float, device=self.device)
self.knee_pos = torch.zeros((self.num_envs * 4, 3), dtype=torch.float, device=self.device)
self.knee_quat = torch.zeros((self.num_envs * 4, 4), dtype=torch.float, device=self.device)
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
self.init_done = True
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
self.dof_vel[env_ids] = velocities
self.update_terrain_level(env_ids)
self.base_pos[env_ids] = self.base_init_state[0:3]
self.base_pos[env_ids, 0:3] += self.env_origins[env_ids]
self.base_pos[env_ids, 0:2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device)
self.base_quat[env_ids] = self.base_init_state[3:7]
self.base_velocities[env_ids] = self.base_init_state[7:]
self._anymals.set_world_poses(
positions=self.base_pos[env_ids].clone(), orientations=self.base_quat[env_ids].clone(), indices=indices
)
self._anymals.set_velocities(velocities=self.base_velocities[env_ids].clone(), indices=indices)
self._anymals.set_joint_positions(positions=self.dof_pos[env_ids].clone(), indices=indices)
self._anymals.set_joint_velocities(velocities=self.dof_vel[env_ids].clone(), indices=indices)
self.commands[env_ids, 0] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 1] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 3] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(
1
) # set small commands to zero
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
self.feet_air_time[env_ids] = 0.0
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 1
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"]["rew_" + key] = (
torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s
)
self.episode_sums[key][env_ids] = 0.0
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float())
def update_terrain_level(self, env_ids):
if not self.init_done or not self.curriculum:
# do not change on initial reset
return
root_pos, _ = self._anymals.get_world_poses(clone=False)
distance = torch.norm(root_pos[env_ids, :2] - self.env_origins[env_ids, :2], dim=1)
self.terrain_levels[env_ids] -= 1 * (
distance < torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s * 0.25
)
self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2)
self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows
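        # Editor's note (summary of the curriculum above): robots that covered
        # less than ~25% of the commanded distance for the episode move down a
        # level, robots that crossed half the terrain length move up, and
        # levels wrap modulo the number of terrain rows.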
self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]]
def refresh_dof_state_tensors(self):
self.dof_pos = self._anymals.get_joint_positions(clone=False)
self.dof_vel = self._anymals.get_joint_velocities(clone=False)
def refresh_body_state_tensors(self):
self.base_pos, self.base_quat = self._anymals.get_world_poses(clone=False)
self.base_velocities = self._anymals.get_velocities(clone=False)
self.knee_pos, self.knee_quat = self._anymals._knees.get_world_poses(clone=False)
def pre_physics_step(self, actions):
if not self._env._world.is_playing():
return
self.actions = actions.clone().to(self.device)
for i in range(self.decimation):
if self._env._world.is_playing():
torques = torch.clip(
self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos)
- self.Kd * self.dof_vel,
-80.0,
80.0,
)
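                # Editor's note: this is a PD position controller in torque
                # space, tau = Kp * (q_target - q) - Kd * qd with
                # q_target = action_scale * actions + default_dof_pos, clipped
                # to +/-80 Nm. E.g. (illustrative values only) Kp = 85, Kd = 2,
                # action_scale * a + q_default - q = 0.2 and qd = 0.5 give
                # tau = 85 * 0.2 - 2 * 0.5 = 16 Nm.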
self._anymals.set_joint_efforts(torques)
self.torques = torques
SimulationContext.step(self._env._world, render=False)
self.refresh_dof_state_tensors()
def post_physics_step(self):
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_dof_state_tensors()
self.refresh_body_state_tensors()
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
self.commands[:, 2] = torch.clip(0.5 * wrap_to_pi(self.commands[:, 3] - heading), -1.0, 1.0)
self.check_termination()
self.get_states()
self.calculate_metrics()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.get_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def push_robots(self):
self.base_velocities[:, 0:2] = torch_rand_float(
-1.0, 1.0, (self.num_envs, 2), device=self.device
) # lin vel x/y
self._anymals.set_velocities(self.base_velocities)
def check_termination(self):
self.timeout_buf = torch.where(
self.progress_buf >= self.max_episode_length - 1,
torch.ones_like(self.timeout_buf),
torch.zeros_like(self.timeout_buf),
)
knee_contact = (
torch.norm(self._anymals._knees.get_net_contact_forces(clone=False).view(self._num_envs, 4, 3), dim=-1)
> 1.0
)
self.has_fallen = (torch.norm(self._anymals._base.get_net_contact_forces(clone=False), dim=1) > 1.0) | (
torch.sum(knee_contact, dim=-1) > 1.0
)
self.reset_buf = self.has_fallen.clone()
self.reset_buf = torch.where(self.timeout_buf.bool(), torch.ones_like(self.reset_buf), self.reset_buf)
def calculate_metrics(self):
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
# other base velocity penalties
rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"]
# orientation penalty
rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"]
# base height penalty
rew_base_height = torch.square(self.base_pos[:, 2] - 0.52) * self.rew_scales["base_height"]
# torque penalty
rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"]
# joint acc penalty
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"]
# fallen over penalty
rew_fallen_over = self.has_fallen * self.rew_scales["fallen_over"]
# action rate penalty
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
# cosmetic penalty for hip motion
rew_hip = (
torch.sum(torch.abs(self.dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["hip"]
)
# total reward
self.rew_buf = (
rew_lin_vel_xy
+ rew_ang_vel_z
+ rew_lin_vel_z
+ rew_ang_vel_xy
+ rew_orient
+ rew_base_height
+ rew_torque
+ rew_joint_acc
+ rew_action_rate
+ rew_hip
+ rew_fallen_over
)
self.rew_buf = torch.clip(self.rew_buf, min=0.0, max=None)
# add termination reward
self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf
# log episode reward sums
self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy
self.episode_sums["ang_vel_z"] += rew_ang_vel_z
self.episode_sums["lin_vel_z"] += rew_lin_vel_z
self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy
self.episode_sums["orient"] += rew_orient
self.episode_sums["torques"] += rew_torque
self.episode_sums["joint_acc"] += rew_joint_acc
self.episode_sums["action_rate"] += rew_action_rate
self.episode_sums["base_height"] += rew_base_height
self.episode_sums["hip"] += rew_hip
def get_observations(self):
self.measured_heights = self.get_heights()
heights = (
torch.clip(self.base_pos[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.0) * self.height_meas_scale
)
self.obs_buf = torch.cat(
(
self.base_lin_vel * self.lin_vel_scale,
self.base_ang_vel * self.ang_vel_scale,
self.projected_gravity,
self.commands[:, :3] * self.commands_scale,
self.dof_pos * self.dof_pos_scale,
self.dof_vel * self.dof_vel_scale,
heights,
self.actions,
),
dim=-1,
)
def get_ground_heights_below_knees(self):
points = self.knee_pos.reshape(self.num_envs, 4, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_ground_heights_below_base(self):
points = self.base_pos.reshape(self.num_envs, 1, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_heights(self, env_ids=None):
if env_ids:
points = quat_apply_yaw(
self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]
) + (self.base_pos[env_ids, 0:3]).unsqueeze(1)
else:
points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (
self.base_pos[:, 0:3]
).unsqueeze(1)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
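# Editor's sketch (hypothetical helper, not used by the task): a scalar
# version of the lookup pattern shared by get_heights and the two
# get_ground_heights_below_* methods above -- shift into heightfield
# coordinates, quantize by the horizontal scale, and take the min of two
# diagonally adjacent cells as a conservative ground-height estimate.
def _lookup_height_example(height_samples, x, y, border_size, horizontal_scale, vertical_scale):
    px = min(max(int((x + border_size) / horizontal_scale), 0), height_samples.shape[0] - 2)
    py = min(max(int((y + border_size) / horizontal_scale), 0), height_samples.shape[1] - 2)
    return torch.min(height_samples[px, py], height_samples[px + 1, py + 1]) * vertical_scale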
@torch.jit.script
def quat_apply_yaw(quat, vec):
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, 1:3] = 0.0
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def wrap_to_pi(angles):
angles %= 2 * np.pi
angles -= 2 * np.pi * (angles > np.pi)
return angles
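# Editor's note: wrap_to_pi maps angles into (-pi, pi]; e.g. 1.5*pi wraps to
# -0.5*pi, which keeps the heading-error term in post_physics_step continuous
# across the +/-pi boundary.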
def get_axis_params(value, axis_idx, x_value=0.0, dtype=float, n_dims=3):
"""construct arguments to `Vec` according to axis index."""
zs = np.zeros((n_dims,))
assert axis_idx < n_dims, "the axis dim should be within the vector dimensions"
zs[axis_idx] = 1.0
params = np.where(zs == 1.0, value, zs)
params[0] = x_value
return list(params.astype(dtype))
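# Editor's note: e.g. get_axis_params(-1.0, 2) returns [0.0, 0.0, -1.0], the
# unit "down" vector tiled into self.gravity_vec in post_reset above.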
| 29,337 | Python | 45.568254 | 120 | 0.609128 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shadow_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.shadow_hand import ShadowHand
from omniisaacgymenvs.robots.articulations.views.shadow_hand_view import ShadowHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class ShadowHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
        if self.obs_type not in ["openai", "full_no_vel", "full", "full_state"]:
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]"
)
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"openai": 42,
"full_no_vel": 77,
"full": 157,
"full_state": 187,
}
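        # Editor's note: these sizes follow from the packing in the compute_*
        # methods below, e.g.
        #   openai:      15 (fingertip pos) + 3 (obj pos) + 4 (rel rot) + 20 (actions) = 42
        #   full_no_vel: 24 + 3 + 4 + 3 + 4 + 4 + 15 + 20 = 77
        #   full:        24 + 24 + 13 (object) + 11 (goal) + 65 (fingertips) + 20 = 157
        #   full_state:  48 + 13 + 11 + 65 + 30 (force-torque) + 20 = 187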
self.asymmetric_obs = self._task_cfg["env"]["asymmetric_observations"]
self.use_vel_obs = False
self.fingertip_obs = True
self.fingertips = [
"robot0:ffdistal",
"robot0:mfdistal",
"robot0:rfdistal",
"robot0:lfdistal",
"robot0:thdistal",
]
self.num_fingertips = len(self.fingertips)
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self.force_torque_obs_scale = 10.0
num_states = 0
if self.asymmetric_obs:
num_states = 187
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 20
self._num_states = num_states
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.pose_dy, self.pose_dz = -0.39, 0.10
def get_hand(self):
shadow_hand = ShadowHand(
prim_path=self.default_zero_env_path + "/shadow_hand",
name="shadow_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"shadow_hand",
get_prim_at_path(shadow_hand.prim_path),
self._sim_config.parse_actor_config("shadow_hand"),
)
shadow_hand.set_shadow_hand_properties(stage=self._stage, shadow_hand_prim=shadow_hand.prim)
shadow_hand.set_motor_control_mode(stage=self._stage, shadow_hand_path=shadow_hand.prim_path)
def get_hand_view(self, scene):
hand_view = ShadowHandView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view")
scene.add(hand_view._fingers)
return hand_view
def get_observations(self):
self.get_object_goal_observations()
self.fingertip_pos, self.fingertip_rot = self._hands._fingers.get_world_poses(clone=False)
self.fingertip_pos -= self._env_pos.repeat((1, self.num_fingertips)).reshape(
self.num_envs * self.num_fingertips, 3
)
self.fingertip_velocities = self._hands._fingers.get_velocities(clone=False)
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_state" or self.asymmetric_obs:
self.vec_sensor_tensor = self._hands.get_measured_joint_forces(
joint_indices=self._hands._sensor_indices
).view(self._num_envs, -1)
if self.obs_type == "openai":
self.compute_fingertip_observations(True)
elif self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
elif self.obs_type == "full_state":
self.compute_full_state(False)
else:
print("Unkown observations type!")
if self.asymmetric_obs:
self.compute_full_state(True)
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_fingertip_observations(self, no_vel=False):
if no_vel:
# Per https://arxiv.org/pdf/1808.00177.pdf Table 2
# Fingertip positions
# Object Position, but not orientation
# Relative target orientation
# 3*self.num_fingertips = 15
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15)
self.obs_buf[:, 15:18] = self.object_pos
self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
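            # Editor's note: quat_mul(object_rot, quat_conjugate(goal_rot)) is
            # the rotation taking the goal frame to the object frame; it equals
            # the identity quaternion (1, 0, 0, 0) exactly when the two
            # orientations match.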
self.obs_buf[:, 22:42] = self.actions
else:
            # 13*self.num_fingertips = 65: the 13-dim per-fingertip state
            # (pos 3 + rot 4 + lin/ang vel 6) is written as three slices below
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 15:35] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 35:65] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 65:68] = self.object_pos
self.obs_buf[:, 68:72] = self.object_rot
self.obs_buf[:, 72:75] = self.object_linvel
self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 78:81] = self.goal_pos
self.obs_buf[:, 81:85] = self.goal_rot
self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 89:109] = self.actions
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
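            # Editor's note: unscale maps each joint position from its
            # [lower, upper] range into [-1, 1], i.e.
            # 2 * (q - lower) / (upper - lower) - 1, so the policy sees
            # normalized joint angles regardless of per-joint limits.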
            self.obs_buf[:, 24:27] = self.object_pos
self.obs_buf[:, 27:31] = self.object_rot
self.obs_buf[:, 31:34] = self.goal_pos
self.obs_buf[:, 34:38] = self.goal_rot
self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 57:77] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 48:51] = self.object_pos
self.obs_buf[:, 51:55] = self.object_rot
self.obs_buf[:, 55:58] = self.object_linvel
self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 61:64] = self.goal_pos
self.obs_buf[:, 64:68] = self.goal_rot
self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# (7+6)*self.num_fingertips = 65
self.obs_buf[:, 72:87] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 87:107] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 107:137] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 137:157] = self.actions
def compute_full_state(self, asymm_obs=False):
if asymm_obs:
self.states_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.states_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
# self.states_buf[:, 2*self.num_hand_dofs:3*self.num_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 2 * self.num_hand_dofs # 48
self.states_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.states_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.states_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.states_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 61
self.states_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.states_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.states_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
fingertip_obs_start = goal_obs_start + 11 # 72
self.states_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
# obs_end = 72 + 65 + 30 = 167
# obs_total = obs_end + num_actions = 187
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.states_buf[:, obs_end : obs_end + self.num_actions] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 2 * self.num_hand_dofs : 3 * self.num_hand_dofs] = (
self.force_torque_obs_scale * self.dof_force_tensor
)
            obj_obs_start = 3 * self.num_hand_dofs # 72
self.obs_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.obs_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.obs_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.obs_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
            goal_obs_start = obj_obs_start + 13 # 85
self.obs_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.obs_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.obs_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
            fingertip_obs_start = goal_obs_start + 11 # 96
self.obs_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
            # obs_end = 96 + 65 + 30 = 191
            # obs_total = obs_end + num_actions = 211
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.obs_buf[:, obs_end : obs_end + self.num_actions] = self.actions
| 15,107 | Python | 48.211726 | 129 | 0.609188 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_cabinet.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import numpy as np
import torch
from omni.isaac.cloner import Cloner
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cabinet import Cabinet
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.cabinet_view import CabinetView
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from pxr import Usd, UsdGeom
class FrankaCabinetTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self.distX_offset = 0.04
self.dt = 1 / 60.0
self._num_observations = 23
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.action_scale = self._task_cfg["env"]["actionScale"]
self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
self.num_props = self._task_cfg["env"]["numProps"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
def set_up_scene(self, scene) -> None:
self.get_franka()
self.get_cabinet()
if self.num_props > 0:
self.get_props()
super().set_up_scene(scene, filter_collisions=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("cabinet_view"):
scene.remove_object("cabinet_view", registry_only=True)
if scene.object_exists("drawers_view"):
scene.remove_object("drawers_view", registry_only=True)
if scene.object_exists("prop_view"):
scene.remove_object("prop_view", registry_only=True)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
def get_franka(self):
franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka")
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
def get_cabinet(self):
cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet")
self._sim_config.apply_articulation_settings(
"cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet")
)
def get_props(self):
prop_cloner = Cloner()
drawer_pos = torch.tensor([0.0515, 0.0, 0.7172])
prop_color = torch.tensor([0.2, 0.4, 0.6])
props_per_row = int(math.ceil(math.sqrt(self.num_props)))
prop_size = 0.08
prop_spacing = 0.09
xmin = -0.5 * prop_spacing * (props_per_row - 1)
zmin = -0.5 * prop_spacing * (props_per_row - 1)
prop_count = 0
prop_pos = []
for j in range(props_per_row):
prop_up = zmin + j * prop_spacing
for k in range(props_per_row):
if prop_count >= self.num_props:
break
propx = xmin + k * prop_spacing
prop_pos.append([propx, prop_up, 0.0])
prop_count += 1
prop = DynamicCuboid(
prim_path=self.default_zero_env_path + "/prop/prop_0",
name="prop",
color=prop_color,
size=prop_size,
density=100.0,
)
self._sim_config.apply_articulation_settings(
"prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop")
)
prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)]
prop_cloner.clone(
source_prim_path=self.default_zero_env_path + "/prop/prop_0",
prim_paths=prop_paths,
positions=np.array(prop_pos) + drawer_pos.numpy(),
replicate_physics=False,
)
def init_data(self) -> None:
def get_env_local_pose(env_pos, xformable, device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
stage = get_current_stage()
hand_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")),
self._device,
)
lfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")),
self._device,
)
rfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")),
self._device,
)
finger_pose = torch.zeros(7, device=self._device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3])
grasp_pose_axis = 1
franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(
hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]
)
franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device)
self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1))
self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1))
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.franka_default_dof_pos = torch.tensor(
[1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False)
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False)
self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
(
self.franka_grasp_rot,
self.franka_grasp_pos,
self.drawer_grasp_rot,
self.drawer_grasp_pos,
) = self.compute_grasp_transforms(
hand_rot,
hand_pos,
self.franka_local_grasp_rot,
self.franka_local_grasp_pos,
drawer_rot,
drawer_pos,
self.drawer_local_grasp_rot,
self.drawer_local_grasp_pos,
)
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
dof_pos_scaled = (
2.0
* (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
to_target = self.drawer_grasp_pos - self.franka_grasp_pos
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
to_target,
self.cabinet_dof_pos[:, 3].unsqueeze(-1),
self.cabinet_dof_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
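        # Editor's note: this packs 9 scaled joint positions, 9 scaled joint
        # velocities, the 3-dim vector to the drawer grasp point, and the
        # drawer joint position and velocity: 9 + 9 + 3 + 1 + 1 = 23, matching
        # self._num_observations.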
observations = {self._frankas.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
# reset franka
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0)
+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
self.franka_dof_lower_limits,
self.franka_dof_upper_limits,
)
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
# reset cabinet
self._cabinets.set_joint_positions(
torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices
)
self._cabinets.set_joint_velocities(
torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices
)
# reset props
if self.num_props > 0:
self._props.set_world_poses(
self.default_prop_pos[self.prop_indices[env_ids].flatten()],
self.default_prop_rot[self.prop_indices[env_ids].flatten()],
self.prop_indices[env_ids].flatten().to(torch.int32),
)
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
if self.num_props > 0:
self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses()
self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view(
self._num_envs, self.num_props
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = self.compute_franka_reward(
self.reset_buf,
self.progress_buf,
self.actions,
self.cabinet_dof_pos,
self.franka_grasp_pos,
self.drawer_grasp_pos,
self.franka_grasp_rot,
self.drawer_grasp_rot,
self.franka_lfinger_pos,
self.franka_rfinger_pos,
self.gripper_forward_axis,
self.drawer_inward_axis,
self.gripper_up_axis,
self.drawer_up_axis,
self._num_envs,
self.dist_reward_scale,
self.rot_reward_scale,
self.around_handle_reward_scale,
self.open_reward_scale,
self.finger_dist_reward_scale,
self.action_penalty_scale,
self.distX_offset,
self._max_episode_length,
self.franka_dof_pos,
self.finger_close_reward_scale,
)
def is_done(self) -> None:
# reset if drawer is open or max length reached
self.reset_buf = torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
def compute_grasp_transforms(
self,
hand_rot,
hand_pos,
franka_local_grasp_rot,
franka_local_grasp_pos,
drawer_rot,
drawer_pos,
drawer_local_grasp_rot,
drawer_local_grasp_pos,
):
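        # Editor's note: tf_combine(q1, t1, q2, t2) composes rigid transforms,
        # returning (quat_mul(q1, q2), quat_apply(q1, t2) + t1); here it moves
        # the local grasp offsets of the hand and drawer into world coordinates.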
global_franka_rot, global_franka_pos = tf_combine(
hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos
)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
)
return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
def compute_franka_reward(
self,
reset_buf,
progress_buf,
actions,
cabinet_dof_pos,
franka_grasp_pos,
drawer_grasp_pos,
franka_grasp_rot,
drawer_grasp_rot,
franka_lfinger_pos,
franka_rfinger_pos,
gripper_forward_axis,
drawer_inward_axis,
gripper_up_axis,
drawer_up_axis,
num_envs,
dist_reward_scale,
rot_reward_scale,
around_handle_reward_scale,
open_reward_scale,
finger_dist_reward_scale,
action_penalty_scale,
distX_offset,
max_episode_length,
joint_positions,
finger_close_reward_scale,
):
        # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor, float) -> Tensor
# distance from hand to the drawer
d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
dist_reward = 1.0 / (1.0 + d**2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
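        # Editor's note: squaring sharpens the shaping, e.g. d = 0.1 gives
        # (1 / 1.01)^2 ~= 0.98 while d = 0.5 gives (1 / 1.25)^2 = 0.64, and
        # within 2 cm the reward is doubled as a grasp-proximity bonus.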
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = (
torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of forward axis for gripper
dot2 = (
torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2)
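        # Editor's note: sign(dot) * dot**2 keeps each alignment term signed
        # while emphasizing near-perfect alignment (|dot| close to 1).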
# bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward
),
around_handle_reward,
)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist),
finger_dist_reward,
),
finger_dist_reward,
)
finger_close_reward = torch.zeros_like(rot_reward)
finger_close_reward = torch.where(
d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward
)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions**2, dim=-1)
# how far the cabinet has been opened out
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = (
dist_reward_scale * dist_reward
+ rot_reward_scale * rot_reward
+ around_handle_reward_scale * around_handle_reward
+ open_reward_scale * open_reward
+ finger_dist_reward_scale * finger_dist_reward
- action_penalty_scale * action_penalty
+ finger_close_reward * finger_close_reward_scale
)
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
# # prevent bad style in opening drawer
# rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
# rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
return rewards
| 22,939 | Python | 41.324723 | 222 | 0.599895 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/crazyflie.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.crazyflie import Crazyflie
from omniisaacgymenvs.robots.articulations.views.crazyflie_view import CrazyflieView
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class CrazyflieTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 18
self._num_actions = 4
self._crazyflie_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
# parameters for the crazyflie
self.arm_length = 0.05
# parameters for the controller
self.motor_damp_time_up = 0.15
self.motor_damp_time_down = 0.15
        # The multiplier 4 is used because 4*T is roughly the time for a step
        # response to finish, where T is the time constant of the first-order filter
self.motor_tau_up = 4 * self.dt / (self.motor_damp_time_up + EPS)
self.motor_tau_down = 4 * self.dt / (self.motor_damp_time_down + EPS)
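        # Editor's note: for a first-order lag y += tau * (u - y) per step, a
        # time constant T settles in roughly 4*T, hence the 4*dt/T factor;
        # e.g. with dt = 0.01 s (illustrative) and motor_damp_time_up = 0.15 s,
        # motor_tau_up ~= 0.27.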
# thrust max
self.mass = 0.028
self.thrust_to_weight = 1.9
self.motor_assymetry = np.array([1.0, 1.0, 1.0, 1.0])
        # re-normalize so the motor scales sum to 4
self.motor_assymetry = self.motor_assymetry * 4.0 / np.sum(self.motor_assymetry)
self.grav_z = -1.0 * self._task_cfg["sim"]["gravity"][2]
def set_up_scene(self, scene) -> None:
self.get_crazyflie()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("crazyflie_view"):
scene.remove_object("crazyflie_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
for i in range(1, 5):
scene.remove_object(f"m{i}_prop_view", registry_only=True)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
def get_crazyflie(self):
copter = Crazyflie(
prim_path=self.default_zero_env_path + "/Crazyflie", name="crazyflie", translation=self._crazyflie_position
)
self._sim_config.apply_articulation_settings(
"crazyflie", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("crazyflie")
)
def get_target(self):
radius = 0.2
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = self.target_positions - root_positions
self.obs_buf[..., 3:6] = rot_x
self.obs_buf[..., 6:9] = rot_y
self.obs_buf[..., 9:12] = rot_z
self.obs_buf[..., 12:15] = root_linvels
self.obs_buf[..., 15:18] = root_angvels
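        # 18 observation dims: 3 (vector to target) + 9 (rotation axes)
        # + 3 (linear vel) + 3 (angular vel), matching self._num_observations.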
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
self.actions = actions
# clamp to [-1.0, 1.0]
thrust_cmds = torch.clamp(actions, min=-1.0, max=1.0)
# scale to [0.0, 1.0]
thrust_cmds = (thrust_cmds + 1.0) / 2.0
        # filter the thrust commands with a first-order motor lag, then add noise
motor_tau = self.motor_tau_up * torch.ones((self._num_envs, 4), dtype=torch.float32, device=self._device)
motor_tau[thrust_cmds < self.thrust_cmds_damp] = self.motor_tau_down
motor_tau[motor_tau > 1.0] = 1.0
        # The NN commands thrusts, but the motor lag acts on rotor velocity; since thrust ~ rot_vel^2, filter in sqrt space and square back
thrust_rot = thrust_cmds**0.5
self.thrust_rot_damp = motor_tau * (thrust_rot - self.thrust_rot_damp) + self.thrust_rot_damp
self.thrust_cmds_damp = self.thrust_rot_damp**2
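        # Sketch of the effect: for a step command 0 -> 1 with motor_tau ~= 0.267
        # (see the dt assumption above), the first filtered value is
        # thrust_rot ~= 0.267, giving thrust_cmds_damp ~= 0.071 -- a slower
        # initial thrust rise than filtering thrust directly (which would give 0.267).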
## Adding noise
thrust_noise = 0.01 * torch.randn(4, dtype=torch.float32, device=self._device)
thrust_noise = thrust_cmds * thrust_noise
self.thrust_cmds_damp = torch.clamp(self.thrust_cmds_damp + thrust_noise, min=0.0, max=1.0)
thrusts = self.thrust_max * self.thrust_cmds_damp
# thrusts given rotation
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
rot_matrix = torch.cat((rot_x, rot_y, rot_z), 1).reshape(-1, 3, 3)
force_x = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_y = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_xy = torch.cat((force_x, force_y), 1).reshape(-1, 4, 2)
thrusts = thrusts.reshape(-1, 4, 1)
thrusts = torch.cat((force_xy, thrusts), 2)
thrusts_0 = thrusts[:, 0]
thrusts_0 = thrusts_0[:, :, None]
thrusts_1 = thrusts[:, 1]
thrusts_1 = thrusts_1[:, :, None]
thrusts_2 = thrusts[:, 2]
thrusts_2 = thrusts_2[:, :, None]
thrusts_3 = thrusts[:, 3]
thrusts_3 = thrusts_3[:, :, None]
mod_thrusts_0 = torch.matmul(rot_matrix, thrusts_0)
mod_thrusts_1 = torch.matmul(rot_matrix, thrusts_1)
mod_thrusts_2 = torch.matmul(rot_matrix, thrusts_2)
mod_thrusts_3 = torch.matmul(rot_matrix, thrusts_3)
self.thrusts[:, 0] = torch.squeeze(mod_thrusts_0)
self.thrusts[:, 1] = torch.squeeze(mod_thrusts_1)
self.thrusts[:, 2] = torch.squeeze(mod_thrusts_2)
self.thrusts[:, 3] = torch.squeeze(mod_thrusts_3)
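        # Shape walkthrough: thrusts is (num_envs, 4, 3) after the concat above;
        # each per-rotor slice becomes (num_envs, 3, 1) so that torch.matmul with
        # rot_matrix (num_envs, 3, 3) batches over envs, and squeeze() drops the
        # trailing singleton back to (num_envs, 3).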
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
        # spin the rotors (alternating directions)
prop_rot = self.thrust_cmds_damp * self.prop_max_rot
self.dof_vel[:, 0] = prop_rot[:, 0]
self.dof_vel[:, 1] = -1.0 * prop_rot[:, 1]
self.dof_vel[:, 2] = prop_rot[:, 2]
self.dof_vel[:, 3] = -1.0 * prop_rot[:, 3]
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(4):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
thrust_max = self.grav_z * self.mass * self.thrust_to_weight * self.motor_assymetry / 4.0
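        # Rough numbers (assuming standard gravity ~9.81 m/s^2 in the sim config):
        # per-motor max thrust ~= 9.81 * 0.028 * 1.9 / 4 ~= 0.130 N, so hovering
        # needs thrust_cmds_damp ~= 1 / 1.9 ~= 0.53 on each motor.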
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_max = torch.tensor(thrust_max, device=self._device, dtype=torch.float32)
self.motor_linearity = 1.0
self.prop_max_rot = 433.3
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.actions = torch.zeros((self._num_envs, 4), device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
# Extra info
self.extras = {}
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"rew_pos": torch_zeros(),
"rew_orient": torch_zeros(),
"rew_effort": torch_zeros(),
"rew_spin": torch_zeros(),
"raw_dist": torch_zeros(),
"raw_orient": torch_zeros(),
"raw_effort": torch_zeros(),
"raw_spin": torch_zeros(),
}
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control parameters
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.set_targets(self.all_indices)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
        # set a fixed target position: x = y = 0, z = 2.0 (not randomized)
self.target_positions[envs_long, 0:2] = torch.zeros((num_sets, 2), device=self._device)
self.target_positions[envs_long, 2] = torch.ones(num_sets, device=self._device) * 2.0
        # optional vertical offset for visual alignment (currently zero)
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.0
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.0, 0.0, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.thrust_cmds_damp[env_ids] = 0
self.thrust_rot_damp[env_ids] = 0
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
self.episode_sums[key][env_ids] = 0.0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# pos reward
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# orient reward
ups = quat_axis(root_quats, 2)
self.orient_z = ups[..., 2]
up_reward = torch.clamp(ups[..., 2], min=0.0, max=1.0)
# effort reward
effort = torch.square(self.actions).sum(-1)
effort_reward = 0.05 * torch.exp(-0.5 * effort)
# spin reward
spin = torch.square(root_angvels).sum(-1)
spin_reward = 0.01 * torch.exp(-1.0 * spin)
# combined reward
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spin_reward) - effort_reward
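        # Note the multiplicative gating: the orientation and spin bonuses are
        # scaled by pos_reward, so they only pay off near the target, while the
        # effort penalty applies everywhere.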
# log episode reward sums
self.episode_sums["rew_pos"] += pos_reward
self.episode_sums["rew_orient"] += up_reward
self.episode_sums["rew_effort"] += effort_reward
self.episode_sums["rew_spin"] += spin_reward
# log raw info
self.episode_sums["raw_dist"] += target_dist
self.episode_sums["raw_orient"] += ups[..., 2]
self.episode_sums["raw_effort"] += effort
self.episode_sums["raw_spin"] += spin
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 5.0, ones, die)
        # alive envelope: 0.5 <= z <= 5.0 and upright (orient_z > 0); leaving it triggers a reset
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
die = torch.where(self.root_positions[..., 2] > 5.0, ones, die)
die = torch.where(self.orient_z < 0.0, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 16,830 | Python | 41.502525 | 120 | 0.61937 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class HumanoidLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTask.set_up_scene(self, scene)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("humanoid_view"):
scene.remove_object("humanoid_view", registry_only=True)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
def get_humanoid(self):
humanoid = Humanoid(
prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions
)
self._sim_config.apply_articulation_settings(
"Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid")
)
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = torch.tensor(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
)
self.max_motor_effort = torch.max(self.joint_gears)
self.motor_effort_ratio = self.joint_gears / self.max_motor_effort
dof_limits = self._humanoids.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = torch.tensor(
[self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, motor_effort_ratio, joints_at_limit_cost_scale):
# type: (Tensor, Tensor, float) -> Tensor
scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
dof_at_limit_cost = torch.sum(
(torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1
)
return dof_at_limit_cost
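# Worked example: obs_buf[:, 12:33] holds the 21 joint positions normalized to
# [-1, 1] (produced by the shared LocomotionTask). The cost is zero until a
# normalized position passes 0.98, then ramps linearly to
# joints_at_limit_cost_scale at the hard limit, weighted per joint by
# motor_effort_ratio so strong joints (e.g. thigh y, gear 135) are penalized
# more than weak ones (e.g. feet, gear 22.5).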
| 5,980 | Python | 41.119718 | 117 | 0.651003 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_deformable.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim
from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView
from omni.physx.scripts import deformableUtils, physicsUtils
import carb  # needed for carb.log_error in set_up_scene
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema
class FrankaDeformableTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
self.dt = 1/60.
self._num_observations = 39
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["actionScale"]
def set_up_scene(self, scene) -> None:
self.stage = get_current_stage()
self.assets_root_path = get_assets_root_path()
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self.get_franka()
self.get_beaker()
self.get_deformable_tube()
super().set_up_scene(scene=scene, replicate_physics=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self.deformableView)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("deformabletube_view"):
scene.remove_object("deformabletube_view", registry_only=True)
self._frankas = FrankaView(
prim_paths_expr="/World/envs/.*/franka", name="franka_view"
)
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self.deformableView)
def get_franka(self):
franka = Franka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
translation=torch.tensor([0.0, 0.0, 0.0]),
)
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
franka.set_franka_properties(stage=self.stage, prim=franka.prim)
def get_beaker(self):
_usd_path = self.assets_root_path + "/Isaac/Props/Beaker/beaker_500ml.usd"
mesh_path = self.default_zero_env_path + "/beaker"
add_reference_to_stage(_usd_path, mesh_path)
beaker = RigidPrim(
prim_path=mesh_path+"/beaker",
name="beaker",
position=torch.tensor([0.5, 0.2, 0.095]),
)
self._sim_config.apply_articulation_settings("beaker", beaker.prim, self._sim_config.parse_actor_config("beaker"))
def get_deformable_tube(self):
_usd_path = self.assets_root_path + "/Isaac/Props/DeformableTube/tube.usd"
mesh_path = self.default_zero_env_path + "/deformableTube/tube"
add_reference_to_stage(_usd_path, mesh_path)
skin_mesh = get_prim_at_path(mesh_path)
physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh)
physicsUtils.set_or_add_translate_op(skin_mesh, (0.6, 0.0, 0.005))
physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([0, 0, 1]), 90).GetQuat())
def get_observations(self) -> dict:
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
dof_pos_scaled = (
2.0 * (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
tube_positions = self.deformableView.get_simulation_mesh_nodal_positions(clone=False)
tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities(clone=False)
self.tube_front_positions = tube_positions[:, 200, :] - self._env_pos
self.tube_front_velocities = tube_velocities[:, 200, :]
self.tube_back_positions = tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = tube_velocities[:, -1, :]
front_to_gripper = self.tube_front_positions - self.gripper_site_pos
to_front_goal = self.front_goal_pos - self.tube_front_positions
to_back_goal = self.back_goal_pos - self.tube_back_positions
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
front_to_gripper,
to_front_goal,
to_back_goal,
self.tube_front_positions,
self.tube_front_velocities,
self.tube_back_positions,
self.tube_back_velocities,
),
dim=-1,
)
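        # Dimension tally for obs_buf: 9 (scaled dof pos) + 9 (scaled dof vel)
        # + 3 + 3 + 3 (gripper/goal deltas) + 3 + 3 + 3 + 3 (tube front/back
        # pos and vel) = 39, matching self._num_observations.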
observations = {
self._frankas.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
self.franka_dof_targets[:, -1] = self.franka_dof_targets[:, -2]
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
pos = self.franka_default_dof_pos
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
self.deformableView.set_simulation_mesh_nodal_positions(self.initial_tube_positions[env_ids], indices)
self.deformableView.set_simulation_mesh_nodal_velocities(self.initial_tube_velocities[env_ids], indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.franka_default_dof_pos = torch.tensor(
[0.00, 0.63, 0.00, -2.15, 0.00, 2.76, 0.75, 0.02, 0.02], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
self.front_goal_pos = torch.tensor([0.36, 0.0, 0.23], device=self._device).repeat((self._num_envs, 1))
self.back_goal_pos = torch.tensor([0.5, 0.2, 0.0], device=self._device).repeat((self._num_envs, 1))
self.goal_hand_rot = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self._device).repeat((self.num_envs, 1))
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
self.initial_tube_positions = self.deformableView.get_simulation_mesh_nodal_positions()
self.initial_tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities()
self.tube_front_positions = self.initial_tube_positions[:, 0, :] - self._env_pos
self.tube_front_velocities = self.initial_tube_velocities[:, 0, :]
self.tube_back_positions = self.initial_tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = self.initial_tube_velocities[:, -1, :]
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
        goal_distance_error = torch.norm(self.tube_back_positions[:, 0:2] - self.back_goal_pos[:, 0:2], p=2, dim=-1)
        goal_dist_reward = 1.0 / (5 * goal_distance_error + 0.025)
        current_z_level = self.tube_back_positions[:, 2:3]
        z_lift_level = torch.where(
            goal_distance_error < 0.07, torch.zeros_like(current_z_level), torch.ones_like(current_z_level) * 0.18
        )
        front_lift_error = torch.norm(current_z_level - z_lift_level, p=2, dim=-1)
        front_lift_reward = 1.0 / (5 * front_lift_error + 0.025)
        rewards = goal_dist_reward + 4 * front_lift_reward
self.rew_buf[:] = rewards
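        # Reward shape: both terms are inverse-distance, e.g. at zero error a
        # term is 1 / 0.025 = 40, at 0.175 m it is ~1.1. The z target switches
        # from a 0.18 m lift to 0 once the tube back end is within 7 cm of the
        # goal in xy, so the policy lifts first, then lowers onto the goal.
        # (Note: the lift terms use the tube back end despite the front_lift_* names.)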
def is_done(self) -> None:
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] < 0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] < -1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
| 13,322 | Python | 42.825658 | 136 | 0.641045 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class AntLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = torch.tensor([0, 0, 0.5])
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTask.set_up_scene(self, scene)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("ant_view"):
scene.remove_object("ant_view", registry_only=True)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings(
"Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant")
)
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = torch.tensor([15, 15, 15, 15, 15, 15, 15, 15], dtype=torch.float32, device=self._device)
dof_limits = self._ants.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = torch.tensor(
[self._ants._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self._ants.num_dof)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, num_dof):
# type: (Tensor, int) -> Tensor
return torch.sum(obs_buf[:, 12 : 12 + num_dof] > 0.99, dim=-1)
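# Here the cost simply counts DOFs whose normalized position (obs_buf columns
# 12..19, scaled to [-1, 1]) exceeds 0.99, i.e. joints pressed against their
# upper limit; unlike the humanoid variant there is no linear ramp or
# per-joint effort weighting.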
| 4,691 | Python | 41.654545 | 115 | 0.69708 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("cartpole_view"):
scene.remove_object("cartpole_view", registry_only=True)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
def get_cartpole(self):
cartpole = Cartpole(
prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole")
)
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
self.obs_buf[:, 0] = self.cart_pos
self.obs_buf[:, 1] = self.cart_vel
self.obs_buf[:, 2] = self.pole_pos
self.obs_buf[:, 3] = self.pole_vel
observations = {self._cartpoles.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.to(self._device)
forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device)
forces[:, self._cart_dof_idx] = self._max_push_effort * actions[:, 0]
indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device)
self._cartpoles.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions
dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
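        # The expression 1.0 - 2.0 * rand maps U[0, 1] to U[-1, 1], so cart
        # positions start in [-1, 1] (meters, for the default prismatic joint)
        # and pole angles in [-pi/8, pi/8] rad.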
# randomize DOF velocities
dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._cartpoles.set_joint_positions(dof_pos, indices=indices)
self._cartpoles.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
# randomize all envs
indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
reward = 1.0 - self.pole_pos * self.pole_pos - 0.01 * torch.abs(self.cart_vel) - 0.005 * torch.abs(self.pole_vel)
reward = torch.where(torch.abs(self.cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
reward = torch.where(torch.abs(self.pole_pos) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
self.rew_buf[:] = reward
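        # Reward sketch: ~1.0 when the pole is upright and still, minus small
        # velocity penalties; a flat -2.0 overrides it whenever the cart or
        # pole leaves its allowed range, matching the reset conditions below.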
def is_done(self) -> None:
resets = torch.where(torch.abs(self.cart_pos) > self._reset_dist, 1, 0)
resets = torch.where(torch.abs(self.pole_pos) > math.pi / 2, 1, resets)
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets)
self.reset_buf[:] = resets
| 7,256 | Python | 42.981818 | 121 | 0.659179 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/dofbot_reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/tasks/shadow_hand.py
import math
import numpy as np
import torch
from omniisaacgymenvs.sim2real.dofbot import RealWorldDofbot
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.robots.articulations.views.dofbot_view import DofbotView
from omniisaacgymenvs.robots.articulations.dofbot import Dofbot
from omniisaacgymenvs.tasks.shared.reacher import ReacherTask
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omni.isaac.gym.vec_env import VecEnvBase
class DofbotReacherTask(ReacherTask):
def __init__(
self,
name: str,
sim_config: SimConfig,
env: VecEnvBase,
offset=None
) -> None:
self.update_config(sim_config)
ReacherTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.obs_type = self._task_cfg["env"]["observationType"]
        if self.obs_type not in ["full"]:
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full": 29,
# 6: dofbot joints position (action space)
# 6: dofbot joints velocity
# 3: goal position
# 4: goal rotation
# 4: goal relative rotation
# 6: previous action
}
self.object_scale = torch.tensor([0.1] * 3)
self.goal_scale = torch.tensor([0.5] * 3)
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 6
self._num_states = 0
pi = math.pi
# For actions
self._dof_limits = torch.tensor([[
[-pi/2, pi/2],
[-pi/4, pi/4],
[-pi/4, pi/4],
[-pi/4, pi/4],
[-pi/2, pi/2],
[-0.1, 0.1], # The gripper joint will be ignored, since it is not used in the Reacher task
]], dtype=torch.float32, device=self._cfg["sim_device"])
# The last action space cannot be [0, 0]
# It will introduce the following error:
# ValueError: Expected parameter loc (Tensor of shape (2048, 6)) of distribution Normal(loc: torch.Size([2048, 6]), scale: torch.Size([2048, 6])) to satisfy the constraint Real(), but found invalid values
self.useURDF = self._task_cfg["env"]["useURDF"]
# Setup Sim2Real
sim2real_config = self._task_cfg['sim2real']
if sim2real_config['enabled'] and self.test and self.num_envs == 1:
self.real_world_dofbot = RealWorldDofbot(
sim2real_config['ip'],
sim2real_config['port'],
sim2real_config['fail_quietely'],
sim2real_config['verbose']
)
ReacherTask.update_config(self)
def get_num_dof(self):
# assert self._arms.num_dof == 11
return min(self._arms.num_dof, 6)
def get_arm(self):
if not self.useURDF:
usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_instanceable.usd"
else:
usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf_instanceable.usd"
dofbot = Dofbot(
prim_path=self.default_zero_env_path + "/Dofbot",
name="Dofbot",
usd_path=usd_path
)
self._sim_config.apply_articulation_settings(
"dofbot",
get_prim_at_path(dofbot.prim_path),
self._sim_config.parse_actor_config("dofbot"),
)
def get_arm_view(self, scene):
if not self.useURDF:
end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5/Wrist_Twist"
else:
end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5"
arm_view = DofbotView(
prim_paths_expr="/World/envs/.*/Dofbot",
end_effector_prim_paths_expr=end_effector_prim_paths_expr,
name="dofbot_view"
)
scene.add(arm_view._end_effectors)
return arm_view
def get_object_displacement_tensor(self):
return torch.tensor([0.0, 0.015, 0.1], device=self.device).repeat((self.num_envs, 1))
def get_observations(self):
self.arm_dof_pos = self._arms.get_joint_positions()
self.arm_dof_vel = self._arms.get_joint_velocities()
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {self._arms.name: {"obs_buf": self.obs_buf}}
return observations
def get_reset_target_new_pos(self, n_reset_envs):
# Randomly generate goal positions, although the resulting goal may still not be reachable.
new_pos = torch_rand_float(-1, 1, (n_reset_envs, 3), device=self.device)
new_pos[:, 0] = new_pos[:, 0] * 0.05 + 0.15 * torch.sign(new_pos[:, 0])
new_pos[:, 1] = new_pos[:, 1] * 0.05 + 0.15 * torch.sign(new_pos[:, 1])
new_pos[:, 2] = torch.abs(new_pos[:, 2] * 0.2) + 0.15
return new_pos
def compute_full_observations(self, no_vel=False):
if no_vel:
raise NotImplementedError()
else:
# There are many redundant information for the simple Reacher task, but we'll keep them for now.
self.obs_buf[:, 0:self.num_arm_dofs] = unscale(self.arm_dof_pos[:, :self.num_arm_dofs],
self.arm_dof_lower_limits, self.arm_dof_upper_limits)
self.obs_buf[:, self.num_arm_dofs:2*self.num_arm_dofs] = self.vel_obs_scale * self.arm_dof_vel[:, :self.num_arm_dofs]
base = 2 * self.num_arm_dofs
self.obs_buf[:, base+0:base+3] = self.goal_pos
self.obs_buf[:, base+3:base+7] = self.goal_rot
self.obs_buf[:, base+7:base+11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, base+11:base+17] = self.actions
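            # Index map with num_arm_dofs = 6 (so base = 12):
            # 0:6 joint pos, 6:12 joint vel, 12:15 goal pos, 15:19 goal rot,
            # 19:23 relative rot, 23:29 previous actions -> 29 dims total,
            # matching num_obs_dict["full"].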
def send_joint_pos(self, joint_pos):
self.real_world_dofbot.send_joint_pos(joint_pos)
| 7,961 | Python | 41.57754 | 212 | 0.633589 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/quadcopter.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.quadcopter import Quadcopter
from omniisaacgymenvs.robots.articulations.views.quadcopter_view import QuadcopterView
class QuadcopterTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 21
self._num_actions = 12
self._copter_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
max_thrust = 2.0
self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_copter()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("quadcopter_view"):
scene.remove_object("quadcopter_view", registry_only=True)
if scene.object_exists("rotors_view"):
scene.remove_object("rotors_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
def get_copter(self):
copter = Quadcopter(
prim_path=self.default_zero_env_path + "/Quadcopter", name="quadcopter", translation=self._copter_position
)
self._sim_config.apply_articulation_settings(
"copter", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("copter")
)
def get_target(self):
radius = 0.05
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
self.obs_buf[..., 13:21] = self.dof_pos
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.clone().to(self._device)
dof_action_speed_scale = 8 * math.pi
self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits
)
thrust_action_speed_scale = 100
self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]
self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)
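        # Actions are rates, not setpoints: the first 8 channels integrate the
        # rotor-arm DOF targets at up to 8*pi rad/s and the last 4 integrate
        # the thrusts at up to 100 N/s, clamped to +/-2 N. E.g. with dt = 0.01
        # (an assumed config value) a saturated action moves a DOF target by
        # ~0.25 rad per step.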
self.forces[:, 0, 2] = self.thrusts[:, 0]
self.forces[:, 1, 2] = self.thrusts[:, 1]
self.forces[:, 2, 2] = self.thrusts[:, 2]
self.forces[:, 3, 2] = self.thrusts[:, 3]
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0.0
self.forces[reset_env_ids] = 0.0
self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids]
# apply actions
self._copters.set_joint_position_targets(self.dof_position_targets)
self._copters.rotors.apply_forces(self.forces, is_global=False)
def post_reset(self):
# control tensors
self.dof_position_targets = torch.zeros(
(self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False)
self.forces = torch.zeros(
(self._num_envs, self._copters.rotors.count // self._num_envs, 3),
dtype=torch.float32,
device=self._device,
requires_grad=False,
)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device)
self.target_positions[:, 2] = 1.0
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
self.dof_vel = self._copters.get_joint_velocities(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
dof_limits = self._copters.get_dof_limits()
self.dof_lower_limits = dof_limits[0][:, 0].to(device=self._device)
self.dof_upper_limits = dof_limits[0][:, 1].to(device=self._device)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + 3 * target_dist * target_dist) # 2
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage)
rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1 / 400))
rew = torch.clip(rew, 0.0, None)
self.rew_buf[:] = rew
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 3.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.3, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 11,498 | Python | 42.889313 | 120 | 0.640633 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ingenuity.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ingenuity import Ingenuity
from omniisaacgymenvs.robots.articulations.views.ingenuity_view import IngenuityView
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
import numpy as np
import torch
import math
class IngenuityTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
self.thrust_limit = 2000
self.thrust_lateral_component = 0.2
self._num_observations = 13
self._num_actions = 6
self._ingenuity_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_ingenuity()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("ingenuity_view"):
scene.remove_object("ingenuity_view", registry_only=True)
for i in range(2):
if scene.object_exists(f"physics_rotor_{i}_view"):
scene.remove_object(f"physics_rotor_{i}_view", registry_only=True)
if scene.object_exists(f"visual_rotor_{i}_view"):
scene.remove_object(f"visual_rotor_{i}_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
def get_ingenuity(self):
copter = Ingenuity(prim_path=self.default_zero_env_path + "/Ingenuity", name="ingenuity", translation=self._ingenuity_position)
self._sim_config.apply_articulation_settings("ingenuity", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("ingenuity"))
def get_target(self):
radius = 0.1
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball"))
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
observations = {
self._copters.name: {
"obs_buf": self.obs_buf
}
}
return observations
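        # Observation layout (13 values per env), matching the writes above:
        #   [0:3]  vector to target / 3, [3:7] root quaternion,
        #   [7:10] root linear velocity / 2, [10:13] root angular velocity / pi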
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
lateral_fraction_prop_0 = torch.clamp(
actions[:, 0:2] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
lateral_fraction_prop_1 = torch.clamp(
actions[:, 3:5] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0
self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0
self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1
self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1
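        # each rotor's stored force is its clamped vertical thrust scaled by dt (z
        # component), plus x/y components capped at thrust_lateral_component (20%)
        # of that same z force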
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
        # drive the rotor joints at a constant spin rate
self.dof_vel[:, self.spinning_indices[0]] = 50
self.dof_vel[:, self.spinning_indices[1]] = -50
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(2):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
self.spinning_indices = torch.tensor([1, 3], device=self._device)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control tensors
self.thrusts = torch.zeros((self._num_envs, 2, 3), dtype=torch.float32, device=self._device)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
# set target position randomly with x, y in (-1, 1) and z in (1, 2)
self.target_positions[envs_long, 0:2] = torch.rand((num_sets, 2), device=self._device) * 2 - 1
self.target_positions[envs_long, 2] = torch.rand(num_sets, device=self._device) + 1
# shift the target up so it visually aligns better
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.4
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
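        # note: target_positions stays in env-local coordinates for the reward, while
        # the visual ball is placed in world space (env offset added, z raised by 0.4)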
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, 1] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_pos[env_ids, 3] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + 2.5 * target_dist * target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 30 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 10 * spinnage * spinnage)
# combined reward
# uprightness and spinning only matter when close to the target
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spinnage_reward)
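        # Worked example: hovering exactly on target (target_dist = 0), upright
        # (tiltage = 0) and spin-free (spinnage = 0) gives pos_reward = up_reward =
        # spinnage_reward = 1.0, i.e. the per-step maximum of 1 + 1 * (1 + 1) = 3.0.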
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 20.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 12,391 | Python | 42.943262 | 151 | 0.635138 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/anymal.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
class AnymalTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 48
self._num_actions = 12
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["cosmetic"] = self._task_cfg["env"]["learn"]["cosmeticRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = state
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.dt = 1 / 60
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
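        # scaling by dt makes the configured reward scales act as per-second rates,
        # independent of the 60 Hz control step set above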
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._anymal_translation = torch.tensor([0.0, 0.0, 0.62])
self._env_spacing = self._task_cfg["env"]["envSpacing"]
def set_up_scene(self, scene) -> None:
self.get_anymal()
super().set_up_scene(scene)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("anymalview"):
scene.remove_object("anymalview", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_anymal(self):
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal", name="Anymal", translation=self._anymal_translation
)
self._sim_config.apply_articulation_settings(
"Anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("Anymal")
)
# Configure joint properties
joint_paths = []
for quadrant in ["LF", "LH", "RF", "RH"]:
for component, abbrev in [("HIP", "H"), ("THIGH", "K")]:
joint_paths.append(f"{quadrant}_{component}/{quadrant}_{abbrev}FE")
joint_paths.append(f"base/{quadrant}_HAA")
for joint_path in joint_paths:
set_drive(f"{anymal.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000)
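        # set_drive configures a PD position drive on each joint; judging from the
        # positional arguments, these are target=0, stiffness=400, damping=40 and
        # max_force=1000 (parameter meaning assumed from
        # omniisaacgymenvs.tasks.utils.usd_utils.set_drive, not verified here)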
def get_observations(self) -> dict:
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity) * self.lin_vel_scale
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity) * self.ang_vel_scale
projected_gravity = quat_rotate(torso_rotation, self.gravity_vec)
dof_pos_scaled = (dof_pos - self.default_dof_pos) * self.dof_pos_scale
commands_scaled = self.commands * torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
requires_grad=False,
device=self.commands.device,
)
obs = torch.cat(
(
base_lin_vel,
base_ang_vel,
projected_gravity,
commands_scaled,
dof_pos_scaled,
dof_vel * self.dof_vel_scale,
self.actions,
),
dim=-1,
)
self.obs_buf[:] = obs
observations = {self._anymals.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
indices = torch.arange(self._anymals.count, dtype=torch.int32, device=self._device)
self.actions[:] = actions.clone().to(self._device)
current_targets = self.current_targets + self.action_scale * self.actions * self.dt
self.current_targets[:] = tensor_clamp(
current_targets, self.anymal_dof_lower_limits, self.anymal_dof_upper_limits
)
self._anymals.set_joint_position_targets(self.current_targets, indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF velocities
velocities = torch_rand_float(-0.1, 0.1, (num_resets, self._anymals.num_dof), device=self._device)
dof_pos = self.default_dof_pos[env_ids]
dof_vel = velocities
self.current_targets[env_ids] = dof_pos[:]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._anymals.set_joint_positions(dof_pos, indices)
self._anymals.set_joint_velocities(dof_vel, indices)
self._anymals.set_world_poses(
self.initial_root_pos[env_ids].clone(), self.initial_root_rot[env_ids].clone(), indices
)
self._anymals.set_velocities(root_vel, indices)
self.commands_x[env_ids] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_y[env_ids] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_yaw[env_ids] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (num_resets, 1), device=self._device
).squeeze()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
def post_reset(self):
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
dof_names = self._anymals.dof_names
for i in range(self.num_actions):
name = dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
self.initial_root_pos, self.initial_root_rot = self._anymals.get_world_poses()
self.current_targets = self.default_dof_pos.clone()
dof_limits = self._anymals.get_dof_limits()
self.anymal_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.anymal_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.commands = torch.zeros(self._num_envs, 3, dtype=torch.float, device=self._device, requires_grad=False)
self.commands_y = self.commands.view(self._num_envs, 3)[..., 1]
self.commands_x = self.commands.view(self._num_envs, 3)[..., 0]
self.commands_yaw = self.commands.view(self._num_envs, 3)[..., 2]
# initialize some data used later on
self.extras = {}
self.gravity_vec = torch.tensor([0.0, 0.0, -1.0], device=self._device).repeat((self._num_envs, 1))
self.actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.last_dof_vel = torch.zeros(
(self._num_envs, 12), dtype=torch.float, device=self._device, requires_grad=False
)
self.last_actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.time_out_buf = torch.zeros_like(self.reset_buf)
# randomize all envs
indices = torch.arange(self._anymals.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity)
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity)
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
rew_lin_vel_z = torch.square(base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - dof_vel), dim=1) * self.rew_scales["joint_acc"]
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
rew_cosmetic = (
torch.sum(torch.abs(dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["cosmetic"]
)
total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_joint_acc + rew_action_rate + rew_cosmetic + rew_lin_vel_z
total_reward = torch.clip(total_reward, 0.0, None)
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = dof_vel[:]
self.fallen_over = self._anymals.is_base_below_threshold(threshold=0.51, ground_heights=0.0)
total_reward[torch.nonzero(self.fallen_over)] = -1
self.rew_buf[:] = total_reward.detach()
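        # The exp(-error / 0.25) tracking terms above map zero velocity error to a
        # full reward and a squared error of 0.25 to roughly exp(-1) ~= 0.37, so the
        # tracking bonus decays quickly once commands are missed.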
def is_done(self) -> None:
# reset agents
time_out = self.progress_buf >= self.max_episode_length - 1
self.reset_buf[:] = time_out | self.fallen_over
| 14,350 | Python | 44.55873 | 118 | 0.630941 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class HumanoidLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTaskWarp.set_up_scene(self, scene)
self._humanoids = ArticulationView(prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False)
scene.add(self._humanoids)
return
def get_humanoid(self):
humanoid = Humanoid(prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions)
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path),
self._sim_config.parse_actor_config("Humanoid"))
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = wp.array(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
dtype=wp.float32
)
self.max_motor_effort = 135.0
self.motor_effort_ratio = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(compute_effort_ratio, dim=self._humanoids._num_dof,
inputs=[self.motor_effort_ratio, self.joint_gears, self.max_motor_effort], device=self._device)
dof_limits = self._humanoids.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._humanoids._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = wp.array([self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
        self.dof_at_limit_cost.zero_()  # clear the per-env accumulator before summing over joints
        wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._humanoids._num_dof),
            inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale])
return self.dof_at_limit_cost
@wp.kernel
def compute_effort_ratio(motor_effort_ratio: wp.array(dtype=wp.float32),
joint_gears: wp.array(dtype=wp.float32),
max_motor_effort: float):
tid = wp.tid()
motor_effort_ratio[tid] = joint_gears[tid] / max_motor_effort
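# Example: the 67.5 gear joints over the 135.0 max effort give a ratio of 0.5, so
# those joints are charged half as much electricity cost per unit of action.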
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
dof_limits_upper[tid] = dof_limits[0, tid, 1]
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32),
joints_at_limit_cost_scale: float):
i, j = wp.tid()
dof_i = j + 12
scaled_cost = joints_at_limit_cost_scale * (wp.abs(obs_buf[i, dof_i]) - 0.98) / 0.02
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.98:
cost = scaled_cost * motor_effort_ratio[j]
    # accumulate across the joint dimension; a plain store here would race between
    # the j threads of each env and keep only one joint's cost
    wp.atomic_add(dof_at_limit_cost, i, cost)
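# Worked example for the kernel above: a joint at 99% of its normalized range
# exceeds the 0.98 threshold and contributes joints_at_limit_cost_scale *
# (0.99 - 0.98) / 0.02 = 0.5 * scale, weighted by its motor effort ratio.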
| 6,686 | Python | 42.422078 | 143 | 0.639994 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class AntLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = wp.array([0, 0, 0.5], dtype=wp.float32, device="cpu")
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTaskWarp.set_up_scene(self, scene)
self._ants = ArticulationView(prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False)
scene.add(self._ants)
return
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings("Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant"))
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = wp.array([15, 15, 15, 15, 15, 15, 15, 15], dtype=wp.float32, device=self._device)
dof_limits = self._ants.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._ants._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.motor_effort_ratio = wp.array([1, 1, 1, 1, 1, 1, 1, 1], dtype=wp.float32, device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = wp.array([self._ants._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
        self.dof_at_limit_cost.zero_()  # clear the per-env accumulator before summing over joints
        wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._ants._num_dof),
            inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio])
return self.dof_at_limit_cost
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32)):
i, j = wp.tid()
dof_i = j + 12
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.99:
cost = 1.0
    # accumulate across the joint dimension; a plain store here would race between
    # the j threads of each env and keep only one joint's cost
    wp.atomic_add(dof_at_limit_cost, i, cost)
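# Unlike the humanoid variant, the ant charges a flat cost of 1.0 for each joint
# driven past 99% of its normalized range, independent of the motor effort ratio.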
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
    dof_limits_upper[tid] = dof_limits[0, tid, 1]
| 5,221 | Python | 44.807017 | 136 | 0.685309 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class CartpoleTask(RLTaskWarp):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = wp.array([0.0, 0.0, 2.0], dtype=wp.float32)
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTaskWarp.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False)
scene.add(self._cartpoles)
return
def get_cartpole(self):
cartpole = Cartpole(prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole"))
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, dof_pos, dof_vel, self._cart_dof_idx, self._pole_dof_idx], device=self._device)
observations = {
self._cartpoles.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
forces = wp.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=wp.float32, device=self._device)
wp.launch(compute_forces, dim=self._num_envs,
inputs=[forces, actions_wp, self._cart_dof_idx, self._max_push_effort], device=self._device)
self._cartpoles.set_joint_efforts(forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_idx, num_resets,
inputs=[self.dof_pos, self.dof_vel, indices, self.reset_buf, self.progress_buf, self._cart_dof_idx, self._pole_dof_idx, self._rand_seed],
device=self._device)
# apply resets
self._cartpoles.set_joint_positions(self.dof_pos[indices], indices=indices)
self._cartpoles.set_joint_velocities(self.dof_vel[indices], indices=indices)
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
self.dof_pos = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
# randomize all envs
self.reset_idx()
def calculate_metrics(self) -> None:
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.obs_buf, self.rew_buf, self._reset_dist], device=self._device)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.reset_buf, self.progress_buf, self._reset_dist, self._max_episode_length],
device=self._device)
@wp.kernel
def reset_idx(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
indices: wp.array(dtype=wp.int32),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
cart_dof_idx: int,
pole_dof_idx: int,
rand_seed: int):
i = wp.tid()
idx = indices[i]
rand_state = wp.rand_init(rand_seed, i)
# randomize DOF positions
dof_pos[idx, cart_dof_idx] = 1.0 * (1.0 - 2.0 * wp.randf(rand_state))
dof_pos[idx, pole_dof_idx] = 0.125 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# randomize DOF velocities
dof_vel[idx, cart_dof_idx] = 0.5 * (1.0 - 2.0 * wp.randf(rand_state))
dof_vel[idx, pole_dof_idx] = 0.25 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# bookkeeping
progress_buf[idx] = 0
reset_buf[idx] = 0
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
max_push_effort: float):
i = wp.tid()
forces[i, cart_dof_idx] = max_push_effort * actions[i, 0]
@wp.kernel
def get_observations(obs_buf: wp.array(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
pole_dof_idx: int):
i = wp.tid()
obs_buf[i, 0] = dof_pos[i, cart_dof_idx]
obs_buf[i, 1] = dof_vel[i, cart_dof_idx]
obs_buf[i, 2] = dof_pos[i, pole_dof_idx]
obs_buf[i, 3] = dof_vel[i, pole_dof_idx]
@wp.kernel
def calculate_metrics(obs_buf: wp.array(dtype=wp.float32, ndim=2),
rew_buf: wp.array(dtype=wp.float32),
reset_dist: float):
i = wp.tid()
cart_pos = obs_buf[i, 0]
cart_vel = obs_buf[i, 1]
pole_angle = obs_buf[i, 2]
pole_vel = obs_buf[i, 3]
rew_buf[i] = 1.0 - pole_angle * pole_angle - 0.01 * wp.abs(cart_vel) - 0.005 * wp.abs(pole_vel)
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_angle) > warp_utils.PI / 2.0:
rew_buf[i] = -2.0
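# Worked example for the kernel above: an upright, stationary pole yields the
# maximum per-step reward of 1.0; leaving the track (|cart_pos| > reset_dist) or
# letting the pole pass horizontal overrides the reward with a flat -2.0 penalty.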
@wp.kernel
def is_done(obs_buf: wp.array(dtype=wp.float32, ndim=2),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
reset_dist: float,
max_episode_length: int):
i = wp.tid()
cart_pos = obs_buf[i, 0]
pole_pos = obs_buf[i, 2]
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_pos) > warp_utils.PI / 2.0 or progress_buf[i] > max_episode_length:
reset_buf[i] = 1
else:
reset_buf[i] = 0
| 8,665 | Python | 38.390909 | 154 | 0.635661 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class LocomotionTask(RLTaskWarp):
def __init__(
self,
name,
env,
offset=None
) -> None:
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._num_sensors = 2
RLTaskWarp.__init__(self, name, env)
return
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces()
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, torso_position, torso_rotation, self._env_pos, velocities, dof_pos, dof_vel,
self.prev_potentials, self.potentials, self.dt, self.target,
self.basis_vec0, self.basis_vec1, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
sensor_force_torques, self.contact_force_scale, self.actions, self.angular_velocity_scale,
self._robots._num_dof, self._num_sensors, self._sensor_indices], device=self._device
)
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
self.actions = actions_wp
wp.launch(compute_forces, dim=(self._num_envs, self._robots._num_dof),
inputs=[self.forces, self.actions, self.joint_gears, self.power_scale], device=self._device)
# applies joint torques
self._robots.set_joint_efforts(self.forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_dofs, dim=(num_resets, self._robots._num_dof),
inputs=[self.dof_pos, self.dof_vel, self.initial_dof_pos, self.dof_limits_lower, self.dof_limits_upper, indices, self._rand_seed],
device=self._device)
wp.launch(reset_idx, dim=num_resets,
inputs=[self.root_pos, self.root_rot, self.initial_root_pos, self.initial_root_rot, self._env_pos,
self.target, self.prev_potentials, self.potentials, self.dt,
self.reset_buf, self.progress_buf, indices, self._rand_seed],
device=self._device)
# apply resets
self._robots.set_joint_positions(self.dof_pos[indices], indices=indices)
self._robots.set_joint_velocities(self.dof_vel[indices], indices=indices)
self._robots.set_world_poses(self.root_pos[indices], self.root_rot[indices], indices=indices)
self._robots.set_velocities(self.root_vel[indices], indices=indices)
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.basis_vec0 = wp.vec3(1, 0, 0)
self.basis_vec1 = wp.vec3(0, 0, 1)
self.target = wp.vec3(1000, 0, 0)
self.dt = 1.0 / 60.0
# initialize potentials
self.potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
self.prev_potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
wp.launch(init_potentials, dim=self._num_envs,
inputs=[self.potentials, self.prev_potentials, self.dt], device=self._device)
self.actions = wp.zeros((self.num_envs, self.num_actions), device=self._device, dtype=wp.float32)
self.forces = wp.zeros((self._num_envs, self._robots._num_dof), dtype=wp.float32, device=self._device)
self.dof_pos = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.root_pos = wp.zeros((self.num_envs, 3), device=self._device, dtype=wp.float32)
self.root_rot = wp.zeros((self.num_envs, 4), device=self._device, dtype=wp.float32)
self.root_vel = wp.zeros((self.num_envs, 6), device=self._device, dtype=wp.float32)
        # randomize all envs
self.reset_idx()
def calculate_metrics(self) -> None:
dof_at_limit_cost = self.get_dof_at_limit_cost()
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.rew_buf, self.obs_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials,
self.actions_cost_scale, self.energy_cost_scale, self.termination_height,
self.death_cost, self._robots.num_dof, dof_at_limit_cost, self.alive_reward_scale, self.motor_effort_ratio],
device=self._device
)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length],
device=self._device
)
#####################################################################
###==========================warp kernels=========================###
#####################################################################
@wp.kernel
def init_potentials(potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
dt: float):
i = wp.tid()
potentials[i] = -1000.0 / dt
prev_potentials[i] = -1000.0 / dt
@wp.kernel
def reset_idx(root_pos: wp.array(dtype=wp.float32, ndim=2),
root_rot: wp.array(dtype=wp.float32, ndim=2),
initial_root_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
initial_root_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
target: wp.vec3,
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i = wp.tid()
idx = indices[i]
# reset root states
for j in range(3):
root_pos[idx, j] = initial_root_pos[idx, j]
for j in range(4):
root_rot[idx, j] = initial_root_rot[idx, j]
# reset potentials
to_target = target - wp.vec3(initial_root_pos[idx, 0] - env_pos[idx, 0], initial_root_pos[idx, 1] - env_pos[idx, 1], target[2])
prev_potentials[idx] = -wp.length(to_target) / dt
potentials[idx] = -wp.length(to_target) / dt
# bookkeeping
reset_buf[idx] = 0
progress_buf[idx] = 0
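# The potential is -distance_to_target / dt, so the progress reward used in
# calculate_metrics, potentials - prev_potentials, approximates the agent's
# closing speed toward the target (scene units per second) over the last step.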
@wp.kernel
def reset_dofs(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
initial_dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i, j = wp.tid()
idx = indices[i]
    # use a distinct random stream per (env, joint) pair; the stride only needs to
    # exceed the joint count (assumed far below 4096 for any articulation here)
    rand_state = wp.rand_init(rand_seed, i * 4096 + j)
# randomize DOF positions and velocities
dof_pos[idx, j] = wp.clamp(wp.randf(rand_state, -0.2, 0.2) + initial_dof_pos[idx, j], dof_limits_lower[j], dof_limits_upper[j])
dof_vel[idx, j] = wp.randf(rand_state, -0.1, 0.1)
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
joint_gears: wp.array(dtype=wp.float32),
power_scale: float):
i, j = wp.tid()
forces[i, j] = actions[i, j] * joint_gears[j] * power_scale
@wp.func
def get_euler_xyz(q: wp.quat):
qx = 0
qy = 1
qz = 2
qw = 3
# roll (x-axis rotation)
sinr_cosp = 2.0 * (q[qw] * q[qx] + q[qy] * q[qz])
cosr_cosp = q[qw] * q[qw] - q[qx] * q[qx] - q[qy] * q[qy] + q[qz] * q[qz]
roll = wp.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (q[qw] * q[qy] - q[qz] * q[qx])
if wp.abs(sinp) >= 1:
pitch = warp_utils.PI / 2.0 * (wp.abs(sinp)/sinp)
else:
pitch = wp.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2.0 * (q[qw] * q[qz] + q[qx] * q[qy])
cosy_cosp = q[qw] * q[qw] + q[qx] * q[qx] - q[qy] * q[qy] - q[qz] * q[qz]
yaw = wp.atan2(siny_cosp, cosy_cosp)
rpy = wp.vec3(roll % (2.0 * warp_utils.PI), pitch % (2.0 * warp_utils.PI), yaw % (2.0 * warp_utils.PI))
return rpy
@wp.func
def compute_up_vec(torso_rotation: wp.quat, vec1: wp.vec3):
up_vec = wp.quat_rotate(torso_rotation, vec1)
return up_vec
@wp.func
def compute_heading_vec(torso_rotation: wp.quat, vec0: wp.vec3):
heading_vec = wp.quat_rotate(torso_rotation, vec0)
return heading_vec
@wp.func
def unscale(x:float, lower:float, upper:float):
return (2.0 * x - upper - lower) / (upper - lower)
@wp.func
def normalize_angle(x: float):
return wp.atan2(wp.sin(x), wp.cos(x))
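# Example: normalize_angle wraps any angle into (-pi, pi]; an input of 3*pi/2
# maps to atan2(-1.0, 0.0) = -pi/2.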
@wp.kernel
def get_observations(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
torso_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
torso_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
velocity: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
target: wp.vec3,
basis_vec0: wp.vec3,
basis_vec1: wp.vec3,
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_vel_scale: float,
sensor_force_torques: wp.indexedarray(dtype=wp.float32, ndim=3),
contact_force_scale: float,
actions: wp.array(dtype=wp.float32, ndim=2),
angular_velocity_scale: float,
num_dofs: int,
num_sensors: int,
sensor_indices: wp.array(dtype=wp.int32)
):
i = wp.tid()
torso_position_x = torso_pos[i, 0] - env_pos[i, 0]
torso_position_y = torso_pos[i, 1] - env_pos[i, 1]
torso_position_z = torso_pos[i, 2] - env_pos[i, 2]
to_target = target - wp.vec3(torso_position_x, torso_position_y, target[2])
prev_potentials[i] = potentials[i]
potentials[i] = -wp.length(to_target) / dt
torso_quat = wp.quat(torso_rot[i, 1], torso_rot[i, 2], torso_rot[i, 3], torso_rot[i, 0])
up_vec = compute_up_vec(torso_quat, basis_vec1)
up_proj = up_vec[2]
heading_vec = compute_heading_vec(torso_quat, basis_vec0)
target_dir = wp.normalize(to_target)
heading_proj = wp.dot(heading_vec, target_dir)
lin_velocity = wp.vec3(velocity[i, 0], velocity[i, 1], velocity[i, 2])
ang_velocity = wp.vec3(velocity[i, 3], velocity[i, 4], velocity[i, 5])
rpy = get_euler_xyz(torso_quat)
vel_loc = wp.quat_rotate_inv(torso_quat, lin_velocity)
angvel_loc = wp.quat_rotate_inv(torso_quat, ang_velocity)
walk_target_angle = wp.atan2(target[2] - torso_position_z, target[0] - torso_position_x)
angle_to_target = walk_target_angle - rpy[2] # yaw
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
obs_offset = 0
obs_buf[i, 0] = torso_position_z
obs_offset = obs_offset + 1
for j in range(3):
obs_buf[i, j+obs_offset] = vel_loc[j]
obs_offset = obs_offset + 3
for j in range(3):
obs_buf[i, j+obs_offset] = angvel_loc[j] * angular_velocity_scale
obs_offset = obs_offset + 3
obs_buf[i, obs_offset+0] = normalize_angle(rpy[2])
obs_buf[i, obs_offset+1] = normalize_angle(rpy[0])
obs_buf[i, obs_offset+2] = normalize_angle(angle_to_target)
obs_buf[i, obs_offset+3] = up_proj
obs_buf[i, obs_offset+4] = heading_proj
obs_offset = obs_offset + 5
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = unscale(dof_pos[i, j], dof_limits_lower[j], dof_limits_upper[j])
obs_offset = obs_offset + num_dofs
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = dof_vel[i, j] * dof_vel_scale
obs_offset = obs_offset + num_dofs
for j in range(num_sensors):
sensor_idx = sensor_indices[j]
for k in range(6):
obs_buf[i, obs_offset+j*6+k] = sensor_force_torques[i, sensor_idx, k] * contact_force_scale
obs_offset = obs_offset + (num_sensors * 6)
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = actions[i, j]
@wp.kernel
def is_done(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
termination_height: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
max_episode_length: int
):
i = wp.tid()
if obs_buf[i, 0] < termination_height or progress_buf[i] >= max_episode_length - 1:
reset_buf[i] = 1
else:
reset_buf[i] = 0
@wp.kernel
def calculate_metrics(
rew_buf: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
up_weight: float,
heading_weight: float,
potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
actions_cost_scale: float,
energy_cost_scale: float,
termination_height: float,
death_cost: float,
num_dof: int,
dof_at_limit_cost: wp.array(dtype=wp.float32),
alive_reward_scale: float,
motor_effort_ratio: wp.array(dtype=wp.float32)
):
i = wp.tid()
# heading reward
if obs_buf[i, 11] > 0.8:
heading_reward = heading_weight
else:
heading_reward = heading_weight * obs_buf[i, 11] / 0.8
# aligning up axis of robot and environment
up_reward = 0.0
if obs_buf[i, 10] > 0.93:
up_reward = up_weight
# energy penalty for movement
actions_cost = float(0.0)
electricity_cost = float(0.0)
for j in range(num_dof):
actions_cost = actions_cost + (actions[i, j] * actions[i, j])
electricity_cost = electricity_cost + (wp.abs(actions[i, j] * obs_buf[i, 12+num_dof+j]) * motor_effort_ratio[j])
# reward for duration of staying alive
progress_reward = potentials[i] - prev_potentials[i]
total_reward = (
progress_reward
+ alive_reward_scale
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost[i]
)
# adjust reward for fallen agents
if obs_buf[i, 0] < termination_height:
total_reward = death_cost
rew_buf[i] = total_reward
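# Reward composition above: potential-based progress toward the target, a constant
# alive bonus, uprightness and heading bonuses, minus action-magnitude, electricity
# and joint-limit costs; agents below termination_height get the flat death_cost.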
| 18,233 | Python | 39.52 | 147 | 0.624198 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/base/rl_task.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from abc import abstractmethod
import numpy as np
import omni.isaac.core.utils.warp.tensor as wp_utils
import omni.kit
import omni.usd
import torch
import warp as wp
from gym import spaces
from omni.isaac.cloner import GridCloner
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer
from pxr import Gf, UsdGeom, UsdLux
class RLTask(RLTaskInterface):
"""This class provides a PyTorch RL-specific interface for setting up RL tasks.
It includes utilities for setting up RL task related parameters,
cloning environments, and data collection for RL algorithms.
"""
def __init__(self, name, env, offset=None) -> None:
"""Initializes RL parameters, cloner object, and buffers.
Args:
name (str): name of the task.
env (VecEnvBase): an instance of the environment wrapper class to register task.
offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
"""
BaseTask.__init__(self, name=name, offset=offset)
self._rand_seed = self._cfg["seed"]
# optimization flags for pytorch JIT
torch._C._jit_set_nvfuser_enabled(False)
self.test = self._cfg["test"]
self._device = self._cfg["sim_device"]
# set up randomizer for DR
self._dr_randomizer = Randomizer(self._cfg, self._task_cfg)
if self._dr_randomizer.randomize:
import omni.replicator.isaac as dr
self.dr = dr
# set up replicator for camera data collection
if self._task_cfg["sim"].get("enable_cameras", False):
from omni.replicator.isaac.scripts.writers.pytorch_writer import PytorchWriter
from omni.replicator.isaac.scripts.writers.pytorch_listener import PytorchListener
import omni.replicator.core as rep
self.rep = rep
self.PytorchWriter = PytorchWriter
self.PytorchListener = PytorchListener
print("Task Device:", self._device)
self.randomize_actions = False
self.randomize_observations = False
self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf)
self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf)
self.rl_device = self._cfg.get("rl_device", "cuda:0")
self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
self.rendering_interval = self._task_cfg.get("renderingInterval", 1)
print("RL device: ", self.rl_device)
self._env = env
if not hasattr(self, "_num_agents"):
self._num_agents = 1 # used for multi-agent environments
if not hasattr(self, "_num_states"):
self._num_states = 0
# initialize data spaces (defaults to gym.Box)
if not hasattr(self, "action_space"):
self.action_space = spaces.Box(
np.ones(self.num_actions, dtype=np.float32) * -1.0, np.ones(self.num_actions, dtype=np.float32) * 1.0
)
if not hasattr(self, "observation_space"):
self.observation_space = spaces.Box(
np.ones(self.num_observations, dtype=np.float32) * -np.Inf,
np.ones(self.num_observations, dtype=np.float32) * np.Inf,
)
if not hasattr(self, "state_space"):
self.state_space = spaces.Box(
np.ones(self.num_states, dtype=np.float32) * -np.Inf,
np.ones(self.num_states, dtype=np.float32) * np.Inf,
)
self.cleanup()
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.extras = {}
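    # Note on buffer semantics (hedged summary of the code above): reset_buf is
    # initialized to ones so every environment is reset on the first step;
    # progress_buf tracks the per-environment step count; extras is a free-form
    # dict passed through to the RL library each step.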
def set_up_scene(
self, scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True, copy_from_source=False
) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
replicate_physics (bool): Clone physics using PhysX API for better performance.
collision_filter_global_paths (list): Prim paths of global objects that should not have collision masked.
filter_collisions (bool): Mask off collision between environments.
copy_from_source (bool): Copy from source prim when cloning instead of inheriting.
"""
super().set_up_scene(scene)
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self.default_base_env_path)
stage = omni.usd.get_context().get_stage()
UsdGeom.Xform.Define(stage, self.default_zero_env_path)
if self._task_cfg["sim"].get("add_ground_plane", True):
self._ground_plane_path = "/World/defaultGroundPlane"
collision_filter_global_paths.append(self._ground_plane_path)
scene.add_default_ground_plane(prim_path=self._ground_plane_path)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source
)
self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
if filter_collisions:
self._cloner.filter_collisions(
self._env._world.get_physics_context().prim_path,
"/World/collisions",
prim_paths,
collision_filter_global_paths,
)
if self._env._render:
self.set_initial_camera_params(camera_position=[10, 10, 3], camera_target=[0, 0, 0])
if self._task_cfg["sim"].get("add_distant_light", True):
self._create_distant_light()
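    # Hedged standalone sketch of the cloning pattern above; the spacing, path,
    # and env count are illustrative, while the GridCloner calls mirror the
    # ones used in this method:
    #
    #   cloner = GridCloner(spacing=3.0)
    #   cloner.define_base_env("/World/envs")
    #   paths = cloner.generate_paths("/World/envs/env", 4)
    #   env_pos = cloner.clone(source_prim_path="/World/envs/env_0",
    #                          prim_paths=paths, replicate_physics=True)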
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
from omni.kit.viewport.utility import get_viewport_from_window_name
from omni.kit.viewport.utility.camera_state import ViewportCameraState
viewport_api_2 = get_viewport_from_window_name("Viewport")
viewport_api_2.set_active_camera("/OmniverseKit_Persp")
camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True)
camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True)
def _create_distant_light(self, prim_path="/World/defaultDistantLight", intensity=5000):
stage = get_current_stage()
light = UsdLux.DistantLight.Define(stage, prim_path)
light.CreateIntensityAttr().Set(intensity)
def initialize_views(self, scene):
"""Optionally implemented by individual task classes to initialize views used in the task.
This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage.
Args:
scene (Scene): Scene to remove existing views and initialize/add new views.
"""
self._cloner = GridCloner(spacing=self._env_spacing)
pos, _ = self._cloner.get_clone_transforms(self._num_envs)
self._env_pos = torch.tensor(np.array(pos), device=self._device, dtype=torch.float)
@property
def default_base_env_path(self):
"""Retrieves default path to the parent of all env prims.
Returns:
default_base_env_path(str): Defaults to "/World/envs".
"""
return "/World/envs"
@property
def default_zero_env_path(self):
"""Retrieves default path to the first env prim (index 0).
Returns:
default_zero_env_path(str): Defaults to "/World/envs/env_0".
"""
return f"{self.default_base_env_path}/env_0"
def reset(self):
"""Flags all environments for reset."""
self.reset_buf = torch.ones_like(self.reset_buf)
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
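    # Hedged usage sketch (not part of this class): a VecEnvBase-style wrapper
    # typically drives a task once per RL step roughly as follows; `world` and
    # `task` are assumed names.
    #
    #   task.pre_physics_step(actions)
    #   for _ in range(task.control_frequency_inv):
    #       world.step(render=False)
    #   obs, rew, dones, extras = task.post_physics_step()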
class RLTaskWarp(RLTask):
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = wp.zeros((self._num_envs, self.num_observations), device=self._device, dtype=wp.float32)
self.states_buf = wp.zeros((self._num_envs, self.num_states), device=self._device, dtype=wp.float32)
self.rew_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.float32)
self.reset_buf = wp_utils.ones(self._num_envs, device=self._device, dtype=wp.int32)
self.progress_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.int32)
self.zero_states_buf_torch = torch.zeros(
(self._num_envs, self.num_states), device=self._device, dtype=torch.float32
)
self.extras = {}
def reset(self):
"""Flags all environments for reset."""
wp.launch(reset_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
wp.launch(increment_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
obs_buf_torch = wp.to_torch(self.obs_buf)
rew_buf_torch = wp.to_torch(self.rew_buf)
reset_buf_torch = wp.to_torch(self.reset_buf)
return obs_buf_torch, rew_buf_torch, reset_buf_torch, self.extras
def get_states(self):
"""API for retrieving states buffer, used for asymmetric AC training.
Returns:
states_buf(torch.Tensor): States buffer.
"""
if self.num_states > 0:
return wp.to_torch(self.states_buf)
else:
return self.zero_states_buf_torch
def set_up_scene(self, scene) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
"""
super().set_up_scene(scene)
self._env_pos = wp.from_torch(self._env_pos)
@wp.kernel
def increment_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = progress_buf[i] + 1
@wp.kernel
def reset_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = 1
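# Hedged sketch of the launch pattern used by RLTaskWarp above; `num_envs`
# and the device string are placeholders:
#
#   progress = wp.zeros(num_envs, dtype=wp.int32, device="cuda:0")
#   wp.launch(increment_progress, dim=num_envs, inputs=[progress], device="cuda:0")
#   progress_torch = wp.to_torch(progress)  # zero-copy view for the RL library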
| 14,224 | Python | 41.717718 | 143 | 0.653121 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: base class.
Inherits Gym's RLTask class and abstract base class. Inherited by environment classes. Not directly executed.
Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml.
"""
import carb
import hydra
import math
import numpy as np
import torch
from omni.isaac.core.objects import FixedCuboid
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.factory_franka import FactoryFranka
from pxr import PhysxSchema, UsdPhysics
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from omniisaacgymenvs.tasks.factory.factory_schema_config_base import (
FactorySchemaConfigBase,
)
class FactoryBase(RLTask, FactoryABCBase):
def __init__(self, name, sim_config, env) -> None:
"""Initialize instance variables. Initialize RLTask superclass."""
# Set instance variables from base YAML
self._get_base_yaml_params()
self._env_spacing = self.cfg_base.env.env_spacing
# Set instance variables from task and train YAMLs
self._sim_config = sim_config
self._cfg = sim_config.config # CL args, task config, and train config
self._task_cfg = sim_config.task_config # just task config
self._num_envs = sim_config.task_config["env"]["numEnvs"]
self._num_observations = sim_config.task_config["env"]["numObservations"]
self._num_actions = sim_config.task_config["env"]["numActions"]
super().__init__(name, env)
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
config_path = (
"task/FactoryBase.yaml" # relative to Gym's Hydra search path (cfg dir)
)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][
"tasks"
]["factory"][
"yaml"
] # strip superfluous nesting
def import_franka_assets(self, add_to_stage=True):
"""Set Franka and table asset options. Import assets."""
self._stage = get_current_stage()
if add_to_stage:
franka_translation = np.array([self.cfg_base.env.franka_depth, 0.0, 0.0])
franka_orientation = np.array([0.0, 0.0, 0.0, 1.0])
franka = FactoryFranka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
translation=franka_translation,
orientation=franka_orientation,
)
self._sim_config.apply_articulation_settings(
"franka",
get_prim_at_path(franka.prim_path),
self._sim_config.parse_actor_config("franka"),
)
for link_prim in franka.prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(
self._stage, link_prim.GetPrimPath()
)
rb.GetDisableGravityAttr().Set(True)
rb.GetRetainAccelerationsAttr().Set(False)
if self.cfg_base.sim.add_damping:
rb.GetLinearDampingAttr().Set(
1.0
) # default = 0.0; increased to improve stability
rb.GetMaxLinearVelocityAttr().Set(
1.0
) # default = 1000.0; reduced to prevent CUDA errors
rb.GetAngularDampingAttr().Set(
5.0
) # default = 0.5; increased to improve stability
rb.GetMaxAngularVelocityAttr().Set(
2 / math.pi * 180
) # default = 64.0; reduced to prevent CUDA errors
else:
rb.GetLinearDampingAttr().Set(0.0)
rb.GetMaxLinearVelocityAttr().Set(1000.0)
rb.GetAngularDampingAttr().Set(0.5)
rb.GetMaxAngularVelocityAttr().Set(64 / math.pi * 180)
table_translation = np.array(
[0.0, 0.0, self.cfg_base.env.table_height * 0.5]
)
table_orientation = np.array([1.0, 0.0, 0.0, 0.0])
table = FixedCuboid(
prim_path=self.default_zero_env_path + "/table",
name="table",
translation=table_translation,
orientation=table_orientation,
scale=np.array(
[
self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width,
self.cfg_base.env.table_height,
]
),
size=1.0,
color=np.array([0, 0, 0]),
)
self.parse_controller_spec(add_to_stage=add_to_stage)
def acquire_base_tensors(self):
"""Acquire tensors."""
self.num_dofs = 9
self.env_pos = self._env_pos
self.dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_torque = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_dof_pos = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.ctrl_target_gripper_dof_pos = torch.zeros(
(self.num_envs, 2), device=self.device
)
self.ctrl_target_fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.prev_actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def refresh_base_tensors(self):
"""Refresh tensors."""
if not self._env._world.is_playing():
return
self.dof_pos = self.frankas.get_joint_positions(clone=False)
self.dof_vel = self.frankas.get_joint_velocities(clone=False)
# Jacobian shape: [4, 11, 6, 9] (root has no Jacobian)
self.franka_jacobian = self.frankas.get_jacobians()
self.franka_mass_matrix = self.frankas.get_mass_matrices(clone=False)
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_mass_matrix = self.franka_mass_matrix[
:, 0:7, 0:7
] # for Franka arm (not gripper)
self.hand_pos, self.hand_quat = self.frankas._hands.get_world_poses(clone=False)
self.hand_pos -= self.env_pos
hand_velocities = self.frankas._hands.get_velocities(clone=False)
self.hand_linvel = hand_velocities[:, 0:3]
self.hand_angvel = hand_velocities[:, 3:6]
(
self.left_finger_pos,
self.left_finger_quat,
) = self.frankas._lfingers.get_world_poses(clone=False)
self.left_finger_pos -= self.env_pos
left_finger_velocities = self.frankas._lfingers.get_velocities(clone=False)
self.left_finger_linvel = left_finger_velocities[:, 0:3]
self.left_finger_angvel = left_finger_velocities[:, 3:6]
self.left_finger_jacobian = self.franka_jacobian[:, 8, 0:6, 0:7]
left_finger_forces = self.frankas._lfingers.get_net_contact_forces(clone=False)
self.left_finger_force = left_finger_forces[:, 0:3]
(
self.right_finger_pos,
self.right_finger_quat,
) = self.frankas._rfingers.get_world_poses(clone=False)
self.right_finger_pos -= self.env_pos
right_finger_velocities = self.frankas._rfingers.get_velocities(clone=False)
self.right_finger_linvel = right_finger_velocities[:, 0:3]
self.right_finger_angvel = right_finger_velocities[:, 3:6]
self.right_finger_jacobian = self.franka_jacobian[:, 9, 0:6, 0:7]
right_finger_forces = self.frankas._rfingers.get_net_contact_forces(clone=False)
self.right_finger_force = right_finger_forces[:, 0:3]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
(
self.fingertip_centered_pos,
self.fingertip_centered_quat,
) = self.frankas._fingertip_centered.get_world_poses(clone=False)
self.fingertip_centered_pos -= self.env_pos
fingertip_centered_velocities = self.frankas._fingertip_centered.get_velocities(
clone=False
)
self.fingertip_centered_linvel = fingertip_centered_velocities[:, 0:3]
self.fingertip_centered_angvel = fingertip_centered_velocities[:, 3:6]
self.fingertip_centered_jacobian = self.franka_jacobian[:, 10, 0:6, 0:7]
self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) / 2
self.fingertip_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length,
device=self.device,
)
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
# TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf)
self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(
self.fingertip_centered_angvel,
(self.fingertip_midpoint_pos - self.fingertip_centered_pos),
dim=1,
)
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (
self.left_finger_jacobian + self.right_finger_jacobian
) * 0.5
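    # Hedged numeric check of the velocity-transfer rule used above
    # (v_mid = v_center + w x r for two points on one rigid body); toy values,
    # independent of the task:
    #
    #   w = torch.tensor([[0.0, 0.0, 1.0]])           # 1 rad/s about z
    #   r = torch.tensor([[0.1, 0.0, 0.0]])           # center -> midpoint (m)
    #   v_center = torch.tensor([[0.2, 0.0, 0.0]])
    #   v_mid = v_center + torch.cross(w, r, dim=1)   # -> [[0.2, 0.1, 0.0]]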
def parse_controller_spec(self, add_to_stage):
"""Parse controller specification into lower-level controller configuration."""
cfg_ctrl_keys = {
"num_envs",
"jacobian_type",
"gripper_prop_gains",
"gripper_deriv_gains",
"motor_ctrl_mode",
"gain_space",
"ik_method",
"joint_prop_gains",
"joint_deriv_gains",
"do_motion_ctrl",
"task_prop_gains",
"task_deriv_gains",
"do_inertial_comp",
"motion_ctrl_axes",
"do_force_ctrl",
"force_ctrl_method",
"wrench_prop_gains",
"force_ctrl_axes",
}
self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys}
self.cfg_ctrl["num_envs"] = self.num_envs
self.cfg_ctrl["jacobian_type"] = self.cfg_task.ctrl.all.jacobian_type
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
ctrl_type = self.cfg_task.ctrl.ctrl_type
if ctrl_type == "gym_default":
self.cfg_ctrl["motor_ctrl_mode"] = "gym"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.gym_default.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "joint_space_ik":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_ik.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
elif ctrl_type == "joint_space_id":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_id.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
elif ctrl_type == "task_space_impedance":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "operational_space_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "open_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "open"
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "closed_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "hybrid_force_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
if add_to_stage:
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "angular")
drive.GetStiffnessAttr().Set(
self.cfg_ctrl["joint_prop_gains"][0, i].item() * np.pi / 180
)
drive.GetDampingAttr().Set(
self.cfg_ctrl["joint_deriv_gains"][0, i].item() * np.pi / 180
)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "linear")
                    # stiffness uses proportional gains; damping uses derivative gains
                    drive.GetStiffnessAttr().Set(
                        self.cfg_ctrl["gripper_prop_gains"][0, i].item()
                    )
                    drive.GetDampingAttr().Set(
                        self.cfg_ctrl["gripper_deriv_gains"][0, i].item()
                    )
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "angular")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
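    # Hedged illustration of the resulting low-level spec for one mode (gain
    # shapes follow the code above; no values are taken from any YAML here):
    #
    #   ctrl_type == "joint_space_id" yields, per task instance:
    #     cfg_ctrl["motor_ctrl_mode"]   == "manual"  # DOF torques set directly
    #     cfg_ctrl["gain_space"]        == "joint"
    #     cfg_ctrl["joint_prop_gains"]  -> tensor of shape (num_envs, 7)
    #     cfg_ctrl["joint_deriv_gains"] -> tensor of shape (num_envs, 7)
    #     cfg_ctrl["do_inertial_comp"]  == True      # inverse-dynamics compensation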
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl["jacobian_type"] == "geometric":
self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
elif self.cfg_ctrl["jacobian_type"] == "analytic":
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
fingertip_quat=self.fingertip_quat,
fingertip_jacobian=self.fingertip_midpoint_jacobian,
num_envs=self.num_envs,
device=self.device,
)
# Set PD joint pos target or joint torque
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
self._set_dof_pos_target()
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device,
)
self.frankas.set_joint_position_targets(positions=self.ctrl_target_dof_pos)
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device,
)
self.frankas.set_joint_efforts(efforts=self.dof_torque)
def enable_gravity(self, gravity_mag):
"""Enable gravity."""
gravity = [0.0, 0.0, -gravity_mag]
self._env._world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
def disable_gravity(self):
"""Disable gravity."""
gravity = [0.0, 0.0, 0.0]
self._env._world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
| 26,838 | Python | 45.921329 | 148 | 0.588419 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for task class configurations.
Used by Hydra. Defines template for task class YAML files. Not enforced.
"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Sim:
use_gpu_pipeline: bool # use GPU pipeline
dt: float # timestep size
gravity: list[float] # gravity vector
@dataclass
class Env:
numObservations: int # number of observations per env; camel case required by VecTask
numActions: int # number of actions per env; camel case required by VecTask
numEnvs: int # number of envs; camel case required by VecTask
@dataclass
class Randomize:
franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7)
@dataclass
class RL:
    pos_action_scale: list[float]  # scale on pos displacement targets (3), to convert [-1, 1] to +- x m
    rot_action_scale: list[float]  # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad
    force_action_scale: list[float]  # scale on force targets (3), to convert [-1, 1] to +- x N
    torque_action_scale: list[float]  # scale on torque targets (3), to convert [-1, 1] to +- x Nm
clamp_rot: bool # clamp small values of rotation actions to zero
clamp_rot_thresh: float # smallest acceptable value
max_episode_length: int # max number of timesteps in each episode
@dataclass
class All:
jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic}
    gripper_prop_gains: list[float]  # proportional gains on left and right Franka gripper finger DOF position (2)
    gripper_deriv_gains: list[float]  # derivative gains on left and right Franka gripper finger DOF position (2)
@dataclass
class GymDefault:
joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7)
joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7)
@dataclass
class JointSpaceIK:
ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd}
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class JointSpaceID:
ik_method: str
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class TaskSpaceImpedance:
motion_ctrl_axes: list[bool] # axes for which to enable motion control {0, 1} (6)
task_prop_gains: list[float] # proportional gains on Franka fingertip pose (6)
task_deriv_gains: list[float] # derivative gains on Franka fingertip pose (6)
@dataclass
class OperationalSpaceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
@dataclass
class OpenLoopForce:
force_ctrl_axes: list[bool] # axes for which to enable force control {0, 1} (6)
@dataclass
class ClosedLoopForce:
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float] # proportional gains on Franka finger force (6)
@dataclass
class HybridForceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float]
@dataclass
class Ctrl:
ctrl_type: str # {gym_default,
# joint_space_ik,
# joint_space_id,
# task_space_impedance,
# operational_space_motion,
# open_loop_force,
# closed_loop_force,
# hybrid_force_motion}
gym_default: GymDefault
joint_space_ik: JointSpaceIK
joint_space_id: JointSpaceID
task_space_impedance: TaskSpaceImpedance
operational_space_motion: OperationalSpaceMotion
open_loop_force: OpenLoopForce
closed_loop_force: ClosedLoopForce
hybrid_force_motion: HybridForceMotion
@dataclass
class FactorySchemaConfigTask:
name: str
physics_engine: str
sim: Sim
env: Env
rl: RL
ctrl: Ctrl
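# Hedged sketch of how these schema dataclasses are typically used with
# Hydra's ConfigStore (mirroring the cs.store(...) calls in the task classes);
# the config name below is illustrative:
#
#   import hydra
#   cs = hydra.core.config_store.ConfigStore.instance()
#   cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
#   cfg = hydra.compose(config_name="task/FactoryTaskNutBoltPlace.yaml")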
| 5,517 | Python | 30.895954 | 130 | 0.719413 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt place task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPlace
"""
import asyncio
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
import omni.isaac.core.utils.torch as torch_utils
from omni.isaac.core.utils.torch.transformations import tf_combine
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPlacePPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Nut-bolt tensors
self.nut_base_pos_local = self.bolt_head_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths
self.bolt_tip_pos_local = bolt_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
# Keypoint tensors
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_nut = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device)
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
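    # Shape notes (hedged, derived from the code above): keypoints_nut and
    # keypoints_bolt are (num_envs, num_keypoints, 3); nut_base_pos_local and
    # bolt_tip_pos_local are (num_envs, 3) offsets along each object's local z.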
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
(self.nut_widths_max * 0.5)
* 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max * 0.5) * 1.1,
), # buffer on gripper DOF pos to prevent initial contact
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut within gripper
self.nut_pos[env_ids, 0] = 0.0
self.nut_pos[env_ids, 1] = 0.0
        fingertip_midpoint_pos_reset = 0.58781  # z-coordinate of self.fingertip_midpoint_pos at reset
nut_base_pos_local = self.bolt_head_heights.squeeze(-1)
self.nut_pos[env_ids, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local
nut_noise_pos_in_gripper = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag(
torch.tensor(
self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device
)
)
self.nut_pos[env_ids, :] += nut_noise_pos_in_gripper[env_ids]
nut_rot_euler = torch.tensor(
[0.0, 0.0, math.pi * 0.5], device=self.device
).repeat(len(env_ids), 1)
nut_noise_rot_in_gripper = 2 * (
torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5
) # [-1, 1]
nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper
nut_rot_euler[:, 2] += nut_noise_rot_in_gripper
nut_rot_quat = torch_utils.quat_from_euler_xyz(
nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2]
)
self.nut_quat[env_ids, :] = nut_rot_quat
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.bolt_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
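    # Hedged sketch of the per-axis noise pattern used above: uniform samples
    # in [-1, 1] are scaled independently per axis by a diagonal matrix. The
    # scale values here are illustrative only:
    #
    #   noise = 2 * (torch.rand(num_envs, 3) - 0.5)                # [-1, 1]
    #   noise = noise @ torch.diag(torch.tensor([0.1, 0.1, 0.0]))
    #   # columns 0 and 1 now span [-0.1, 0.1]; column 2 is zeroed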
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
        axis = rot_actions / angle.unsqueeze(-1)  # NaN when angle == 0; replaced by identity below when clamp_rot is set
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
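    # Hedged worked example of the axis-angle -> quaternion step above, with
    # toy numbers (a 0.1 rad rotation about z), assuming the (w, x, y, z)
    # convention used elsewhere in this file:
    #
    #   rot = torch.tensor([[0.0, 0.0, 0.1]])
    #   angle = torch.norm(rot, p=2, dim=-1)        # 0.1
    #   axis = rot / angle.unsqueeze(-1)            # [0, 0, 1]
    #   q = torch_utils.quat_from_angle_axis(angle, axis)
    #   # ~[[0.9988, 0.0, 0.0, 0.0500]] since cos(0.05)=0.99875, sin(0.05)=0.04998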
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
# Compute pos of keypoints on gripper, nut, and bolt in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_nut[:, idx] = tf_combine(
self.nut_quat,
self.nut_pos,
self.identity_quat,
(keypoint_offset + self.nut_base_pos_local),
)[1]
self.keypoints_bolt[:, idx] = tf_combine(
self.bolt_quat,
self.bolt_pos,
self.identity_quat,
(keypoint_offset + self.bolt_tip_pos_local),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_pos,
self.nut_quat,
self.bolt_pos,
self.bolt_quat,
]
if self.cfg_task.rl.add_obs_bolt_tip_pos:
obs_tensors += [self.bolt_tip_pos_local]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(
            self.actions, p=2, dim=-1
        )  # unscaled norm; action_penalty_scale is applied once in rew_buf below
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
)
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is close enough to bolt
is_nut_close_to_bolt = self._check_nut_close_to_bolt()
self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(is_nut_close_to_bolt.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
return keypoint_offsets
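    # Worked example (illustrative): num_keypoints = 4 gives z-offsets
    # linspace(0, 1, 4) - 0.5 = [-0.5, -1/6, 1/6, 0.5], i.e. four points spread
    # evenly along a unit segment centered at the origin, later scaled by
    # cfg_task.rl.keypoint_scale in _acquire_task_tensors().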
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance between nut and bolt."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
SimulationContext.step(self._env._world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
SimulationContext.step(self._env._world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
SimulationContext.step(self._env._world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _close_gripper(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
SimulationContext.step(self._env._world, render=True)
async def _close_gripper_async(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_nut_close_to_bolt(self) -> torch.Tensor:
"""Check if nut is close to bolt."""
keypoint_dist = torch.norm(
self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1
)
is_nut_close_to_bolt = torch.where(
torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,
torch.ones_like(self.progress_buf),
torch.zeros_like(self.progress_buf),
)
return is_nut_close_to_bolt
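        # Worked example (illustrative numbers, not from the config): with 4
        # keypoints each 0.005 m from their bolt counterparts, the summed distance
        # is 0.02 m, so the check passes only if close_error_thresh exceeds 0.02.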
| 29,034 | Python | 37.868809 | 131 | 0.594303 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for task classes.
Inherits ABC class. Inherited by task classes. Defines template for task classes.
"""
from abc import ABC, abstractmethod
class FactoryABCTask(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize environment superclass."""
pass
@abstractmethod
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
@abstractmethod
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
@abstractmethod
def pre_physics_step(self):
"""Reset environments. Apply actions from policy as controller targets. Simulation step called after this method."""
pass
@abstractmethod
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
pass
@abstractmethod
def get_observations(self):
"""Compute observations."""
pass
@abstractmethod
def calculate_metrics(self):
"""Detect successes and failures. Update reward and reset buffers."""
pass
@abstractmethod
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
@abstractmethod
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
@abstractmethod
def reset_idx(self):
"""Reset specified environments."""
pass
@abstractmethod
def _reset_franka(self):
"""Reset DOF states and DOF targets of Franka."""
pass
@abstractmethod
def _reset_object(self):
"""Reset root state of object."""
pass
@abstractmethod
def _reset_buffers(self):
"""Reset buffers."""
pass
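
# A minimal sketch (illustration only, not part of this repo) of the template:
# a subclass must implement every abstract method, otherwise instantiation
# raises TypeError. Real task classes also inherit a concrete env class, e.g.:
#
#     class MyTask(FactoryEnvNutBolt, FactoryABCTask):
#         def __init__(self, name, sim_config, env):
#             super().__init__(name, sim_config, env)
#             self._get_task_yaml_params()
#         def _get_task_yaml_params(self): ...
#         # ...and likewise for the remaining abstract methods.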
| 3,492 | Python | 31.342592 | 124 | 0.69559 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_env.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for environment classes.
Inherits ABC class. Inherited by environment classes. Defines template for environment classes.
"""
from abc import ABC, abstractmethod
class FactoryABCEnv(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize base superclass. Acquire tensors."""
pass
@abstractmethod
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def set_up_scene(self):
"""Set env options. Import assets. Create actors."""
pass
@abstractmethod
def _import_env_assets(self):
"""Set asset options. Import assets."""
pass
@abstractmethod
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
| 2,489 | Python | 37.906249 | 95 | 0.73644 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt screw task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltScrew
"""
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
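        # (hydra.compose resolves each "../" in asset_info_path as an empty-string
        # key, which is why the nesting above is stripped via [""][""][""].)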
ppo_path = "train/FactoryTaskNutBoltScrewPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
self.reset_idx(indices)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
target_heights = (
self.cfg_base.env.table_height
+ self.bolt_head_heights
+ self.nut_heights * 0.5
)
self.target_pos = target_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
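        # (Target height = table top + bolt head height + half nut height, i.e.,
        # the nut COM once the nut is fully tightened against the bolt head.)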
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def reset_idx(self, env_ids) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
                (self.nut_widths_max[env_ids] * 0.5)
                * 1.1,  # buffer on gripper DOF pos to prevent initial contact
                (self.nut_widths_max[env_ids] * 0.5)
                * 1.1,  # buffer on gripper DOF pos to prevent initial contact
            ),
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root state of nut."""
nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids]
self.nut_pos[env_ids, :] = nut_pos * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat(len(env_ids), 1)
nut_rot = (
self.cfg_task.randomize.nut_rot_initial
* torch.ones((len(env_ids), 1), device=self.device)
* math.pi
/ 180.0
)
self.nut_quat[env_ids, :] = torch.cat(
(
torch.cos(nut_rot * 0.5),
torch.zeros((len(env_ids), 1), device=self.device),
torch.zeros((len(env_ids), 1), device=self.device),
torch.sin(nut_rot * 0.5),
),
dim=-1,
)
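        # (In w-x-y-z convention, [cos(a/2), 0, 0, sin(a/2)] is a rotation by angle
        # a about the world Z axis, i.e., the nut is spun about the bolt axis.)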
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if self.cfg_task.rl.unidirectional_pos:
pos_actions[:, 2] = -(pos_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if self.cfg_task.rl.unidirectional_rot:
rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
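        # (If angle is ~0, the axis above is 0/0 -> NaN; with clamp_rot enabled,
        # such near-zero rotations are replaced by the identity quaternion below.)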
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
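        # (Pre-multiplying by the action quaternion applies the incremental rotation
        # in the world frame rather than in the current gripper frame.)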
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if self.cfg_task.rl.unidirectional_force:
force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
self.fingerpad_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length
- self.asset_info_franka_table.franka_fingerpad_length * 0.5,
device=self.device,
)
self.finger_nut_keypoint_dist = self._get_keypoint_dist(body="finger_nut")
self.nut_keypoint_dist = self._get_keypoint_dist(body="nut")
self.nut_dist_to_target = torch.norm(
self.target_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and target
self.nut_dist_to_fingerpads = torch.norm(
self.fingerpad_midpoint_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and midpoint between centers of fingerpads
self.was_success = torch.zeros_like(self.progress_buf, dtype=torch.bool)
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_com_pos,
self.nut_com_quat,
self.nut_com_linvel,
self.nut_com_angvel,
]
if self.cfg_task.rl.add_obs_finger_force:
obs_tensors += [self.left_finger_force, self.right_finger_force]
else:
obs_tensors += [
torch.zeros_like(self.left_finger_force),
torch.zeros_like(self.right_finger_force),
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
# Get successful and failed envs at current timestep
curr_successes = self._get_curr_successes()
curr_failures = self._get_curr_failures(curr_successes)
self._update_reset_buf(curr_successes, curr_failures)
self._update_rew_buf(curr_successes)
if torch.any(self.is_expired):
self.extras["successes"] = torch.mean(curr_successes.float())
def _update_reset_buf(self, curr_successes, curr_failures) -> None:
"""Assign environments for reset if successful or failed."""
self.reset_buf[:] = self.is_expired
def _update_rew_buf(self, curr_successes) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist)
action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
+ curr_successes * self.cfg_task.rl.success_bonus
)
def _get_keypoint_dist(self, body) -> torch.Tensor:
"""Get keypoint distance."""
axis_length = (
self.asset_info_franka_table.franka_hand_length
+ self.asset_info_franka_table.franka_finger_length
)
if body == "finger" or body == "nut":
# Keypoint distance between finger/nut and target
if body == "finger":
self.keypoint1 = self.fingertip_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
elif body == "nut":
self.keypoint1 = self.nut_com_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint1_targ = self.target_pos
self.keypoint2_targ = self.keypoint1_targ + torch.tensor(
[0.0, 0.0, axis_length], device=self.device
)
elif body == "finger_nut":
# Keypoint distance between finger and nut
self.keypoint1 = self.fingerpad_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
self.keypoint1_targ = self.nut_com_pos
self.keypoint2_targ = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0
self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0
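        # (Keypoints 3 and 4 interpolate at 1/3 and 2/3 along the keypoint1-keypoint2
        # segment, so the summed distances below penalize both translational offset
        # and misalignment of the body axis with the target axis.)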
self.keypoint3_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0
)
self.keypoint4_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0
)
keypoint_dist = (
torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1)
+ torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1)
+ torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1)
+ torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1)
)
return keypoint_dist
def _get_curr_successes(self) -> torch.Tensor:
"""Get success mask at current timestep."""
curr_successes = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If nut is close enough to target pos
is_close = torch.where(
self.nut_dist_to_target < self.thread_pitches.squeeze(-1) * 5,
torch.ones_like(curr_successes),
torch.zeros_like(curr_successes),
)
curr_successes = torch.logical_or(curr_successes, is_close)
return curr_successes
def _get_curr_failures(self, curr_successes) -> torch.Tensor:
"""Get failure mask at current timestep."""
curr_failures = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If max episode length has been reached
self.is_expired = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut is too far from target pos
self.is_far = torch.where(
self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut has slipped (distance-based definition)
self.is_slipped = torch.where(
self.nut_dist_to_fingerpads
> self.asset_info_franka_table.franka_fingerpad_length * 0.5
+ self.nut_heights.squeeze(-1) * 0.5,
torch.ones_like(curr_failures),
curr_failures,
)
self.is_slipped = torch.logical_and(
self.is_slipped, torch.logical_not(curr_successes)
) # ignore slip if successful
# If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt)
self.is_fallen = torch.logical_and(
torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1)
> self.bolt_widths.squeeze(-1) * 0.5,
self.nut_com_pos[:, 2]
< self.cfg_base.env.table_height
+ self.bolt_head_heights.squeeze(-1)
+ self.bolt_shank_lengths.squeeze(-1)
+ self.nut_heights.squeeze(-1) * 0.5,
)
curr_failures = torch.logical_or(curr_failures, self.is_expired)
curr_failures = torch.logical_or(curr_failures, self.is_far)
curr_failures = torch.logical_or(curr_failures, self.is_slipped)
curr_failures = torch.logical_or(curr_failures, self.is_fallen)
return curr_failures
| 20,039 | Python | 37.390805 | 131 | 0.589051 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt pick task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPick
"""
import asyncio
import hydra
import omegaconf
import torch
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.torch.transformations import tf_combine
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPickPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Grasp pose tensors
nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM
self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
self.nut_grasp_quat_local = (
torch.tensor([0.0, 0.0, 1.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
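        # ([0, 0, 1, 0] in w-x-y-z is a 180-degree rotation about the Y axis,
        # flipping the grasp frame -- presumably so the gripper approaches the nut
        # from above.)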
# Keypoint tensors
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_gripper = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_nut = torch.zeros_like(
self.keypoints_gripper, device=self.device
)
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut
nut_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_xy = nut_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.nut_pos_xy_noise, device=self.device)
)
self.nut_pos[env_ids, 0] = (
self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[env_ids, 0]
)
self.nut_pos[env_ids, 1] = (
self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[env_ids, 1]
)
self.nut_pos[
env_ids, 2
] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1)
self.nut_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if self.cfg_task.env.close_and_lift:
self._close_gripper(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self._lift_gripper(
franka_gripper_width=0.0,
lift_distance=0.3,
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps,
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
async def post_physics_step_async(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if self.cfg_task.env.close_and_lift:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if is_last_step:
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
await self._lift_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of nut grasping frame
self.nut_grasp_quat, self.nut_grasp_pos = tf_combine(
self.nut_quat,
self.nut_pos,
self.nut_grasp_quat_local,
self.nut_grasp_pos_local,
)
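        # (tf_combine composes the nut's world pose with the local grasp offset,
        # yielding the grasp frame's quaternion and position in the world frame.)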
# Compute pos of keypoints on gripper and nut in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gripper[:, idx] = tf_combine(
self.fingertip_midpoint_quat,
self.fingertip_midpoint_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_nut[:, idx] = tf_combine(
self.nut_grasp_quat,
self.nut_grasp_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_grasp_pos,
self.nut_grasp_quat,
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reward and reset buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(
            self.actions, p=2, dim=-1
        )  # L2 norm of actions; action_penalty_scale is applied once below
        self.rew_buf[:] = (
            keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
            - action_penalty * self.cfg_task.rl.action_penalty_scale
        )
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is picked up and above table
lift_success = self._check_lift_success(height_multiple=3.0)
self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(lift_success.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
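        # Example: num_keypoints=4 gives z offsets [-0.5, -1/6, 1/6, 0.5] -- evenly
        # spaced along a unit segment centered at 0, scaled later by keypoint_scale.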
return keypoint_offsets
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _close_gripper(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
SimulationContext.step(self._env._world, render=True)
def _lift_gripper(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
SimulationContext.step(self._env._world, render=True)
async def _close_gripper_async(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps=20
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
async def _lift_gripper_async(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_lift_success(self, height_multiple) -> torch.Tensor:
"""Check if nut is above table by more than specified multiple times height of nut."""
lift_success = torch.where(
self.nut_pos[:, 2]
> self.cfg_base.env.table_height
+ self.nut_heights.squeeze(-1) * height_multiple,
torch.ones((self.num_envs,), device=self.device),
torch.zeros((self.num_envs,), device=self.device),
)
return lift_success
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
        # Step once to update PhysX with the newly set joint positions from reset_franka()
SimulationContext.step(self._env._world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
if not self._env._world.is_playing():
return
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
SimulationContext.step(self._env._world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
        # Step once to update PhysX with the newly set joint velocities
SimulationContext.step(self._env._world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
        # Step once to update PhysX with the newly set joint positions from reset_franka()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
        # Step once to update PhysX with the newly set joint velocities
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
| 31,568 | Python | 37.926017 | 131 | 0.589268 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for base class configuration.
Used by Hydra. Defines template for base class YAML file.
"""
from dataclasses import dataclass
@dataclass
class Mode:
export_scene: bool # export scene to USD
export_states: bool # export states to NPY
@dataclass
class Sim:
dt: float # timestep size (default = 1.0 / 60.0)
num_substeps: int # number of substeps (default = 2)
num_pos_iters: int # number of position iterations for PhysX TGS solver (default = 4)
num_vel_iters: int # number of velocity iterations for PhysX TGS solver (default = 1)
gravity_mag: float # magnitude of gravitational acceleration
add_damping: bool # add damping to stabilize gripper-object interactions
@dataclass
class Env:
env_spacing: float # lateral offset between envs
franka_depth: float # depth offset of Franka base relative to env origin
table_height: float # height of table
franka_friction: float # coefficient of friction associated with Franka
table_friction: float # coefficient of friction associated with table
@dataclass
class FactorySchemaConfigBase:
mode: Mode
sim: Sim
env: Env
| 2,724 | Python | 39.073529 | 90 | 0.757342 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_env_nut_bolt.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for nut-bolt env.
Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed.
Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml.
"""
import hydra
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omni.physx.scripts import physicsUtils, utils
from omniisaacgymenvs.robots.articulations.views.factory_franka_view import (
FactoryFrankaView,
)
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_base import FactoryBase
from omniisaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from omniisaacgymenvs.tasks.factory.factory_schema_config_env import (
FactorySchemaConfigEnv,
)
class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv):
def __init__(self, name, sim_config, env) -> None:
"""Initialize base superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_env_yaml_params()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv)
config_path = (
"task/FactoryEnvNutBolt.yaml" # relative to Hydra search path (cfg dir)
)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml"
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._num_observations = self._task_cfg["env"]["numObservations"]
self._num_actions = self._task_cfg["env"]["numActions"]
self._env_spacing = self.cfg_base["env"]["env_spacing"]
self._get_env_yaml_params()
def set_up_scene(self, scene) -> None:
"""Import assets. Add to scene."""
# Increase buffer size to prevent overflow for Place and Screw tasks
physxSceneAPI = self._env._world.get_physics_context()._physx_scene_api
physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(256 * 1024 * 1024)
self.import_franka_assets(add_to_stage=True)
self.create_nut_bolt_material()
RLTask.set_up_scene(self, scene, replicate_physics=False)
self._import_env_assets(add_to_stage=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*",
name="nuts_view",
track_contact_forces=True,
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*",
name="bolts_view",
track_contact_forces=True,
)
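        # (track_contact_forces=True exposes per-step net contact forces on the nut
        # and bolt prims through these views.)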
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
return
def initialize_views(self, scene) -> None:
"""Initialize views for extension workflow."""
super().initialize_views(scene)
self.import_franka_assets(add_to_stage=False)
self._import_env_assets(add_to_stage=False)
if scene.object_exists("frankas_view"):
scene.remove_object("frankas_view", registry_only=True)
if scene.object_exists("nuts_view"):
scene.remove_object("nuts_view", registry_only=True)
if scene.object_exists("bolts_view"):
scene.remove_object("bolts_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("fingertips_view"):
scene.remove_object("fingertips_view", registry_only=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*", name="nuts_view"
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*", name="bolts_view"
)
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
def create_nut_bolt_material(self):
"""Define nut and bolt material."""
self.nutboltPhysicsMaterialPath = "/World/Physics_Materials/NutBoltMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.nutboltPhysicsMaterialPath,
density=self.cfg_env.env.nut_bolt_density,
staticFriction=self.cfg_env.env.nut_bolt_friction,
dynamicFriction=self.cfg_env.env.nut_bolt_friction,
restitution=0.0,
)
def _import_env_assets(self, add_to_stage=True):
"""Set nut and bolt asset options. Import assets."""
self.nut_heights = []
self.nut_widths_max = []
self.bolt_widths = []
self.bolt_head_heights = []
self.bolt_shank_lengths = []
self.thread_pitches = []
assets_root_path = get_assets_root_path()
for i in range(0, self._num_envs):
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_nut_bolt[subassembly])
nut_translation = torch.tensor(
[
0.0,
self.cfg_env.env.nut_lateral_offset,
self.cfg_base.env.table_height,
],
device=self._device,
)
nut_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
nut_height = self.asset_info_nut_bolt[subassembly][components[0]]["height"]
nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]][
"width_max"
]
self.nut_heights.append(nut_height)
self.nut_widths_max.append(nut_width_max)
nut_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[0]]["usd_path"]
)
if add_to_stage:
add_reference_to_stage(nut_file, f"/World/envs/env_{i}" + "/nut")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/nut",
translation=nut_translation,
orientation=nut_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/nut/factory_{components[0]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/nut/factory_{components[0]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"nut",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/nut"),
self._sim_config.parse_actor_config("nut"),
)
bolt_translation = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self._device
)
bolt_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]["width"]
bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]][
"head_height"
]
bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]][
"shank_length"
]
self.bolt_widths.append(bolt_width)
self.bolt_head_heights.append(bolt_head_height)
self.bolt_shank_lengths.append(bolt_shank_length)
if add_to_stage:
bolt_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[1]]["usd_path"]
)
add_reference_to_stage(bolt_file, f"/World/envs/env_{i}" + "/bolt")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/bolt",
translation=bolt_translation,
orientation=bolt_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/bolt/factory_{components[1]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/bolt/factory_{components[1]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"bolt",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/bolt"),
self._sim_config.parse_actor_config("bolt"),
)
thread_pitch = self.asset_info_nut_bolt[subassembly]["thread_pitch"]
self.thread_pitches.append(thread_pitch)
# For computing body COM pos
self.nut_heights = torch.tensor(
self.nut_heights, device=self._device
).unsqueeze(-1)
self.bolt_head_heights = torch.tensor(
self.bolt_head_heights, device=self._device
).unsqueeze(-1)
# For setting initial state
self.nut_widths_max = torch.tensor(
self.nut_widths_max, device=self._device
).unsqueeze(-1)
self.bolt_shank_lengths = torch.tensor(
self.bolt_shank_lengths, device=self._device
).unsqueeze(-1)
# For defining success or failure
self.bolt_widths = torch.tensor(
self.bolt_widths, device=self._device
).unsqueeze(-1)
self.thread_pitches = torch.tensor(
self.thread_pitches, device=self._device
).unsqueeze(-1)
def refresh_env_tensors(self):
"""Refresh tensors."""
# Nut tensors
self.nut_pos, self.nut_quat = self.nuts.get_world_poses(clone=False)
self.nut_pos -= self.env_pos
self.nut_com_pos = fc.translate_along_local_z(
pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device,
)
self.nut_com_quat = self.nut_quat # always equal
nut_velocities = self.nuts.get_velocities(clone=False)
self.nut_linvel = nut_velocities[:, 0:3]
self.nut_angvel = nut_velocities[:, 3:6]
self.nut_com_linvel = self.nut_linvel + torch.cross(
self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1
)
self.nut_com_angvel = self.nut_angvel # always equal
self.nut_force = self.nuts.get_net_contact_forces(clone=False)
# Bolt tensors
self.bolt_pos, self.bolt_quat = self.bolts.get_world_poses(clone=False)
self.bolt_pos -= self.env_pos
self.bolt_force = self.bolts.get_net_contact_forces(clone=False)
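# The COM velocity above uses the rigid-body velocity-transport relation
# v_com = v_origin + omega x (r_com - r_origin). A minimal, self-contained
# sketch of that identity with plain torch (the helper name and numbers are
# illustrative only; no simulator required):
def _example_velocity_transport():
    import torch
    linvel = torch.tensor([[0.1, 0.0, 0.0]])  # velocity of the body origin
    angvel = torch.tensor([[0.0, 0.0, 1.0]])  # spin about +Z
    offset = torch.tensor([[0.0, 0.5, 0.0]])  # COM position relative to origin
    com_linvel = linvel + torch.cross(angvel, offset, dim=1)
    # Spinning about +Z carries a point at +Y toward -X: expect [-0.4, 0., 0.]
    return com_linvel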
| 14,709 | Python | 39.30137 | 110 | 0.603372 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_control.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: control module.
Imported by base, environment, and task classes. Not directly executed.
"""
import math
import omni.isaac.core.utils.torch as torch_utils
import torch
def compute_dof_pos_target(
cfg_ctrl,
arm_dof_pos,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
jacobian,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos,
device,
):
"""Compute Franka DOF position target to move fingertips towards target pose."""
ctrl_target_dof_pos = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos
ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints
return ctrl_target_dof_pos
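# A hedged usage sketch for compute_dof_pos_target. The tensors below are
# placeholders with the expected shapes for a 7-DOF arm plus 2-DOF gripper
# (quaternions are scalar-first, as elsewhere in this file); in the tasks it
# is called with live simulator state, and the helpers it uses require the
# omni torch_utils runtime, so the sketch is left as a comment:
#
#     cfg_ctrl = {"num_envs": 1, "jacobian_type": "geometric", "ik_method": "dls"}
#     arm_dof_pos = torch.zeros((1, 7))
#     jacobian = torch.rand((1, 6, 7))             # fingertip geometric Jacobian
#     pos = torch.zeros((1, 3))
#     quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity orientation
#     target = compute_dof_pos_target(
#         cfg_ctrl, arm_dof_pos, pos, quat, jacobian,
#         ctrl_target_fingertip_midpoint_pos=pos + 0.01,  # 1 cm offset goal
#         ctrl_target_fingertip_midpoint_quat=quat,
#         ctrl_target_gripper_dof_pos=0.04,
#         device="cpu",
#     )  # -> (1, 9): 7 arm joint targets followed by 2 gripper targets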
def compute_dof_torque(
cfg_ctrl,
dof_pos,
dof_vel,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
left_finger_force,
right_finger_force,
jacobian,
arm_mass_matrix,
ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench,
device,
):
"""Compute Franka DOF torque to move fingertips towards target pose."""
# References:
# 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# 2) Modern Robotics
dof_torque = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
if cfg_ctrl["gain_space"] == "joint":
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
dof_torque[:, 0:7] = cfg_ctrl[
"joint_prop_gains"
] * delta_arm_dof_pos + cfg_ctrl["joint_deriv_gains"] * (0.0 - dof_vel[:, 0:7])
if cfg_ctrl["do_inertial_comp"]:
# Set tau = M * tau, where M is the joint-space mass matrix
arm_mass_matrix_joint = arm_mass_matrix
dof_torque[:, 0:7] = (
arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)
).squeeze(-1)
elif cfg_ctrl["gain_space"] == "task":
task_wrench = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
if cfg_ctrl["do_motion_ctrl"]:
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98)
task_wrench_motion = _apply_task_space_gains(
delta_fingertip_pose=delta_fingertip_pose,
fingertip_midpoint_linvel=fingertip_midpoint_linvel,
fingertip_midpoint_angvel=fingertip_midpoint_angvel,
task_prop_gains=cfg_ctrl["task_prop_gains"],
task_deriv_gains=cfg_ctrl["task_deriv_gains"],
)
if cfg_ctrl["do_inertial_comp"]:
# Set tau = Lambda * tau, where Lambda is the task-space mass matrix
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
arm_mass_matrix_task = torch.inverse(
jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T
) # ETH eq. 3.86; geometric Jacobian is assumed
task_wrench_motion = (
arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)
).squeeze(-1)
task_wrench = (
task_wrench + cfg_ctrl["motion_ctrl_axes"] * task_wrench_motion
)
if cfg_ctrl["do_force_ctrl"]:
# Set tau = tau + F_t, where F_t is the target contact wrench
task_wrench_force = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
task_wrench_force = (
task_wrench_force + ctrl_target_fingertip_contact_wrench
) # open-loop force control (building towards ETH eq. 3.96-3.98)
if cfg_ctrl["force_ctrl_method"] == "closed":
force_error, torque_error = _get_wrench_error(
left_finger_force=left_finger_force,
right_finger_force=right_finger_force,
ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench,
num_envs=cfg_ctrl["num_envs"],
device=device,
)
# Set tau = tau + k_p * contact_wrench_error
task_wrench_force = task_wrench_force + cfg_ctrl[
"wrench_prop_gains"
] * torch.cat(
(force_error, torque_error), dim=1
) # part of Modern Robotics eq. 11.61
task_wrench = (
task_wrench
+ torch.tensor(cfg_ctrl["force_ctrl_axes"], device=device).unsqueeze(0)
* task_wrench_force
)
# Set tau = J^T * tau, i.e., map tau into joint space as desired
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1)
dof_torque[:, 7:9] = cfg_ctrl["gripper_prop_gains"] * (
ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]
) + cfg_ctrl["gripper_deriv_gains"] * (
0.0 - dof_vel[:, 7:9]
) # gripper finger joints
dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0)
return dof_torque
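# The joint-space branch above is a PD law, tau = k_p * dq + k_d * (0 - qdot),
# optionally premultiplied by the mass matrix for inertial compensation (ETH
# eq. 3.72). A minimal, self-contained numeric sketch of that law; the gains
# and errors are arbitrary illustration values, and the helper name is not
# part of the original module:
def _example_joint_space_pd():
    import torch
    k_p, k_d = 40.0, 8.0
    delta_q = torch.tensor([0.05, -0.02])  # joint position error (rad)
    q_dot = torch.tensor([0.10, 0.00])     # joint velocity (rad/s)
    tau = k_p * delta_q + k_d * (0.0 - q_dot)
    return tau  # tensor([1.2000, -0.8000])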
def get_pose_error(
fingertip_midpoint_pos,
fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
jacobian_type,
rot_error_type,
):
"""Compute task-space error between target Franka fingertip pose and current pose."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# Compute pos error
pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos
# Compute rot error
if (
jacobian_type == "geometric"
): # See example 2.9.8; note use of J_g and transformation between rotation vectors
# Compute quat error (i.e., difference quat)
        # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
fingertip_midpoint_quat_norm = torch_utils.quat_mul(
fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat)
)[
:, 0
] # scalar component
fingertip_midpoint_quat_inv = torch_utils.quat_conjugate(
fingertip_midpoint_quat
) / fingertip_midpoint_quat_norm.unsqueeze(-1)
quat_error = torch_utils.quat_mul(
ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv
)
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
elif (
jacobian_type == "analytic"
): # See example 2.9.7; note use of J_a and difference of rotation vectors
# Compute axis-angle error
axis_angle_error = axis_angle_from_quat(
ctrl_target_fingertip_midpoint_quat
) - axis_angle_from_quat(fingertip_midpoint_quat)
if rot_error_type == "quat":
return pos_error, quat_error
elif rot_error_type == "axis_angle":
return pos_error, axis_angle_error
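# The geometric branch above forms the difference quaternion
# q_err = q_target * q_current^-1 and converts it to axis-angle. A
# self-contained sketch of that step in the same scalar-first (w, x, y, z)
# convention, with plain-torch stand-ins for the omni torch_utils helpers;
# the helper names are illustrative only:
def _example_quat_error_angle():
    import math
    import torch
    def quat_mul(a, b):  # Hamilton product, scalar-first
        w1, x1, y1, z1 = a.unbind(-1)
        w2, x2, y2, z2 = b.unbind(-1)
        return torch.stack(
            (
                w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
                w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
            ),
            dim=-1,
        )
    def quat_conjugate(q):  # inverse of a unit quaternion
        return torch.cat((q[:, 0:1], -q[:, 1:4]), dim=-1)
    current = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity
    half = math.pi / 8.0                            # 45 deg rotation about Z
    target = torch.tensor([[math.cos(half), 0.0, 0.0, math.sin(half)]])
    quat_err = quat_mul(target, quat_conjugate(current))
    angle = 2.0 * torch.atan2(torch.linalg.norm(quat_err[:, 1:4], dim=1), quat_err[:, 0])
    return angle  # ~= pi / 4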
def _get_wrench_error(
left_finger_force,
right_finger_force,
ctrl_target_fingertip_contact_wrench,
num_envs,
device,
):
"""Compute task-space error between target Franka fingertip contact wrench and current wrench."""
fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device)
fingertip_contact_wrench[:, 0:3] = (
left_finger_force + right_finger_force
) # net contact force on fingers
# Cols 3 to 6 are all zeros, as we do not have enough information
force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (
-fingertip_contact_wrench[:, 0:3]
)
torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (
-fingertip_contact_wrench[:, 3:6]
)
return force_error, torque_error
def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device):
"""Get delta Franka DOF position from delta pose using specified IK method."""
# References:
# 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
# 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47)
if ik_method == "pinv": # Jacobian pseudoinverse
k_val = 1.0
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "trans": # Jacobian transpose
k_val = 1.0
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "dls": # damped least squares (Levenberg-Marquardt)
lambda_val = 0.1
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(
n=jacobian.shape[1], device=device
)
delta_dof_pos = (
jacobian_T
@ torch.inverse(jacobian @ jacobian_T + lambda_matrix)
@ delta_pose.unsqueeze(-1)
)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "svd": # adaptive SVD
k_val = 1.0
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1.0 / S
min_singular_value = 1.0e-5
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = (
torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6]
@ torch.diag_embed(S_inv)
@ torch.transpose(U, dim0=1, dim1=2)
)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
return delta_dof_pos
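# A self-contained numeric sketch of the damped-least-squares branch above,
# delta_q = J^T (J J^T + lambda^2 I)^-1 delta_x. The random Jacobian and pose
# error stand in for simulator values, and the helper name is illustrative:
def _example_dls_step():
    import torch
    torch.manual_seed(0)
    jacobian = torch.rand((1, 6, 7))  # batched 6x7 geometric Jacobian
    delta_pose = torch.rand((1, 6))   # 6-DOF task-space error
    lambda_val = 0.1
    jacobian_T = jacobian.transpose(1, 2)
    lambda_matrix = (lambda_val**2) * torch.eye(6)
    delta_q = (
        jacobian_T
        @ torch.inverse(jacobian @ jacobian_T + lambda_matrix)
        @ delta_pose.unsqueeze(-1)
    ).squeeze(-1)
    # For small lambda the step approximately reproduces the task-space error:
    residual = torch.norm(jacobian @ delta_q.unsqueeze(-1) - delta_pose.unsqueeze(-1))
    return delta_q, residual  # residual is small relative to ||delta_pose||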
def _apply_task_space_gains(
delta_fingertip_pose,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
task_prop_gains,
task_deriv_gains,
):
"""Interpret PD gains as task-space gains. Apply to task-space error."""
task_wrench = torch.zeros_like(delta_fingertip_pose)
# Apply gains to lin error components
lin_error = delta_fingertip_pose[:, 0:3]
task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + task_deriv_gains[
:, 0:3
] * (0.0 - fingertip_midpoint_linvel)
# Apply gains to rot error components
rot_error = delta_fingertip_pose[:, 3:6]
task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + task_deriv_gains[
:, 3:6
] * (0.0 - fingertip_midpoint_angvel)
return task_wrench
def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device):
"""Convert geometric Jacobian to analytic Jacobian."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# NOTE: Gym returns world-space geometric Jacobians by default
batch = num_envs
# Overview:
# x = [x_p; x_r]
# From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot
# From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv)
# Eq. 2.12 gives an expression for E_p_inv
# Eq. 2.107 gives an expression for E_r_inv
# Compute E_inv_top (i.e., [E_p_inv, 0])
I = torch.eye(3, device=device)
E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3)
E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2)
# Compute E_inv_bottom (i.e., [0, E_r_inv])
fingertip_axis_angle = axis_angle_from_quat(fingertip_quat)
fingertip_axis_angle_cross = get_skew_symm_matrix(
fingertip_axis_angle, device=device
)
fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1)
factor_1 = 1 / (fingertip_angle**2)
factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (
1 - torch.cos(fingertip_angle)
)
factor_3 = factor_1 * factor_2
E_r_inv = (
I
- 1 * 0.5 * fingertip_axis_angle_cross
+ (fingertip_axis_angle_cross @ fingertip_axis_angle_cross)
* factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3))
)
E_inv_bottom = torch.cat(
(torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2
)
E_inv = torch.cat(
(E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1
).reshape((batch, 6, 6))
J_a = E_inv @ fingertip_jacobian
return J_a
def get_skew_symm_matrix(vec, device):
"""Convert vector to skew-symmetric matrix."""
# Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
batch = vec.shape[0]
I = torch.eye(3, device=device)
skew_symm = torch.transpose(
torch.cross(
vec.repeat((1, 3)).reshape((batch * 3, 3)), I.repeat((batch, 1))
).reshape(batch, 3, 3),
dim0=1,
dim1=2,
)
return skew_symm
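# Sanity sketch for the helper above: the returned matrix satisfies
# skew(v) @ u == cross(v, u). Note that with a single vector the intermediate
# shape is (3, 3), where torch.cross's default size-3 dimension is ambiguous,
# so this sketch uses a batch of two; the helper name is illustrative only:
def _example_skew_symm_check():
    import torch
    v = torch.tensor([[1.0, 2.0, 3.0], [0.0, 1.0, 0.0]])
    u = torch.tensor([[4.0, 5.0, 6.0], [1.0, 0.0, 0.0]])
    skew = get_skew_symm_matrix(v, device="cpu")
    via_matrix = (skew @ u.unsqueeze(-1)).squeeze(-1)
    via_cross = torch.cross(v, u, dim=1)
    return torch.allclose(via_matrix, via_cross)  # True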
def translate_along_local_z(pos, quat, offset, device):
"""Translate global body position along local Z-axis and express in global coordinates."""
num_vecs = pos.shape[0]
offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat(
(num_vecs, 1)
)
_, translated_pos = torch_utils.tf_combine(
q1=quat,
t1=pos,
q2=torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat((num_vecs, 1)),
t2=offset_vec,
)
return translated_pos
def axis_angle_from_euler(euler):
"""Convert tensor of Euler angles to tensor of axis-angles."""
quat = torch_utils.quat_from_euler_xyz(
roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2]
)
quat = quat * torch.sign(quat[:, 0]).unsqueeze(-1) # smaller rotation
axis_angle = axis_angle_from_quat(quat)
return axis_angle
def axis_angle_from_quat(quat, eps=1.0e-6):
"""Convert tensor of quaternions to tensor of axis-angles."""
# Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544
mag = torch.linalg.norm(quat[:, 1:4], dim=1)
half_angle = torch.atan2(mag, quat[:, 0])
angle = 2.0 * half_angle
sin_half_angle_over_angle = torch.where(
torch.abs(angle) > eps, torch.sin(half_angle) / angle, 1 / 2 - angle**2.0 / 48
)
axis_angle = quat[:, 1:4] / sin_half_angle_over_angle.unsqueeze(-1)
return axis_angle
def axis_angle_from_quat_naive(quat):
"""Convert tensor of quaternions to tensor of axis-angles."""
    # Reference: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation
# NOTE: Susceptible to undesirable behavior due to divide-by-zero
mag = torch.linalg.vector_norm(quat[:, 1:4], dim=1) # zero when quat = [1, 0, 0, 0]
axis = quat[:, 1:4] / mag.unsqueeze(-1)
angle = 2.0 * torch.atan2(mag, quat[:, 0])
axis_angle = axis * angle.unsqueeze(-1)
return axis_angle
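# The torch.where in axis_angle_from_quat swaps in the Taylor expansion
# sin(theta/2) / theta ~= 1/2 - theta^2 / 48 near theta = 0, so the identity
# quaternion maps to a zero axis-angle instead of 0/0. A quick self-contained
# comparison against the naive variant (helper name illustrative only):
def _example_axis_angle_stability():
    import torch
    identity = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    stable = axis_angle_from_quat(identity)       # tensor([[0., 0., 0.]])
    naive = axis_angle_from_quat_naive(identity)  # tensor([[nan, nan, nan]])
    return stable, naive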
def get_rand_quat(num_quats, device):
"""Generate tensor of random quaternions."""
# Reference: http://planning.cs.uiuc.edu/node198.html
u = torch.rand((num_quats, 3), device=device)
quat = torch.zeros((num_quats, 4), device=device)
quat[:, 0] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
quat[:, 2] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
quat[:, 3] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
return quat
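# The construction above (from the reference) draws quaternions uniformly on
# SO(3); every sample is unit-norm by construction, since
# w^2 + z^2 = u0 and x^2 + y^2 = 1 - u0. A quick self-check (helper name
# illustrative only):
def _example_rand_quat_norm():
    import torch
    quat = get_rand_quat(num_quats=4, device="cpu")
    return torch.linalg.norm(quat, dim=1)  # all ~= 1.0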
def get_nonrand_quat(num_quats, rot_perturbation, device):
"""Generate tensor of non-random quaternions by composing random Euler rotations."""
quat = torch_utils.quat_from_euler_xyz(
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
)
return quat
| 19,859 | Python | 37.864971 | 163 | 0.627574 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_nut_bolt.yaml | nut_bolt_m4:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m4_tight/factory_nut_m4_tight.usd'
width_min: 0.007 # distance from flat surface to flat surface
width_max: 0.0080829 # distance from edge to edge
height: 0.0032 # height of nut
flat_length: 0.00404145 # length of flat surface
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m4_tight/factory_bolt_m4_tight.usd'
width: 0.004 # major diameter of bolt
head_height: 0.004 # height of bolt head
shank_length: 0.016 # length of bolt shank
thread_pitch: 0.0007 # distance between threads
nut_bolt_m8:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m8_tight/factory_nut_m8_tight.usd'
width_min: 0.013
width_max: 0.01501111
height: 0.0065
flat_length: 0.00750555
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m8_tight/factory_bolt_m8_tight.usd'
width: 0.008
head_height: 0.008
shank_length: 0.018
thread_pitch: 0.00125
nut_bolt_m12:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m12_tight/factory_nut_m12_tight.usd'
width_min: 0.019
width_max: 0.02193931
height: 0.010
flat_length: 0.01096966
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m12_tight/factory_bolt_m12_tight.usd'
width: 0.012
head_height: 0.012
shank_length: 0.020
thread_pitch: 0.00175
nut_bolt_m16:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m16_tight/factory_nut_m16_tight.usd'
width_min: 0.024
width_max: 0.02771281
height: 0.013
flat_length: 0.01385641
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m16_tight/factory_bolt_m16_tight.usd'
width: 0.016
head_height: 0.016
shank_length: 0.025
thread_pitch: 0.002
nut_bolt_m20:
nut:
usd_path: '/Isaac/Props/Factory/factory_nut_m20_tight/factory_nut_m20_tight.usd'
width_min: 0.030
width_max: 0.03464102
height: 0.016
flat_length: 0.01732051
bolt:
usd_path: '/Isaac/Props/Factory/factory_bolt_m20_tight/factory_bolt_m20_tight.usd'
width: 0.020
head_height: 0.020
shank_length: 0.045
thread_pitch: 0.0025
| 2,331 | YAML | 32.314285 | 90 | 0.617332 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/yaml/factory_asset_info_franka_table.yaml | franka_hand_length: 0.0584 # distance from origin of hand to origin of finger
franka_finger_length: 0.053671 # distance from origin of finger to bottom of fingerpad
franka_fingerpad_length: 0.017608 # distance from top of inner surface of fingerpad to bottom of inner surface of fingerpad
franka_gripper_width_max: 0.080 # maximum opening width of gripper
table_depth: 0.6 # depth of table
table_width: 1.0 # width of table
| 431 | YAML | 52.999993 | 124 | 0.772622 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/utils/anymal_terrain_generator.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
# terrain generator
class Terrain:
def __init__(self, cfg, num_robots) -> None:
self.horizontal_scale = 0.1
self.vertical_scale = 0.005
self.border_size = 20
self.num_per_env = 2
self.env_length = cfg["mapLength"]
self.env_width = cfg["mapWidth"]
self.proportions = [np.sum(cfg["terrainProportions"][: i + 1]) for i in range(len(cfg["terrainProportions"]))]
self.env_rows = cfg["numLevels"]
self.env_cols = cfg["numTerrains"]
self.num_maps = self.env_rows * self.env_cols
self.num_per_env = int(num_robots / self.num_maps)
self.env_origins = np.zeros((self.env_rows, self.env_cols, 3))
self.width_per_env_pixels = int(self.env_width / self.horizontal_scale)
self.length_per_env_pixels = int(self.env_length / self.horizontal_scale)
self.border = int(self.border_size / self.horizontal_scale)
self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border
self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border
self.height_field_raw = np.zeros((self.tot_rows, self.tot_cols), dtype=np.int16)
if cfg["curriculum"]:
            self.curriculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows)
else:
self.randomized_terrain()
self.heightsamples = self.height_field_raw
self.vertices, self.triangles = convert_heightfield_to_trimesh(
self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"]
)
def randomized_terrain(self):
for k in range(self.num_maps):
# Env coordinates in the world
(i, j) = np.unravel_index(k, (self.env_rows, self.env_cols))
# Heightfield coordinate system from now on
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
choice = np.random.uniform(0, 1)
if choice < 0.1:
if np.random.choice([0, 1]):
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2)
else:
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
elif choice < 0.6:
# step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18])
step_height = np.random.choice([-0.15, 0.15])
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < 1.0:
discrete_obstacles_terrain(terrain, 0.15, 1.0, 2.0, 40, platform_size=3.0)
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
    def curriculum(self, num_robots, num_terrains, num_levels):
num_robots_per_map = int(num_robots / num_terrains)
left_over = num_robots % num_terrains
idx = 0
for j in range(num_terrains):
for i in range(num_levels):
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
difficulty = i / num_levels
choice = j / num_terrains
slope = difficulty * 0.4
step_height = 0.05 + 0.175 * difficulty
discrete_obstacles_height = 0.025 + difficulty * 0.15
stepping_stones_size = 2 - 1.8 * difficulty
if choice < self.proportions[0]:
if choice < 0.05:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
elif choice < self.proportions[1]:
if choice < 0.15:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2)
elif choice < self.proportions[3]:
if choice < self.proportions[2]:
step_height *= -1
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < self.proportions[4]:
discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1.0, 2.0, 40, platform_size=3.0)
else:
stepping_stones_terrain(
terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0.0, platform_size=3.0
)
# Heightfield coordinate system
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
robots_in_map = num_robots_per_map
if j < left_over:
robots_in_map += 1
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
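# In the curriculum above, the level index i sets difficulty (slope, step
# height, obstacle height, stone size) and the terrain index j selects the
# terrain type via the cumulative `proportions` thresholds. A small,
# self-contained sketch of the difficulty schedule (the helper name and the
# five-level default are illustrative, not the task's config):
def _example_curriculum_schedule(num_levels=5):
    rows = []
    for i in range(num_levels):
        difficulty = i / num_levels
        rows.append(
            {
                "slope": difficulty * 0.4,
                "step_height": 0.05 + 0.175 * difficulty,
                "obstacle_height": 0.025 + difficulty * 0.15,
                "stone_size": 2 - 1.8 * difficulty,
            }
        )
    return rows  # each field grows (or shrinks) monotonically with the level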
| 8,852 | Python | 50.47093 | 119 | 0.591618 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/utils/usd_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from pxr import UsdLux, UsdPhysics
def set_drive_type(prim_path, drive_type):
joint_prim = get_prim_at_path(prim_path)
# set drive type ("angular" or "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, drive_type)
return drive
def set_drive_target_position(drive, target_value):
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
def set_drive_target_velocity(drive, target_value):
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
def set_drive_stiffness(drive, stiffness):
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
def set_drive_damping(drive, damping):
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
def set_drive_max_force(drive, max_force):
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
def set_drive(prim_path, drive_type, target_type, target_value, stiffness, damping, max_force) -> None:
drive = set_drive_type(prim_path, drive_type)
# set target type ("position" or "velocity")
if target_type == "position":
set_drive_target_position(drive, target_value)
elif target_type == "velocity":
set_drive_target_velocity(drive, target_value)
set_drive_stiffness(drive, stiffness)
set_drive_damping(drive, damping)
set_drive_max_force(drive, max_force)
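# A hedged usage sketch for set_drive. The prim path below is hypothetical and
# assumes a stage with an articulation is already open; a position drive
# typically pairs a nonzero stiffness with some damping, while a velocity
# drive typically sets stiffness to zero:
#
#     set_drive(
#         prim_path="/World/envs/env_0/robot/joints/joint1",  # hypothetical path
#         drive_type="angular",
#         target_type="position",
#         target_value=0.0,
#         stiffness=400.0,
#         damping=40.0,
#         max_force=100.0,
#     )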
| 3,403 | Python | 36.406593 | 103 | 0.740229 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shared/reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/tasks/shared/reacher.py
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
# `scale` maps [-1, 1] to [L, U]; `unscale` maps [L, U] to [-1, 1]
from omni.isaac.core.utils.torch import scale, unscale
from omni.isaac.gym.vec_env import VecEnvBase
class ReacherTask(RLTask):
def __init__(
self,
name: str,
env: VecEnvBase,
offset=None
) -> None:
ReacherTask.update_config(self)
RLTask.__init__(self, name, env)
self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = torch.tensor(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.success_tolerance = self._task_cfg["env"]["successTolerance"]
self.reach_goal_bonus = self._task_cfg["env"]["reachGoalBonus"]
self.rot_eps = self._task_cfg["env"]["rotEps"]
self.vel_obs_scale = self._task_cfg["env"]["velObsScale"]
self.reset_position_noise = self._task_cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self._task_cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self._task_cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self._task_cfg["env"]["resetDofVelRandomInterval"]
self.arm_dof_speed_scale = self._task_cfg["env"]["dofSpeedScale"]
self.use_relative_control = self._task_cfg["env"]["useRelativeControl"]
self.act_moving_average = self._task_cfg["env"]["actionsMovingAverage"]
self.max_episode_length = self._task_cfg["env"]["episodeLength"]
self.reset_time = self._task_cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self._task_cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self._task_cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self._task_cfg["env"].get("averFactor", 0.1)
self.dt = 1.0 / 60
control_freq_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
def set_up_scene(self, scene: Scene) -> None:
self._stage = get_current_stage()
self._assets_root_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0'
self.get_arm()
self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.get_object()
self.get_goal()
super().set_up_scene(scene)
self._arms = self.get_arm_view(scene)
scene.add(self._arms)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
)
self._objects._non_root_link = True # hack to ignore kinematics
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("dofbot_view"):
scene.remove_object("dofbot_view", registry_only=True)
if scene.object_exists("ur10_view"):
scene.remove_object("ur10_view", registry_only=True)
if scene.object_exists("kuka_view"):
scene.remove_object("kuka_view", registry_only=True)
if scene.object_exists("hiwin_view"):
scene.remove_object("hiwin_view", registry_only=True)
if scene.object_exists("goal_view"):
scene.remove_object("goal_view", registry_only=True)
if scene.object_exists("object_view"):
scene.remove_object("object_view", registry_only=True)
self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self._arms = self.get_arm_view(scene)
scene.add(self._arms)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
)
self._objects._non_root_link = True # hack to ignore kinematics
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
@abstractmethod
def get_num_dof(self):
pass
@abstractmethod
def get_arm(self):
pass
@abstractmethod
def get_arm_view(self):
pass
@abstractmethod
def get_observations(self):
pass
@abstractmethod
def get_reset_target_new_pos(self, n_reset_envs):
pass
@abstractmethod
def send_joint_pos(self, joint_pos):
pass
def get_object(self):
self.object_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd"
add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/object")
obj = XFormPrim(
prim_path=self.default_zero_env_path + "/object/object",
name="object",
translation=self.object_start_translation,
orientation=self.object_start_orientation,
scale=self.object_scale,
)
self._sim_config.apply_articulation_settings(
"object", get_prim_at_path(obj.prim_path), self._sim_config.parse_actor_config("object")
)
def get_goal(self):
self.goal_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd"
add_reference_to_stage(self.goal_usd_path, self.default_zero_env_path + "/goal")
goal = XFormPrim(
prim_path=self.default_zero_env_path + "/goal/object",
name="goal",
translation=self.goal_start_translation,
orientation=self.goal_start_orientation,
scale=self.goal_scale
)
self._sim_config.apply_articulation_settings("goal", get_prim_at_path(goal.prim_path), self._sim_config.parse_actor_config("goal_object"))
def post_reset(self):
self.num_arm_dofs = self.get_num_dof()
self.actuated_dof_indices = torch.arange(self.num_arm_dofs, dtype=torch.long, device=self.device)
self.arm_dof_targets = torch.zeros((self.num_envs, self._arms.num_dof), dtype=torch.float, device=self.device)
self.prev_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device)
dof_limits = self._dof_limits[:, :self.num_arm_dofs]
self.arm_dof_lower_limits, self.arm_dof_upper_limits = torch.t(dof_limits[0].to(self.device))
self.arm_dof_default_pos = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device)
self.arm_dof_default_vel = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device)
self.end_effectors_init_pos, self.end_effectors_init_rot = self._arms._end_effectors.get_world_poses()
self.goal_pos, self.goal_rot = self._goals.get_world_poses()
self.goal_pos -= self._env_pos
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self):
self.fall_dist = 0
self.fall_penalty = 0
(
self.rew_buf[:],
self.reset_buf[:],
self.reset_goal_buf[:],
self.progress_buf[:],
self.successes[:],
self.consecutive_successes[:],
) = compute_arm_reward(
self.rew_buf,
self.reset_buf,
self.reset_goal_buf,
self.progress_buf,
self.successes,
self.consecutive_successes,
self.max_episode_length,
self.object_pos,
self.object_rot,
self.goal_pos,
self.goal_rot,
self.dist_reward_scale,
self.rot_reward_scale,
self.rot_eps,
self.actions,
self.action_penalty_scale,
self.success_tolerance,
self.reach_goal_bonus,
self.fall_dist,
self.fall_penalty,
self.max_consecutive_successes,
self.av_factor,
)
self.extras["consecutive_successes"] = self.consecutive_successes.mean()
if self.print_success_stat:
self.total_resets = self.total_resets + self.reset_buf.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
# The direct average shows the overall result more quickly, but slightly undershoots long term policy performance.
print(
"Direct average consecutive successes = {:.1f}".format(
direct_average_successes / (self.total_resets + self.num_envs)
)
)
if self.total_resets > 0:
print(
"Post-Reset average consecutive successes = {:.1f}".format(self.total_successes / self.total_resets)
)
def pre_physics_step(self, actions):
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
end_effectors_pos, end_effectors_rot = self._arms._end_effectors.get_world_poses()
# Reverse the default rotation and rotate the displacement tensor according to the current rotation
self.object_pos = end_effectors_pos + quat_rotate(end_effectors_rot, quat_rotate_inverse(self.end_effectors_init_rot, self.get_object_displacement_tensor()))
self.object_pos -= self._env_pos # subtract world env pos
self.object_rot = end_effectors_rot
object_pos = self.object_pos + self._env_pos
object_rot = self.object_rot
self._objects.set_world_poses(object_pos, object_rot)
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids) == 0:
self.reset_target_pose(goal_env_ids)
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device)
        # Reacher tasks don't require gripper actions, so disable them.
self.actions[:, 5] = 0.0
if self.use_relative_control:
targets = (
self.prev_targets[:, self.actuated_dof_indices] + self.arm_dof_speed_scale * self.dt * self.actions
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
targets,
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(
self.actions[:, :self.num_arm_dofs],
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
self.cur_targets[:, self.actuated_dof_indices] = (
self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices]
+ (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
self.cur_targets[:, self.actuated_dof_indices],
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self._arms.set_joint_position_targets(
self.cur_targets[:, self.actuated_dof_indices], indices=None, joint_indices=self.actuated_dof_indices
)
if self._task_cfg['sim2real']['enabled'] and self.test and self.num_envs == 1:
# Only retrieve the 0-th joint position even when multiple envs are used
cur_joint_pos = self._arms.get_joint_positions(indices=[0], joint_indices=self.actuated_dof_indices)
# Send the current joint positions to the real robot
joint_pos = cur_joint_pos[0]
if torch.any(joint_pos < self.arm_dof_lower_limits) or torch.any(joint_pos > self.arm_dof_upper_limits):
print("get_joint_positions out of bound, send_joint_pos skipped")
else:
self.send_joint_pos(joint_pos)
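    # The action pipeline in pre_physics_step above first maps the normalized
    # actions from [-1, 1] into joint limits with `scale`, then low-pass
    # filters the targets with an exponential moving average. A hedged,
    # plain-torch sketch of those two steps (the limits and smoothing factor
    # below are placeholders, not this task's config):
    #
    #     lower = torch.tensor([-1.57]); upper = torch.tensor([1.57])
    #     prev_target = torch.tensor([0.0])
    #     action = torch.tensor([0.5])                              # in [-1, 1]
    #     target = 0.5 * (action + 1.0) * (upper - lower) + lower   # what `scale` computes
    #     alpha = 0.3                                               # actionsMovingAverage
    #     smoothed = alpha * target + (1.0 - alpha) * prev_target   # ~0.2355 rad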
def is_done(self):
pass
def reset_target_pose(self, env_ids):
# reset goal
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
new_pos = self.get_reset_target_new_pos(len(env_ids))
new_rot = randomize_rotation(
rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
self.goal_pos[env_ids] = new_pos
self.goal_rot[env_ids] = new_rot
goal_pos, goal_rot = self.goal_pos.clone(), self.goal_rot.clone()
goal_pos[env_ids] = (
self.goal_pos[env_ids] + self._env_pos[env_ids]
) # add world env pos
self._goals.set_world_poses(goal_pos[env_ids], goal_rot[env_ids], indices)
self.reset_goal_buf[env_ids] = 0
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_arm_dofs * 2 + 5), device=self.device)
self.reset_target_pose(env_ids)
# reset arm
delta_max = self.arm_dof_upper_limits - self.arm_dof_default_pos
delta_min = self.arm_dof_lower_limits - self.arm_dof_default_pos
rand_delta = delta_min + (delta_max - delta_min) * (rand_floats[:, 5:5+self.num_arm_dofs] + 1.0) * 0.5
pos = self.arm_dof_default_pos + self.reset_dof_pos_noise * rand_delta
dof_pos = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device)
dof_pos[env_ids, :self.num_arm_dofs] = pos
dof_vel = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device)
dof_vel[env_ids, :self.num_arm_dofs] = self.arm_dof_default_vel + \
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_arm_dofs:5+self.num_arm_dofs*2]
self.prev_targets[env_ids, :self.num_arm_dofs] = pos
self.cur_targets[env_ids, :self.num_arm_dofs] = pos
self.arm_dof_targets[env_ids, :self.num_arm_dofs] = pos
self._arms.set_joint_position_targets(self.arm_dof_targets[env_ids], indices)
# set_joint_positions doesn't seem to apply immediately.
self._arms.set_joint_positions(dof_pos[env_ids], indices)
self._arms.set_joint_velocities(dof_vel[env_ids], indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.successes[env_ids] = 0
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(
quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
)
@torch.jit.script
def compute_arm_reward(
rew_buf,
reset_buf,
reset_goal_buf,
progress_buf,
successes,
consecutive_successes,
max_episode_length: float,
object_pos,
object_rot,
target_pos,
target_rot,
dist_reward_scale: float,
rot_reward_scale: float,
rot_eps: float,
actions,
action_penalty_scale: float,
success_tolerance: float,
reach_goal_bonus: float,
fall_dist: float,
fall_penalty: float,
max_consecutive_successes: int,
av_factor: float,
):
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
rot_dist = 2.0 * torch.asin(
torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)
) # changed quat convention
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions**2, dim=-1)
    # Total reward is: position distance + action regularization + success bonus
    # (the orientation and fall terms are computed above but unused in this reacher variant)
reward = dist_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(goal_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
    # Success bonus: object position is within `success_tolerance` of goal position
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
resets = reset_buf
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(
torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf
)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
cons_successes = torch.where(
num_resets > 0,
av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes,
consecutive_successes,
)
return reward, resets, goal_resets, progress_buf, successes, cons_successes
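# The orientation term above, 2 * asin(clamp(||q_diff[1:4]||, max=1)), is the
# wrapped rotation angle between the two orientations for scalar-first unit
# quaternions. A self-contained numeric sketch (helper name illustrative only):
def _example_rot_dist():
    import math
    import torch
    half = math.pi / 6.0  # quaternion half-angle of a 60-degree rotation about Z
    quat_diff = torch.tensor([[math.cos(half), 0.0, 0.0, math.sin(half)]])
    rot_dist = 2.0 * torch.asin(
        torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)
    )
    return rot_dist  # ~= pi / 3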
| 22,312 | Python | 42.836935 | 165 | 0.629482 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
class LocomotionTask(RLTask):
def __init__(self, name, env, offset=None) -> None:
LocomotionTask.update_config(self)
RLTask.__init__(self, name, env)
return
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
velocity = velocities[:, 0:3]
ang_velocity = velocities[:, 3:6]
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces(joint_indices=self._sensor_indices)
(
self.obs_buf[:],
self.potentials[:],
self.prev_potentials[:],
self.up_vec[:],
self.heading_vec[:],
) = get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
self.targets,
self.potentials,
self.dt,
self.inv_start_rot,
self.basis_vec0,
self.basis_vec1,
self.dof_limits_lower,
self.dof_limits_upper,
self.dof_vel_scale,
sensor_force_torques,
self._num_envs,
self.contact_force_scale,
self.actions,
self.angular_velocity_scale,
)
observations = {self._robots.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
forces = self.actions * self.joint_gears * self.power_scale
indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
# applies joint torques
self._robots.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions and velocities
dof_pos = torch_rand_float(-0.2, 0.2, (num_resets, self._robots.num_dof), device=self._device)
dof_pos[:] = tensor_clamp(self.initial_dof_pos[env_ids] + dof_pos, self.dof_limits_lower, self.dof_limits_upper)
dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, self._robots.num_dof), device=self._device)
root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
self._robots.set_joint_positions(dof_pos, indices=env_ids)
self._robots.set_joint_velocities(dof_vel, indices=env_ids)
self._robots.set_world_poses(root_pos, root_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
to_target = self.targets[env_ids] - self.initial_root_pos[env_ids]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = torch.tensor([1000, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.target_dirs = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.dt = 1.0 / 60.0
self.potentials = torch.tensor([-1000.0 / self.dt], dtype=torch.float32, device=self._device).repeat(
self.num_envs
)
self.prev_potentials = self.potentials.clone()
self.actions = torch.zeros((self.num_envs, self.num_actions), device=self._device)
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = calculate_metrics(
self.obs_buf,
self.actions,
self.up_weight,
self.heading_weight,
self.potentials,
self.prev_potentials,
self.actions_cost_scale,
self.energy_cost_scale,
self.termination_height,
self.death_cost,
self._robots.num_dof,
self.get_dof_at_limit_cost(),
self.alive_reward_scale,
self.motor_effort_ratio,
)
def is_done(self) -> None:
self.reset_buf[:] = is_done(
self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
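# e.g. normalize_angle(torch.tensor(1.5 * math.pi)) ~= -pi/2: atan2 folds any angle
# into (-pi, pi], keeping yaw/roll/angle_to_target bounded in the observation buffer.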
@torch.jit.script
def get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
targets,
potentials,
dt,
inv_start_rot,
basis_vec0,
basis_vec1,
dof_limits_lower,
dof_limits_upper,
dof_vel_scale,
sensor_force_torques,
num_envs,
contact_force_scale,
actions,
angular_velocity_scale,
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, int, float, Tensor, float) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
to_target = targets - torso_position
to_target[:, 2] = 0.0
prev_potentials = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2
)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position
)
dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
obs = torch.cat(
(
torso_position[:, 2].view(-1, 1),
vel_loc,
angvel_loc * angular_velocity_scale,
normalize_angle(yaw).unsqueeze(-1),
normalize_angle(roll).unsqueeze(-1),
normalize_angle(angle_to_target).unsqueeze(-1),
up_proj.unsqueeze(-1),
heading_proj.unsqueeze(-1),
dof_pos_scaled,
dof_vel * dof_vel_scale,
sensor_force_torques.reshape(num_envs, -1) * contact_force_scale,
actions,
),
dim=-1,
)
return obs, potentials, prev_potentials, up_vec, heading_vec
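# Editor's sketch: the observation width implied by the layout comment above.
# The example numbers (8 DOFs, 4 foot sensors, 8 actions) are assumptions that
# happen to match the Ant robot; other robots will differ.
#   obs_dim = 1 + 3 + 3 + 1 + 1 + 1 + 1 + 1 + num_dofs + num_dofs + num_sensors * 6 + num_actions
#           = 12 + 8 + 8 + 24 + 8 = 60  (for num_dofs=8, num_sensors=4, num_actions=8)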
@torch.jit.script
def is_done(obs_buf, termination_height, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, float, Tensor, Tensor, float) -> Tensor
reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return reset
@torch.jit.script
def calculate_metrics(
obs_buf,
actions,
up_weight,
heading_weight,
potentials,
prev_potentials,
actions_cost_scale,
energy_cost_scale,
termination_height,
death_cost,
num_dof,
dof_at_limit_cost,
alive_reward_scale,
motor_effort_ratio,
):
# type: (Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, int, Tensor, float, Tensor) -> Tensor
heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)
# aligning up axis of robot and environment
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)
# energy penalty for movement
actions_cost = torch.sum(actions**2, dim=-1)
electricity_cost = torch.sum(
torch.abs(actions * obs_buf[:, 12 + num_dof : 12 + num_dof * 2]) * motor_effort_ratio.unsqueeze(0), dim=-1
)
# reward for duration of staying alive
alive_reward = torch.ones_like(potentials) * alive_reward_scale
progress_reward = potentials - prev_potentials
total_reward = (
progress_reward
+ alive_reward
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost
)
# adjust reward for fallen agents
total_reward = torch.where(
obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward
)
return total_reward
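# Editor's note: observation indices referenced above, following the
# concatenation order in get_observations():
#   obs_buf[:, 0]                             -> torso height (termination test)
#   obs_buf[:, 10]                            -> up_proj (up-axis alignment reward)
#   obs_buf[:, 11]                            -> heading_proj (heading reward)
#   obs_buf[:, 12 + num_dof : 12 + 2*num_dof] -> scaled DOF velocities (electricity cost)
# progress_reward = potentials - prev_potentials, i.e. the per-step decrease in
# distance to the target divided by dt (see get_observations / reset_idx).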
| 13,249 | Python | 37.294798 | 214 | 0.628802 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/sim2real/dofbot.py | # Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import struct
import time
import numpy as np
class RealWorldDofbot():
# Defined in dofbot.usd
sim_dof_angle_limits = [
(-90, 90, False),
(-90, 90, False),
(-90, 90, False),
(-90, 90, False),
(-90, 180, False),
(-30, 60, True),
# (-30, 60): /arm_01/link5/Finger_Left_01/Finger_Left_01_RevoluteJoint
# (-60, 30): /arm_01/link5/Finger_Right_01/Finger_Right_01_RevoluteJoint
] # sim_dof_angle_limits[i][2] == True indicates the sim joint angle is inverted relative to the real servo
# Ref: Section `6.5 Control all servo` in http://www.yahboom.net/study/Dofbot-Jetson_nano
servo_angle_limits = [
(0, 180),
(0, 180),
(0, 180),
(0, 180),
(0, 270),
(0, 180),
]
def __init__(self, IP, PORT, fail_quietely=False, verbose=False) -> None:
print("Connecting to real-world Dofbot at IP:", IP, "and port:", PORT)
self.fail_quietely = fail_quietely
self.failed = False
self.last_sync_time = 0
self.sync_hz = 10000
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (IP, PORT)
self.sock.connect(server_address)
print("Connected to real-world Dofbot!")
except socket.error as e:
self.failed = True
print("Connection to real-world Dofbot failed!")
if self.fail_quietely:
print(e)
else:
raise e
def send_joint_pos(self, joint_pos):
if time.time() - self.last_sync_time < 1 / self.sync_hz:
return
self.last_sync_time = time.time()
if len(joint_pos) != 6:
raise Exception("The length of Dofbot joint_pos is {}, but should be 6!".format(len(joint_pos)))
# Convert Sim angles to Real angles
servo_angles = [90] * 6
for i, pos in enumerate(joint_pos):
if i == 5:
# Ignore the gripper joints for Reacher task
continue
# Map [L, U] to [A, B]
L, U, inversed = self.sim_dof_angle_limits[i]
A, B = self.servo_angle_limits[i]
angle = np.rad2deg(float(pos))
if not L <= angle <= U:
print("The {}-th simulation joint angle ({}) is out of range! Should be in [{}, {}]".format(i, angle, L, U))
angle = np.clip(angle, L, U)
servo_angles[i] = (angle - L) * ((B-A)/(U-L)) + A # Map [L, U] to [A, B]
if inversed:
servo_angles[i] = (B-A) - (servo_angles[i] - A) + A # Map [A, B] to [B, A]
if not A <= servo_angles[i] <= B:
raise Exception("(Should Not Happen) The {}-th real world joint angle ({}) is out of range! hould be in [{}, {}]".format(i, servo_angles[i], A, B))
print("Sending real-world Dofbot joint angles:", servo_angles)
if self.failed:
print("Cannot send joint states. Not connected to real-world Dofbot!")
return
packer = struct.Struct("f f f f f f")
packed_data = packer.pack(*servo_angles)
try:
self.sock.sendall(packed_data)
except socket.error as e:
self.failed = True
print("Send to real-world Dofbot failed!")
if self.fail_quietely:
print(e)
else:
raise e
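# --- Editor's sketch (illustration only; not used by the class above) ----------
# The [L, U] -> [A, B] servo mapping from send_joint_pos(), factored out as a
# hypothetical helper so the arithmetic is easy to verify in isolation.
def _map_sim_to_servo(angle_deg: float, L: float, U: float, A: float, B: float, inversed: bool = False) -> float:
    angle_deg = float(np.clip(angle_deg, L, U))
    servo = (angle_deg - L) * ((B - A) / (U - L)) + A  # map [L, U] onto [A, B]
    if inversed:
        servo = (B - A) - (servo - A) + A  # reflect within [A, B]
    return servo
# e.g. _map_sim_to_servo(0.0, -90, 90, 0, 180) == 90.0 (sim zero -> servo midpoint),
# matching the default `servo_angles = [90] * 6` used before the mapping loop.
# --------------------------------------------------------------------------------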
if __name__ == "__main__":
IP = input("Enter Dofbot's IP: ")
PORT = input("Enter Dofbot's Port: ")
dofbot = RealWorldDofbot(IP, int(PORT))
pos = [np.deg2rad(0)] * 6
dofbot.send_joint_pos(pos)
print("Dofbot joint angles reset.")
| 5,238 | Python | 40.912 | 163 | 0.605766 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# evaluate checkpoint
evaluation: False
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 90
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'omniisaacgymenvs'
# path to a kit app file
kit_app: ''
# Warp
warp: False
# set default task and default training config based on task
defaults:
- _self_
- task: Cartpole
- train: ${task}PPO
- override hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
use_urdf: False
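# Usage sketch (editor's addition; the script path is assumed from the standard
# OmniIsaacGymEnvs layout): every key above is Hydra-overridable on the CLI, e.g.
#   PYTHON_PATH scripts/rlgames_train.py task=Ant num_envs=512 headless=True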
| 1,739 | YAML | 21.894737 | 103 | 0.738355 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/cfg/task/CartpoleCamera.yaml | defaults:
- Cartpole
- _self_
name: CartpoleCamera
env:
numEnvs: ${resolve_default:32,${...num_envs}}
envSpacing: 20.0
cameraWidth: 240
cameraHeight: 160
exportImages: False
sim:
rendering_dt: 0.0166 # 1/60
# set to True if you use camera sensors in the environment
enable_cameras: True
add_ground_plane: False
add_distant_light: True
| 363 | YAML | 16.333333 | 60 | 0.69697 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/cfg/task/FrankaDeformable.yaml | # used to create the object
name: FrankaDeformable
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:1024,${...num_envs}} # 2048#4096
envSpacing: 3.0
episodeLength: 100 # 150 #350 #500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 8 # 12
solver_velocity_iteration_count: 0 # 1
contact_offset: 0.02 #0.005
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288 #20965884
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 4194304 #2097152 #16777216 #8388608 #2097152 #1048576
gpu_max_particle_contacts: 1048576 #2097152 #1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
franka:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
beaker:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cube:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
# # per-shape
# contact_offset: 0.02
# rest_offset: 0.001
| 3,421 | YAML | 25.944882 | 85 | 0.691903 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/cfg/task/Ant.yaml | # used to create the object
name: Ant
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
# numEnvs: ${...num_envs}
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 0.5
controlFrequencyInv: 2 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.005
energyCost: 0.05
dofVelocityScale: 0.2
angularVelocityScale: 1.0
contactForceScale: 0.1
jointsAtLimitCost: 0.1
deathCost: -2.0
terminationHeight: 0.31
alive_reward_scale: 0.5
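# Editor's note: the reward/cost keys in this env block (headingWeight, upWeight,
# actionsCost, energyCost, jointsAtLimitCost, deathCost, alive_reward_scale, ...)
# are read by LocomotionTask.update_config() and feed the matching terms in
# tasks/shared/locomotion.py::calculate_metrics().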
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 10.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Ant:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0 | 2,370 | YAML | 24.771739 | 71 | 0.690717 |