ft-lab/omniverse_sample_scripts/Operation/UNDO/CreateSphereUndo.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.kit.commands
import omni.kit.undo

# Get stage.
stage = omni.usd.get_context().get_stage()

# Process to create a sphere.
class MyCreateSphere (omni.kit.commands.Command):
    _path = ""

    def __init__ (self, path : str):
        self._path = path

    def do (self):
        sphereGeom = UsdGeom.Sphere.Define(stage, self._path)

        # Set radius.
        sphereGeom.CreateRadiusAttr(5.0)

        # Set color.
        sphereGeom.CreateDisplayColorAttr([(1.0, 0.0, 0.0)])

        # Set position.
        UsdGeom.XformCommonAPI(sphereGeom).SetTranslate((0.0, 5.0, 0.0))

    def undo (self):
        stage.RemovePrim(self._path)

# Create sphere.
pathName = '/World/sphere'

# Register a Class and run it.
omni.kit.commands.register(MyCreateSphere)
omni.kit.commands.execute("MyCreateSphere", path=pathName)

# UNDO.
omni.kit.undo.undo()

# REDO.
omni.kit.undo.redo()
```
ft-lab/omniverse_sample_scripts/Operation/UNDO/readme.md
# UNDO

UNDO handling. In particular, when modifying Skeleton data there were cases where operations did not work correctly unless they supported UNDO.
"Bundled Extensions/omni.kit.commands" in the Omniverse Kit documentation is a useful reference.

|File|Description|
|---|---|
|[simpleClassUndo.py](./simpleClassUndo.py)|UNDO support using a class.|
|[CreateSphereUndo.py](./CreateSphereUndo.py)|Sphere creation with UNDO support; sets radius, color, and position.|
ft-lab/omniverse_sample_scripts/Operation/UNDO/simpleClassUndo.py
# From "Bundled Extensions/omni.kit.commands" in Omniverse Kit documentation. import omni.kit.commands import omni.kit.undo # Class for UNDO processing. class MyOrange (omni.kit.commands.Command): def __init__ (self, bar: list): self._bar = bar def do (self): self._bar.append('orange') def undo (self): del self._bar[-1] # Register a Class and run it. omni.kit.commands.register(MyOrange) my_list = [] omni.kit.commands.execute("MyOrange", bar=my_list) print(my_list) # UNDO. omni.kit.undo.undo() print(my_list) # REDO. omni.kit.undo.redo() print(my_list)
ft-lab/omniverse_sample_scripts/Operation/Selection/GetSelection.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd

# Get selection.
selection = omni.usd.get_context().get_selection()
paths = selection.get_selected_prim_paths()

for path in paths:
    print(path)
```
ft-lab/omniverse_sample_scripts/Operation/Selection/readme.md
# Selection

Get the Prim selection in the Stage window.

|File|Description|
|---|---|
|[GetSelection.py](./GetSelection.py)|Get the selected Prims|
|[IsSelected.py](./IsSelected.py)|Check whether the Prim at a given path is selected|
|[Select.py](./Select.py)|Select the Prim at a given path|
|[EventSelection.py](./EventSelection.py)|Receive selection-change events and print the selected Prim names|
|[EventSelection_showFacesCount.py](./EventSelection_showFacesCount.py)|Show the selected Prim names and the face count of their Meshes (including children) in the viewport<br>![EventSelection_showFacesCount.jpg](./images/EventSelection_showFacesCount.jpg)|
ft-lab/omniverse_sample_scripts/Operation/Selection/EventSelection.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd

# Get context.
context = omni.usd.get_context()

# Get stage.
stage = context.get_stage()

# ---------------------------------------------.
# Selected event.
# ---------------------------------------------.
def onStageEvent(evt):
    if evt.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
        # Get selection paths.
        selection = omni.usd.get_context().get_selection()
        paths = selection.get_selected_prim_paths()

        for path in paths:
            prim = stage.GetPrimAtPath(path)
            if prim.IsValid():
                print('Selected [ ' + prim.GetName() + ' ]')

# ------------------------------------------------.
# Register for stage events.
# Specify "subs=None" to end the event.
subs = context.get_stage_event_stream().create_subscription_to_pop(onStageEvent, name="sampleStageEvent")
```
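A note on the last line above: the event stream stays subscribed for as long as the `subs` object is referenced. A minimal sketch of ending the subscription, as the closing comment suggests:

```python
# Dropping the reference releases the subscription and stops the callbacks.
subs = None
```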
ft-lab/omniverse_sample_scripts/Operation/Selection/IsSelected.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd

# Get selection.
selection = omni.usd.get_context().get_selection()

pathStr = '/World'
selectedF = selection.is_prim_path_selected(pathStr)
if selectedF:
    print('[ ' + pathStr + ' ] Selected')
else:
    print('[ ' + pathStr + ' ] Not selected')
```
ft-lab/omniverse_sample_scripts/Operation/Selection/EventSelection_showFacesCount.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.ui
import omni.kit.app

# Get context.
context = omni.usd.get_context()

# Get stage.
stage = context.get_stage()

# Get main window viewport.
window = omni.ui.Window('Viewport')

# ---------------------------------------------.
# Get the number of faces in the mesh.
# ---------------------------------------------.
def GetFacesCount (prim):
    if not prim.IsValid():
        return 0

    typeName = prim.GetTypeName()
    allCou = 0
    if typeName == 'Mesh':
        m = UsdGeom.Mesh(prim)

        # If it is displayed.
        if m.ComputeVisibility() == 'inherited':
            # Get the number of faces of Mesh.
            allCou += len(m.GetFaceVertexCountsAttr().Get())

    # Recursively traverse the hierarchy.
    pChildren = prim.GetChildren()
    for cPrim in pChildren:
        allCou += GetFacesCount(cPrim)

    return allCou

# ---------------------------------------------.
# Update Viewport UI.
# Show the number of faces of the selected shape in the Viewport.
# ---------------------------------------------.
def UpdateViewportUI(paths):
    if len(paths) == 0:
        with window.frame:
            with omni.ui.VStack(height=0):
                with omni.ui.Placer(offset_x=20, offset_y=0):
                    omni.ui.Spacer(width=0, height=8)
        return

    with window.frame:
        with omni.ui.VStack(height=0):
            with omni.ui.Placer(offset_x=20, offset_y=50):
                f = omni.ui.Label("--- Selection Shapes ---")
                f.visible = True
                f.set_style({"color": 0xff00ffff, "font_size": 32})
            with omni.ui.Placer(offset_x=20, offset_y=0):
                omni.ui.Spacer(width=0, height=8)

            # Show selection shape name.
            for path in paths:
                prim = stage.GetPrimAtPath(path)
                if prim.IsValid():
                    facesCou = GetFacesCount(prim)
                    with omni.ui.Placer(offset_x=28, offset_y=0):
                        f2 = omni.ui.Label('[ ' + prim.GetName() + ' ] faces ' + str(facesCou))
                        f2.visible = True
                        f2.set_style({"color": 0xff00ff00, "font_size": 32})

# ---------------------------------------------.
# Selected event.
# ---------------------------------------------.
def onStageEvent(evt):
    if evt.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
        # Get selection paths.
        selection = omni.usd.get_context().get_selection()
        paths = selection.get_selected_prim_paths()

        # Show selected shapes info.
        UpdateViewportUI(paths)

# ------------------------------------------------.
# Register for stage events.
# Specify "subs=None" to end the event.
subs = context.get_stage_event_stream().create_subscription_to_pop(onStageEvent, name="sampleStageEvent")
```
ft-lab/omniverse_sample_scripts/Operation/Selection/Select.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd

# Get selection.
selection = omni.usd.get_context().get_selection()

# Select one.
selection.set_selected_prim_paths(['/World'], True)

# Multiple selection.
selection.set_selected_prim_paths(['/World', '/World/defaultLight'], True)

# Deselection.
selection.clear_selected_prim_paths()
```
ft-lab/omniverse_sample_scripts/Operation/Keyboard/readme.md
# Keyboard

Get keyboard input.

## Samples

|File|Description|
|---|---|
|[InputKeyboard.py](./InputKeyboard.py)|Get keyboard input.|
|[InputKeyboard_ShowViewport.py](./InputKeyboard_ShowViewport.py)|Get keyboard input and show the pressed key at the top left of the Viewport.|
ft-lab/omniverse_sample_scripts/Operation/Keyboard/InputKeyboard.py
```python
from pxr import Usd, UsdGeom, UsdSkel, UsdShade, Sdf, Gf, Tf
import carb
import carb.input
import omni.appwindow
import omni.kit.app
import omni.ext

# ------------------------------------------.
# Input with Keyboard.
# ------------------------------------------.
class InputKeyboard:
    _keyboard = None
    _input = None
    _keyboard_subs = None

    def __init__(self):
        pass

    # Keyboard event.
    def _keyboard_event (self, event : carb.input.KeyboardEvent):
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            print("KEY_PRESS : " + str(event.input))
        if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            print("KEY_RELEASE : " + str(event.input))
        return True

    def startup (self):
        # Assign keyboard event.
        appwindow = omni.appwindow.get_default_app_window()
        self._keyboard = appwindow.get_keyboard()
        self._input = carb.input.acquire_input_interface()
        self._keyboard_subs = self._input.subscribe_to_keyboard_events(self._keyboard, self._keyboard_event)

    def shutdown (self):
        # Release keyboard event.
        if self._input != None:
            self._input.unsubscribe_to_keyboard_events(self._keyboard, self._keyboard_subs)
        self._keyboard_subs = None
        self._keyboard = None
        self._input = None

# -----------------------------------------.
keyboardV = InputKeyboard()
keyboardV.startup()

# stop.
#keyboardV.shutdown()
```
ft-lab/omniverse_sample_scripts/Operation/Keyboard/InputKeyboard_ShowViewport.py
```python
from pxr import Usd, UsdGeom, UsdSkel, UsdShade, Sdf, Gf, Tf
import carb
import carb.input
import carb.events
import omni.appwindow
import omni.ui
import omni.kit.app
import omni.ext

# ------------------------------------------.
# Input with Keyboard.
# ------------------------------------------.
class InputKeyboard:
    _keyboard = None
    _input = None
    _keyboard_subs = None
    _update_subs = None
    _window = None
    _keyboard_input_value = None

    def __init__(self):
        pass

    # Keyboard event.
    def _keyboard_event (self, event):
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            self._keyboard_input_value = event.input
            print("KEY_PRESS : " + str(event.input))
        if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            print("KEY_RELEASE : " + str(event.input))
        return True

    # UI Update event.
    def _on_update (self, e: carb.events.IEvent):
        with self._window.frame:
            with omni.ui.VStack(height=0):
                with omni.ui.Placer(offset_x=20, offset_y=50):
                    # Set label.
                    f = omni.ui.Label("Input : " + str(self._keyboard_input_value))
                    f.visible = True
                    f.set_style({"color": 0xff00ffff, "font_size": 20})

    def startup (self):
        # Assign keyboard event.
        appwindow = omni.appwindow.get_default_app_window()
        self._keyboard = appwindow.get_keyboard()
        self._input = carb.input.acquire_input_interface()
        self._keyboard_subs = self._input.subscribe_to_keyboard_events(self._keyboard, self._keyboard_event)

        # Get main window viewport.
        self._window = omni.ui.Window('Viewport')

        # Assign update event.
        self._update_subs = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update, name="update")

    def shutdown (self):
        # Release update event.
        if self._update_subs != None:
            self._update_subs.unsubscribe()

        # Release keyboard event.
        if self._input != None:
            self._input.unsubscribe_to_keyboard_events(self._keyboard, self._keyboard_subs)
        self._keyboard_subs = None
        self._keyboard = None
        self._input = None
        self._update_subs = None

# -----------------------------------------.
keyboardV = InputKeyboard()
keyboardV.startup()

# stop.
#keyboardV.shutdown()
```
ft-lab/omniverse_sample_scripts/Math/CalcMatrix.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf

# Identity matrix.
m = Gf.Matrix4f()
print(m)

# Initialize with rotation and translate.
rotV = Gf.Rotation(Gf.Vec3d(1, 0, 0), 90.0)
transV = Gf.Vec3f(10, 5, 2.3)
m1 = Gf.Matrix4f(rotV, transV)
print(m1)

# Get data.
for i in range(4):
    print(f"{m1[i,0]} , {m1[i,1]} , {m1[i,2]} , {m1[i,3]}")

# Set identity.
m1.SetIdentity()
print(m1)

# Matrix multiplication.
rot1 = Gf.Rotation(Gf.Vec3d(1, 0, 0), 90.0)
m2 = Gf.Matrix4f(rot1, Gf.Vec3f())
rot2 = Gf.Rotation(Gf.Vec3d(0, 1, 0), 30.0)
m3 = Gf.Matrix4f(rot2, Gf.Vec3f())
m4 = m2 * m3    # Gf.Matrix4f * Gf.Matrix4f
print(m4)

rot3 = rot1 * rot2    # Gf.Rotation * Gf.Rotation
print(Gf.Matrix4f(rot3, Gf.Vec3f()))

# Inverse matrix.
m4Inv = m4.GetInverse()
print(m4Inv)

# vector3 * matrix4.
rotV = Gf.Rotation(Gf.Vec3d(1, 0, 0), 90.0)
transV = Gf.Vec3f(10, 5, 2.3)
m5 = Gf.Matrix4f(rotV, transV)

v1 = Gf.Vec3f(1.2, 1.0, 2.5)
v2 = m5.Transform(v1)
print(f"{v2}")

# vector3 * matrix4 (Ignore position).
v1 = Gf.Vec3f(1.2, 1.0, 2.5)
v2 = m5.TransformDir(v1)
print(f"{v2}")
```
ft-lab/omniverse_sample_scripts/Math/DecomposeTransform2.py
```python
from pxr import Usd, UsdGeom, UsdSkel, UsdPhysics, UsdShade, Sdf, Gf, Tf

# Dump matrix.
def DumpMatrix(m : Gf.Matrix4d):
    print("---------------------")
    for i in range(4):
        print(f"{m[i,0]} {m[i,1]} {m[i,2]} {m[i,3]}")
    print("")

# Create Matrix4.
translate = Gf.Vec3d(10.5, 2.8, 6.0)
rotation = Gf.Rotation(Gf.Vec3d(0, 1, 0), 20) * Gf.Rotation(Gf.Vec3d(0, 0, 1), 45)
scale = Gf.Vec3d(2.0, 0.5, 1.0)

mm = Gf.Matrix4d().SetScale(scale) * Gf.Matrix4d(rotation, Gf.Vec3d(0)) * Gf.Matrix4d().SetTranslate(translate)
DumpMatrix(mm)

# Decompose matrix.
mm2 = mm.RemoveScaleShear()
rTrans = mm2.ExtractTranslation()
rRot = mm2.ExtractRotation()

mm3 = mm * mm2.GetInverse()
rScale = Gf.Vec3d(mm3[0][0], mm3[1][1], mm3[2][2])

rAxisX = Gf.Vec3d(1, 0, 0)
rAxisY = Gf.Vec3d(0, 1, 0)
rAxisZ = Gf.Vec3d(0, 0, 1)
rRotE = rRot.Decompose(rAxisZ, rAxisY, rAxisX)
rRotE = Gf.Vec3d(rRotE[2], rRotE[1], rRotE[0])

print(f"Trans : {rTrans}")
print(f"Rot   : {rRotE}")
print(f"Scale : {rScale}")
```
ft-lab/omniverse_sample_scripts/Math/CalcDotCrossProduct.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf

v1 = Gf.Vec3f(1.0, 2.0, -5.0)
v2 = Gf.Vec3f(2.5, 14.0, 12.0)

# Dot : Inner product.
print(f"{v1} . {v2} = {v1 * v2}")
print(f"{v1} . {v2} = {Gf.Dot(v1, v2)}")

# Cross : Outer product.
vx = v1[1] * v2[2] - v1[2] * v2[1]
vy = v1[2] * v2[0] - v1[0] * v2[2]
vz = v1[0] * v2[1] - v1[1] * v2[0]
print(f"Cross product : ( {vx}, {vy}, {vz} )")

v1_2 = Gf.Vec4f(v1[0], v1[1], v1[2], 1.0)
v2_2 = Gf.Vec4f(v2[0], v2[1], v2[2], 1.0)
v3_2 = Gf.HomogeneousCross(v1_2, v2_2)
print(f"Cross product : {v3_2}")
```
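As a cross-check of the component-wise calculation above, Gf also exposes the 3D cross product as a single call; a minimal sketch with the same vectors:

```python
from pxr import Gf

v1 = Gf.Vec3f(1.0, 2.0, -5.0)
v2 = Gf.Vec3f(2.5, 14.0, 12.0)

# Gf.Cross returns the Vec3 cross product directly, matching
# the manual version and the Gf.HomogeneousCross result above.
print(f"Cross product : {Gf.Cross(v1, v2)}")
```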
ft-lab/omniverse_sample_scripts/Math/readme.md
# Math

Vector and matrix math.

USD's vector/matrix math is collected in "Gf", so use that.
Calculations can be done without going through numpy.

Vectors use the float type Gf.Vec3f ( https://graphics.pixar.com/usd/release/api/class_gf_vec3f.html ) and the double type Gf.Vec3d ( https://graphics.pixar.com/usd/release/api/class_gf_vec3d.html ).
Matrices use the float type Gf.Matrix4f ( https://graphics.pixar.com/usd/release/api/class_gf_matrix4f.html ) and the double type Gf.Matrix4d ( https://graphics.pixar.com/usd/release/api/class_gf_matrix4d.html ).

|File|Description|
|---|---|
|[CalcDotCrossProduct.py](./CalcDotCrossProduct.py)|Dot/cross product of vectors|
|[CalcMatrix.py](./CalcMatrix.py)|4x4 matrix math; matrix-vector multiplication|
|[CalcVector3.py](./CalcVector3.py)|Vector3 math.|
|[DecomposeTransform.py](./DecomposeTransform.py)|Decompose a matrix into translate, rotation, and scale. Uses UsdSkel.|
|[DecomposeTransform2.py](./DecomposeTransform2.py)|Decompose a matrix into translate, rotation, and scale|
|[GetVector3Length.py](./GetVector3Length.py)|Compute the length of a Vector3|
|[VectorToRotationAngle.py](./VectorToRotationAngle.py)|Convert a given vector to XYZ rotation angles (degrees)|
|[NormalizeVector3.py](./NormalizeVector3.py)|Normalize a Vector3|
|[QuatToRotation.py](./QuatToRotation.py)|Convert between quaternions and rotation angles (degrees)|
|[TransRotationFrom2Vec.py](./TransRotationFrom2Vec.py)|Compute the rotation that transforms vector A into vector B.|
|[ConvRGB2SRGB.py](./ConvRGB2SRGB.py)|Convert colors between sRGB and linear RGB.|
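Before the individual samples, a minimal sketch of the numpy-free Gf math described above (the values are arbitrary):

```python
from pxr import Gf

v = Gf.Vec3f(1.0, 2.0, -5.0)
print(v.GetLength())                  # Vector length.
print(Gf.Dot(v, Gf.Vec3f(1, 0, 0)))  # Dot product.

# Build a transform (rotate 90 degrees around X, then translate) and apply it.
m = Gf.Matrix4d(Gf.Rotation(Gf.Vec3d(1, 0, 0), 90.0), Gf.Vec3d(10, 0, 0))
print(m.Transform(Gf.Vec3d(v)))
```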
ft-lab/omniverse_sample_scripts/Math/TransRotationFrom2Vec.py
```python
from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf

# No need to normalize.
dirA = Gf.Vec3f(1.0, 0.0, 0.0).GetNormalized()
dirB = Gf.Vec3f(-0.2, 12.0, 15.0).GetNormalized()
print(f"dirA : {dirA}")
print(f"dirB : {dirB}")

# Calculate the rotation to transform dirA to dirB.
rot = Gf.Rotation().SetRotateInto(Gf.Vec3d(dirA), Gf.Vec3d(dirB))

# Check that rot is correct.
# v will have the same result as dirB.
m = Gf.Matrix4f(rot, Gf.Vec3f(0, 0, 0))
v = m.Transform(dirA)
print(f"dirA * m = {v}")
```
ft-lab/omniverse_sample_scripts/Math/NormalizeVector3.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf

v1 = Gf.Vec3f(1.0, 2.0, -5.0)
v1N = v1.GetNormalized()
print(f"{v1} ==> {v1N}")
```
ft-lab/omniverse_sample_scripts/Math/QuatToRotation.py
```python
from pxr import Usd, UsdGeom, UsdSkel, UsdPhysics, UsdShade, Sdf, Gf, Tf

rotation = Gf.Quatf(0.7071, 0.7071, 0, 0)
print(f"quat : {rotation}")

# Convert from quaternion to Euler rotation angles (degrees).
# Rotate XYZ.
rot = Gf.Rotation(rotation)
rV = rot.Decompose(Gf.Vec3d(0, 0, 1), Gf.Vec3d(0, 1, 0), Gf.Vec3d(1, 0, 0))
rV = Gf.Vec3d(rV[2], rV[1], rV[0])
print(f"Euler rotation angles : {rV}")

# RotationXYZ to quaternion.
rotX = Gf.Rotation(Gf.Vec3d(1, 0, 0), 90.0)
rotY = Gf.Rotation(Gf.Vec3d(0, 1, 0), 30.0)
rotZ = Gf.Rotation(Gf.Vec3d(0, 0, 1), -10.0)
rotXYZ = rotX * rotY * rotZ
q = rotXYZ.GetQuat()
print("quaternion : " + str(q))

# Quaternion to RotateXYZ.
rV = rotXYZ.Decompose(Gf.Vec3d(0, 0, 1), Gf.Vec3d(0, 1, 0), Gf.Vec3d(1, 0, 0))
rV = Gf.Vec3d(rV[2], rV[1], rV[0])
print("Euler rotation angles : " + str(rV))
```
ft-lab/omniverse_sample_scripts/Math/GetVector3Length.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf

v1 = Gf.Vec3f(1.0, 2.0, -5.0)
v2 = Gf.Vec3f(2.5, 14.0, 12.0)
print(f"{v1} : Length = {v1.GetLength()}")
print(f"{v2} : Length = {v2.GetLength()}")
```
ft-lab/omniverse_sample_scripts/Math/VectorToRotationAngle.py
```python
from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf

dirV = Gf.Vec3f(20.0, 5.0, -25.0)

yUp = Gf.Vec3f(0, 1, 0)
m = Gf.Matrix4f().SetLookAt(Gf.Vec3f(0, 0, 0), dirV.GetNormalized(), yUp)

# Rotate XYZ.
rV = m.ExtractRotation().Decompose(Gf.Vec3d(0, 0, 1), Gf.Vec3d(0, 1, 0), Gf.Vec3d(1, 0, 0))
rV = Gf.Vec3d(rV[2], rV[1], rV[0])
print(f"rotateXYZ(Euler) : {rV}")
```
ft-lab/omniverse_sample_scripts/Math/CalcVector3.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf

# float vector.
print("\nfloat vector ----\n")
v1 = Gf.Vec3f(1.0, 2.0, -5.0)
v2 = Gf.Vec3f(2.5, 14.0, 12.0)

v = v1 + v2
print(f"{v1} + {v2} = {v}")

v = v1 / 2
print(f"{v1} / 2 = {v}")
print(f"v.x = {v[0]} type = {type(v[0])}")
print(f"v.y = {v[1]} type = {type(v[1])}")
print(f"v.z = {v[2]} type = {type(v[2])}")

# double vector.
# It seems to be internally converted to Gf.Vec3f.
print("\ndouble vector ----\n")
v1d = Gf.Vec3d(1.0, 2.0, -5.0)
v2d = Gf.Vec3d(2.5, 14.0, 12.0)

v = v1d + v2d
print("v.x = " + str(v1d[0]) + " type = " + str(type(v1d[0])))

v = v1d / 2
print(f"{v1d} / 2 = {v}")
print(f"v.x = {v[0]} type = {type(v[0])}")
print(f"v.y = {v[1]} type = {type(v[1])}")
print(f"v.z = {v[2]} type = {type(v[2])}")
```
ft-lab/omniverse_sample_scripts/Math/ConvRGB2SRGB.py
```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import math

def rgb_to_srgb (v : float, quantum_max : float = 1.0):
    v = v / quantum_max
    if v <= 0.0031308:
        return (v * 12.92 * quantum_max)
    v = math.pow(v, 1.0 / 2.4) * 1.055 - 0.055
    return (v * quantum_max)

def srgb_to_rgb (v : float, quantum_max : float = 1.0):
    v = v / quantum_max
    if v <= 0.04045:
        return (v / 12.92 * quantum_max)
    v = math.pow((v + 0.055) / 1.055, 2.4)
    return (v * quantum_max)

# Conv RGB to sRGB.
def conv_RGB_to_sRGB (col : Gf.Vec3f):
    retCol = Gf.Vec3f(col)
    if retCol[0] > 0.0 and retCol[0] < 1.0:
        retCol[0] = rgb_to_srgb(retCol[0])
    if retCol[1] > 0.0 and retCol[1] < 1.0:
        retCol[1] = rgb_to_srgb(retCol[1])
    if retCol[2] > 0.0 and retCol[2] < 1.0:
        retCol[2] = rgb_to_srgb(retCol[2])
    return retCol

# Conv sRGB to RGB (Linear).
def conv_sRGB_to_RGB (col : Gf.Vec3f):
    retCol = Gf.Vec3f(col)
    if retCol[0] > 0.0 and retCol[0] < 1.0:
        retCol[0] = srgb_to_rgb(retCol[0])
    if retCol[1] > 0.0 and retCol[1] < 1.0:
        retCol[1] = srgb_to_rgb(retCol[1])
    if retCol[2] > 0.0 and retCol[2] < 1.0:
        retCol[2] = srgb_to_rgb(retCol[2])
    return retCol

# ---------------------------------------.
# Original color (sRGB).
col = Gf.Vec3f(0.5, 0.4, 0.7)

# sRGB to RGB (sRGB to linear).
col_linear = conv_sRGB_to_RGB(col)

# RGB to sRGB (linear to sRGB).
col2 = conv_RGB_to_sRGB(col_linear)

print(f"col        : {col}")
print(f"col_linear : {col_linear}")
print(f"col2       : {col2}")
```
ft-lab/omniverse_sample_scripts/Math/DecomposeTransform.py
```python
from pxr import Usd, UsdGeom, UsdSkel, UsdPhysics, UsdShade, Sdf, Gf, Tf

translate = Gf.Vec3f(10.5, 2.8, 6.0)
rotation = Gf.Quatf(0.7071, 0.7071, 0, 0)    # Gf.Rotation(Gf.Vec3d(1, 0, 0), 90)
scale = Gf.Vec3f(2.0, 0.5, 1.0)
print(f"translate : {translate}")
print(f"rotation : {rotation}")
print(f"scale : {scale}")

# Make transform.
transM = UsdSkel.MakeTransform(translate, rotation, Gf.Vec3h(scale))
print(f"transform : {transM}")

# Decompose transform.
translate2, rotation2, scale2 = UsdSkel.DecomposeTransform(transM)
print(f"==> translate : {translate2}")
print(f"==> rotation : {rotation2}")
print(f"==> scale : {scale2}")
```
ft-lab/omniverse_sample_scripts/PLATEAU/divide_GeoTiff_images.py
```python
# ---------------------------------------------------------------------.
# Split PLATEAU GeoTIFF images into 10x10 tiles and save them as JPEG.
# ---------------------------------------------------------------------.
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.kit.commands
import glob
import os
from PIL import Image

# Allows handling of large size images.
Image.MAX_IMAGE_PIXELS = 1000000000

# --------------------------------------.
# Input Parameters.
# --------------------------------------.
# Source path (Root path with PLATEAU GeoTIFF).
in_plateau_obj_path = "K:\\Modeling\\PLATEAU\\Tokyo_23ku\\13100_tokyo23-ku_2020_ortho_2_op"

# Folder to save the split images.
in_save_folder_path = "K:\\Modeling\\PLATEAU\\Tokyo_23ku\\13100_tokyo23-ku_2020_ortho_2_op\\divide_images"

# --------------------------------------.
# Load image and divide (10 x 10).
# --------------------------------------.
def load_divideImage (filePath : str, savePath : str):
    fName = os.path.basename(filePath)

    # Remove extension.
    fName2 = os.path.splitext(fName)[0]

    try:
        srcImage = Image.open(filePath)

        # Get image size.
        wid = srcImage.size[0]
        hei = srcImage.size[1]

        # 10x10 division.
        wid_10 = (float)(wid) / 10.0
        hei_10 = (float)(hei) / 10.0

        index = 0
        fy1 = hei_10 * 9.0
        for y in range(10):
            fx1 = 0.0
            for x in range(10):
                img = srcImage.crop(((int)(fx1 + 0.5), (int)(fy1 + 0.5), (int)(fx1 + wid_10 + 0.5), (int)(fy1 + hei_10 + 0.5)))

                # Save file name ('533925' + '02' ==> '53392502.jpg').
                dstName = fName2 + str(index).zfill(2) + ".jpg"
                dstPath = savePath + "/" + dstName
                img.save(dstPath)

                index += 1
                fx1 += wid_10
            fy1 -= hei_10

    except Exception as e:
        pass

# --------------------------------------.
# Divide GeoTiff images.
# --------------------------------------.
def divide_geoTiff (savePath : str):
    if os.path.exists(in_plateau_obj_path) == False:
        return

    # Create a save folder.
    if os.path.exists(in_save_folder_path) == False:
        os.makedirs(in_save_folder_path)

    # Divide and save images.
    for path in glob.glob(in_plateau_obj_path + "/images/*.tif"):
        load_divideImage(path, savePath)
        fName = os.path.basename(path)
        print("Divide [" + fName + "]")

# --------------------------------------.
# --------------------------------------.
divide_geoTiff(in_save_folder_path)
print("Save success !!")
```
ft-lab/omniverse_sample_scripts/PLATEAU/calcDistanceWithLatLong.py
```python
# ------------------------------------------------------------------.
# Compute the distance between two points given by latitude/longitude.
# Reference : https://vldb.gsi.go.jp/sokuchi/surveycalc/surveycalc/bl2stf.html
# ------------------------------------------------------------------.
import math

# --------------------------------------.
# Input Parameters.
# --------------------------------------.
# Latitude and longitude of the starting point.
in_lat1 = 35.680908
in_longi1 = 139.767348

# Latitude and longitude of the end point.
in_lat2 = 35.666436
in_longi2 = 139.758191

# -----------------------------------------.
# Preprocessing.
# -----------------------------------------.
# Equatorial radius (km).
R = 6378.137

# Polar radius (km).
R2 = 6356.752

# Flattening (ref : https://ja.wikipedia.org/wiki/%E5%9C%B0%E7%90%83).
# This corresponds to "f = 1.0 - (R2 / R)";
# "f = 1.0 / 298.257222101" is more accurate.
f = 1.0 / 298.257222101

# Convert degrees to radians.
lat1R = in_lat1 * math.pi / 180.0
longi1R = in_longi1 * math.pi / 180.0
lat2R = in_lat2 * math.pi / 180.0
longi2R = in_longi2 * math.pi / 180.0

l = longi2R - longi1R
l2 = l
if l > math.pi:
    l2 = l - math.pi * 2.0
elif l < -math.pi:
    l2 = l + math.pi * 2.0

L = abs(l2)
L2 = math.pi - L

delta = 0.0
if l2 >= 0.0:
    delta = lat2R - lat1R
else:
    delta = lat1R - lat2R

sigma = lat1R + lat2R

if l2 >= 0.0:
    u1 = math.atan((1.0 - f) * math.tan(lat1R))
else:
    u1 = math.atan((1.0 - f) * math.tan(lat2R))
if l2 >= 0.0:
    u2 = math.atan((1.0 - f) * math.tan(lat2R))
else:
    u2 = math.atan((1.0 - f) * math.tan(lat1R))

sigma2 = u1 + u2
delta2 = u2 - u1

xi = math.cos(sigma2 / 2.0)
xi2 = math.sin(sigma2 / 2.0)
eta = math.sin(delta2 / 2.0)
eta2 = math.cos(delta2 / 2.0)

x = math.sin(u1) * math.sin(u2)
y = math.cos(u1) * math.cos(u2)
c = y * math.cos(L) + x
ep = f * (2.0 - f) / math.pow(1.0 - f, 2.0)

# The final distance is stored here (km).
distanceV = 0.0

# -----------------------------------------.
# Zone determination and iterative calculation of theta.
# -----------------------------------------.
t0 = 0.0
if c >= 0.0:
    # Zone(1).
    t0 = L * (1.0 + f * y)
elif c < 0.0 and c >= -math.cos((3.0 * math.pi / 180.0) * math.cos(u1)):
    # Zone(2).
    t0 = L2
else:
    # Zone(3).
    rr = 1.0 - (1.0/4.0) * f * (1.0 + f) * math.pow(math.sin(u1), 2.0)
    rr += (3.0/16.0) * f * f * math.pow(math.sin(u1), 4.0)
    rr = f * math.pi * math.pow(math.cos(u1), 2.0) * rr
    d1 = L2 * math.cos(u1) - rr
    d2 = abs(sigma2) + rr
    q = L2 / (f * math.pi)
    f1 = (1.0/4.0) * f * (1.0 + 0.5 * f)
    gam0 = q + f1 * q - f1 * math.pow(q, 3.0)

    if sigma != 0.0:
        A0 = math.atan(d1 / d2)
        B0 = math.asin(rr / math.sqrt(d1 * d1 + d2 * d2))
        v = A0 + B0
        j = gam0 / math.cos(u1)
        k = (1.0 + f1) * abs(sigma2) * (1.0 - f * y) / (f * math.pi * y)
        j1 = j / (1.0 + k * (1.0 / math.cos(v)))
        v2 = math.asin(j1)
        v3 = math.asin((math.cos(u1) / math.cos(u2)) * j1)
        t0 = math.tan((v2 + v3) / 2.0) * math.sin(abs(sigma2) / 2.0)
        t0 /= math.cos(delta2 / 2.0)
        t0 = 2.0 * math.atan(t0)
    else:
        if d1 > 0.0:
            t0 = L2
        elif d1 == 0.0:
            gam2 = math.pow(math.sin(u1), 2.0)
            n0 = math.sqrt(1.0 + ep * gam2) + 1.0
            n0 = (ep * gam2) / math.pow(n0, 2.0)
            A = (1.0 + n0) * (1.0 + (5.0/4.0) * n0 * n0)
            distanceV = (1.0 - f) * R * A * math.pi
        else:
            gV = gam0
            gam2 = 0.0
            while True:
                gam2 = 1.0 - gV * gV
                D = (1.0/4.0) * f * (1.0 + f) - (3.0/16.0) * f * f * gam2
                gV2 = q / (1.0 - D * gam2)
                if abs(gV2 - gV) < (1e-15):
                    break
                # Update the iterated value.
                gV = gV2
            m = 1.0 - q * (1.0 / math.cos(u1))
            n = (D * gam2) / (1.0 - D * gam2)
            w = m - n + m * n
            n0 = math.sqrt(1.0 + ep * gam2) + 1.0
            n0 = (ep * gam2) / math.pow(n0, 2.0)
            A = (1.0 + n0) * (1.0 + (5.0/4.0) * n0 * n0)
            distanceV = (1.0 - f) * R * A * math.pi

if distanceV == 0.0:
    tV = t0
    while True:
        if c >= 0.0:
            g = math.pow(eta, 2.0) * math.pow(math.cos(tV / 2.0), 2.0)
            g += math.pow(xi, 2.0) * math.pow(math.sin(tV / 2.0), 2.0)
            g = math.sqrt(g)
            h = math.pow(eta2, 2.0) * math.pow(math.cos(tV / 2.0), 2.0)
            h += math.pow(xi2, 2.0) * math.pow(math.sin(tV / 2.0), 2.0)
            h = math.sqrt(h)
        else:
            g = math.pow(eta, 2.0) * math.pow(math.sin(tV / 2.0), 2.0)
            g += math.pow(xi, 2.0) * math.pow(math.cos(tV / 2.0), 2.0)
            g = math.sqrt(g)
            h = math.pow(eta2, 2.0) * math.pow(math.sin(tV / 2.0), 2.0)
            h += math.pow(xi2, 2.0) * math.pow(math.cos(tV / 2.0), 2.0)
            h = math.sqrt(h)

        sig = 2.0 * math.atan(g / h)
        J = 2.0 * g * h
        K = h * h - g * g
        gam = y * math.sin(tV) / J
        gam2 = 1.0 - gam * gam
        v = gam2 * K - 2.0 * x
        v2 = v + x
        D = (1.0 / 4.0) * f * (1.0 + f) - (3.0 / 16.0) * f * f * gam2
        E = (1.0 - D * gam2) * f * gam * (sig + D * J * (v + D * K * (2.0 * v * v - gam2 * gam2)))
        if c >= 0.0:
            F = tV - L - E
        else:
            F = tV - L2 + E
        G = f * gam * gam * (1.0 - 2.0 * D * gam2)
        G += f * v2 * (sig / J) * (1.0 - D * gam2 + 0.5 * f * gam * gam)
        G += (1.0 / 4.0) * f * f * v * v2
        tV = tV - F / (1.0 - G)

        # -----------------------------------------.
        # Calculate the geodesic length.
        # -----------------------------------------.
        if abs(F) < (1e-15):
            n0 = math.sqrt(1.0 + ep * gam2) + 1.0
            n0 = (ep * gam2) / math.pow(n0, 2.0)
            A = (1.0 + n0) * (1.0 + (5.0/4.0) * n0 * n0)
            B = ep * (1.0 - 3.0 * n0 * n0 / 8.0)
            B /= math.pow(math.sqrt(1.0 + ep * gam2) + 1.0, 2.0)
            s1 = (1.0/6.0) * B * v * (1.0 - 4.0 * K * K) * (3.0 * gam2 * gam2 - 4.0 * v * v)
            s2 = K * (gam2 * gam2 - 2.0 * v * v) - s1
            s3 = sig - B * J * (v - (1.0/4.0) * B * s2)
            distanceV = (1.0 - f) * R * A * s3
            break

print("Distance : " + str(distanceV * 1000.0) + " m ( " + str(distanceV) + " km )")
```
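As a rough cross-check of the geodesic result above, a haversine sketch that assumes a perfectly spherical Earth (the mean radius 6371.0087 km is an assumption of this sketch; expect a small deviation from the ellipsoidal value):

```python
import math

def haversine_km (lat1 : float, longi1 : float, lat2 : float, longi2 : float, radius_km : float = 6371.0087):
    # Convert degrees to radians.
    p1 = math.radians(lat1)
    p2 = math.radians(lat2)
    dp = p2 - p1
    dl = math.radians(longi2 - longi1)

    # Haversine formula on a sphere.
    a = math.sin(dp / 2.0) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2.0) ** 2
    return 2.0 * radius_km * math.asin(math.sqrt(a))

# Same start/end points as the script above.
print(haversine_km(35.680908, 139.767348, 35.666436, 139.758191), "km")
```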
ft-lab/omniverse_sample_scripts/PLATEAU/import_PLATEAU_tokyo23ku_obj.py
```python
# ---------------------------------------------------------------------.
# Import PLATEAU obj for Tokyo23-ku in LOD1.
# Specify the path where the local "13100_tokyo23-ku_2020_obj_3_op.zip" was extracted in in_plateau_obj_path.
#
# It also assigns textures created from GeoTIFF to dem.
# Please use "divide_GeoTiff_images.py" to convert GeoTIFF into jpeg images by dividing them into 10x10 segments in advance.
# ---------------------------------------------------------------------.
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import omni.usd
import omni.client
import omni.kit.app
import glob
import carb
import os
import asyncio
import omni.kit.asset_converter

# Get stage.
stage = omni.usd.get_context().get_stage()

# Get default prim.
defaultPrim = stage.GetDefaultPrim()
defaultPrimPath = defaultPrim.GetPath().pathString
if defaultPrimPath == "":
    defaultPrimPath = "/World"

# --------------------------------------.
# Input Parameters.
# --------------------------------------.
# Source path (Root path with PLATEAU obj).
in_plateau_obj_path = "K:\\Modeling\\PLATEAU\\Tokyo_23ku\\13100_tokyo23-ku_2020_obj_3_op"

# dem textures path.
# See : divide_GeoTiff_images.py
in_dem_textures_path = "K:\\Modeling\\PLATEAU\\Tokyo_23ku\\13100_tokyo23-ku_2020_ortho_2_op\\divide_images"

# output folder.
# If specified, all usd and texture files are output to the specified folder.
in_output_folder = "omniverse://localhost/PLATEAU/Tokyo_23ku"

# Convert obj to USD (Skipped if already converted to USD).
in_convert_to_usd = True

# Folder to store output USD.
# If not specified, in_plateau_obj_path + "\\output_usd"
in_output_usd_folder = ""

# Load LOD2.
in_load_lod2 = False

# Load LOD1 & LOD2.
in_load_lod1_lod2 = False

# Assign texture to dem.
in_assign_dem_texture = True

# Load bridge.
in_load_bridge = False

# Load tran.
in_load_tran = False

# Load map area.
mapIndexList = [533925, 533926, 533934, 533935, 533936, 533937, 533944, 533945, 533946, 533947, 533954, 533955, 533956, 533957]

# --------------------------------------.
# Path of PLATEAU data.
# --------------------------------------.
# topographic.
dem_path = in_plateau_obj_path + "/dem"

# building.
buliding_lod1_path = in_plateau_obj_path + "/bldg/lod1"
buliding_lod2_path = in_plateau_obj_path + "/bldg/lod2"

# bridge.
bridge_path = in_plateau_obj_path + "/brid"

# tran.
tran_path = in_plateau_obj_path + "/tran"

# ----------------------------------------------------.
# Pass the process to Omniverse.
# ----------------------------------------------------.
async def _omniverse_sync_wait():
    await omni.kit.app.get_app().next_update_async()

# --------------------------------------.
# Exist path (file/folder).
# Support on Nucleus.
# --------------------------------------.
async def ocl_existPath_async (path : str):
    (result, entry) = await omni.client.stat_async(path)
    if result == omni.client.Result.ERROR_NOT_FOUND:
        return False
    return True

# ----------------------------------------------------.
# Convert file name to a string that can be used in USD Prim name.
# @param[in] fName   file name.
# @return USD Prim name.
# ----------------------------------------------------.
def convFileNameToUSDPrimName (fName : str):
    # Remove extension.
    fName2 = os.path.splitext(fName)[0]

    retName = ""
    for i in range(len(fName2)):
        c = fName2[i]
        if retName == "":
            if (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or c == '_':
                pass
            else:
                retName += '_'
        if (c >= '0' and c <= '9') or (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or c == '_':
            retName += c
        elif c == ' ':
            retName += '_'
        else:
            retName += str(c.encode('utf-8').hex())
    return retName

# --------------------------------------.
# Set rotate.
# @param[in] prim           target prim.
# @param[in] (rx, ry, rz)   Rotate (angle).
# --------------------------------------.
def setRotate (prim : Usd.Prim, rx : float, ry : float, rz : float):
    if prim.IsValid():
        tV = prim.GetAttribute("xformOp:rotateXYZ")
        if tV.IsValid():
            prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Float3, False).Set(Gf.Vec3f(rx, ry, rz))

        tV = prim.GetAttribute("xformOp:orient")
        if tV.IsValid():
            rotX = Gf.Rotation(Gf.Vec3d(1, 0, 0), rx)
            rotY = Gf.Rotation(Gf.Vec3d(0, 1, 0), ry)
            rotZ = Gf.Rotation(Gf.Vec3d(0, 0, 1), rz)
            rotXYZ = rotZ * rotY * rotX
            if type(tV.Get()) == Gf.Quatd:
                tV.Set(rotXYZ.GetQuat())
            elif type(tV.Get()) == Gf.Quatf:
                tV.Set(Gf.Quatf(rotXYZ.GetQuat()))

# --------------------------------------.
# Set scale.
# @param[in] prim           target prim.
# @param[in] (sx, sy, sz)   Scale.
# --------------------------------------.
def setScale (prim : Usd.Prim, sx : float, sy : float, sz : float):
    if prim.IsValid():
        tV = prim.GetAttribute("xformOp:scale")
        if tV.IsValid():
            prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Float3, False).Set(Gf.Vec3f(sx, sy, sz))

# --------------------------------------.
# Set translate.
# @param[in] prim           target prim.
# @param[in] (tx, ty, tz)   translate.
# --------------------------------------.
def setTranslate (prim : Usd.Prim, tx : float, ty : float, tz : float):
    if prim.IsValid():
        tV = prim.GetAttribute("xformOp:translate")
        if tV.IsValid():
            prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Float3, False).Set(Gf.Vec3f(tx, ty, tz))

# --------------------------------------.
# Create new Material (OmniPBR).
# @param[in] materialPrimPath   Prim path of Material
# @param[in] targetPrimPath     Prim path to bind Material.
# @param[in] textureFilePath    File path of Diffuse texture.
# @param[in] diffuseColor       Diffuse Color.
# --------------------------------------.
def createMaterialOmniPBR (materialPrimPath : str, targetPrimPath : str = "", textureFilePath : str = "", diffuseColor : Gf.Vec3f = Gf.Vec3f(0.2, 0.2, 0.2)):
    material = UsdShade.Material.Define(stage, materialPrimPath)

    shaderPath = materialPrimPath + '/Shader'
    shader = UsdShade.Shader.Define(stage, shaderPath)
    shader.SetSourceAsset('OmniPBR.mdl', 'mdl')
    shader.GetPrim().CreateAttribute('info:mdl:sourceAsset:subIdentifier', Sdf.ValueTypeNames.Token, False, Sdf.VariabilityUniform).Set('OmniPBR')

    # Set Diffuse color.
    shader.CreateInput('diffuse_color_constant', Sdf.ValueTypeNames.Color3f).Set((diffuseColor[0], diffuseColor[1], diffuseColor[2]))

    # Set Metallic.
    shader.CreateInput('metallic_constant', Sdf.ValueTypeNames.Float).Set(0.0)

    # Set Roughness.
    shader.CreateInput('reflection_roughness_constant', Sdf.ValueTypeNames.Float).Set(0.8)

    # Set Specular.
    shader.CreateInput('specular_level', Sdf.ValueTypeNames.Float).Set(0.0)

    # Set texture.
    if textureFilePath != "":
        diffTexIn = shader.CreateInput('diffuse_texture', Sdf.ValueTypeNames.Asset)
        diffTexIn.Set(textureFilePath)
        diffTexIn.GetAttr().SetColorSpace('sRGB')

    # Connecting Material to Shader.
    mdlOutput = material.CreateSurfaceOutput('mdl')
    mdlOutput.ConnectToSource(shader, 'out')

    # Bind material.
    if targetPrimPath != "":
        tPrim = stage.GetPrimAtPath(targetPrimPath)
        if tPrim.IsValid():
            UsdShade.MaterialBindingAPI(tPrim).Bind(material)

    return materialPrimPath

# --------------------------------------.
# Create Xform (e.g. map_533946).
# --------------------------------------.
def createXfrom_mapIndex (mapIndex : int, materialPath : str):
    mapPrimPath = defaultPrimPath + "/map_" + str(mapIndex)
    prim = stage.GetPrimAtPath(mapPrimPath)
    if prim.IsValid() == False:
        UsdGeom.Xform.Define(stage, mapPrimPath)
        prim = stage.GetPrimAtPath(mapPrimPath)

        # Bind material.
        if materialPath != "":
            matPrim = stage.GetPrimAtPath(materialPath)
            if matPrim.IsValid():
                material = UsdShade.Material(matPrim)
                UsdShade.MaterialBindingAPI(prim).Bind(material)

    return mapPrimPath

# --------------------------------------.
# load dem.
# @param[in] _mapIndex      map index.
# @param[in] _materialPath  material prim path.
# --------------------------------------.
async def loadDem (_mapIndex : int, _materialPath : str):
    if (await ocl_existPath_async(dem_path)) == False:
        return

    mapPrimPath = createXfrom_mapIndex(_mapIndex, _materialPath)

    demPrimPath = mapPrimPath + "/dem"
    UsdGeom.Xform.Define(stage, demPrimPath)

    # Scope specifying the Material.
    materialPrimPath = ""
    if in_assign_dem_texture:
        materialPrimPath = defaultPrimPath + "/Looks/map_" + str(_mapIndex)
        prim = stage.GetPrimAtPath(materialPrimPath)
        if prim.IsValid() == False:
            UsdGeom.Scope.Define(stage, materialPrimPath)

    # Must be pre-converted if using USD.
    src_dem_path = ""
    if in_convert_to_usd:
        path = in_output_usd_folder
        if path == "":
            path = in_plateau_obj_path + "/output_usd"
        if (await ocl_existPath_async(path)):
            path += "/dem/" + str(_mapIndex) + "*"
            src_dem_path = path + "/" + str(_mapIndex) + "*.usd"
    if src_dem_path == "":
        src_dem_path = dem_path + "/" + str(_mapIndex) + "*.obj"

    for path in glob.glob(src_dem_path, recursive=True):
        fName = os.path.basename(path)

        # Get map index from file name.
        mapIndex = 0
        p1 = fName.find('_')
        if p1 > 0:
            mapIndex = int(fName[0:p1])

        # When usd file is output on Nucleus, check the corresponding file.
        if in_output_folder != "":
            fName2 = str(mapIndex) + "_dem.usd"
            newPath = in_output_folder + "/data"
            newPath += "/dem/" + str(mapIndex) + "/" + fName2
            if (await ocl_existPath_async(newPath)):
                path = newPath

        # Convert Prim name.
        primName = convFileNameToUSDPrimName(fName)

        # Create Xform.
        newPath = demPrimPath + "/" + primName
        UsdGeom.Xform.Define(stage, newPath)
        prim = stage.GetPrimAtPath(newPath)

        # Remove references.
        prim.GetReferences().ClearReferences()

        # Add a reference.
        prim.GetReferences().AddReference(path)

        setRotate(prim, -90.0, 0.0, 0.0)
        setScale(prim, 100.0, 100.0, 100.0)

        # Assign texture.
        if in_assign_dem_texture and mapIndex > 0:
            mapFilePath = in_dem_textures_path + "/" + str(mapIndex) + ".jpg"
            if in_output_folder != "":
                mapFilePath2 = in_output_folder + "/data/geotiff_images"
                mapFilePath2 += "/" + str(mapIndex) + ".jpg"
                if (await ocl_existPath_async(mapFilePath2)):
                    mapFilePath = mapFilePath2
            if (await ocl_existPath_async(mapFilePath)):
                # Create material.
                materialName = "mat_dem_" + str(mapIndex)
                matPath = materialPrimPath + "/" + materialName
                createMaterialOmniPBR(matPath, newPath, mapFilePath)

    # Pass the process to Omniverse.
    asyncio.ensure_future(_omniverse_sync_wait())

# --------------------------------------.
# load building.
# @param[in] _mapIndex      map index.
# @param[in] _useLOD2       If LOD2 is available, use LOD2.
# @param[in] _materialPath  material prim path.
# --------------------------------------.
async def loadBuilding (_mapIndex : int, _useLOD2 : bool, _materialPath : str):
    if (await ocl_existPath_async(buliding_lod1_path)) == False:
        return

    mapPrimPath = createXfrom_mapIndex(_mapIndex, _materialPath)

    buildingPath = mapPrimPath + "/building"
    if _useLOD2:
        buildingPath += "_lod2"
    else:
        buildingPath += "_lod1"
    UsdGeom.Xform.Define(stage, buildingPath)

    # Must be pre-converted if using USD.
    src_bldg_path = ""
    if in_convert_to_usd:
        path = in_output_usd_folder
        if path == "":
            path = in_plateau_obj_path + "/output_usd"
        if (await ocl_existPath_async(path)):
            path += "/building/lod2/" + str(_mapIndex) + "*"
            src_bldg_path = path + "/" + str(_mapIndex) + "*.usd"
    if src_bldg_path == "":
        src_bldg_path = buliding_lod2_path + "/**/" + str(_mapIndex) + "*.obj"

    # If LOD2 exists.
    useLOD2Dict = dict()
    if _useLOD2 and (await ocl_existPath_async(buliding_lod2_path)):
        # Search subdirectories.
        for path in glob.glob(src_bldg_path, recursive=True):
            fName = os.path.basename(path)    # e.g. 53392641_bldg_6677.obj
            p1 = fName.find('_')
            if p1 > 0:
                s = fName[0:p1]

                # When usd file is output on Nucleus, check the corresponding file.
                if in_output_folder != "":
                    mIndex = int(s)
                    fName2 = str(mIndex) + "_bldg.usd"
                    newPath = in_output_folder + "/data"
                    newPath += "/building/lod2/" + str(mIndex) + "/" + fName2
                    if (await ocl_existPath_async(newPath)):
                        path = newPath

                useLOD2Dict[int(s)] = path

    # Must be pre-converted if using USD.
    src_bldg_path = ""
    if in_convert_to_usd:
        path = in_output_usd_folder
        if path == "":
            path = in_plateau_obj_path + "/output_usd"
        if (await ocl_existPath_async(path)):
            path += "/building/lod1/" + str(_mapIndex) + "*"
            src_bldg_path = path + "/" + str(_mapIndex) + "*.usd"
    if src_bldg_path == "":
        src_bldg_path = buliding_lod1_path + "/**/" + str(_mapIndex) + "*.obj"

    # Search subdirectories.
    for path in glob.glob(src_bldg_path, recursive=True):
        fName = os.path.basename(path)

        chkF = False
        p1 = fName.find('_')
        if p1 > 0:
            s = fName[0:p1]
            mIndex = int(s)

            # When usd file is output on Nucleus, check the corresponding file.
            if (not in_load_lod1_lod2) or (in_load_lod1_lod2 and not _useLOD2):
                if in_output_folder != "":
                    fName2 = str(mIndex) + "_bldg.usd"
                    newPath = in_output_folder + "/data"
                    newPath += "/building/lod1/" + str(mIndex) + "/" + fName2
                    if (await ocl_existPath_async(newPath)):
                        path = newPath
                        chkF = True

            # Refer to LOD2 path.
            if mIndex in useLOD2Dict:
                path = useLOD2Dict[mIndex]
                fName = os.path.basename(path)
                chkF = True

        if not chkF:
            continue

        # Conv Prim name.
        primName = convFileNameToUSDPrimName(fName)

        # Create Xform.
        newPath = buildingPath + "/" + primName
        UsdGeom.Xform.Define(stage, newPath)
        prim = stage.GetPrimAtPath(newPath)

        # Remove references.
        prim.GetReferences().ClearReferences()

        # Add a reference.
        prim.GetReferences().AddReference(path)

        setRotate(prim, -90.0, 0.0, 0.0)
        setScale(prim, 100.0, 100.0, 100.0)

    # Pass the process to Omniverse.
    asyncio.ensure_future(_omniverse_sync_wait())

# --------------------------------------.
# load bridge.
# @param[in] _mapIndex      map index.
# @param[in] _materialPath  material prim path.
# --------------------------------------.
async def loadBridge (_mapIndex : int, _materialPath : str):
    if (await ocl_existPath_async(bridge_path)) == False:
        return

    mapPrimPath = createXfrom_mapIndex(_mapIndex, _materialPath)

    bridgePath = mapPrimPath + "/bridge"
    UsdGeom.Xform.Define(stage, bridgePath)

    # Must be pre-converted if using USD.
    src_brid_path = ""
    if in_convert_to_usd:
        path = in_output_usd_folder
        if path == "":
            path = in_plateau_obj_path + "/output_usd"
        if (await ocl_existPath_async(path)):
            path += "/bridge/" + str(_mapIndex) + "*"
            src_brid_path = path + "/" + str(_mapIndex) + "*.usd"
    if src_brid_path == "":
        src_brid_path = bridge_path + "/**/" + str(_mapIndex) + "*.obj"

    # Search subdirectories.
    for path in glob.glob(src_brid_path, recursive=True):
        fName = os.path.basename(path)

        mIndex = 0
        p1 = fName.find('_')
        if p1 > 0:
            s = fName[0:p1]
            mIndex = int(s)
        if mIndex == 0:
            continue

        # Conv Prim name.
        primName = convFileNameToUSDPrimName(fName)

        # When usd file is output on Nucleus, check the corresponding file.
        if in_output_folder != "":
            fName2 = str(mIndex) + "_brid.usd"
            newPath = in_output_folder + "/data"
            newPath += "/bridge/" + str(mIndex) + "/" + fName2
            if (await ocl_existPath_async(newPath)):
                path = newPath

        # Create Xform.
        newPath = bridgePath + "/" + primName
        UsdGeom.Xform.Define(stage, newPath)
        prim = stage.GetPrimAtPath(newPath)

        # Remove references.
        prim.GetReferences().ClearReferences()

        # Add a reference.
        prim.GetReferences().AddReference(path)

        setRotate(prim, -90.0, 0.0, 0.0)
        setScale(prim, 100.0, 100.0, 100.0)

    # Pass the process to Omniverse.
    asyncio.ensure_future(_omniverse_sync_wait())

# --------------------------------------.
# load tran.
# @param[in] _mapIndex      map index.
# @param[in] _materialPath  material prim path.
# --------------------------------------.
async def loadTran (_mapIndex : int, _materialPath : str):
    if (await ocl_existPath_async(tran_path)) == False:
        return

    mapPrimPath = createXfrom_mapIndex(_mapIndex, _materialPath)

    tranPath = mapPrimPath + "/tran"
    UsdGeom.Xform.Define(stage, tranPath)

    # Must be pre-converted if using USD.
    src_tran_path = ""
    if in_convert_to_usd:
        path = in_output_usd_folder
        if path == "":
            path = in_plateau_obj_path + "/output_usd"
        if (await ocl_existPath_async(path)):
            path += "/tran/" + str(_mapIndex) + "*"
            src_tran_path = path + "/" + str(_mapIndex) + "*.usd"
    if src_tran_path == "":
        src_tran_path = tran_path + "/**/" + str(_mapIndex) + "*.obj"

    # Search subdirectories.
    for path in glob.glob(src_tran_path, recursive=True):
        fName = os.path.basename(path)

        mIndex = 0
        p1 = fName.find('_')
        if p1 > 0:
            s = fName[0:p1]
            mIndex = int(s)
        if mIndex == 0:
            continue

        # Conv Prim name.
        primName = convFileNameToUSDPrimName(fName)

        # When usd file is output on Nucleus, check the corresponding file.
        if in_output_folder != "":
            fName2 = str(mIndex) + "_tran.usd"
            newPath = in_output_folder + "/data"
            newPath += "/tran/" + str(mIndex) + "/" + fName2
            if (await ocl_existPath_async(newPath)):
                path = newPath

        # Create Xform.
        newPath = tranPath + "/" + primName
        UsdGeom.Xform.Define(stage, newPath)
        prim = stage.GetPrimAtPath(newPath)

        # Remove references.
        prim.GetReferences().ClearReferences()

        # Add a reference.
        prim.GetReferences().AddReference(path)

        setRotate(prim, -90.0, 0.0, 0.0)
        setScale(prim, 100.0, 100.0, 100.0)
        heightPos = 5.0
        setTranslate(prim, 0.0, heightPos, 0.0)

        # Create/Set material.
        matPath = "/World/Looks/mat_trans"
        primM = stage.GetPrimAtPath(matPath)
        if not primM.IsValid():
            col = Gf.Vec3f(0, 1, 0)
            createMaterialOmniPBR(matPath, "", "", col)
            primM = stage.GetPrimAtPath(matPath)
        material = UsdShade.Material(primM)
        UsdShade.MaterialBindingAPI(prim).Bind(material)

    # Pass the process to Omniverse.
    asyncio.ensure_future(_omniverse_sync_wait())

# --------------------------------------.
# Convert obj files to USD.
# --------------------------------------.
# Get target path for converting dem obj to usd.
async def get_ObjToUsdDem (_mapIndex : int, _dstPath : str):
    if (await ocl_existPath_async(dem_path)) == False:
        return [], []

    dstPath = _dstPath + "/dem"
    if (await ocl_existPath_async(dstPath)) == False:
        result = omni.client.create_folder(dstPath)
        if result != omni.client.Result.OK:
            return [], []

    srcObjPathList = []
    dstUsdPathList = []
    for path in glob.glob(dem_path + "/" + str(_mapIndex) + "*.obj"):
        fName = os.path.basename(path)

        # Get map index from file name.
        mapIndex = 0
        p1 = fName.find('_')
        if p1 > 0:
            mapIndex = int(fName[0:p1])

        dstPath2 = dstPath + "/" + str(mapIndex)
        if (await ocl_existPath_async(dstPath2)) == False:
            omni.client.create_folder(dstPath2)

        usdPath = dstPath2 + "/" + str(mapIndex) + "_dem.usd"
        if (await ocl_existPath_async(usdPath)):
            continue

        srcObjPathList.append(path)
        dstUsdPathList.append(usdPath)

    return srcObjPathList, dstUsdPathList

# Get target path for converting bldg obj to usd.
async def get_ObjToUsdBuilding (_mapIndex : int, _dstPath : str):
    srcObjPathList = []
    dstUsdPathList = []

    if (await ocl_existPath_async(buliding_lod1_path)):
        dstPath = _dstPath + "/building/lod1"
        for path in glob.glob(buliding_lod1_path + "/**/" + str(_mapIndex) + "*.obj", recursive=True):
            if (await ocl_existPath_async(dstPath)) == False:
                omni.client.create_folder(dstPath)

            fName = os.path.basename(path)

            # Get map index from file name.
            mapIndex = 0
            p1 = fName.find('_')
            if p1 > 0:
                mapIndex = int(fName[0:p1])

            dstPath2 = dstPath + "/" + str(mapIndex)
            if (await ocl_existPath_async(dstPath2)) == False:
                omni.client.create_folder(dstPath2)

            usdPath = dstPath2 + "/" + str(mapIndex) + "_bldg.usd"
            if (await ocl_existPath_async(usdPath)):
                continue

            srcObjPathList.append(path)
            dstUsdPathList.append(usdPath)

    if (await ocl_existPath_async(buliding_lod2_path)) and in_load_lod2:
        dstPath = _dstPath + "/building/lod2"
        for path in glob.glob(buliding_lod2_path + "/**/" + str(_mapIndex) + "*.obj", recursive=True):
            if (await ocl_existPath_async(dstPath)) == False:
                omni.client.create_folder(dstPath)

            fName = os.path.basename(path)

            # Get map index from file name.
            mapIndex = 0
            p1 = fName.find('_')
            if p1 > 0:
                mapIndex = int(fName[0:p1])

            dstPath2 = dstPath + "/" + str(mapIndex)
            if (await ocl_existPath_async(dstPath2)) == False:
                omni.client.create_folder(dstPath2)

            usdPath = dstPath2 + "/" + str(mapIndex) + "_bldg.usd"
            if (await ocl_existPath_async(usdPath)):
                continue

            srcObjPathList.append(path)
            dstUsdPathList.append(usdPath)

    return srcObjPathList, dstUsdPathList

# Get target path for converting bridge obj to usd.
async def get_ObjToUsdBridge (_mapIndex : int, _dstPath : str):
    srcObjPathList = []
    dstUsdPathList = []

    if (await ocl_existPath_async(bridge_path)):
        dstPath = _dstPath + "/bridge"
        for path in glob.glob(bridge_path + "/**/" + str(_mapIndex) + "*.obj", recursive=True):
            if (await ocl_existPath_async(dstPath)) == False:
                omni.client.create_folder(dstPath)

            fName = os.path.basename(path)

            # Get map index from file name.
            mapIndex = 0
            p1 = fName.find('_')
            if p1 > 0:
                mapIndex = int(fName[0:p1])

            dstPath2 = dstPath + "/" + str(mapIndex)
            if (await ocl_existPath_async(dstPath2)) == False:
                omni.client.create_folder(dstPath2)

            usdPath = dstPath2 + "/" + str(mapIndex) + "_brid.usd"
            if (await ocl_existPath_async(usdPath)):
                continue

            srcObjPathList.append(path)
            dstUsdPathList.append(usdPath)

    return srcObjPathList, dstUsdPathList

# Get target path for converting tran obj to usd.
async def get_ObjToUsdTran (_mapIndex : int, _dstPath : str):
    srcObjPathList = []
    dstUsdPathList = []

    if (await ocl_existPath_async(tran_path)):
        dstPath = _dstPath + "/tran"
        if (await ocl_existPath_async(dstPath)) == False:
            omni.client.create_folder(dstPath)

        for path in glob.glob(tran_path + "/**/" + str(_mapIndex) + "*.obj", recursive=True):
            fName = os.path.basename(path)

            # Get map index from file name.
            mapIndex = 0
            p1 = fName.find('_')
            if p1 > 0:
                mapIndex = int(fName[0:p1])

            dstPath2 = dstPath + "/" + str(mapIndex)
            if (await ocl_existPath_async(dstPath2)) == False:
                omni.client.create_folder(dstPath2)

            usdPath = dstPath2 + "/" + str(mapIndex) + "_tran.usd"
            if (await ocl_existPath_async(usdPath)):
                continue

            srcObjPathList.append(path)
            dstUsdPathList.append(usdPath)

    return srcObjPathList, dstUsdPathList

# Convert asset file(obj/fbx/glTF, etc) to usd.
async def convert_asset_to_usd (input_path_list, output_path_list):
    # Input options are defaults.
    converter_context = omni.kit.asset_converter.AssetConverterContext()
    converter_context.ignore_materials = False
    converter_context.ignore_camera = False
    converter_context.ignore_animations = False
    converter_context.ignore_light = False
    converter_context.export_preview_surface = False
    converter_context.use_meter_as_world_unit = False
    converter_context.create_world_as_default_root_prim = True
    converter_context.embed_textures = True
    converter_context.convert_fbx_to_y_up = False
    converter_context.convert_fbx_to_z_up = False
    converter_context.merge_all_meshes = False
    converter_context.use_double_precision_to_usd_transform_op = False
    converter_context.ignore_pivots = False
    converter_context.keep_all_materials = True
    converter_context.smooth_normals = True

    instance = omni.kit.asset_converter.get_instance()

    for i in range(len(input_path_list)):
        input_asset = input_path_list[i]
        output_usd = output_path_list[i]
        task = instance.create_converter_task(input_asset, output_usd, None, converter_context)

        # Wait for completion.
        success = await task.wait_until_finished()
        if not success:
            carb.log_error(task.get_status(), task.get_detailed_error())
            break

# convert obj (dem/bldg/brid/tran) to usd.
async def convertObjToUsd ():
    if (await ocl_existPath_async(in_plateau_obj_path)) == False:
        return

    dstPath = in_output_usd_folder
    if dstPath == "":
        dstPath = in_plateau_obj_path + "/output_usd"
    if in_output_folder != "":
        dstPath = in_output_folder + "/data"

    if (await ocl_existPath_async(dstPath)) == False:
        result = omni.client.create_folder(dstPath)
        if result != omni.client.Result.OK:
            return

    srcObjPathList = []
    dstUsdPathList = []
    for mapIndex in mapIndexList:
        sList, dList = await get_ObjToUsdDem(mapIndex, dstPath)
        srcObjPathList.extend(sList)
        dstUsdPathList.extend(dList)

    for mapIndex in mapIndexList:
        sList, dList = await get_ObjToUsdBuilding(mapIndex, dstPath)
        srcObjPathList.extend(sList)
        dstUsdPathList.extend(dList)

    if in_load_bridge:
        for mapIndex in mapIndexList:
            sList, dList = await get_ObjToUsdBridge(mapIndex, dstPath)
            srcObjPathList.extend(sList)
            dstUsdPathList.extend(dList)

    if in_load_tran:
        for mapIndex in mapIndexList:
            sList, dList = await get_ObjToUsdTran(mapIndex, dstPath)
            srcObjPathList.extend(sList)
            dstUsdPathList.extend(dList)

    # Wait for usd conversion.
    if len(srcObjPathList) > 0:
        task = asyncio.create_task(convert_asset_to_usd(srcObjPathList, dstUsdPathList))
        await task
        print(f"PLATEAU : convert obj to usd ({ len(srcObjPathList) })")
        asyncio.ensure_future(_omniverse_sync_wait())

# Copy geoTiff images.
async def copyGEOTiffImages (srcPath : str, _mapIndex : int):
    if in_output_folder == "":
        return
    if (await ocl_existPath_async(srcPath)) == False:
        return

    dstPath = in_output_folder + "/data/geotiff_images"
    if (await ocl_existPath_async(dstPath)) == False:
        result = omni.client.create_folder(dstPath)
        if result != omni.client.Result.OK:
            return

    imgCou = 0
    for path in glob.glob(srcPath + "/" + str(_mapIndex) + "*.*"):
        fName = os.path.basename(path)
        dPath = dstPath + "/" + fName
        if (await ocl_existPath_async(dPath)):
            continue
        try:
            # TODO : Warning ?
            result = omni.client.copy(path, dPath)
            if result == omni.client.Result.OK:
                imgCou += 1
        except:
            pass

    if imgCou > 0:
        print(f"PLATEAU : copy GEOTiff images ({ imgCou })")

# --------------------------------------.
# load PLATEAU data.
# --------------------------------------.
async def load_PLATEAU ():
    if (await ocl_existPath_async(in_plateau_obj_path)) == False:
        return

    print("PLATEAU : Start processing.")

    # Convert obj to usd.
    if in_convert_to_usd:
        task = asyncio.create_task(convertObjToUsd())
        await task

    # Copy GEOTiff images.
    if in_dem_textures_path != "":
        for mapIndex in mapIndexList:
            await copyGEOTiffImages(in_dem_textures_path, mapIndex)

    # Create OmniPBR material.
    materialLooksPath = defaultPrimPath + "/Looks"
    prim = stage.GetPrimAtPath(materialLooksPath)
    if prim.IsValid() == False:
        UsdGeom.Scope.Define(stage, materialLooksPath)
    defaultMaterialPath = createMaterialOmniPBR(materialLooksPath + "/defaultMaterial")

    for mapIndex in mapIndexList:
        task_dem = asyncio.create_task(loadDem(mapIndex, defaultMaterialPath))
        await task_dem

        if not in_load_lod1_lod2:
            task_building = asyncio.create_task(loadBuilding(mapIndex, in_load_lod2, defaultMaterialPath))
            await task_building
        else:
            task_building_lod1 = asyncio.create_task(loadBuilding(mapIndex, False, defaultMaterialPath))
            await task_building_lod1
            task_building_lod2 = asyncio.create_task(loadBuilding(mapIndex, True, defaultMaterialPath))
            await task_building_lod2

        if in_load_bridge and in_load_lod2:
            task_bridge = asyncio.create_task(loadBridge(mapIndex, defaultMaterialPath))
            await task_bridge

        if in_load_tran:
            task_tran = asyncio.create_task(loadTran(mapIndex, defaultMaterialPath))
            await task_tran

        print(f"PLATEAU : map_index[{mapIndex}]")

    print("PLATEAU : Processing is complete.")

# --------------------------------------.
# --------------------------------------.
asyncio.ensure_future(load_PLATEAU())
```
ft-lab/omniverse_sample_scripts/PLATEAU/readme.md
# PLATEAU Project PLATEAU ( https://www.mlit.go.jp/plateau/ )の都市データをOmniverseにインポートします。 G空間情報センターの「3D都市モデルポータルサイト」( https://www.geospatial.jp/ckan/dataset/plateau )の「東京都23区」より、 OBJ形式のデータを使用しています。 また、地形のテクスチャについてはGeoTIFFを分割して使用しました。 2022/06/26 : objからusdに変換して読み込むようにしました。 2022/07/11 : 変換されたusdやテクスチャをNucleus上にアップロードするようにしました(デフォルトの指定)。 ## 使い方 ### 「東京都23区」のobjファイル一式をダウンロード 「3D都市モデルポータルサイト」より、「東京都23区」のobjファイル一式をダウンロードします。 https://www.geospatial.jp/ckan/dataset/plateau-tokyo23ku/resource/9c8d65f1-a424-4189-92c0-d9e3f7c3d2db 「13100_tokyo23-ku_2020_obj_3_op.zip」がダウンロードされますので解凍します。 注意 : 配置パスに日本語名のフォルダがある場合は正しく動作しません。 ### 「東京都23区」のGeoTIFFファイル一式をダウンロード また、地形のテクスチャで「東京都23区」の「GeoTIFF」のオルソ画像データを用います。 これは航空写真を平行投影として地域メッシュの2次メッシュ(533926、533935など)ごとにテクスチャ化したものです。 以下より、GeoTIFFの画像をダウンロードします。 https://www.geospatial.jp/ckan/dataset/plateau-tokyo23ku/resource/2434d5b4-7dad-4286-8da5-276f68a23797 「13100_tokyo23-ku_2020_ortho_2_op.zip」がダウンロードされますので解凍します。 ### GeoTIFF画像を10x10分割してjpeg形式に変換 Omniverseでは、tiff画像を扱うことができません。 そのためjpegに変換するようにしました。 また、8K解像度以上のテクスチャは読み込みに失敗するようです。 そのため、このtiffを10x10分割しそれぞれをjpegに変換します。 この処理はOmniverse上で行うことにしました。 Omniverse Createを起動します。 Omniverse Create 2022.1.2で確認しました。 [divide_GeoTiff_images.py](./divide_GeoTiff_images.py) のスクリプトの内容を、OmniverseのScript Editorにコピーします。 「in_plateau_obj_path」のパスに、「13100_tokyo23-ku_2020_ortho_2_op.zip」を解凍して展開されたフォルダのルートを指定します。 「in_save_folder_path」にそれぞれのtiff画像を10x10分割したときの画像を格納するフォルダを指定します。 スクリプトを実行します。 この処理は時間がかかります。Consoleに"Save success !!"と出ると出力完了です。 「in_save_folder_path」に指定したフォルダに53392500.jpg/53392501.jpgなどが出力されていることを確認します。 このTiffからjpeg出力を行う処理は1回だけ行えばOKです。 注意 : 配置パスに日本語名のフォルダがある場合は正しく動作しません。 ### 例1 : 東京23区の地形と建物(LOD1)を読み込み ※ テクスチャは反映しません。 Omniverse Createを起動し、新規Stageを作成します。 「[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)」の内容をScript Editorにコピーします。 スクリプト上の 「in_plateau_obj_path」のパス指定を、ローカルの「13100_tokyo23-ku_2020_obj_3_op.zip」を解凍したフォルダに変更します。 スクリプト上の 「in_assign_dem_texture」をFalseにします。 これにより、demにマッピングするテクスチャは読み込まれません。 スクリプトを実行します。 この処理は時間がかかります。数分ほど待つと、StageにPLATEAUの都市データが読み込まれます。 以下は背景のEnvironmentを指定し、RTX-Interactive (Path Tracing)にしています。 ![plateau_01_01.jpg](./images/plateau_01_01.jpg) ![plateau_01_02.jpg](./images/plateau_01_02.jpg) このLOD1のみの都市データは、Omniverse Createで約12GBくらいのメモリを消費します。 OSのメモリは32GBあれば足ります。 2022/06/26 すべてのデータをobjからusdに変換して読み込むようにしました。 変換されたusdは、in_plateau_obj_pathの"output_usd"フォルダに格納されます。 ### 例2 : 東京23区の地形と建物(LOD1)を読み込み + 地形のテクスチャを反映 Omniverse Create 2022.1.2で確認しました。 Omniverse Createで新規Stageを作成します。 「[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)」の内容をScript Editorにコピーします。 スクリプト上の 「in_plateau_obj_path」のパス指定を、ローカルの「13100_tokyo23-ku_2020_obj_3_op.zip」を解凍したフォルダに変更します。 スクリプト上の 「in_dem_textures_path」のパスは、ローカルのGeoTiffからjpeg変換したときの出力先を指定します。 スクリプト上の 「in_assign_dem_texture」がTrueになっているのを確認します。 これにより、「in_dem_textures_path」で指定したフォルダからテクスチャが読み込まれ、マテリアルとテクスチャが地形のMeshであるdemに割り当てられます。 スクリプトを実行します。 この処理は時間がかかります。数分ほど待つと、StageにPLATEAUの都市データが読み込まれます。 地形にはテクスチャが割り当てられています。 以下は背景のEnvironmentを指定し、RTX-Interactive (Path Tracing)にしています。 ![plateau_02_01.jpg](./images/plateau_02_01.jpg) ![plateau_02_02.jpg](./images/plateau_02_02.jpg) このLOD1のみの都市データは、Omniverse Createで約13GBくらいのメモリを消費します。 OSのメモリは32GBあれば足ります。 ### 例3 : 東京23区の地形と建物(LOD1またはLOD2)を読み込み + 地形のテクスチャを反映 LOD2の建物がある場合はそれを読み込みます。 Omniverse Create 2022.1.2で確認しました。 Omniverse Createで新規Stageを作成します。 「[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)」の内容をScript Editorにコピーします。 スクリプト上の 「in_plateau_obj_path」のパス指定を、ローカルの「13100_tokyo23-ku_2020_obj_3_op.zip」を解凍したフォルダに変更します。 
### Example 1 : Load the terrain and buildings (LOD1) of the Tokyo 23 wards

No textures are applied in this example.

Start Omniverse Create and create a new Stage.
Copy the contents of "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)" into the Script Editor.
In the script, change the "in_plateau_obj_path" path to the local folder where "13100_tokyo23-ku_2020_obj_3_op.zip" was unzipped.
In the script, set "in_assign_dem_texture" to False.
With this setting, no texture is loaded for the dem (terrain) mesh.
Run the script.
This takes time; after a few minutes the PLATEAU city data is loaded into the Stage.
The images below use a background Environment and RTX-Interactive (Path Tracing).

![plateau_01_01.jpg](./images/plateau_01_01.jpg)
![plateau_01_02.jpg](./images/plateau_01_02.jpg)

This LOD1-only city data consumes about 12 GB of memory in Omniverse Create.
32 GB of OS memory is sufficient.

2022/06/26
All data is now converted from obj to usd before loading.
The converted usd files are stored in the "output_usd" folder under in_plateau_obj_path.

### Example 2 : Load the terrain and buildings (LOD1) + apply the terrain textures

Confirmed with Omniverse Create 2022.1.2.

Create a new Stage in Omniverse Create.
Copy the contents of "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)" into the Script Editor.
In the script, change the "in_plateau_obj_path" path to the local folder where "13100_tokyo23-ku_2020_obj_3_op.zip" was unzipped.
Set "in_dem_textures_path" to the local output folder used when converting the GeoTIFF images to jpeg.
Confirm that "in_assign_dem_texture" is True.
With this setting, the textures are loaded from the folder specified in "in_dem_textures_path", and a material with the texture is assigned to the dem (terrain) mesh; a sketch of what such an assignment can look like is shown after this example.
Run the script.
This takes time; after a few minutes the PLATEAU city data is loaded into the Stage, with textures assigned to the terrain.
The images below use a background Environment and RTX-Interactive (Path Tracing).

![plateau_02_01.jpg](./images/plateau_02_01.jpg)
![plateau_02_02.jpg](./images/plateau_02_02.jpg)

This city data consumes about 13 GB of memory in Omniverse Create.
32 GB of OS memory is sufficient.
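The snippet below is a minimal sketch of how assigning an ortho-image texture to the dem mesh can look in USD; it is not the actual code of import_PLATEAU_tokyo23ku_obj.py. The prim paths and texture file name are hypothetical, and the OmniPBR parameter names ("diffuse_texture", "specular_level") are assumptions about that shader.

```python
# Minimal sketch: create an OmniPBR material with the ortho image as its
# diffuse texture (specular set to 0 to reduce the washed-out look) and
# bind it to a dem mesh. Paths are hypothetical.
from pxr import Sdf, UsdShade
import omni.usd

stage = omni.usd.get_context().get_stage()

mtl_path = Sdf.Path("/World/Looks/dem_mat")
material = UsdShade.Material.Define(stage, mtl_path)
shader = UsdShade.Shader.Define(stage, mtl_path.AppendChild("Shader"))

# Reference the OmniPBR MDL shader.
shader.CreateImplementationSourceAttr(UsdShade.Tokens.sourceAsset)
shader.SetSourceAsset("OmniPBR.mdl", "mdl")
shader.SetSourceAssetSubIdentifier("OmniPBR", "mdl")

# Assumed OmniPBR parameter names.
shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset).Set("./textures/53393500.jpg")
shader.CreateInput("specular_level", Sdf.ValueTypeNames.Float).Set(0.0)

material.CreateSurfaceOutput("mdl").ConnectToSource(shader.ConnectableAPI(), "out")

# Bind the material to the dem mesh (hypothetical path).
demPrim = stage.GetPrimAtPath("/World/dem")
UsdShade.MaterialBindingAPI(demPrim).Bind(material)
```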
### Example 3 : Load the terrain and buildings (LOD1 or LOD2) + apply the terrain textures

If a building has LOD2 data, it is loaded.
Confirmed with Omniverse Create 2022.1.2.

Create a new Stage in Omniverse Create.
Copy the contents of "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)" into the Script Editor.
In the script, change the "in_plateau_obj_path" path to the local folder where "13100_tokyo23-ku_2020_obj_3_op.zip" was unzipped.
Set "in_dem_textures_path" to the local output folder used when converting the GeoTIFF images to jpeg.
Confirm that "in_assign_dem_texture" is True.
With this setting, the textures are loaded from the folder specified in "in_dem_textures_path", and a material with the texture is assigned to the dem (terrain) mesh.
In the script, change "in_load_lod2" to True.
With this setting, LOD2 data is loaded for any building that has it.
Note : LOD2 comes with textures, which considerably increases loading time and memory consumption.

The "mapIndexList" in the script holds the secondary mesh numbers of the regional grid as an array.
By default it contains the whole of the Tokyo 23 wards; adjust the mesh numbers to fit your memory.
Here it was changed as follows:

```
mapIndexList = [533945, 533946]
```

Run the script.
Loading LOD2 takes quite a long time; it completed in about 20 minutes.
With "mapIndexList = [533945, 533946]", Omniverse Create consumes about 11 GB of memory.

Next,

```
mapIndexList = [533935, 533936]
```

was set and the script was run again to load additional terrain data.
Loading completed in about 30 minutes.
With the additional "mapIndexList = [533935, 533936]", Omniverse Create consumes about 20 GB of memory in total.

When several secondary meshes are loaded in succession from the script, the application sometimes froze after loading completed (possibly a conflict during material updates?).
The Omniverse Create status bar sometimes showed "Loading Material" with the progress bar not advancing.

![plateau_03_00.jpg](./images/plateau_03_00.jpg)

To work around this, wait until loading has fully completed and run the script several times in separate batches.

The images below use a background Environment and RTX-Interactive (Path Tracing).

![plateau_03_01.jpg](./images/plateau_03_01.jpg)
![plateau_03_02.jpg](./images/plateau_03_02.jpg)

Including LOD2, the data for the secondary mesh range [533945, 533946, 533935, 533936] consumes about 20 GB of memory in total in Omniverse Create.
32 GB of OS memory was not enough to load this; having around 64 GB of headroom looks advisable.

### Example 4 : Load the terrain, buildings (LOD1 or LOD2), and bridges (LOD2) + apply the terrain textures

If a building has LOD2 data, it is loaded. LOD2 bridges are also loaded.
Confirmed with Omniverse Create 2022.1.2.

Create a new Stage in Omniverse Create.
Copy the contents of "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)" into the Script Editor.
In the script, change the "in_plateau_obj_path" path to the local folder where "13100_tokyo23-ku_2020_obj_3_op.zip" was unzipped.
Set "in_dem_textures_path" to the local output folder used when converting the GeoTIFF images to jpeg.
Confirm that "in_assign_dem_texture" is True.
With this setting, the textures are loaded from the folder specified in "in_dem_textures_path", and a material with the texture is assigned to the dem (terrain) mesh.
In the script, change "in_load_lod2" to True.
With this setting, LOD2 data is loaded for any building that has it.
Note : LOD2 comes with textures, which considerably increases loading time and memory consumption.
In the script, change "in_load_bridge" to True.
With this setting, the LOD2 3D models of the bridges are also loaded.

The "mapIndexList" in the script holds the secondary mesh numbers as an array.
By default it contains the whole of the Tokyo 23 wards; adjust the mesh numbers to fit your memory.
Here it was changed as follows:

```
mapIndexList = [533935]
```

Run the script.
The image below uses a background Environment and RTX-Interactive (Path Tracing).

![plateau_04_01.jpg](./images/plateau_04_01.jpg)

Adding LOD2 bridges increases memory consumption further, but there are far fewer bridges than buildings.

## Checking latitudes and longitudes with scripts

Several scripts were written to verify latitude/longitude calculations.

### Convert a latitude/longitude to plane rectangular coordinates / an XZ position in Omniverse

The GSI "conversion to plane rectangular coordinates" page ( https://vldb.gsi.go.jp/sokuchi/surveycalc/surveycalc/bl2xyf.html ) was ported to a Python script.

[calcLatLongToOmniverse.py](./calcLatLongToOmniverse.py)

This is a calculation-only script.
Specify a latitude and longitude in (in_lat, in_longi) in the script; it computes the position in plane rectangular coordinates and converts it to the Omniverse (USD) coordinate system (Y-up, right-handed, cm units).
The -Z direction in Omniverse is treated as north.

Below is the area in front of Tokyo Tower on the GSI map ( https://maps.gsi.go.jp/ ).

![plateau_calc_lat_longi_01.jpg](./images/plateau_calc_lat_longi_01.jpg)

Latitude : 35.658310
Longitude : 139.745243

Running this through the script gives the following XZ position in Omniverse:

x = -797587.3075871967 (cm)
z = 3790513.4729016027 (cm)

Placing a Sphere with a red material at this position gives the following result.

![plateau_calc_lat_longi_02.jpg](./images/plateau_calc_lat_longi_02.jpg)

### Compute the distance between two points (simple straight-line distance)

[calcDistance.py](./calcDistance.py)

Simply computes the distance between the center positions of the two selected shapes and prints it to the Console in cm and m.

![plateau_calc_dist_01.jpg](./images/plateau_calc_dist_01.jpg)

### Compute the distance between two latitude/longitude pairs

The GSI "distance and azimuth calculation" page ( https://vldb.gsi.go.jp/sokuchi/surveycalc/surveycalc/bl2stf.html ) was ported to a Python script.

[calcDistanceWithLatLong.py](./calcDistanceWithLatLong.py)

This is a calculation-only script.
Specify the latitude/longitude of the start position in (in_lat1, in_longi1) and of the end position in (in_lat2, in_longi2) in the script.
The distance between the two is printed to the Console in m and km.

## About USD file conversion

When "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)" runs, the PLATEAU obj files are converted to usd files.
The converted USD files are stored in in_plateau_obj_path + "/output_usd".
For the whole of the Tokyo 23 wards, the usd files and related textures amount to about 4 GB.
The obj-to-usd conversion takes time.
From the second run onward, this conversion is skipped if the usd files already exist.
To convert from obj to usd again, delete the in_plateau_obj_path + "/output_usd" folder.
Omniverse can reference obj/fbx files directly, but converting to usd and working with usd is preferable whenever possible.
A minimal sketch of scripting such a conversion is shown below.
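The following is a minimal sketch of an obj-to-usd conversion using the omni.kit.asset_converter extension, which is one way such a conversion can be scripted inside Omniverse. It is not this project's actual conversion code, and the file paths are hypothetical.

```python
# Minimal sketch: convert a single obj file to usd with omni.kit.asset_converter.
# File paths are hypothetical; error handling is kept to a minimum.
import asyncio
import omni.kit.asset_converter

def progress_callback(current_step, total_steps):
    pass  # A progress bar could be updated here.

async def convert_obj_to_usd(in_path: str, out_path: str) -> bool:
    context = omni.kit.asset_converter.AssetConverterContext()
    instance = omni.kit.asset_converter.get_instance()
    task = instance.create_converter_task(in_path, out_path, progress_callback, context)
    success = await task.wait_until_finished()
    if not success:
        print(f"Convert failed : {task.get_status()} {task.get_detailed_error()}")
    return success

# Run asynchronously from Omniverse's Script Editor.
asyncio.ensure_future(convert_obj_to_usd(
    "D:/plateau/obj/53394511_bldg_6697.obj",
    "D:/plateau/output_usd/53394511_bldg_6697.usd"))
```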
## Uploading to Nucleus (added 2022/07/11)

The PLATEAU data is now uploaded to Nucleus by default.
The files produced by the obj-to-usd conversion and the images produced by splitting the GeoTIFF are transferred to the URL specified in

```
in_output_folder = "omniverse://localhost/PLATEAU/Tokyo_23ku"
```

in "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)".
Note that the first run takes time, because the obj-to-usd conversion and the transfer of the GeoTIFF images to Nucleus happen then.
By saving the root usd file that references the city data to Nucleus, the usd and texture references are automatically converted to relative paths.
When managing scenes in Omniverse, doing so on Nucleus appears to be the better choice.

## Collect Asset : how to upload to Nucleus

Note : As of 2022/07/11, the related usd and texture files are uploaded to Nucleus by default, so Collect Asset is no longer required.

Reference : https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_collect.html

"Collect Asset" collects the usd files, image files, and other assets referenced from a target usd file and rewrites the references into relative paths.
This removes the locally environment-dependent paths and makes it possible to upload to Nucleus.

After loading the Tokyo 23 wards city data with "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)", save the current Stage to a usd file.
Make sure that all referenced files consist of USD files at this point; with the default setting "in_convert_to_usd = True", the obj files are converted to USD.

In the Content window, right-click the saved usd to open the popup menu and choose "Collect Asset".

![plateau_collectAsset_01.png](./images/plateau_collectAsset_01.png)

In the Collection Options window, specify the output destination in "Collection Path"; a path on Nucleus was specified here.
Pressing the Collect button writes the usd and the referenced textures, reorganized, to the specified path.

![plateau_collectAsset_02.png](./images/plateau_collectAsset_02.png)

### Notes when using Collect Asset

The following was confirmed as of Omniverse Create 2022.1.3.

* Assets referenced from the target USD file should themselves be usd files. obj and fbx files can also be referenced directly, but in that case Collect Asset did not carry over the material (mtl) and texture files correctly.
* ~~When Material Graph was used, Collect Asset did not carry over the MDL files correctly.~~ No longer an issue in Omniverse Create 2022.1.3.

----

## Files

|File|Description|
|---|---|
|[divide_GeoTiff_images.py](./divide_GeoTiff_images.py)|Splits the Tokyo 23 wards PLATEAU GeoTIFF files into 10x10 tiles and writes them to the specified folder in jpeg format.<br>Rewrite the "in_xxx" settings in the code to match your environment.|
|[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)|Imports the city model into Omniverse from the Tokyo 23 wards PLATEAU obj files.<br>Rewrite the "in_xxx" settings in the code to match your environment.|
|[calcDistance.py](./calcDistance.py)|Computes the straight-line distance between the two selected shapes.|
|[calcDistanceWithLatLong.py](./calcDistanceWithLatLong.py)|Computes the distance between two latitude/longitude pairs.<br>Rewrite the "in_xxx" settings in the code to match your environment.|
|[calcLatLongToOmniverse.py](./calcLatLongToOmniverse.py)|Computes the position in plane rectangular coordinates from a latitude/longitude, and from it the XZ position in Omniverse.<br>Rewrite the "in_xxx" settings in the code to match your environment.|

## Known issues

Confirmed with Omniverse Create 2022.1.3.

### The progress bar stalls when referencing a large number of usd files

(Already reported to the development team.)

When loading the city data with "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)", the Omniverse Create status bar sometimes shows "Loading Material" and the progress bar stops advancing.

![plateau_03_00.jpg](./images/plateau_03_00.jpg)

To work around this, wait until loading has fully completed and run the script several times in separate batches.
When loading only LOD1, loading all of the regional meshes (14 of them) never stalled.
When LOD2 is included, the application freezes unless the maps are loaded one or two at a time.

### Saving the created usd file hangs at "Saving layers"

(Already reported to the development team.)

When the whole of the Tokyo 23 wards is loaded and saved, saving sometimes stayed stuck at "Saving layers" for a long time.
The operation does complete if you wait a few hours.
As a workaround, load just one map and save at that point, then load an additional map and save again; done this way, it did not take as long.

### The progress bar stalls when loading the saved usd file

(Already reported to the development team.)

After loading and saving the whole of the Tokyo 23 wards (LOD2), restart Omniverse Create and load the usd.
At this point, "Loading Material" sometimes appeared and the progress bar stopped advancing.
This is probably the same phenomenon as "The progress bar stalls when referencing a large number of usd files" above.

### Saving with obj files referenced loses textures on reload

When "in_convert_to_usd" is set to False in "[import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py)", the PLATEAU obj files are referenced directly with Reference.
When "in_convert_to_usd" is True, the obj files are converted to usd and the usd files are referenced.
After loading and saving the whole of the Tokyo 23 wards (LOD2), closing the usd, and reopening the same scene, the textures sometimes disappear.
This appears to be caused by the cache used when loading obj (for obj, the geometry and textures produced by the usd conversion are stored in a working directory).
Also, when uploading a set of usd files to Nucleus with "Collect Asset" ( https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_collect.html ), the material mtl and texture files were not carried over when obj was used.
For these reasons, it seems best to build Stages in Omniverse entirely from usd.

## Update history

### 2022/07/11

Updated [import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py).

* Specifying "in_output_folder" now sends the usd/texture files to Nucleus
* Mitigated the problem of the UI freezing during file transfer (possibly because files are now uploaded to Nucleus?). However, when building the stage with LOD2, waits still occur during material processing.
* Set the material Specular to 0 when assigning the GeoTIFF textures to the terrain (reduces blown-out highlights)

### 2022/06/26

* [import_PLATEAU_tokyo23ku_obj.py](./import_PLATEAU_tokyo23ku_obj.py) now converts to USD before importing

### 2022/06/10

* First version
13,043
Markdown
33.599469
163
0.766848
ft-lab/omniverse_sample_scripts/PLATEAU/calcLatLongToOmniverse.py
# ------------------------------------------------------------------.
# Convert a latitude/longitude to plane rectangular coordinates,
# then to Omniverse (USD) Y-up / cm.
# Reference : https://vldb.gsi.go.jp/sokuchi/surveycalc/surveycalc/bl2xyf.html
#
# Note that this calculation is only valid on the map of Japan.
# ------------------------------------------------------------------.
import math

# --------------------------------------.
# Input Parameters.
# --------------------------------------.
# Latitude and longitude.
in_lat   = 35.680908
in_longi = 139.767348

# ---------------------------------------------------------.
# Get the latitude/longitude of the origin of the plane
# rectangular coordinate system.
# Reference : https://www.gsi.go.jp/LAW/heimencho.html
# For Tokyo, specify 9.
# ---------------------------------------------------------.
def getOriginLatAndLongi (index : int = 9):
    latV0 = 0.0
    longiV0 = 0.0

    # I.
    if index == 1:
        latV0 = 33.0
        longiV0 = 129.5
    # II.
    elif index == 2:
        latV0 = 33.0
        longiV0 = 131.0
    # III.
    elif index == 3:
        latV0 = 36.0
        longiV0 = 132.16666666
    # IV.
    elif index == 4:
        latV0 = 33.0
        longiV0 = 133.5
    # V.
    elif index == 5:
        latV0 = 36.0
        longiV0 = 134.33333333
    # VI.
    elif index == 6:
        latV0 = 36.0
        longiV0 = 136.0
    # VII.
    elif index == 7:
        latV0 = 36.0
        longiV0 = 137.16666666
    # VIII.
    elif index == 8:
        latV0 = 36.0
        longiV0 = 138.5
    # IX. Tokyo (default).
    elif index == 9:
        latV0 = 36.0
        longiV0 = 139.83333333
    # X.
    elif index == 10:
        latV0 = 40.0
        longiV0 = 140.83333333
    # XI.
    elif index == 11:
        latV0 = 44.0
        longiV0 = 140.25
    # XII.
    elif index == 12:
        latV0 = 44.0
        longiV0 = 142.25
    # XIII.
    elif index == 13:
        latV0 = 44.0
        longiV0 = 144.25
    # XIV.
    elif index == 14:
        latV0 = 26.0
        longiV0 = 142.0
    # XV.
    elif index == 15:
        latV0 = 26.0
        longiV0 = 127.5
    # XVI.
    elif index == 16:
        latV0 = 26.0
        longiV0 = 124.0
    # XVII.
    elif index == 17:
        latV0 = 26.0
        longiV0 = 131.0
    # XVIII.
    elif index == 18:
        latV0 = 20.0
        longiV0 = 136.0
    # XIX.
    elif index == 19:
        latV0 = 26.0
        longiV0 = 154.0

    return latV0, longiV0

# ---------------------------------------------.
# Convert a latitude/longitude to plane rectangular coordinates.
# @param[in] latV         Latitude (decimal degrees).
# @param[in] longiV       Longitude (decimal degrees).
# @param[in] originIndex  Number of the origin of the plane rectangular coordinate system.
#                         https://www.gsi.go.jp/LAW/heimencho.html
# @return x, y (in meters)
# ---------------------------------------------.
def calcLatLongToHeimenChokaku (latV : float, longiV : float, originIndex : int = 9):
    # Equatorial radius (km) = semi-major axis of the ellipsoid.
    R = 6378.137

    # Polar radius (km).
    R2 = 6356.752

    # Inverse flattening.
    F = 298.257222101

    # Scale factor on the X axis of the plane rectangular coordinate system.
    m0 = 0.9999

    # Latitude/longitude of the origin of the plane rectangular coordinate system.
    # https://www.gsi.go.jp/LAW/heimencho.html
    # This differs by region; for Tokyo the IX (9th) entry is used.
    latV0, longiV0 = getOriginLatAndLongi(originIndex)

    # Convert degrees to radians.
    lat0R = latV0 * math.pi / 180.0
    longi0R = longiV0 * math.pi / 180.0
    latR = latV * math.pi / 180.0
    longiR = longiV * math.pi / 180.0

    n = 1.0 / (2.0 * F - 1.0)
    A0 = 1.0 + (n**2) / 4.0 + (n**4) / 64.0
    A1 = (-3.0 / 2.0) * (n - (n**3) / 8.0 - (n**5) / 64.0)
    A2 = (15.0 / 16.0) * ((n**2) - (n**4) / 4.0)
    A3 = (-35.0 / 48.0) * ((n**3) - (5.0 / 16.0) * (n**5))
    A4 = (315.0 / 512.0) * (n**4)
    A5 = (-693.0 / 1280.0) * (n**5)
    A_Array = [A0, A1, A2, A3, A4, A5]

    a1 = (1.0 / 2.0) * n - (2.0 / 3.0) * (n**2) + (5.0 / 16.0) * (n**3) + (41.0 / 180.0) * (n**4) - (127.0 / 288.0) * (n**5)
    a2 = (13.0 / 48.0) * (n**2) - (3.0 / 5.0) * (n**3) + (557.0 / 1440.0) * (n**4) + (281.0 / 630.0) * (n**5)
    a3 = (61.0 / 240.0) * (n**3) - (103.0 / 140.0) * (n**4) + (15061.0 / 26880.0) * (n**5)
    a4 = (49561.0 / 161280.0) * (n**4) - (179.0 / 168.0) * (n**5)
    a5 = (34729.0 / 80640.0) * (n**5)
    a_Array = [0.0, a1, a2, a3, a4, a5]

    A_ = ((m0 * R) / (1.0 + n)) * A0

    v = 0.0
    for i in range(5):
        v += A_Array[i + 1] * math.sin(2.0 * (float)(i + 1) * lat0R)
    S_ = ((m0 * R) / (1.0 + n)) * (A0 * lat0R + v)

    lambdaC = math.cos(longiR - longi0R)
    lambdaS = math.sin(longiR - longi0R)

    t = math.sinh(math.atanh(math.sin(latR)) - ((2.0 * math.sqrt(n)) / (1.0 + n)) * math.atanh(((2.0 * math.sqrt(n)) / (1.0 + n)) * math.sin(latR)))
    t_ = math.sqrt(1.0 + t * t)

    xi2 = math.atan(t / lambdaC)
    eta2 = math.atanh(lambdaS / t_)

    v = 0.0
    for i in range(5):
        v += a_Array[i + 1] * math.sin(2.0 * (float)(i + 1) * xi2) * math.cosh(2.0 * (float)(i + 1) * eta2)
    x = A_ * (xi2 + v) - S_

    v = 0.0
    for i in range(5):
        v += a_Array[i + 1] * math.cos(2.0 * (float)(i + 1) * xi2) * math.sinh(2.0 * (float)(i + 1) * eta2)
    y = A_ * (eta2 + v)

    # Convert km to m and return.
    return (x * 1000.0), (y * 1000.0)

# ----------------------------------------------------------.
# Convert the latitude/longitude to plane rectangular coordinates (in m).
originIndex = 9     # Tokyo.
x, y = calcLatLongToHeimenChokaku(in_lat, in_longi, originIndex)
print("Latitude = " + str(in_lat))
print("Longitude = " + str(in_longi))
print("  X = " + str(x) + " (m)")
print("  Y = " + str(y) + " (m)")

# Convert to Omniverse (USD) Y-up / right-handed / cm.
x2 = y * 100.0
z2 = -x * 100.0
print("[ Omniverse ] (Y-up/right hand/cm)")
print("  x = " + str(x2) + " (cm)")
print("  z = " + str(z2) + " (cm)")
5,518
Python
26.733668
148
0.436934
ft-lab/omniverse_sample_scripts/PLATEAU/calcDistance.py
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf import omni.usd # Get stage. stage = omni.usd.get_context().get_stage() # -------------------------------------------------. # Calculate bounding box in world coordinates. # -------------------------------------------------. def _calcWorldBoundingBox (prim : Usd.Prim): # Calc world boundingBox. bboxCache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default"]) bboxD = bboxCache.ComputeWorldBound(prim).ComputeAlignedRange() bb_min = Gf.Vec3f(bboxD.GetMin()) bb_max = Gf.Vec3f(bboxD.GetMax()) return bb_min, bb_max # -------------------------------------------------. # Calculate the distance between two selected shapes. # -------------------------------------------------. # Get selection. selection = omni.usd.get_context().get_selection() paths = selection.get_selected_prim_paths() wPosList = [] for path in paths: # Get prim. prim = stage.GetPrimAtPath(path) if prim.IsValid(): bbMin, bbMax = _calcWorldBoundingBox(prim) wCenter = Gf.Vec3f((bbMax[0] + bbMin[0]) * 0.5, (bbMax[1] + bbMin[1]) * 0.5, (bbMax[2] + bbMin[2]) * 0.5) wPosList.append(wCenter) continue if len(wPosList) == 2: distV = (wPosList[1] - wPosList[0]).GetLength() print("Distance : " + str(distV) + " cm ( " + str(distV * 0.01) + " m)")
1,359
Python
33.871794
113
0.554084
ft-lab/omniverse_sample_scripts/Animation/readme.md
# Animation

Animation-related processing.

|File|Description|
|---|---|
|[GetTimeCode.py](./GetTimeCode.py)|Gets the start/end TimeCode of the current Stage and the TimeCodesPerSecond (frame rate).|
|[GetCurrentTimeCode.py](./GetCurrentTimeCode.py)|Gets the current timecode (frame position).|
244
Markdown
26.222219
96
0.67623
ft-lab/omniverse_sample_scripts/Animation/GetTimeCode.py
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf # Get stage. stage = omni.usd.get_context().get_stage() # Get TimeCode. print(f"Start TimeCode : {stage.GetStartTimeCode()}") print(f"End TimeCode : {stage.GetEndTimeCode()}") # Get frame rate. print(f"TimeCodesPerSecond : {stage.GetTimeCodesPerSecond()}")
325
Python
24.076921
63
0.723077
ft-lab/omniverse_sample_scripts/Animation/GetCurrentTimeCode.py
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf import omni.usd import omni.timeline # Get stage. stage = omni.usd.get_context().get_stage() # Get current timeCode. time_code = omni.timeline.get_timeline_interface().get_current_time() * stage.GetTimeCodesPerSecond() print(f"Current timeCode : {time_code}")
327
Python
26.333331
101
0.746177
ft-lab/Omniverse_OmniGraph_ClockSample/readme.md
# ft_lab.OmniGraph.GetDateTime

This sample uses OmniGraph to reflect the current time on analog and digital clocks created as 3D models.

![preview.jpg](./images/preview.jpg)

This is a sample project in which OmniGraph custom nodes are provided by a Python Extension to control a pre-built 3D model.

## How to use

I have confirmed that it works with ~~Omniverse Create 2022.3.3~~ USD Composer 2023.2.2 (Kit 105.1.2).
Download this repository and use it locally.

```
[extension]
    [ft_lab.OmniGraph.GetDateTime]   ... Extension (OmniGraph nodes) used in this project
[usds]   ... sample scene
    [Clock]
        [textures]
        clock.usd
    [ClockDigital]
        [textures]
        clock_digital.usd
    clock_stage.usd   ... Open and use this locally.
```

* Assign and activate the Extension in Omniverse Create.
Copy "[ft_lab.OmniGraph.GetDateTime](./extension/ft_lab.OmniGraph.GetDateTime/)" to a folder where Omniverse can find it as an Extension.
![GetDateTime_extension_01.jpg](./images/GetDateTime_extension_01.jpg)

* Open "[clock_stage.usd](./usds/clock_stage.usd)" in Omniverse Create.
It references two USD files, "[clock.usd](./usds/Clock/clock.usd)" and "[clock_digital.usd](./usds/ClockDigital/clock_digital.usd)".

You can now see the current time reflected in the analog and digital clocks.

![GetDateTime_01.jpg](./images/GetDateTime_01.jpg)

## Documents

* [Description of OmniGraph nodes](./OmniGraphNodes.md)

## Documents for Development

* [Extension Structure](./docs/ExtensionStructure.md)
* [GetDateTime](./docs/node_GetDateTime.md)
* [RotationByTime](./docs/node_RotationByTime.md)
* [OutputToLCD](./docs/node_OutputToLCD.md)
* [3D Models](./docs/Modeling3D.md)

## Change Log

* [Change Log](./ChangeLog.md)

## License

This software is released under the MIT License, see [LICENSE.txt](./LICENSE.txt).
1,864
Markdown
31.155172
143
0.714056
ft-lab/Omniverse_OmniGraph_ClockSample/ChangeLog.md
# Change Log

## December 22, 2023

Fixed for USD Composer 2023.2.2 (Kit 105.1.2).

### xxxxDatabase.py

The node icons were not reflected until the following two internal versions were updated.

* GENERATOR_VERSION : (1, 31, 1) -> (1, 41, 3)
* TARGET_VERSION : (2, 107, 4) -> (2, 139, 12)

## July 11, 2023

Updated from Omniverse Create 2022.3.3 (Kit 104) to USD Composer 2023.1.0-beta (Kit 105).

### [RotationByTime.ogn](extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/RotationByTime.ogn)

"type": "float3" -> "float[3]"

### xxxxDatabase.py

* GENERATOR_VERSION : (1, 17, 2) -> (1, 31, 1)
* TARGET_VERSION : (2, 65, 4) -> (2, 107, 4)
673
Markdown
23.962962
118
0.63893
ft-lab/Omniverse_OmniGraph_ClockSample/OmniGraphNodes.md
# Description of OmniGraph nodes

This extension consists of three custom nodes.

![OmniGraphNodes.png](./images/OmniGraphNodes.png)

The three nodes are added to the "Examples" category of the Graph and are intended to be used in a Push Graph.

![GetDateTime_nodes.png](./images/GetDateTime_nodes.png)

## Get DateTime

Get the current local date and time.

![Node_GetDateTime.png](./images/Node_GetDateTime.png)

### Outputs

* Year (int)
* Month (int)
* Day (int)
* Hour (int)
* Minute (int)
* Second (int)

## Rotation By Time

Given an hour, minute, and second, returns the rotation XYZ (in degrees) for each clock hand.
Used to rotate the analog clock.

![Node_RotationByTime.png](./images/Node_RotationByTime.png)

### Inputs

* Default RotateXYZ : Default rotation value (float3)
* Rotation Axis : Rotation axis (0:X, 1:Y, 2:Z)
* Hour (int)
* Minute (int)
* Second (int)

### Outputs

* Hour RotateXYZ : Hour rotation value (float3)
* Minute RotateXYZ : Minute rotation value (float3)
* Second RotateXYZ : Second rotation value (float3)

Connect the output values of the Get DateTime node to the Hour/Minute/Second inputs.

The analog clock "[clock.usd](./usds/Clock/clock.usd)" referenced in this stage has a default rotation of Rotate(90, 0, 0), and the hands of the clock rotate around the Y axis. This is the same for the Hour/Minute/Second hands.

![GetDateTime_02.jpg](./images/GetDateTime_02.jpg)

In the Inputs, set "Default RotateXYZ" to (90, 0, 0) and "Rotation Axis" to 1 (Y).
With these inputs, the node returns the calculated rotation values in "Hour RotateXYZ", "Minute RotateXYZ", and "Second RotateXYZ".
For example, at 30 seconds past the minute, "Second RotateXYZ" becomes (90, 180, 0), since 30/60 x 360 = 180 degrees around the Y axis.

The clock-hand prims are added to the Graph as "Write Prim Attribute" nodes.

![GetDateTime_03.png](./images/GetDateTime_03.png)

In this case, select "xformOp:rotateXYZ" as the "Attribute Name".

![GetDateTime_04.png](./images/GetDateTime_04.png)

Connect "Hour RotateXYZ", "Minute RotateXYZ", and "Second RotateXYZ" of "Rotation By Time" to the Value of these nodes.
This is all that is required to move the hands of an analog clock.

## Time Output To LCD

This node controls a virtual 7-segment LCD screen.
It shows/hides the Prims specified in the inputs to display the digital clock.

![Node_TimeOutputToLCD.png](./images/Node_TimeOutputToLCD.png)

### Inputs

* HourNum10 Prim : Prim for the tens digit of the hour (token)
* HourNum1 Prim : Prim for the ones digit of the hour (token)
* MinuteNum10 Prim : Prim for the tens digit of the minute (token)
* MinuteNum1 Prim : Prim for the ones digit of the minute (token)
* AM Prim : Prim used to display "AM" (token)
* PM Prim : Prim used to display "PM" (token)
* Hour (int)
* Minute (int)
* Second (int)

The digital clock is controlled by showing/hiding the respective parts of the virtual LCD screen.

![GetDateTime_Digital_01.jpg](./images/GetDateTime_Digital_01.jpg)

"AM" and "PM" are one prim (mesh) each.
The hours and minutes each use a two-digit 7-segment display, and each digit consists of the prims (meshes) A, B, C, D, E, F, and G.

![GetDateTime_Digital_02.jpg](./images/GetDateTime_Digital_02.jpg)

Numerals from 0 to 9 are represented by showing/hiding these 7-segment components; a small sketch of the encoding is given at the end of this page.

The Hour, Minute, and Second inputs of the "Time Output To LCD" node are connected from the outputs of "Get DateTime".
Each prim input of the "Time Output To LCD" node is fed from the "Source Prim Path" of a Read Bundle node.

![GetDateTime_Digital_03.png](./images/GetDateTime_Digital_03.png)

The AM and PM prims and the four LED digit prims are connected.
This allows the digital clock to reflect the current time.
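The sketch below illustrates the segment encoding used by the node. The mask values mirror the numMaskList table in OutputToLCD.py; the helper function itself is only for illustration.

```python
# Each digit 0-9 maps to a bit mask over the segments A..G (bit order 0b0ABCDEFG).
# For example, 0x7e = 0b1111110 lights A,B,C,D,E,F, which draws "0".
SEGMENTS = ["A", "B", "C", "D", "E", "F", "G"]
DIGIT_MASKS = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]

def visible_segments(digit: int):
    """Return the names of the segment meshes to show for a digit."""
    mask = DIGIT_MASKS[digit % 10]
    return [name for i, name in enumerate(SEGMENTS) if mask & (0x40 >> i)]

print(visible_segments(2))   # ['A', 'B', 'D', 'E', 'G']
```

The node walks this table and sets each segment prim's visibility attribute to 'inherited' (shown) or 'invisible' (hidden).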
3,664
Markdown
36.783505
123
0.704694
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.0.1"

# Lists people or organizations that are considered the "authors" of the package.
authors = ["ft-lab"]

# The title and description fields are primarily for displaying extension info in UI
title = "OmniGraph : Get DateTime"
description="OmniGraph sample node. Get datetime."

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# URL of the extension source repository.
repository = ""

# One of categories for UI.
category = "Example"

# Keywords for the extension
keywords = ["kit", "example", "omnigraph"]

# Location of change log file in target (final) folder of extension, relative to the root. Can also be just a content
# of it instead of file path. More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"

# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.jpg"

# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"

# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["*Database.py","*/ogn*"]

# OmniGraph extensions this extension depends on:
[dependencies]
"omni.graph" = {}
"omni.graph.nodes" = {}
"omni.graph.tools" = {}

# Main python module this extension provides.
[[python.module]]
name = "ft_lab.OmniGraph.GetDateTime"
1,647
TOML
31.959999
118
0.734062
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/extension.py
import omni.ext import importlib import os from .ogn import * # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled # on_shutdown() is called. class SimpleNodeExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id): print("[ft_lab.OmniGraph.GetDateTime] startup") def on_shutdown(self): print("[ft_lab.OmniGraph.GetDateTime] shutdown")
712
Python
40.941174
119
0.738764
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/GetDateTimeDatabase.py
import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb

class GetDateTimeDatabase(og.Database):
    """Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.GetDateTime

    Class Members:
        node: Node being evaluated

    Attribute Value Properties:
        Inputs:
        Outputs:
            outputs.a1_year
            outputs.a2_month
            outputs.a3_day
            outputs.b1_hour
            outputs.b2_minute
            outputs.b3_second
    """

    # Omniverse Create 2022.3.3 (Kit.104)
    #GENERATOR_VERSION = (1, 17, 2)
    #TARGET_VERSION = (2, 65, 4)

    # Imprint the generator and target ABI versions in the file for JIT generation
    # USD Composer 2023.2.2 (Kit.105.1.2)
    GENERATOR_VERSION = (1, 41, 3)
    TARGET_VERSION = (2, 139, 12)

    # This is an internal object that provides per-class storage of a per-node data dictionary
    PER_NODE_DATA = {}

    INTERFACE = og.Database._get_interface([
        ('outputs:a1_year', 'int', 0, 'Year', 'output year', {ogn.MetadataKeys.DEFAULT: '2000'}, True, 0, False, ''),
        ('outputs:a2_month', 'int', 0, 'Month', 'output month', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:a3_day', 'int', 0, 'Day', 'output day', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:b1_hour', 'int', 0, 'Hour', 'output hour', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:b2_minute', 'int', 0, 'Minute', 'output minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
        ('outputs:b3_second', 'int', 0, 'Second', 'output second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
    ])

    # ----------------------------------------------------.
    # Processing Output Parameter.
    # ----------------------------------------------------.
    class ValuesForOutputs(og.DynamicAttributeAccess):
        LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_minute", "b3_second" }
        """Helper class that creates natural hierarchical access to output attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
            self._batchedWriteValues = { }

        @property
        def a1_year(self):
            value = self._batchedWriteValues.get(self._attributes.a1_year)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a1_year)
                return data_view.get()

        @a1_year.setter
        def a1_year(self, value):
            self._batchedWriteValues[self._attributes.a1_year] = value

        @property
        def a2_month(self):
            value = self._batchedWriteValues.get(self._attributes.a2_month)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a2_month)
                return data_view.get()

        @a2_month.setter
        def a2_month(self, value):
            self._batchedWriteValues[self._attributes.a2_month] = value

        @property
        def a3_day(self):
            value = self._batchedWriteValues.get(self._attributes.a3_day)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a3_day)
                return data_view.get()

        @a3_day.setter
        def a3_day(self, value):
            self._batchedWriteValues[self._attributes.a3_day] = value

        @property
        def b1_hour(self):
            value = self._batchedWriteValues.get(self._attributes.b1_hour)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.b1_hour)
                return data_view.get()

        @b1_hour.setter
        def b1_hour(self, value):
            self._batchedWriteValues[self._attributes.b1_hour] = value

        @property
        def b2_minute(self):
            value = self._batchedWriteValues.get(self._attributes.b2_minute)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.b2_minute)
                return data_view.get()

        @b2_minute.setter
        def b2_minute(self, value):
            self._batchedWriteValues[self._attributes.b2_minute] = value

        @property
        def b3_second(self):
            value = self._batchedWriteValues.get(self._attributes.b3_second)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.b3_second)
                return data_view.get()

        @b3_second.setter
        def b3_second(self, value):
            self._batchedWriteValues[self._attributes.b3_second] = value

        def __getattr__(self, item: str):
            if item in self.LOCAL_PROPERTY_NAMES:
                return object.__getattribute__(self, item)
            else:
                return super().__getattr__(item)

        def __setattr__(self, item: str, new_value):
            if item in self.LOCAL_PROPERTY_NAMES:
                object.__setattr__(self, item, new_value)
            else:
                super().__setattr__(item, new_value)

        def _commit(self):
            _og._commit_output_attributes_data(self._batchedWriteValues)
            self._batchedWriteValues = { }

    class ValuesForState(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to state attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)

    def __init__(self, node):
        super().__init__(node)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
        self.outputs = GetDateTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
        self.state = GetDateTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)

    # ----------------------------------------------------.
    # Class defining the ABI interface for the node type.
    # ----------------------------------------------------.
class abi: @staticmethod def get_node_type(): get_node_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'ft_lab.OmniGraph.GetDateTime.GetDateTime' @staticmethod def compute(context, node): def database_valid(): return True try: per_node_data = GetDateTimeDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = GetDateTimeDatabase(node) per_node_data['_db'] = db if not database_valid(): per_node_data['_db'] = None return False except: db = GetDateTimeDatabase(node) try: compute_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) with og.in_compute(): return GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.outputs._commit() return False @staticmethod def initialize(context, node): GetDateTimeDatabase._initialize_per_node_data(node) initialize_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) GetDateTimeDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False @staticmethod def initialize_type(node_type): initialize_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime") node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Get DateTime") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Get current date and time") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") # Set Icon(svg). icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}") icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.icon.svg" node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path) GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type) @staticmethod def on_connection_type_resolve(node): on_connection_type_resolve_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None) if callable(on_connection_type_resolve_function): on_connection_type_resolve_function(node) NODE_TYPE_CLASS = None @staticmethod def register(node_type_class): GetDateTimeDatabase.NODE_TYPE_CLASS = node_type_class og.register_node_type(GetDateTimeDatabase.abi, 1) @staticmethod def deregister(): og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.GetDateTime")
11,389
Python
42.473282
130
0.588375
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/OutputToLCDDatabase.py
import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb
from typing import Any

class OutputToLCDDatabase(og.Database):
    """Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.OutputToLCD

    Class Members:
        node: Node being evaluated

    Attribute Value Properties:
        Inputs:
            inputs.a1_hourNum10Prim
            inputs.a2_hourNum1Prim
            inputs.b1_minuteNum10Prim
            inputs.b2_minuteNum1Prim
            inputs.c1_amPrim
            inputs.c2_pmPrim
            inputs.d1_hour
            inputs.d2_minute
            inputs.d3_second
        Outputs:
    """

    # Omniverse Create 2022.3.3 (Kit.104)
    #GENERATOR_VERSION = (1, 17, 2)
    #TARGET_VERSION = (2, 65, 4)

    # Imprint the generator and target ABI versions in the file for JIT generation
    # USD Composer 2023.2.2 (Kit.105.1.2)
    GENERATOR_VERSION = (1, 41, 3)
    TARGET_VERSION = (2, 139, 12)

    # This is an internal object that provides per-class storage of a per-node data dictionary
    PER_NODE_DATA = {}

    INTERFACE = og.Database._get_interface([
        ('inputs:a1_hourNum10Prim', 'token', 0, 'HourNum10 Prim', 'HourNum10 Prim', {}, True, None, False, ''),
        ('inputs:a2_hourNum1Prim', 'token', 0, 'HourNum1 Prim', 'HourNum1 Prim', {}, True, None, False, ''),
        ('inputs:b1_minuteNum10Prim', 'token', 0, 'MinuteNum10 Prim', 'MinuteNum10 Prim', {}, True, None, False, ''),
        ('inputs:b2_minuteNum1Prim', 'token', 0, 'MinuteNum1 Prim', 'MinuteNum1 Prim', {}, True, None, False, ''),
        ('inputs:c1_amPrim', 'token', 0, 'AM Prim', 'AM Prim', {}, True, None, False, ''),
        ('inputs:c2_pmPrim', 'token', 0, 'PM Prim', 'PM Prim', {}, True, None, False, ''),
        ('inputs:d1_hour', 'int', 0, 'Hour', 'Hour', {}, True, 0, False, ''),
        ('inputs:d2_minute', 'int', 0, 'Minute', 'Minute', {}, True, 0, False, ''),
        ('inputs:d3_second', 'int', 0, 'Second', 'Second', {}, True, 0, False, ''),
    ])

    # ----------------------------------------------------.
    # Processing Input Parameters.
    # ----------------------------------------------------.
class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second] self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0] @property def a1_hourNum10Prim(self): return self._batchedReadValues[0] @a1_hourNum10Prim.setter def a1_hourNum10Prim(self, value): self._batchedReadValues[0] = value @property def a2_hourNum1Prim(self): return self._batchedReadValues[1] @a2_hourNum1Prim.setter def a2_hourNum1Prim(self, value): self._batchedReadValues[1] = value @property def b1_minuteNum10Prim(self): return self._batchedReadValues[2] @b1_minuteNum10Prim.setter def b1_minuteNum10Prim(self, value): self._batchedReadValues[2] = value @property def b2_minuteNum1Prim(self): return self._batchedReadValues[3] @b2_minuteNum1Prim.setter def b2_minuteNum1Prim(self, value): self._batchedReadValues[3] = value @property def c1_amPrim(self): return self._batchedReadValues[4] @c1_amPrim.setter def c1_amPrim(self, value): self._batchedReadValues[4] = value @property def c2_pmPrim(self): return self._batchedReadValues[5] @c2_pmPrim.setter def c2_pmPrim(self, value): self._batchedReadValues[5] = value @property def d1_hour(self): return self._batchedReadValues[6] @d1_hour.setter def d1_hour(self, value): self._batchedReadValues[6] = value @property def d2_minute(self): return self._batchedReadValues[7] @d2_minute.setter def d2_minute(self, value): self._batchedReadValues[7] = value @property def d3_second(self): return self._batchedReadValues[8] @d3_second.setter def d3_second(self, value): self._batchedReadValues[8] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OutputToLCDDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = 
self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OutputToLCDDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes) # ----------------------------------------------------. # Class defining the ABI interface for the node type. # ----------------------------------------------------. class abi: @staticmethod def get_node_type(): get_node_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'ft_lab.OmniGraph.GetDateTime.OutputToLCD' @staticmethod def compute(context, node): def database_valid(): return True try: per_node_data = OutputToLCDDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = OutputToLCDDatabase(node) per_node_data['_db'] = db if not database_valid(): per_node_data['_db'] = None return False except: db = OutputToLCDDatabase(node) try: compute_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) db.inputs._prefetch() db.inputs._setting_locked = True with og.in_compute(): return OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.inputs._setting_locked = False #db.outputs._commit() return False @staticmethod def initialize(context, node): OutputToLCDDatabase._initialize_per_node_data(node) initialize_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) OutputToLCDDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False @staticmethod def initialize_type(node_type): initialize_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime") node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Time output to LCD") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Time output to LCD") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") # Set Icon(svg). 
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}") icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.outputToLCD.svg" node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path) OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type) @staticmethod def on_connection_type_resolve(node): on_connection_type_resolve_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None) if callable(on_connection_type_resolve_function): on_connection_type_resolve_function(node) NODE_TYPE_CLASS = None @staticmethod def register(node_type_class): OutputToLCDDatabase.NODE_TYPE_CLASS = node_type_class og.register_node_type(OutputToLCDDatabase.abi, 1) @staticmethod def deregister(): og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.OutputToLCD")
11,682
Python
42.431227
322
0.598185
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/ogn/RotationByTimeDatabase.py
import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy import sys import traceback import carb class RotationByTimeDatabase(og.Database): """Helper class providing simplified access to data on nodes of type ft_lab.OmniGraph.GetDateTime.RotationByTime Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.a1_defaultRotateXYZ inputs.a2_rotationAxis inputs.b1_hour inputs.b2_minute inputs.b3_second Outputs: outputs.a1_hourRotateXYZ outputs.a2_minuteRotateXYZ outputs.a3_secondRotateXYZ """ # Omniverse Create 2022.3.3 (Kit.104) #GENERATOR_VERSION = (1, 17, 2) #TARGET_VERSION = (2, 65, 4) # Imprint the generator and target ABI versions in the file for JIT generation # USD Composer 2023.2.2 (Kit.105.1.2) GENERATOR_VERSION = (1, 41, 3) TARGET_VERSION = (2, 139, 12) # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} INTERFACE = og.Database._get_interface([ ('inputs:a1_defaultRotateXYZ', 'float[3]', 0, 'Default RotateXYZ', 'Default rotateXYZ', {}, True, None, False, ''), ('inputs:a2_rotationAxis', 'int', 0, 'Rotation Axis', 'Rotation axis (0:X, 1:Y, 2:Z)', {}, True, None, False, ''), ('inputs:b1_hour', 'int', 0, 'Hour', 'Hour', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:b2_minute', 'int', 0, 'Minute', 'Minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:b3_second', 'int', 0, 'Second', 'Second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('outputs:a1_hourRotateXYZ', 'float[3]', 0, 'Hour RotateXYZ', 'Hour RotateXYZ', {}, True, None, False, ''), ('outputs:a2_minuteRotateXYZ', 'float[3]', 0, 'Minute RotateXYZ', 'Minute RotateXYZ', {}, True, None, False, ''), ('outputs:a3_secondRotateXYZ', 'float[3]', 0, 'Second RotateXYZ', 'Second RotateXYZ', {}, True, None, False, ''), ]) # ----------------------------------------------------. # Processing Input Parameters. # ----------------------------------------------------. 
class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second] self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0] @property def a1_defaultRotateXYZ(self): return self._batchedReadValues[0] @a1_defaultRotateXYZ.setter def a1_defaultRotateXYZ(self, value): self._batchedReadValues[0] = value @property def a2_rotationAxis(self): return self._batchedReadValues[1] @a2_rotationAxis.setter def a2_rotationAxis(self, value): self._batchedReadValues[1] = value @property def b1_hour(self): return self._batchedReadValues[2] @b1_hour.setter def b1_hour(self, value): self._batchedReadValues[2] = value @property def b2_minute(self): return self._batchedReadValues[3] @b2_minute.setter def b2_minute(self, value): self._batchedReadValues[3] = value @property def b3_second(self): return self._batchedReadValues[4] @b3_second.setter def b3_second(self, value): self._batchedReadValues[4] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues # ----------------------------------------------------. # Processing Output Parameter. # ----------------------------------------------------. 
    class ValuesForOutputs(og.DynamicAttributeAccess):
        LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuteRotateXYZ", "a3_secondRotateXYZ" }
        """Helper class that creates natural hierarchical access to output attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
            self._batchedWriteValues = { }

        @property
        def a1_hourRotateXYZ(self):
            value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
                return data_view.get()

        @a1_hourRotateXYZ.setter
        def a1_hourRotateXYZ(self, value):
            self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value

        @property
        def a2_minuteRotateXYZ(self):
            value = self._batchedWriteValues.get(self._attributes.a2_minuteRotateXYZ)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a2_minuteRotateXYZ)
                return data_view.get()

        @a2_minuteRotateXYZ.setter
        def a2_minuteRotateXYZ(self, value):
            self._batchedWriteValues[self._attributes.a2_minuteRotateXYZ] = value

        @property
        def a3_secondRotateXYZ(self):
            value = self._batchedWriteValues.get(self._attributes.a3_secondRotateXYZ)
            if value:
                return value
            else:
                data_view = og.AttributeValueHelper(self._attributes.a3_secondRotateXYZ)
                return data_view.get()

        @a3_secondRotateXYZ.setter
        def a3_secondRotateXYZ(self, value):
            self._batchedWriteValues[self._attributes.a3_secondRotateXYZ] = value

        def __getattr__(self, item: str):
            if item in self.LOCAL_PROPERTY_NAMES:
                return object.__getattribute__(self, item)
            else:
                return super().__getattr__(item)

        def __setattr__(self, item: str, new_value):
            if item in self.LOCAL_PROPERTY_NAMES:
                object.__setattr__(self, item, new_value)
            else:
                super().__setattr__(item, new_value)

        def _commit(self):
            _og._commit_output_attributes_data(self._batchedWriteValues)
            self._batchedWriteValues = { }

    class ValuesForState(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to state attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)

    def __init__(self, node):
        super().__init__(node)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
        self.inputs = RotationByTimeDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
        self.outputs = RotationByTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
        self.state = RotationByTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)

    # ----------------------------------------------------.
    # Class defining the ABI interface for the node type.
    # ----------------------------------------------------.
class abi: @staticmethod def get_node_type(): get_node_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'ft_lab.OmniGraph.GetDateTime.RotationByTime' @staticmethod def compute(context, node): def database_valid(): return True try: per_node_data = RotationByTimeDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = RotationByTimeDatabase(node) per_node_data['_db'] = db if not database_valid(): per_node_data['_db'] = None return False except: db = RotationByTimeDatabase(node) try: compute_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) db.inputs._prefetch() db.inputs._setting_locked = True with og.in_compute(): return RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.inputs._setting_locked = False db.outputs._commit() return False @staticmethod def initialize(context, node): RotationByTimeDatabase._initialize_per_node_data(node) initialize_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) RotationByTimeDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False @staticmethod def initialize_type(node_type): initialize_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime") node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Rotation By Time") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Rotation By Time") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") # Set Icon(svg). icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}") icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.rotationByTimeIcon.svg" node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path) RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type) @staticmethod def on_connection_type_resolve(node): on_connection_type_resolve_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None) if callable(on_connection_type_resolve_function): on_connection_type_resolve_function(node) NODE_TYPE_CLASS = None @staticmethod def register(node_type_class): RotationByTimeDatabase.NODE_TYPE_CLASS = node_type_class og.register_node_type(RotationByTimeDatabase.abi, 1) @staticmethod def deregister(): og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.RotationByTime")
13,782
Python
44.488449
196
0.600929
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/GetDateTime.py
""" Get date time. """ import numpy as np import omni.ext import datetime class GetDateTime: @staticmethod def compute(db) -> bool: try: # Get current date and time. now = datetime.datetime.now() db.outputs.a1_year = now.year db.outputs.a2_month = now.month db.outputs.a3_day = now.day db.outputs.b1_hour = now.hour db.outputs.b2_minute = now.minute db.outputs.b3_second = now.second except TypeError as error: db.log_error(f"Processing failed : {error}") return False return True
650
Python
22.249999
56
0.550769
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/OutputToLCD.py
""" Time output to LCD (hh:mm). """ from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf import numpy as np import omni.ext class OutputToLCD: @staticmethod def compute(db) -> bool: try: hour = db.inputs.d1_hour minute = db.inputs.d2_minute second = db.inputs.d3_second # xABCDEFG => 0b01111110 = 0x7e = '0' nameList = ["A", "B", "C", "D", "E", "F", "G"] numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b] # Get stage. stage = omni.usd.get_context().get_stage() # Show/hide "AM" if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "": prim = stage.GetPrimAtPath(db.inputs.c1_amPrim) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible') # Show/hide "PM" if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "": prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible') # Hour : 10th digit. hour12 = hour if (hour < 12) else (hour - 12) if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "": basePrimPath = db.inputs.a1_hourNum10Prim shiftV = 0x40 maskV = numMaskList[(int)(hour12 / 10) % 10] for i in range(7): primPath = f"{basePrimPath}/{nameList[i]}" prim = stage.GetPrimAtPath(primPath) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible') shiftV >>= 1 # Hour : 1th digit. if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "": basePrimPath = db.inputs.a2_hourNum1Prim shiftV = 0x40 maskV = numMaskList[(int)(hour12) % 10] for i in range(7): primPath = f"{basePrimPath}/{nameList[i]}" prim = stage.GetPrimAtPath(primPath) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible') shiftV >>= 1 # Minute : 10th digit. if db.inputs.b1_minuteNum10Prim != None and db.inputs.b1_minuteNum10Prim != "": basePrimPath = db.inputs.b1_minuteNum10Prim shiftV = 0x40 maskV = numMaskList[(int)(minute / 10) % 10] for i in range(7): primPath = f"{basePrimPath}/{nameList[i]}" prim = stage.GetPrimAtPath(primPath) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible') shiftV >>= 1 # Minute : 1th digit. if db.inputs.b2_minuteNum1Prim != None and db.inputs.b2_minuteNum1Prim != "": basePrimPath = db.inputs.b2_minuteNum1Prim shiftV = 0x40 maskV = numMaskList[(int)(minute) % 10] for i in range(7): primPath = f"{basePrimPath}/{nameList[i]}" prim = stage.GetPrimAtPath(primPath) if prim.IsValid(): primImageable = UsdGeom.Imageable(prim) primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible') shiftV >>= 1 except TypeError as error: db.log_error(f"Processing failed : {error}") return False return True
4275
Python
42.632653
118
0.509474
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/ft_lab/OmniGraph/GetDateTime/nodes/RotationByTime.py
""" Rotation by time. """ import numpy as np import omni.ext class RotationByTime: @staticmethod def compute(db) -> bool: try: # Calculate clock rotation from seconds. if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2: v = db.outputs.a3_secondRotateXYZ v[0] = db.inputs.a1_defaultRotateXYZ[0] v[1] = db.inputs.a1_defaultRotateXYZ[1] v[2] = db.inputs.a1_defaultRotateXYZ[2] v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b3_second) / 60.0) * 360.0 # Calculate clock rotation from minutes. if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2: v = db.outputs.a2_minuteRotateXYZ v[0] = db.inputs.a1_defaultRotateXYZ[0] v[1] = db.inputs.a1_defaultRotateXYZ[1] v[2] = db.inputs.a1_defaultRotateXYZ[2] v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b2_minute * 60.0 + db.inputs.b3_second) / (60.0 * 60.0)) * 360.0 # Calculate clock rotation from hours. if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2: v = db.outputs.a1_hourRotateXYZ v[0] = db.inputs.a1_defaultRotateXYZ[0] v[1] = db.inputs.a1_defaultRotateXYZ[1] v[2] = db.inputs.a1_defaultRotateXYZ[2] v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b1_hour * 60.0 + db.inputs.b2_minute) / (60.0 * 24.0)) * 360.0 * 2.0 except TypeError as error: db.log_error(f"Processing failed : {error}") return False return True
1,705
Python
39.619047
134
0.559531
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/docs/CHANGELOG.md
# CHANGELOG

This document records all notable changes to the ``ft_lab.OmniGraph.GetDateTime`` extension.
104
Markdown
16.499997
88
0.778846
ft-lab/Omniverse_OmniGraph_ClockSample/extension/ft_lab.OmniGraph.GetDateTime/docs/README.md
# GetDateTime [ft_lab.OmniGraph.GetDateTime]

This sample uses OmniGraph to reflect the current time on analog and digital clocks created as 3D models.
This extension consists of three custom nodes.

## Get DateTime

Get the current local date and time.

### Output

* Year (int)
* Month (int)
* Day (int)
* Hour (int)
* Minute (int)
* Second (int)

## Rotation By Time

Given an hour, minute, and second, returns the XYZ of each rotation (degrees).
Used for the analog clock rotation.

### Input

* Default RotationXYZ : Default rotation value (float3)
* Rotation Axis : Rotation axis (0:X, 1:Y, 2:Z)
* Hour (int)
* Minute (int)
* Second (int)

### Output

* Hour RotateXYZ : Hour rotation value (float3)
* Minute RotateXYZ : Minute rotation value (float3)
* Second RotateXYZ : Second rotation value (float3)

## Time Output To LCD

This node controls a virtual 7-segment LED LCD screen.
It shows/hides the Prims specified in Input to display the digital clock.

### Input

* HourNum10 Prim : Specify the 10th digit Prim of hour (token)
* HourNum1 Prim : Specify the 1st digit Prim of hour (token)
* MinuteNum10 Prim : Specify the 10th digit Prim of minute (token)
* MinuteNum1 Prim : Specify the 1st digit Prim of minute (token)
* AM Prim : Specify the prim to display "AM" (token)
* PM Prim : Specify the prim to display "PM" (token)
* Hour (int)
* Minute (int)
* Second (int)
1,405
Markdown
24.563636
110
0.703203
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_GetDateTime.md
# GetDateTime

Get the current local date and time.

![GetDateTime_icon.png](./images/GetDateTime_icon.png)

## GetDateTime.ogn

```json
{
    "GetDateTime": {
        "version": 1,
        "categories": "examples",
        "description": "Get datetime node.",
        "language": "Python",
        "metadata": {
            "uiName": "Get DateTime"
        },
        "inputs": {
        },
        "outputs": {
            "a1_year": {
                "type": "int",
                "description": "year",
                "default": 2000,
                "metadata": {
                    "uiName": "Year"
                }
            },
            "a2_month": {
                "type": "int",
                "description": "month",
                "default": 1,
                "metadata": {
                    "uiName": "Month"
                }
            },
            "a3_day": {
                "type": "int",
                "description": "day",
                "default": 1,
                "metadata": {
                    "uiName": "Day"
                }
            },
            "b1_hour": {
                "type": "int",
                "description": "hour",
                "default": 1,
                "metadata": {
                    "uiName": "Hour"
                }
            },
            "b2_minute": {
                "type": "int",
                "description": "minute",
                "default": 1,
                "metadata": {
                    "uiName": "Minute"
                }
            },
            "b3_second": {
                "type": "int",
                "description": "second",
                "default": 1,
                "metadata": {
                    "uiName": "Second"
                }
            }
        }
    }
}
```

![GetDateTime_node.png](./images/GetDateTime_node.png)

No inputs are provided, as the node only outputs the current time.
The date and time are output as int values.

### Outputs

|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_year|int|Year|year|
|a2_month|int|Month|month|
|a3_day|int|Day|day|
|b1_hour|int|Hour|hour|
|b2_minute|int|Minute|minute|
|b3_second|int|Second|second|

The "a1_"/"b1_" prefixes at the beginning of the attribute names keep the data in the intended order when displayed in a graph. The UI sorts a node's inputs/outputs by attribute name as ASCII strings, so the prefixes control the order while the UI name provides the display name.

## GetDateTime.py

"GetDateTime.py" specifies what the node actually does.

```python
import numpy as np
import omni.ext
import datetime

class GetDateTime:
    @staticmethod
    def compute(db) -> bool:
        try:
            # Get current date and time.
            now = datetime.datetime.now()

            db.outputs.a1_year = now.year
            db.outputs.a2_month = now.month
            db.outputs.a3_day = now.day
            db.outputs.b1_hour = now.hour
            db.outputs.b2_minute = now.minute
            db.outputs.b3_second = now.second

        except TypeError as error:
            db.log_error(f"Processing failed : {error}")
            return False
        return True
```

Get the date and time and store them in the outputs.
Data is set to "db.outputs.[Attribute name]".
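
Because compute() only assigns attributes on db.outputs and (on failure) calls db.log_error, its body can be exercised outside of Omniverse with a throwaway stand-in object. A minimal sketch — the SimpleNamespace stand-in is a hypothetical test aid, not part of the extension:

```python
import datetime
from types import SimpleNamespace

# Hypothetical stand-in for the generated db object (test aid only).
db = SimpleNamespace(outputs=SimpleNamespace(), log_error=print)

# Same statements as the body of GetDateTime.compute().
now = datetime.datetime.now()
db.outputs.a1_year = now.year
db.outputs.b1_hour = now.hour
db.outputs.b2_minute = now.minute

print(db.outputs.a1_year, db.outputs.b1_hour, db.outputs.b2_minute)
```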
## GetDateTimeDatabase.py

This file registers the OmniGraph node with the Extension. Since this code is almost entirely boilerplate, it is expected that once created it will be reused. "GetDateTimeDatabase.py" contains the class "GetDateTimeDatabase(og.Database)".

```python
import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
import numpy
import sys
import traceback
import carb

class GetDateTimeDatabase(og.Database):
    PER_NODE_DATA = {}
    INTERFACE = og.Database._get_interface([
        ('outputs:a1_year', 'int', 0, 'Year', 'output year', {ogn.MetadataKeys.DEFAULT: '2000'}, True, 0, False, ''),
        ('outputs:a2_month', 'int', 0, 'Month', 'output month', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:a3_day', 'int', 0, 'Day', 'output day', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:b1_hour', 'int', 0, 'Hour', 'output hour', {ogn.MetadataKeys.DEFAULT: '1'}, True, 0, False, ''),
        ('outputs:b2_minute', 'int', 0, 'Minute', 'output minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
        ('outputs:b3_second', 'int', 0, 'Second', 'output second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
    ])
```

"INTERFACE" enumerates the attribute data. Each input/output tuple lists, in order:

* Attribute name
* Type (to allow more than one, separate them with a comma)
* Index of type: 0 for a single type, 1 for multiple types (apparently)
* Display name in UI
* Description
* Metadata
* Necessary or not (True, False)
* Default value
* Deprecated (True, False)
* Message when deprecated

The attribute name and type must match those specified in the ogn file.
For an OmniGraph node provided by an Extension, Omniverse seemed to refer to this description rather than the ogn file.
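
As a reading aid, here is the first tuple from INTERFACE above with each positional field labeled (an annotated copy of the same values, not additional code):

```python
import omni.graph.tools.ogn as ogn

example = ('outputs:a1_year',                    # attribute name
           'int',                                # type
           0,                                    # index of type (0: single type)
           'Year',                               # display name in UI
           'output year',                        # description
           {ogn.MetadataKeys.DEFAULT: '2000'},   # metadata
           True,                                 # necessary or not
           0,                                    # default value
           False,                                # deprecated
           '')                                   # message when deprecated
```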
### ValuesForOutputs

The outputs designation is described in the "ValuesForOutputs" class.

```python
class ValuesForOutputs(og.DynamicAttributeAccess):
    LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_minute", "b3_second" }
    """Helper class that creates natural hierarchical access to output attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
        self._batchedWriteValues = { }

    @property
    def a1_year(self):
        value = self._batchedWriteValues.get(self._attributes.a1_year)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a1_year)
            return data_view.get()

    @a1_year.setter
    def a1_year(self, value):
        self._batchedWriteValues[self._attributes.a1_year] = value

    @property
    def a2_month(self):
        value = self._batchedWriteValues.get(self._attributes.a2_month)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a2_month)
            return data_view.get()

    @a2_month.setter
    def a2_month(self, value):
        self._batchedWriteValues[self._attributes.a2_month] = value

    @property
    def a3_day(self):
        value = self._batchedWriteValues.get(self._attributes.a3_day)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a3_day)
            return data_view.get()

    @a3_day.setter
    def a3_day(self, value):
        self._batchedWriteValues[self._attributes.a3_day] = value

    @property
    def b1_hour(self):
        value = self._batchedWriteValues.get(self._attributes.b1_hour)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.b1_hour)
            return data_view.get()

    @b1_hour.setter
    def b1_hour(self, value):
        self._batchedWriteValues[self._attributes.b1_hour] = value

    @property
    def b2_minute(self):
        value = self._batchedWriteValues.get(self._attributes.b2_minute)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.b2_minute)
            return data_view.get()

    @b2_minute.setter
    def b2_minute(self, value):
        self._batchedWriteValues[self._attributes.b2_minute] = value

    @property
    def b3_second(self):
        value = self._batchedWriteValues.get(self._attributes.b3_second)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.b3_second)
            return data_view.get()

    @b3_second.setter
    def b3_second(self, value):
        self._batchedWriteValues[self._attributes.b3_second] = value

    def __getattr__(self, item: str):
        if item in self.LOCAL_PROPERTY_NAMES:
            return object.__getattribute__(self, item)
        else:
            return super().__getattr__(item)

    def __setattr__(self, item: str, new_value):
        if item in self.LOCAL_PROPERTY_NAMES:
            object.__setattr__(self, item, new_value)
        else:
            super().__setattr__(item, new_value)

    def _commit(self):
        _og._commit_output_attributes_data(self._batchedWriteValues)
        self._batchedWriteValues = { }
```

Specify the attribute names to be used, in order, in "LOCAL_PROPERTY_NAMES".

```python
LOCAL_PROPERTY_NAMES = { "a1_year", "a2_month", "a3_day", "b1_hour", "b2_minute", "b3_second" }
```

Specify a getter/setter for each attribute. If the attribute type is fixed, simply change the attribute name.

```python
@property
def a1_year(self):
    value = self._batchedWriteValues.get(self._attributes.a1_year)
    if value:
        return value
    else:
        data_view = og.AttributeValueHelper(self._attributes.a1_year)
        return data_view.get()

@a1_year.setter
def a1_year(self, value):
    self._batchedWriteValues[self._attributes.a1_year] = value
```

"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_commit" can be copied and pasted as is.
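
The batched-write pattern behind these properties is easier to see without the OmniGraph types. A framework-free sketch of the same idea (class and attribute names here are hypothetical):

```python
class BatchedOutputs:
    """Writes are staged in a dict and flushed in one _commit(), mirroring ValuesForOutputs."""
    def __init__(self, stored):
        self._stored = stored    # stands in for the storage behind og.AttributeValueHelper
        self._batched = {}       # stands in for _batchedWriteValues

    @property
    def a1_year(self):
        # Prefer a pending (uncommitted) write; fall back to the stored value.
        return self._batched.get("a1_year", self._stored.get("a1_year"))

    @a1_year.setter
    def a1_year(self, value):
        self._batched["a1_year"] = value

    def _commit(self):
        # One bulk write, like _og._commit_output_attributes_data().
        self._stored.update(self._batched)
        self._batched = {}

outs = BatchedOutputs({"a1_year": 2000})
outs.a1_year = 2024   # staged only
outs._commit()        # flushed to storage
print(outs._stored["a1_year"])   # 2024
```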
### ValuesForState(og.DynamicAttributeAccess)

The ValuesForState class of "GetDateTimeDatabase" can be used by simply specifying the target class name and copying and pasting.

```python
class ValuesForState(og.DynamicAttributeAccess):
    """Helper class that creates natural hierarchical access to state attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
```

### \_\_init\_\_

In "\_\_init\_\_", the outputs and state classes are created.

```python
def __init__(self, node):
    super().__init__(node)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
    self.outputs = GetDateTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
    self.state = GetDateTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```

There are no inputs in this GetDateTimeDatabase class, so they are not mentioned.

### class abi

Defines the connections for the OmniGraph node via the ABI interface. The ABI methods are essentially boilerplate.

```python
class abi:
    @staticmethod
    def get_node_type():
        get_node_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
        if callable(get_node_type_function):
            return get_node_type_function()
        return 'ft_lab.OmniGraph.GetDateTime.GetDateTime'
```

Since the name of this Extension is "ft_lab.OmniGraph.GetDateTime" and the node is "GetDateTime", "ft_lab.OmniGraph.GetDateTime.GetDateTime" is specified as the return value.

The compute method is called when this node is executed. It, too, is almost entirely boilerplate.

```python
@staticmethod
def compute(context, node):
    try:
        per_node_data = GetDateTimeDatabase.PER_NODE_DATA[node.node_id()]
        db = per_node_data.get('_db')
        if db is None:
            db = GetDateTimeDatabase(node)
            per_node_data['_db'] = db
    except:
        db = GetDateTimeDatabase(node)

    try:
        compute_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
        if callable(compute_function) and compute_function.__code__.co_argcount > 1:
            return compute_function(context, node)

        with og.in_compute():
            return GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db)

    except Exception as error:
        stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
        db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
    finally:
        db.outputs._commit()
    return False
```

The compute method of GetDateTime.py is called from "GetDateTimeDatabase.NODE_TYPE_CLASS.compute(db)".

initialize, release, and update_node_version are listed as they are, just matching the class names. This is also boilerplate.

```python
@staticmethod
def initialize(context, node):
    GetDateTimeDatabase._initialize_per_node_data(node)
    initialize_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
    if callable(initialize_function):
        initialize_function(context, node)

@staticmethod
def release(node):
    release_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'release', None)
    if callable(release_function):
        release_function(node)
    GetDateTimeDatabase._release_per_node_data(node)

@staticmethod
def update_node_version(context, node, old_version, new_version):
    update_node_version_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
    if callable(update_node_version_function):
        return update_node_version_function(context, node, old_version, new_version)
    return False
```

The initialize_type method specifies information about the OmniGraph node.

```python
@staticmethod
def initialize_type(node_type):
    initialize_type_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
    needs_initializing = True
    if callable(initialize_type_function):
        needs_initializing = initialize_type_function(node_type)
    if needs_initializing:
        node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
        node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Get DateTime")
        node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
        node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Get current date and time")
        node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")

        # Set Icon(svg).
        icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
        icon_path = icon_path + '/' + "data/icons/icon.svg"
        node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)

        GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type)
```

The information is set as metadata by using "node_type.set_metadata".

|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of node|Get DateTime|
|ogn.MetadataKeys.CATEGORIES|Categories name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Get current date and time|
|ogn.MetadataKeys.LANGUAGE|Language used|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/icon.svg|

See below for available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html

The icon path is obtained from the Extension path as follows, and then "data/icons/icon.svg" is appended.

```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/icon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```

Finally, register the "node_type" to which the metadata is assigned.

```python
GetDateTimeDatabase.INTERFACE.add_to_node_type(node_type)
```

The on_connection_type_resolve method is boilerplate.

```python
@staticmethod
def on_connection_type_resolve(node):
    on_connection_type_resolve_function = getattr(GetDateTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
    if callable(on_connection_type_resolve_function):
        on_connection_type_resolve_function(node)
```

### Specify version

After the abi class, add the following lines as-is (values for USD Composer 2023.2.2, Kit 105.1.2).

```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```

These values seemed to need updating when the Kit version was upgraded; otherwise problems occurred, such as icons not being displayed.

### register method

The register method is boilerplate.

```python
@staticmethod
def register(node_type_class):
    GetDateTimeDatabase.NODE_TYPE_CLASS = node_type_class
    og.register_node_type(GetDateTimeDatabase.abi, 1)
```

### deregister method

The deregister method specifies "[Extension name].[class name of this node]".

```python
@staticmethod
def deregister():
    og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.GetDateTime")
```
18152
Markdown
34.734252
177
0.613431
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_RotationByTime.md
# RotationByTime

Given an hour, minute, and second, returns the XYZ of each rotation (degrees).

![rotationByTime_icon.png](./images/rotationByTime_icon.png)

## RotationByTime.ogn

```json
{
    "RotationByTime": {
        "version": 1,
        "categories": "examples",
        "description": "Rotation mechanism by time.",
        "language": "Python",
        "metadata": {
            "uiName": "Rotation By Time"
        },
        "inputs": {
            "a1_defaultRotateXYZ": {
                "type": "float[3]",
                "description": "Default rotateXYZ",
                "default": [0.0, 0.0, 0.0],
                "metadata": {
                    "uiName": "Default rotateXYZ"
                }
            },
            "a2_rotationAxis": {
                "type": "int",
                "description": "Rotation axis (0:X, 1:Y, 2:Z)",
                "default": 0,
                "metadata": {
                    "uiName": "Rotation axis"
                }
            },
            "b1_hour": {
                "type": "int",
                "description": "Hour",
                "default": 0,
                "metadata": {
                    "uiName": "Hour"
                }
            },
            "b2_minute": {
                "type": "int",
                "description": "Minute",
                "default": 0,
                "metadata": {
                    "uiName": "Minute"
                }
            },
            "b3_second": {
                "type": "int",
                "description": "Second",
                "default": 0,
                "metadata": {
                    "uiName": "Second"
                }
            }
        },
        "outputs": {
            "a1_hourRotateXYZ": {
                "type": "float[3]",
                "description": "Hour rotateXYZ",
                "default": [0.0, 0.0, 0.0],
                "metadata": {
                    "uiName": "Hour RotateXYZ"
                }
            },
            "a2_minuteRotateXYZ": {
                "type": "float[3]",
                "description": "Minute rotateXYZ",
                "default": [0.0, 0.0, 0.0],
                "metadata": {
                    "uiName": "Minute RotateXYZ"
                }
            },
            "a3_secondRotateXYZ": {
                "type": "float[3]",
                "description": "Second rotateXYZ",
                "default": [0.0, 0.0, 0.0],
                "metadata": {
                    "uiName": "Second RotateXYZ"
                }
            }
        }
    }
}
```

![RotationByTime_node.png](./images/RotationByTime_node.png)

### Inputs

|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_defaultRotateXYZ|float3|Default rotateXYZ|Default rotateXYZ|
|a2_rotationAxis|int|Rotation axis|Rotation axis (0:X, 1:Y, 2:Z)|
|b1_hour|int|Hour|Hour|
|b2_minute|int|Minute|Minute|
|b3_second|int|Second|Second|

The "a1_"/"b1_" prefixes at the beginning of the attribute names keep the data in the intended order when displayed in a graph.

"a1_defaultRotateXYZ" is the initial rotation value of the clock hands provided in the 3D model.

![RotationByTime_img_01.jpg](./images/RotationByTime_img_01.jpg)

"a2_rotationAxis" is the axis of rotation (0:X, 1:Y, 2:Z). In the case of the image above, the hands rotate around the Y axis, so specify 1.

b1_hour, b2_minute, and b3_second are the hour, minute, and second inputs.

### Outputs

|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_hourRotateXYZ|float3|Hour rotateXYZ|Hour rotateXYZ|
|a2_minuteRotateXYZ|float3|Minute rotateXYZ|Minute rotateXYZ|
|a3_secondRotateXYZ|float3|Second rotateXYZ|Second rotateXYZ|

Returns the rotation values of an analog clock corresponding to the input hour, minute, and second.
The rotation XYZ returned here is assigned to the rotation of the clock hands in the 3D model.
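
To make the formulas in RotationByTime.py below concrete, here is a standalone check for 3:30:00. The expressions are the same ones the node uses; note the trailing * 2.0 in the hour formula, which maps the 24-hour input onto a 12-hour dial:

```python
hour, minute, second = 3, 30, 0

second_deg = (second / 60.0) * 360.0
minute_deg = ((minute * 60.0 + second) / (60.0 * 60.0)) * 360.0
hour_deg = ((hour * 60.0 + minute) / (60.0 * 24.0)) * 360.0 * 2.0

print(hour_deg, minute_deg, second_deg)   # 105.0 180.0 0.0
```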
## RotationByTime.py

The rotation of the hands of a clock is calculated.

```python
import numpy as np
import omni.ext

class RotationByTime:
    @staticmethod
    def compute(db) -> bool:
        try:
            # Calculate clock rotation from seconds.
            if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
                v = db.outputs.a3_secondRotateXYZ
                v[0] = db.inputs.a1_defaultRotateXYZ[0]
                v[1] = db.inputs.a1_defaultRotateXYZ[1]
                v[2] = db.inputs.a1_defaultRotateXYZ[2]
                v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b3_second) / 60.0) * 360.0

            # Calculate clock rotation from minutes.
            if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
                v = db.outputs.a2_minuteRotateXYZ
                v[0] = db.inputs.a1_defaultRotateXYZ[0]
                v[1] = db.inputs.a1_defaultRotateXYZ[1]
                v[2] = db.inputs.a1_defaultRotateXYZ[2]
                v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b2_minute * 60.0 + db.inputs.b3_second) / (60.0 * 60.0)) * 360.0

            # Calculate clock rotation from hours.
            if db.inputs.a2_rotationAxis >= 0 and db.inputs.a2_rotationAxis <= 2:
                v = db.outputs.a1_hourRotateXYZ
                v[0] = db.inputs.a1_defaultRotateXYZ[0]
                v[1] = db.inputs.a1_defaultRotateXYZ[1]
                v[2] = db.inputs.a1_defaultRotateXYZ[2]
                v[db.inputs.a2_rotationAxis] = ((float)(db.inputs.b1_hour * 60.0 + db.inputs.b2_minute) / (60.0 * 24.0)) * 360.0 * 2.0

        except TypeError as error:
            db.log_error(f"Processing failed : {error}")
            return False
        return True
```

## RotationByTimeDatabase.py

For the most part, the process is the same as in "[GetDateTimeDatabase.py](./node_GetDateTime.md)".

"INTERFACE" enumerates the attribute data.

```python
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
    ('inputs:a1_defaultRotateXYZ', 'float[3]', 0, 'Default RotateXYZ', 'Default rotateXYZ', {}, True, None, False, ''),
    ('inputs:a2_rotationAxis', 'int', 0, 'Rotation Axis', 'Rotation axis (0:X, 1:Y, 2:Z)', {}, True, None, False, ''),
    ('inputs:b1_hour', 'int', 0, 'Hour', 'Hour', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
    ('inputs:b2_minute', 'int', 0, 'Minute', 'Minute', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
    ('inputs:b3_second', 'int', 0, 'Second', 'Second', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''),
    ('outputs:a1_hourRotateXYZ', 'float[3]', 0, 'Hour RotateXYZ', 'Hour RotateXYZ', {}, True, None, False, ''),
    ('outputs:a2_minuteRotateXYZ', 'float[3]', 0, 'Minute RotateXYZ', 'Minute RotateXYZ', {}, True, None, False, ''),
    ('outputs:a3_secondRotateXYZ', 'float[3]', 0, 'Second RotateXYZ', 'Second RotateXYZ', {}, True, None, False, ''),
])
```

"RotationByTimeDatabase.py" specifies both inputs and outputs.
Note that the attribute type specified as "float3" in the ogn file becomes "float[3]" here.

### ValuesForInputs

The inputs designation is described in the "ValuesForInputs" class.
```python
class ValuesForInputs(og.DynamicAttributeAccess):
    LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"}
    """Helper class that creates natural hierarchical access to input attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
        self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second]
        self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0]

    @property
    def a1_defaultRotateXYZ(self):
        return self._batchedReadValues[0]

    @a1_defaultRotateXYZ.setter
    def a1_defaultRotateXYZ(self, value):
        self._batchedReadValues[0] = value

    @property
    def a2_rotationAxis(self):
        return self._batchedReadValues[1]

    @a2_rotationAxis.setter
    def a2_rotationAxis(self, value):
        self._batchedReadValues[1] = value

    @property
    def b1_hour(self):
        return self._batchedReadValues[2]

    @b1_hour.setter
    def b1_hour(self, value):
        self._batchedReadValues[2] = value

    @property
    def b2_minute(self):
        return self._batchedReadValues[3]

    @b2_minute.setter
    def b2_minute(self, value):
        self._batchedReadValues[3] = value

    @property
    def b3_second(self):
        return self._batchedReadValues[4]

    @b3_second.setter
    def b3_second(self, value):
        self._batchedReadValues[4] = value

    def __getattr__(self, item: str):
        if item in self.LOCAL_PROPERTY_NAMES:
            return object.__getattribute__(self, item)
        else:
            return super().__getattr__(item)

    def __setattr__(self, item: str, new_value):
        if item in self.LOCAL_PROPERTY_NAMES:
            object.__setattr__(self, item, new_value)
        else:
            super().__setattr__(item, new_value)

    def _prefetch(self):
        readAttributes = self._batchedReadAttributes
        newValues = _og._prefetch_input_attributes_data(readAttributes)
        if len(readAttributes) == len(newValues):
            self._batchedReadValues = newValues
```

Specify the attribute names to be used, in order, in "LOCAL_PROPERTY_NAMES".

```python
LOCAL_PROPERTY_NAMES = {"a1_defaultRotateXYZ", "a2_rotationAxis", "b1_hour", "b2_minute", "b3_second"}
```

In "\_\_init\_\_", specify "self._attributes.[Attribute name]" as an array.

```python
self._batchedReadAttributes = [self._attributes.a1_defaultRotateXYZ, self._attributes.a2_rotationAxis, self._attributes.b1_hour, self._attributes.b2_minute, self._attributes.b3_second]
```

Also, put initial values in self._batchedReadValues.

```python
self._batchedReadValues = [[0.0, 0.0, 0.0], 0, 0, 0, 0]
```

"a1_defaultRotateXYZ" is a float[3] value; all other values are of type int.

The property getter/setter is specified as follows. If the attribute type is fixed, simply change the attribute name.

```python
@property
def a1_defaultRotateXYZ(self):
    return self._batchedReadValues[0]

@a1_defaultRotateXYZ.setter
def a1_defaultRotateXYZ(self, value):
    self._batchedReadValues[0] = value
```

The index into "self.\_batchedReadValues" is the 0-based position of the attribute in "self.\_batchedReadAttributes[]".

"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_prefetch" can be copied and pasted as is.

### ValuesForOutputs

The outputs designation is described in the "ValuesForOutputs" class.
```python
class ValuesForOutputs(og.DynamicAttributeAccess):
    LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuteRotateXYZ", "a3_secondRotateXYZ" }
    """Helper class that creates natural hierarchical access to output attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
        self._batchedWriteValues = { }

    @property
    def a1_hourRotateXYZ(self):
        value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
            return data_view.get()

    @a1_hourRotateXYZ.setter
    def a1_hourRotateXYZ(self, value):
        self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value

    @property
    def a2_minuteRotateXYZ(self):
        value = self._batchedWriteValues.get(self._attributes.a2_minuteRotateXYZ)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a2_minuteRotateXYZ)
            return data_view.get()

    @a2_minuteRotateXYZ.setter
    def a2_minuteRotateXYZ(self, value):
        self._batchedWriteValues[self._attributes.a2_minuteRotateXYZ] = value

    @property
    def a3_secondRotateXYZ(self):
        value = self._batchedWriteValues.get(self._attributes.a3_secondRotateXYZ)
        if value:
            return value
        else:
            data_view = og.AttributeValueHelper(self._attributes.a3_secondRotateXYZ)
            return data_view.get()

    @a3_secondRotateXYZ.setter
    def a3_secondRotateXYZ(self, value):
        self._batchedWriteValues[self._attributes.a3_secondRotateXYZ] = value

    def __getattr__(self, item: str):
        if item in self.LOCAL_PROPERTY_NAMES:
            return object.__getattribute__(self, item)
        else:
            return super().__getattr__(item)

    def __setattr__(self, item: str, new_value):
        if item in self.LOCAL_PROPERTY_NAMES:
            object.__setattr__(self, item, new_value)
        else:
            super().__setattr__(item, new_value)

    def _commit(self):
        _og._commit_output_attributes_data(self._batchedWriteValues)
        self._batchedWriteValues = { }
```

Specify the attribute names to be used, in order, in "LOCAL_PROPERTY_NAMES".

```python
LOCAL_PROPERTY_NAMES = { "a1_hourRotateXYZ", "a2_minuteRotateXYZ", "a3_secondRotateXYZ" }
```

Specify a getter/setter for each attribute. If the attribute type is fixed, simply change the attribute name.

```python
@property
def a1_hourRotateXYZ(self):
    value = self._batchedWriteValues.get(self._attributes.a1_hourRotateXYZ)
    if value:
        return value
    else:
        data_view = og.AttributeValueHelper(self._attributes.a1_hourRotateXYZ)
        return data_view.get()

@a1_hourRotateXYZ.setter
def a1_hourRotateXYZ(self, value):
    self._batchedWriteValues[self._attributes.a1_hourRotateXYZ] = value
```

"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_commit" can be copied and pasted as is.

### ValuesForState(og.DynamicAttributeAccess)

The ValuesForState class of "RotationByTimeDatabase" can be used by simply specifying the target class name and copying and pasting.

```python
class ValuesForState(og.DynamicAttributeAccess):
    """Helper class that creates natural hierarchical access to state attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
```

### \_\_init\_\_

In "\_\_init\_\_", the inputs, outputs and state classes are created.
```python
def __init__(self, node):
    super().__init__(node)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
    self.inputs = RotationByTimeDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
    self.outputs = RotationByTimeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
    self.state = RotationByTimeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```

### class abi

Defines the connections for the OmniGraph node via the ABI interface. The ABI methods are essentially boilerplate.

```python
class abi:
    @staticmethod
    def get_node_type():
        get_node_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
        if callable(get_node_type_function):
            return get_node_type_function()
        return 'ft_lab.OmniGraph.GetDateTime.RotationByTime'
```

Since the name of this Extension is "ft_lab.OmniGraph.GetDateTime" and the node is "RotationByTime", "ft_lab.OmniGraph.GetDateTime.RotationByTime" is specified as the return value.

The compute method is called when this node is executed. It, too, is almost entirely boilerplate.

```python
@staticmethod
def compute(context, node):
    try:
        per_node_data = RotationByTimeDatabase.PER_NODE_DATA[node.node_id()]
        db = per_node_data.get('_db')
        if db is None:
            db = RotationByTimeDatabase(node)
            per_node_data['_db'] = db
    except:
        db = RotationByTimeDatabase(node)

    try:
        compute_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'compute', None)
        if callable(compute_function) and compute_function.__code__.co_argcount > 1:
            return compute_function(context, node)

        db.inputs._prefetch()
        db.inputs._setting_locked = True
        with og.in_compute():
            return RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db)

    except Exception as error:
        stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
        db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
    finally:
        db.inputs._setting_locked = False
        db.outputs._commit()
    return False
```

The compute method of RotationByTime.py is called from "RotationByTimeDatabase.NODE_TYPE_CLASS.compute(db)".

initialize, release, and update_node_version are listed as they are, just matching the class names. This is also boilerplate.

```python
@staticmethod
def initialize(context, node):
    RotationByTimeDatabase._initialize_per_node_data(node)
    initialize_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize', None)
    if callable(initialize_function):
        initialize_function(context, node)

@staticmethod
def release(node):
    release_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'release', None)
    if callable(release_function):
        release_function(node)
    RotationByTimeDatabase._release_per_node_data(node)

@staticmethod
def update_node_version(context, node, old_version, new_version):
    update_node_version_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
    if callable(update_node_version_function):
        return update_node_version_function(context, node, old_version, new_version)
    return False
```

The initialize_type method specifies information about the OmniGraph node.
```python
@staticmethod
def initialize_type(node_type):
    initialize_type_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
    needs_initializing = True
    if callable(initialize_type_function):
        needs_initializing = initialize_type_function(node_type)
    if needs_initializing:
        node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime")
        node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Rotation By Time")
        node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples")
        node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Rotation By Time")
        node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python")

        # Set Icon(svg).
        icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
        icon_path = icon_path + '/' + "data/icons/rotationByTimeIcon.svg"
        node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)

        RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type)
```

The information is set as metadata by using "node_type.set_metadata".

|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of node|Rotation By Time|
|ogn.MetadataKeys.CATEGORIES|Categories name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Rotation By Time|
|ogn.MetadataKeys.LANGUAGE|Language used|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/rotationByTimeIcon.svg|

See below for available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html

The icon path is obtained from the Extension path as follows, and then "data/icons/rotationByTimeIcon.svg" is appended.

```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/rotationByTimeIcon.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```

Finally, register the "node_type" to which the metadata is assigned.

```python
RotationByTimeDatabase.INTERFACE.add_to_node_type(node_type)
```

The on_connection_type_resolve method is boilerplate.

```python
@staticmethod
def on_connection_type_resolve(node):
    on_connection_type_resolve_function = getattr(RotationByTimeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
    if callable(on_connection_type_resolve_function):
        on_connection_type_resolve_function(node)
```

### Specify version

After the abi class, add the following lines as-is (values for USD Composer 2023.2.2, Kit 105.1.2).

```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```

These values seemed to need updating when the Kit version was upgraded; otherwise problems occurred, such as icons not being displayed.

### register method

The register method is boilerplate.

```python
@staticmethod
def register(node_type_class):
    RotationByTimeDatabase.NODE_TYPE_CLASS = node_type_class
    og.register_node_type(RotationByTimeDatabase.abi, 1)
```

### deregister method

The deregister method specifies "[Extension name].[class name of this node]".

```python
@staticmethod
def deregister():
    og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.RotationByTime")
```
23193
Markdown
37.785953
196
0.617341
ft-lab/Omniverse_OmniGraph_ClockSample/docs/Modeling3D.md
# 3D models

I modeled the analog and digital clocks in Blender.
I used the Blender 3.6 alpha USD branch, which can be launched from the Omniverse Launcher, because I wanted Blender to export correct USD.

I exported the modeled shapes from Blender in fbx format and textured them in Substance 3D Painter.
I also imported the Blender-exported USD files into Omniverse Create to edit the hierarchy and reassign materials.

## Clock

![blender_clock_01.jpg](./images/blender_clock_01.jpg)

Analog clocks use hour, minute, and second hands.
To organize this part, I imported the model once into Omniverse Create and cleaned it up there.

![omniverse_clock_01.jpg](./images/omniverse_clock_01.jpg)

The final usd file is placed at "[usds/Clock](../usds/Clock)".
Check which Prims correspond to the hour, minute, and second hands.

## Digital Clock

![blender_digital_clock_01.jpg](./images/blender_digital_clock_01.jpg)

For digital clocks, note the AM/PM indicators and the 7-segment LEDs on the LCD.
The display works by showing/hiding each of them.
AM/PM is a quadrangle mesh whose material uses a texture with Opacity.

To organize this, I imported it into Omniverse Create and edited it.

![omniverse_degital_clock_01.jpg](./images/omniverse_degital_clock_01.jpg)

The digits are "SevenSegmentLED1", "SevenSegmentLED2", "SevenSegmentLED3", and "SevenSegmentLED4", each with meshes for parts A through G as children.
The meshes for the characters on this LCD are offset slightly along the normal direction.

The final usd file is placed at "[usds/ClockDigital](../usds/ClockDigital)".
1643
Markdown
42.263157
129
0.725502
ft-lab/Omniverse_OmniGraph_ClockSample/docs/node_OutputToLCD.md
# OutputToLCD

This node controls a virtual 7-segment LED LCD screen.

![outputToLCD_icon.png](./images/outputToLCD_icon.png)

## OutputToLCD.ogn

```json
{
    "OutputToLCD": {
        "version": 1,
        "categories": "examples",
        "description": "Time output to LCD (hh:mm).",
        "language": "Python",
        "metadata": {
            "uiName": "Time output to LCD (hh:mm)"
        },
        "inputs": {
            "a1_hourNum10Prim": {
                "type": "token",
                "description": "Tenth digit of the hour Prim",
                "metadata": {
                    "uiName": "HourNum10 Prim"
                }
            },
            "a2_hourNum1Prim": {
                "type": "token",
                "description": "First digit of the hour Prim",
                "metadata": {
                    "uiName": "HourNum1 Prim"
                }
            },
            "b1_minuteNum10Prim": {
                "type": "token",
                "description": "Tenth digit of the minute Prim",
                "metadata": {
                    "uiName": "MinuteNum10 Prim"
                }
            },
            "b2_minuteNum1Prim": {
                "type": "token",
                "description": "First digit of the minute Prim",
                "metadata": {
                    "uiName": "MinuteNum1 Prim"
                }
            },
            "c1_amPrim": {
                "type": "token",
                "description": "AM Prim",
                "metadata": {
                    "uiName": "AM Prim"
                }
            },
            "c2_pmPrim": {
                "type": "token",
                "description": "PM Prim",
                "metadata": {
                    "uiName": "PM Prim"
                }
            },
            "d1_hour": {
                "type": "int",
                "description": "Hour",
                "default": 0,
                "metadata": {
                    "uiName": "Hour"
                }
            },
            "d2_minute": {
                "type": "int",
                "description": "Minute",
                "default": 0,
                "metadata": {
                    "uiName": "Minute"
                }
            },
            "d3_second": {
                "type": "int",
                "description": "Second",
                "default": 0,
                "metadata": {
                    "uiName": "Second"
                }
            }
        },
        "outputs": {
        }
    }
}
```

![OutputToLCD_node.png](./images/OutputToLCD_node.png)

### Inputs

|Attribute name|Type|UI name|Description|
|---|---|---|---|
|a1_hourNum10Prim|token|HourNum10 Prim|Tenth digit of the hour Prim|
|a2_hourNum1Prim|token|HourNum1 Prim|First digit of the hour Prim|
|b1_minuteNum10Prim|token|MinuteNum10 Prim|Tenth digit of the minute Prim|
|b2_minuteNum1Prim|token|MinuteNum1 Prim|First digit of the minute Prim|
|c1_amPrim|token|AM Prim|AM Prim|
|c2_pmPrim|token|PM Prim|PM Prim|
|d1_hour|int|Hour|Hour|
|d2_minute|int|Minute|Minute|
|d3_second|int|Second|Second|

The "a1_"/"b1_" prefixes at the beginning of the attribute names keep the data in the intended order when displayed in a graph.

The attributes of "token" type are connected to Prim paths. In total, 6 Prims will be connected to this node.

![GetDateTime_Digital_01.jpg](../images/GetDateTime_Digital_01.jpg)

Four Prims that imitate "7-segment LEDs" are placed as the numerical components.
Each "7-segment LED" consists of seven components, A, B, C, D, E, F, and G, as shown below.

![GetDateTime_Digital_02.jpg](../images/GetDateTime_Digital_02.jpg)

The same names A, B, C, D, E, F, and G are given to the child Prims.
These are turned on/off to display a numeral.

The numerals are expressed in 8 bits as follows; the lower 7 bits are assigned to A through G respectively.

|Image|Bit value|Hexadecimal|
|---|---|---|
|<img src="./images/num_0.jpg" height=40 />|01111110|0x7e|
|<img src="./images/num_1.jpg" height=40 />|00110000|0x30|
|<img src="./images/num_2.jpg" height=40 />|01101101|0x6d|
|<img src="./images/num_3.jpg" height=40 />|01111001|0x79|
|<img src="./images/num_4.jpg" height=40 />|00110011|0x33|
|<img src="./images/num_5.jpg" height=40 />|01011011|0x5b|
|<img src="./images/num_6.jpg" height=40 />|01011111|0x5f|
|<img src="./images/num_7.jpg" height=40 />|01110000|0x70|
|<img src="./images/num_8.jpg" height=40 />|01111111|0x7f|
|<img src="./images/num_9.jpg" height=40 />|01111011|0x7b|

d1_hour, d2_minute, and d3_second are the hour, minute, and second inputs.
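
The mask table translates directly into code. A small standalone sketch (the helper name is hypothetical; masks and segment order are the same as in the node):

```python
# Segment order matches the node: bit 6 = A ... bit 0 = G.
NAMES = ["A", "B", "C", "D", "E", "F", "G"]
MASKS = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]

def lit_segments(digit: int) -> list:
    """Return the segment names that should be visible for a digit 0-9."""
    mask = MASKS[digit % 10]
    return [name for i, name in enumerate(NAMES) if mask & (0x40 >> i)]

print(lit_segments(2))   # ['A', 'B', 'D', 'E', 'G']
```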
## OutputToLCD.py

Controls show/hide for the AM/PM panels and for the two-digit 7-segment LEDs of the hour and minute.

```python
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf
import numpy as np
import omni.ext
import omni.usd

class OutputToLCD:
    @staticmethod
    def compute(db) -> bool:
        try:
            hour = db.inputs.d1_hour
            minute = db.inputs.d2_minute
            second = db.inputs.d3_second

            # xABCDEFG => 0b01111110 = 0x7e = '0'
            nameList = ["A", "B", "C", "D", "E", "F", "G"]
            numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]

            # Get stage.
            stage = omni.usd.get_context().get_stage()

            # Show/hide "AM"
            if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "":
                prim = stage.GetPrimAtPath(db.inputs.c1_amPrim)
                if prim.IsValid():
                    primImageable = UsdGeom.Imageable(prim)
                    primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible')

            # Show/hide "PM"
            if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "":
                prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim)
                if prim.IsValid():
                    primImageable = UsdGeom.Imageable(prim)
                    primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible')

            # Hour : 10th digit.
            hour12 = hour if (hour < 12) else (hour - 12)
            if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "":
                basePrimPath = db.inputs.a1_hourNum10Prim
                shiftV = 0x40
                maskV = numMaskList[(int)(hour12 / 10) % 10]
                for i in range(7):
                    primPath = f"{basePrimPath}/{nameList[i]}"
                    prim = stage.GetPrimAtPath(primPath)
                    if prim.IsValid():
                        primImageable = UsdGeom.Imageable(prim)
                        primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
                    shiftV >>= 1

            # Hour : 1st digit.
            if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "":
                basePrimPath = db.inputs.a2_hourNum1Prim
                shiftV = 0x40
                maskV = numMaskList[(int)(hour12) % 10]
                for i in range(7):
                    primPath = f"{basePrimPath}/{nameList[i]}"
                    prim = stage.GetPrimAtPath(primPath)
                    if prim.IsValid():
                        primImageable = UsdGeom.Imageable(prim)
                        primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
                    shiftV >>= 1

            # Minute : 10th digit.
            if db.inputs.b1_minuteNum10Prim != None and db.inputs.b1_minuteNum10Prim != "":
                basePrimPath = db.inputs.b1_minuteNum10Prim
                shiftV = 0x40
                maskV = numMaskList[(int)(minute / 10) % 10]
                for i in range(7):
                    primPath = f"{basePrimPath}/{nameList[i]}"
                    prim = stage.GetPrimAtPath(primPath)
                    if prim.IsValid():
                        primImageable = UsdGeom.Imageable(prim)
                        primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
                    shiftV >>= 1

            # Minute : 1st digit.
            if db.inputs.b2_minuteNum1Prim != None and db.inputs.b2_minuteNum1Prim != "":
                basePrimPath = db.inputs.b2_minuteNum1Prim
                shiftV = 0x40
                maskV = numMaskList[(int)(minute) % 10]
                for i in range(7):
                    primPath = f"{basePrimPath}/{nameList[i]}"
                    prim = stage.GetPrimAtPath(primPath)
                    if prim.IsValid():
                        primImageable = UsdGeom.Imageable(prim)
                        primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
                    shiftV >>= 1

        except TypeError as error:
            db.log_error(f"Processing failed : {error}")
            return False
        return True
```

The following retrieves the hours, minutes, and seconds.

```python
hour = db.inputs.d1_hour
minute = db.inputs.d2_minute
second = db.inputs.d3_second
```
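
Before walking through the sections below, note how a 24-hour value decomposes into the two digits that get displayed — the same arithmetic the node applies (standalone sketch):

```python
hour = 15                                  # 3 PM in 24-hour form
hour12 = hour if hour < 12 else hour - 12  # value on a 12-hour dial
tens = (hour12 // 10) % 10                 # digit for the left 7-segment LED
ones = hour12 % 10                         # digit for the right 7-segment LED
print(hour12, tens, ones)                  # 3 0 3
```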
### AM/PM

The Prim path specified as "token" in the ogn file is received as a string.
The following shows/hides the AM Prim; its path is in "db.inputs.c1_amPrim". Use "db.inputs.c2_pmPrim" for the PM Prim path.

```python
# Get stage.
stage = omni.usd.get_context().get_stage()

# Show/hide "AM"
if db.inputs.c1_amPrim != None and db.inputs.c1_amPrim != "":
    prim = stage.GetPrimAtPath(db.inputs.c1_amPrim)
    if prim.IsValid():
        primImageable = UsdGeom.Imageable(prim)
        primImageable.GetVisibilityAttr().Set('inherited' if hour < 12 else 'invisible')

# Show/hide "PM"
if db.inputs.c2_pmPrim != None and db.inputs.c2_pmPrim != "":
    prim = stage.GetPrimAtPath(db.inputs.c2_pmPrim)
    if prim.IsValid():
        primImageable = UsdGeom.Imageable(prim)
        primImageable.GetVisibilityAttr().Set('inherited' if (hour >= 12) else 'invisible')
```

"stage.GetPrimAtPath" is used to obtain the Prim. If "prim.IsValid()" is True, the Prim exists.
For AM, the panel is shown while the time is before 12.
In Visibility, specify "inherited" to show or "invisible" to hide. PM is the reverse of AM.
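
These visibility calls can be tried outside of a full scene on an in-memory stage. A minimal sketch — the prim path here is arbitrary:

```python
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
prim = UsdGeom.Xform.Define(stage, "/World/AM").GetPrim()

imageable = UsdGeom.Imageable(prim)
imageable.GetVisibilityAttr().Set("invisible")   # hide
print(imageable.GetVisibilityAttr().Get())       # invisible
imageable.GetVisibilityAttr().Set("inherited")   # show again
```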
### Display 2-digit numbers

Hour (db.inputs.d1_hour) is entered as a number from 0 to 23.
nameList is an array of the letters 'A' to 'G'. numMaskList contains the show/hide bitmask for those seven segments, covering the digits 0-9.

```python
nameList = ["A", "B", "C", "D", "E", "F", "G"]
numMaskList = [0x7e, 0x30, 0x6d, 0x79, 0x33, 0x5b, 0x5f, 0x70, 0x7f, 0x7b]
```

Divide the hour into its tens and ones digits, and show/hide each of 'A' through 'G' on the target Prim.

```python
# Hour : 10th digit.
hour12 = hour if (hour < 12) else (hour - 12)
if db.inputs.a1_hourNum10Prim != None and db.inputs.a1_hourNum10Prim != "":
    basePrimPath = db.inputs.a1_hourNum10Prim
    shiftV = 0x40
    maskV = numMaskList[(int)(hour12 / 10) % 10]
    for i in range(7):
        primPath = f"{basePrimPath}/{nameList[i]}"
        prim = stage.GetPrimAtPath(primPath)
        if prim.IsValid():
            primImageable = UsdGeom.Imageable(prim)
            primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
        shiftV >>= 1

# Hour : 1st digit.
if db.inputs.a2_hourNum1Prim != None and db.inputs.a2_hourNum1Prim != "":
    basePrimPath = db.inputs.a2_hourNum1Prim
    shiftV = 0x40
    maskV = numMaskList[(int)(hour12) % 10]
    for i in range(7):
        primPath = f"{basePrimPath}/{nameList[i]}"
        prim = stage.GetPrimAtPath(primPath)
        if prim.IsValid():
            primImageable = UsdGeom.Imageable(prim)
            primImageable.GetVisibilityAttr().Set('inherited' if ((maskV & shiftV) != 0) else 'invisible')
        shiftV >>= 1
```

The same process is applied to the minute.

## OutputToLCDDatabase.py

For the most part, the process is the same as in "[GetDateTimeDatabase.py](./node_GetDateTime.md)".

"INTERFACE" enumerates the attribute data.

```python
PER_NODE_DATA = {}
INTERFACE = og.Database._get_interface([
    ('inputs:a1_hourNum10Prim', 'token', 0, 'HourNum10 Prim', 'HourNum10 Prim', {}, True, None, False, ''),
    ('inputs:a2_hourNum1Prim', 'token', 0, 'HourNum1 Prim', 'HourNum1 Prim', {}, True, None, False, ''),
    ('inputs:b1_minuteNum10Prim', 'token', 0, 'MinuteNum10 Prim', 'MinuteNum10 Prim', {}, True, None, False, ''),
    ('inputs:b2_minuteNum1Prim', 'token', 0, 'MinuteNum1 Prim', 'MinuteNum1 Prim', {}, True, None, False, ''),
    ('inputs:c1_amPrim', 'token', 0, 'AM Prim', 'AM Prim', {}, True, None, False, ''),
    ('inputs:c2_pmPrim', 'token', 0, 'PM Prim', 'PM Prim', {}, True, None, False, ''),
    ('inputs:d1_hour', 'int', 0, 'Hour', 'Hour', {}, True, 0, False, ''),
    ('inputs:d2_minute', 'int', 0, 'Minute', 'Minute', {}, True, 0, False, ''),
    ('inputs:d3_second', 'int', 0, 'Second', 'Second', {}, True, 0, False, ''),
])
```

'inputs:a1_hourNum10Prim', 'inputs:a2_hourNum1Prim', 'inputs:b1_minuteNum10Prim', 'inputs:b2_minuteNum1Prim', 'inputs:c1_amPrim', and 'inputs:c2_pmPrim' accept Prim paths, so their type is token.

### ValuesForInputs

The inputs designation is described in the "ValuesForInputs" class.

```python
class ValuesForInputs(og.DynamicAttributeAccess):
    LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"}
    """Helper class that creates natural hierarchical access to input attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
        self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second]
        self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0]

    @property
    def a1_hourNum10Prim(self):
        return self._batchedReadValues[0]

    @a1_hourNum10Prim.setter
    def a1_hourNum10Prim(self, value):
        self._batchedReadValues[0] = value

    @property
    def a2_hourNum1Prim(self):
        return self._batchedReadValues[1]

    @a2_hourNum1Prim.setter
    def a2_hourNum1Prim(self, value):
        self._batchedReadValues[1] = value

    @property
    def b1_minuteNum10Prim(self):
        return self._batchedReadValues[2]

    @b1_minuteNum10Prim.setter
    def b1_minuteNum10Prim(self, value):
        self._batchedReadValues[2] = value

    @property
    def b2_minuteNum1Prim(self):
        return self._batchedReadValues[3]

    @b2_minuteNum1Prim.setter
    def b2_minuteNum1Prim(self, value):
        self._batchedReadValues[3] = value

    @property
    def c1_amPrim(self):
        return self._batchedReadValues[4]

    @c1_amPrim.setter
    def c1_amPrim(self, value):
        self._batchedReadValues[4] = value

    @property
    def c2_pmPrim(self):
        return self._batchedReadValues[5]

    @c2_pmPrim.setter
    def c2_pmPrim(self, value):
        self._batchedReadValues[5] = value

    @property
    def d1_hour(self):
        return self._batchedReadValues[6]

    @d1_hour.setter
    def d1_hour(self, value):
        self._batchedReadValues[6] = value

    @property
    def d2_minute(self):
        return self._batchedReadValues[7]

    @d2_minute.setter
    def d2_minute(self, value):
        self._batchedReadValues[7] = value

    @property
    def d3_second(self):
        return self._batchedReadValues[8]

    @d3_second.setter
    def d3_second(self, value):
        self._batchedReadValues[8] = value

    def __getattr__(self, item: str):
        if item in self.LOCAL_PROPERTY_NAMES:
            return object.__getattribute__(self, item)
        else:
            return super().__getattr__(item)

    def __setattr__(self, item: str, new_value):
        if item in self.LOCAL_PROPERTY_NAMES:
            object.__setattr__(self, item, new_value)
        else:
            super().__setattr__(item, new_value)

    def _prefetch(self):
        readAttributes = self._batchedReadAttributes
        newValues = _og._prefetch_input_attributes_data(readAttributes)
        if len(readAttributes) == len(newValues):
            self._batchedReadValues = newValues
```

Specify the attribute names to be used, in order, in "LOCAL_PROPERTY_NAMES".

```python
LOCAL_PROPERTY_NAMES = {"a1_hourNum10Prim", "a2_hourNum1Prim", "b1_minuteNum10Prim", "b2_minuteNum1Prim", "c1_amPrim", "c2_pmPrim", "d1_hour", "d2_minute", "d3_second"}
```

In "\_\_init\_\_", specify "self._attributes.[Attribute name]" as an array.

```python
self._batchedReadAttributes = [self._attributes.a1_hourNum10Prim, self._attributes.a2_hourNum1Prim, self._attributes.b1_minuteNum10Prim, self._attributes.b2_minuteNum1Prim, self._attributes.c1_amPrim, self._attributes.c2_pmPrim, self._attributes.d1_hour, self._attributes.d2_minute, self._attributes.d3_second]
```

Also, put initial values in self._batchedReadValues.

```python
self._batchedReadValues = ["", "", "", "", "", "", 0, 0, 0]
```

Specify "" for token values; all other values are of type int.

The property getter/setter is specified as follows. If the attribute type is fixed, simply change the attribute name.

```python
@property
def a1_hourNum10Prim(self):
    return self._batchedReadValues[0]

@a1_hourNum10Prim.setter
def a1_hourNum10Prim(self, value):
    self._batchedReadValues[0] = value
```

The index into "self.\_batchedReadValues" is the 0-based position of the attribute in "self.\_batchedReadAttributes[]".

"\_\_getattr\_\_", "\_\_setattr\_\_", and "\_prefetch" can be copied and pasted as is.

### ValuesForState(og.DynamicAttributeAccess)

The ValuesForState class of "OutputToLCDDatabase" can be used by simply specifying the target class name and copying and pasting.

```python
class ValuesForState(og.DynamicAttributeAccess):
    """Helper class that creates natural hierarchical access to state attributes"""
    def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
        """Initialize simplified access for the attribute data"""
        context = node.get_graph().get_default_graph_context()
        super().__init__(context, node, attributes, dynamic_attributes)
```

### \_\_init\_\_

In "\_\_init\_\_", the inputs and state classes are created.

```python
def __init__(self, node):
    super().__init__(node)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
    self.inputs = OutputToLCDDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
    dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
    self.state = OutputToLCDDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
```

There are no outputs in this OutputToLCDDatabase class, so they are not mentioned.

### class abi

Defines the connections for the OmniGraph node via the ABI interface. The ABI methods are essentially boilerplate.
```python class abi: @staticmethod def get_node_type(): get_node_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'ft_lab.OmniGraph.GetDateTime.OutputToLCD' ``` Since the name of this Extension is "ft_lab.OmniGraph.GetDateTime" and "OutputToLCD" is in it, "ft_lab.OmniGraph.GetDateTime.OutputToLCD" is specified as the return value. The compute method is called when this node is executed. This also specifies an almost canned statement. ```python @staticmethod def compute(context, node): try: per_node_data = OutputToLCDDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = OutputToLCDDatabase(node) per_node_data['_db'] = db except: db = OutputToLCDDatabase(node) try: compute_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) db.inputs._prefetch() db.inputs._setting_locked = True with og.in_compute(): return OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.inputs._setting_locked = False #db.outputs._commit() return False ``` The compute method of OutputToLCD.py is called from "OutputToLCDDatabase.NODE_TYPE_CLASS.compute(db)". initialize, release, and update_node_version are listed as they are, just matching the class names. This is also a canned statement. ```python @staticmethod def initialize(context, node): OutputToLCDDatabase._initialize_per_node_data(node) initialize_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) OutputToLCDDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False ``` The initialize_type method specifies information about the OmniGraph node. ```python @staticmethod def initialize_type(node_type): initialize_type_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "ft_lab.OmniGraph.GetDateTime") node_type.set_metadata(ogn.MetadataKeys.UI_NAME, "Time output to LCD") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "examples") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Time output to LCD") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") # Set Icon(svg). icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}") icon_path = icon_path + '/' + "data/icons/outputToLCD.svg" node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path) OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type) ``` The information is set as metadata by using "node_type.set_metadata". 
|Key name|Description|Value|
|---|---|---|
|ogn.MetadataKeys.EXTENSION|Extension name|ft_lab.OmniGraph.GetDateTime|
|ogn.MetadataKeys.UI_NAME|UI name of the node|Time output to LCD|
|ogn.MetadataKeys.CATEGORIES|Category name|examples|
|ogn.MetadataKeys.DESCRIPTION|Node description|Time output to LCD|
|ogn.MetadataKeys.LANGUAGE|Implementation language|Python|
|ogn.MetadataKeys.ICON_PATH|Icon path|[Extension Path]/data/icons/ft_lab.OmniGraph.GetDateTime.outputToLCD.svg|

See below for the available category names.
https://docs.omniverse.nvidia.com/kit/docs/omni.graph.docs/latest/howto/Categories.html

The icon path is built by resolving the Extension path and then appending the icon's relative location.

```python
icon_path = carb.tokens.get_tokens_interface().resolve("${ft_lab.OmniGraph.GetDateTime}")
icon_path = icon_path + '/' + "data/icons/ft_lab.OmniGraph.GetDateTime.outputToLCD.svg"
node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)
```

Finally, register the "node_type" that now carries the metadata.

```python
OutputToLCDDatabase.INTERFACE.add_to_node_type(node_type)
```

The on_connection_type_resolve method is boilerplate.

```python
    @staticmethod
    def on_connection_type_resolve(node):
        on_connection_type_resolve_function = getattr(OutputToLCDDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
        if callable(on_connection_type_resolve_function):
            on_connection_type_resolve_function(node)
```

### Specify version

After the abi class, add the following lines as they are (the values shown are for USD Composer 2023.2.2, Kit 105.1.2).

```python
NODE_TYPE_CLASS = None
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
```

These values apparently need to be updated when the Kit version is upgraded; otherwise problems such as icons not being displayed can occur.

### register method

The register method is boilerplate.

```python
    @staticmethod
    def register(node_type_class):
        OutputToLCDDatabase.NODE_TYPE_CLASS = node_type_class
        og.register_node_type(OutputToLCDDatabase.abi, 1)
```

### deregister method

The deregister method passes "[Extension name].[node class name]".

```python
    @staticmethod
    def deregister():
        og.deregister_node_type("ft_lab.OmniGraph.GetDateTime.OutputToLCD")
```
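For reference, this register/deregister pair is what ties the node implementation class to its database class. A minimal sketch of the typical wiring (the import paths are illustrative and depend on the module layout):

```python
from ..nodes.OutputToLCD import OutputToLCD
from .OutputToLCDDatabase import OutputToLCDDatabase

def register_node():
    # Typically called when the extension starts up.
    OutputToLCDDatabase.register(OutputToLCD)

def deregister_node():
    # Typically called when the extension shuts down.
    OutputToLCDDatabase.deregister()
```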
26,995
Markdown
38.181422
322
0.601926
ft-lab/Omniverse_OmniGraph_ClockSample/docs/ExtensionStructure.md
# Extension Structure

The extension has the following structure. The extension name is "ft_lab.OmniGraph.GetDateTime".

```
[ft_lab.OmniGraph.GetDateTime]
    [config]
        extension.toml
    [data]
        [icons]
            icon.svg
            outputToLCD.svg
            rotationByTimeIcon.svg
        icon.png
        preview.jpg
    [docs]
        CHANGELOG.md
        index.rst
        README.md
    [ft_lab]
        [OmniGraph]
            [GetDateTime]
                [nodes]
                    GetDateTime.ogn
                    GetDateTime.py
                    OutputToLCD.ogn
                    OutputToLCD.py
                    RotationByTime.ogn
                    RotationByTime.py
                [ogn]
                    __init__.py
                    GetDateTimeDatabase.py
                    OutputToLCDDatabase.py
                    RotationByTimeDatabase.py
                __init__.py
                extension.py
```

The Extension configuration file is "extension.toml".
This section describes only the parts of the Extension related to creating custom OmniGraph nodes.

## Files per node

OmniGraph node definitions use files with the ogn extension.
For an ogn file called "GetDateTime.ogn", the node name is "GetDateTime".

One node consists of three files.

```
[nodes]
    GetDateTime.ogn
    GetDateTime.py
[ogn]
    GetDateTimeDatabase.py
```

|File|Description|
|---|---|
|GetDateTime.ogn|Node configuration in json format|
|GetDateTime.py|Implementation of the node|
|GetDateTimeDatabase.py|Implementation as a custom node.<br>It is almost entirely boilerplate.|

The database file is named "[node name]Database.py" (here, "GetDateTimeDatabase.py").

## extension.toml

```
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["*Database.py","*/ogn*"]

# We only depend on testing framework currently:
[dependencies]
"omni.graph" = {}
"omni.graph.nodes" = {}
"omni.graph.tools" = {}
```

In [fswatcher.patterns], add the file patterns used by the OmniGraph nodes.
The above can be copied and pasted as is.

In [dependencies], specify the other Extensions required by the OmniGraph nodes. If any of them are disabled when this Extension is loaded, they will be enabled.

## Icons used in graph

"data/icons" stores the icons used by the nodes as SVG files.

```
[data]
  [icons]
    ft_lab.OmniGraph.GetDateTime.icon.svg
    ft_lab.OmniGraph.GetDateTime.outputToLCD.svg
    ft_lab.OmniGraph.GetDateTime.rotationByTimeIcon.svg
```

Icon file names follow the convention below.

```
[Project name].[Icon name].svg
```

These icons appear in the node graph in Omniverse Create at the following locations.

![node_svg.jpg](./images/node_svg.jpg)

I created the svg files in Affinity Designer ( https://affinity.serif.com/ ).

## Nodes

The following three nodes exist.
See also "[Description of OmniGraph nodes](../OmniGraphNodes.md)" for node descriptions.

|Node name|Description|
|---|---|
|[GetDateTime](./node_GetDateTime.md)|Get the current local date and time.|
|[RotationByTime](./node_RotationByTime.md)|Given an hour, minute, and second, returns the XYZ of each rotation(degree).|
|[OutputToLCD](./node_OutputToLCD.md)|This node controls a virtual 7-segment LED LCD screen.|
3,303
Markdown
25.861788
126
0.674236
ft-lab/Omniverse_extension_SetOrigin/update_log.md
# Update log ## Set Origin v.0.0.1 [08/11/2022] * Adjustments for Extension Manager ## Set Origin v.0.0.1 [04/28/2022] * First version.
141
Markdown
11.90909
35
0.659574
ft-lab/Omniverse_extension_SetOrigin/readme.md
# Omniverse Extension : "Set Origin" [Japanese readme](./readme_jp.md) Changes the center position of the rotation or scale for the selected Mesh or Xform. ![setorigin_preview.jpg](./images/setorigin_preview.jpg) ## Operating Environment * Windows 10/Ubuntu 20.04 * Omniverse Create 2022.1.1 (Omniverse Kit 103) * Omniverse Code 2022.1.0 ## Usage 1. Copy "ft_lab.Tools.SetOrigin" to the exts folder in Omniverse. (ov/pkg/create-2022.1.1/exts , etc.) 2. Run Omniverse Create. 3. Activate "ft_lab.Tools.SetOrigin" in the Extension window. ![extension_setOrigin.jpg](./images/extension_setOrigin.jpg) 4. Select Mesh or Xform. 5. Select "Tools"-"Set Origin"-"Center of Geometry" from the menu to move the center of the manipulator to the center of the geometry. 6. Select "Tools"-"Set Origin"-"Lower center of Geometry" from the menu to move the center of the manipulator to the lower center of the geometry. ![tools_img_01.jpg](./images/tools_img_01.jpg) ## Additional command in Python This Set Origin function adjusts the Translate and Pivot of the Prim. Add "ToolSetOrigin" to omni.kit.commands. The argument "prim" specifies Usd.Prim. The argument "center_position" specifies the center position in world coordinates. ```python import omni.kit.commands from pxr import Usd, Gf stage = omni.usd.get_context().get_stage() omni.kit.commands.execute('ToolSetOrigin', prim=stage.GetPrimAtPath("/World/xxx"), center_position=Gf.Vec3f(50.0, -50.0, 0.0)) ``` ## Script reference in Omniverse Extension [https://github.com/ft-lab/omniverse_sample_scripts](https://github.com/ft-lab/omniverse_sample_scripts) ## Update log [Update log](./update_log.md)
1,731
Markdown
29.385964
146
0.720971
ft-lab/Omniverse_extension_SetOrigin/readme_jp.md
# Omniverse Extension : "Set Origin" [English readme](./readme.md) 選択されたMeshまたはXformの回転またはスケールの中心位置を変更します。 ![setorigin_preview.jpg](./images/setorigin_preview.jpg) ## 動作確認環境 * Windows 10/Ubuntu 20.04 * Omniverse Create 2022.1.1 (Omniverse Kit 103) * Omniverse Code 2022.1.0 ## 使い方 1. "ft_lab.Tools.SetOrigin"を Omniverseのextフォルダにコピーします。 (ov/pkg/create-2022.1.1/exts など) 2. Omniverse Createを起動します。 3. Extensionウィンドウで"ft_lab.Tools.SetOrigin"をアクティブにします。 ![extension_setOrigin.jpg](./images/extension_setOrigin.jpg) 4. MeshまたはXformを選択します。 5. "Tools"-"Set Origin"-"Center of Geometry"をメニューから選択すると、マニピュレータの中心がジオメトリの中心位置になります。 6. "Tools"-"Set Origin"-"Lower center of Geometry"をメニューから選択すると、マニピュレータの中心が ジオメトリの中央下の位置になります。 ![tools_img_01.jpg](./images/tools_img_01.jpg) ## Pythonでの追加コマンド Set Origin機能は、PrimのTranslateとPivotを調整する機能を提供します。 omni.kit.commandsに"ToolSetOrigin"を追加しています。 引数"prim"はUsd.Primを指定します。 引数"center_position"はワールド座標での中心にする位置を指定します。 ```python import omni.kit.commands from pxr import Usd, Gf stage = omni.usd.get_context().get_stage() omni.kit.commands.execute('ToolSetOrigin', prim=stage.GetPrimAtPath("/World/xxx"), center_position=Gf.Vec3f(50.0, -50.0, 0.0)) ``` ## Omniverse Extensionでのスクリプトの参考 [https://github.com/ft-lab/omniverse_sample_scripts](https://github.com/ft-lab/omniverse_sample_scripts) ## 更新履歴 [Update log](./update_log.md)
1,440
Markdown
25.685185
108
0.728472
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/extension.py
from pxr import Usd, UsdGeom, UsdSkel, UsdPhysics, UsdShade, Sdf, Gf, Tf

import omni.ext
import omni.usd
import omni.kit.menu.utils
import omni.kit.undo
import omni.kit.commands
from omni.kit.menu.utils import MenuItemDescription

import asyncio

from .scripts.SetOrigin import SetOrigin

# ----------------------------------------------------.
class SetOriginExtension (omni.ext.IExt):
    # Menu list.
    _menu_list = None
    _sub_menu_list = None

    # Menu name.
    _menu_name = "Tools"

    # ------------------------------------------.
    # Initialize menu.
    # ------------------------------------------.
    def init_menu (self):
        async def _rebuild_menus():
            await omni.kit.app.get_app().next_update_async()
            omni.kit.menu.utils.rebuild_menus()

        def menu_select (mode):
            if mode == 0:
                setOrigin = SetOrigin()
                setOrigin.doCenterOfGeometry()
            if mode == 1:
                setOrigin = SetOrigin()
                setOrigin.doLowerCenterOfGeometry()

        self._sub_menu_list = [
            MenuItemDescription(name="Center of Geometry", onclick_fn=lambda: menu_select(0)),
            MenuItemDescription(name="Lower center of Geometry", onclick_fn=lambda: menu_select(1)),
        ]

        self._menu_list = [
            MenuItemDescription(name="Set Origin", sub_menu=self._sub_menu_list),
        ]

        # Rebuild with additional menu items.
        omni.kit.menu.utils.add_menu_items(self._menu_list, self._menu_name)
        asyncio.ensure_future(_rebuild_menus())

    # ------------------------------------------.
    # Term menu.
    # It seems that the additional items in the top menu will not be removed.
    # ------------------------------------------.
    def term_menu (self):
        async def _rebuild_menus():
            await omni.kit.app.get_app().next_update_async()
            omni.kit.menu.utils.rebuild_menus()

        # Remove and rebuild the added menu items.
        omni.kit.menu.utils.remove_menu_items(self._menu_list, self._menu_name)
        asyncio.ensure_future(_rebuild_menus())

    # ------------------------------------------.

    # ------------------------------------------.
    # Extension startup.
    # ------------------------------------------.
    def on_startup (self, ext_id):
        # Initialize menu.
        self.init_menu()

    # ------------------------------------------.
    # Extension shutdown.
    # ------------------------------------------.
    def on_shutdown(self):
        # Term menu.
        self.term_menu()
2,607
Python
31.6
100
0.498274
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/TransformUtil.py
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf import omni.kit.commands # ---------------------------. # Set Translate. # ---------------------------. def TUtil_SetTranslate (prim : Usd.Prim, tV : Gf.Vec3f): trans = prim.GetAttribute("xformOp:translate").Get() if trans != None: # Specify a value for each type. if type(trans) == Gf.Vec3f: prim.GetAttribute("xformOp:translate").Set(Gf.Vec3f(tV)) elif type(trans) == Gf.Vec3d: prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(tV)) else: # xformOpOrder is also updated. xformAPI = UsdGeom.XformCommonAPI(prim) xformAPI.SetTranslate(Gf.Vec3d(tV)) # ---------------------------. # Set Scale. # ---------------------------. def TUtil_SetScale (prim : Usd.Prim, sV : Gf.Vec3f): scale = prim.GetAttribute("xformOp:scale").Get() if scale != None: # Specify a value for each type. if type(scale) == Gf.Vec3f: prim.GetAttribute("xformOp:scale").Set(Gf.Vec3f(sV)) elif type(scale) == Gf.Vec3d: prim.GetAttribute("xformOp:scale").Set(Gf.Vec3d(sV)) else: # xformOpOrder is also updated. xformAPI = UsdGeom.XformCommonAPI(prim) xformAPI.SetScale(Gf.Vec3f(sV)) # ---------------------------. # Set Rotate. # ---------------------------. def TUtil_SetRotate (prim : Usd.Prim, rV : Gf.Vec3f): # Get rotOrder. # If rotation does not exist, rotOrder = UsdGeom.XformCommonAPI.RotationOrderXYZ. xformAPI = UsdGeom.XformCommonAPI(prim) time_code = Usd.TimeCode.Default() translation, rotation, scale, pivot, rotOrder = xformAPI.GetXformVectors(time_code) # Convert rotOrder to "xformOp:rotateXYZ" etc. t = xformAPI.ConvertRotationOrderToOpType(rotOrder) rotateAttrName = "xformOp:" + UsdGeom.XformOp.GetOpTypeToken(t) # Set rotate. rotate = prim.GetAttribute(rotateAttrName).Get() if rotate != None: # Specify a value for each type. if type(rotate) == Gf.Vec3f: prim.GetAttribute(rotateAttrName).Set(Gf.Vec3f(rV)) elif type(rotate) == Gf.Vec3d: prim.GetAttribute(rotateAttrName).Set(Gf.Vec3d(rV)) else: # xformOpOrder is also updated. xformAPI.SetRotate(Gf.Vec3f(rV), rotOrder) # ---------------------------. # Set Pivot. # ---------------------------. def TUtil_SetPivot (prim : Usd.Prim, pV : Gf.Vec3f): pivot = prim.GetAttribute("xformOp:translate:pivot").Get() if pivot != None: # Specify a value for each type. if type(pivot) == Gf.Vec3f: prim.GetAttribute("xformOp:translate:pivot").Set(Gf.Vec3f(pV)) elif type(pivot) == Gf.Vec3d: prim.GetAttribute("xformOp:translate:pivot").Set(Gf.Vec3d(pV)) else: # xformOpOrder is also updated. # ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"] # The following do not work correctly? #xformAPI = UsdGeom.XformCommonAPI(prim) #xformAPI.SetPivot(Gf.Vec3f(pV)) prim.CreateAttribute("xformOp:translate:pivot", Sdf.ValueTypeNames.Float3, False).Set(Gf.Vec3f(pV)) # ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"] transformOrder = prim.GetAttribute("xformOpOrder").Get() orderList = [] for sV in transformOrder: orderList.append(sV) orderList.append("xformOp:translate:pivot") orderList.append("!invert!xformOp:translate:pivot") prim.GetAttribute("xformOpOrder").Set(orderList) # -------------------------------------------. # Check the order of Pivot in OpOrder # @return -1 ... unknown # 0 ... No pivot. # 1 ... ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"] # 2 ... 
["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"] # -------------------------------------------. def TUtil_ChkOrderOfPivot (prim : Usd.Prim): if prim == None: return transformOrder = prim.GetAttribute("xformOpOrder").Get() orderList = [] for sV in transformOrder: orderList.append(sV) orderLen = len(orderList) pos1 = -1 pos2 = -1 for i in range(orderLen): if orderList[i] == "xformOp:translate:pivot": pos1 = i elif orderList[i] == "!invert!xformOp:translate:pivot": pos2 = i if pos1 < 0 or pos2 < 0: return 0 # ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ", "xformOp:scale", "!invert!xformOp:translate:pivot"] if pos1 == 1 and pos2 == orderLen - 1: return 1 # ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale", "xformOp:translate:pivot", "!invert!xformOp:translate:pivot"] if pos1 == orderLen - 2 and pos2 == orderLen - 1: return 2 return -1 # -------------------------------------------. # Delete Pivot. # -------------------------------------------. def TUtil_DeletePivot (prim : Usd.Prim): if prim == None: return path = prim.GetPath().pathString + ".xformOp:translate:pivot" omni.kit.commands.execute('RemoveProperty', prop_path=path) transformOrder = prim.GetAttribute("xformOpOrder").Get() if transformOrder != None: orderList = [] for sV in transformOrder: if sV == "xformOp:translate:pivot" or sV == "!invert!xformOp:translate:pivot": continue orderList.append(sV) prim.GetAttribute("xformOpOrder").Set(orderList)
5,746
Python
36.318182
138
0.59102
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/MathUtil.py
# -----------------------------------------------------. # Math functions. # -----------------------------------------------------. from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf # Get local matrix. def GetLocalMatrix (prim : Usd.Prim): xformCache = UsdGeom.XformCache() curM = xformCache.GetLocalToWorldTransform(prim) parentPrim = prim.GetParent() matrix = curM * xformCache.GetLocalToWorldTransform(parentPrim).GetInverse() return matrix # Get world matrix. def GetWorldMatrix (prim : Usd.Prim): xformCache = UsdGeom.XformCache() return xformCache.GetLocalToWorldTransform(prim)
617
Python
33.333331
80
0.606159
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/CalcWorldBoundingBox.py
# -----------------------------------------------------. # # Calculate bounding box in world coordinates. # -----------------------------------------------------. from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf def CalcWorldBoundingBox (prim : Usd.Prim): # Calc world boundingBox. bboxCache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default"]) bboxD = bboxCache.ComputeWorldBound(prim).ComputeAlignedRange() bb_min = Gf.Vec3f(bboxD.GetMin()) bb_max = Gf.Vec3f(bboxD.GetMax()) return bb_min, bb_max
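# -----------------------------------------------------.
# Example (illustrative sketch): measuring a prim's world-space bounds.
# The prim path below is hypothetical.
#
#   stage = omni.usd.get_context().get_stage()
#   bb_min, bb_max = CalcWorldBoundingBox(stage.GetPrimAtPath("/World/Cube"))
#   center = (bb_min + bb_max) * 0.5
# -----------------------------------------------------.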
537
Python
34.866664
70
0.55121
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/ft_lab/Tools/SetOrigin/scripts/SetOrigin.py
# -----------------------------------------------------. # Change the center. # -----------------------------------------------------. from pxr import Usd, UsdGeom, UsdShade, Sdf, Gf, Tf import omni.usd import omni.kit.commands import omni.kit.undo from .CalcWorldBoundingBox import * from .MathUtil import * from .TransformUtil import * # Check if Prim can handle. def _checkPrim (prim : Usd.Prim): if prim == None: return False if prim.IsA(UsdGeom.Mesh) == False and prim.IsA(UsdGeom.Xform) == False: return False # Skip for reference. #if prim.HasAuthoredReferences(): # return False return True # ------------------------------------------------------------------------. # Change Mesh Center # ------------------------------------------------------------------------. class ToolSetOrigin (omni.kit.commands.Command): _prim = None _centerWPos = None _targetCenterWPos = None _prevTranslate = None _prevPivot = None # prim : Target prim. # center_position : Position of the center in world coordinates. def __init__ (self, prim : Usd.Prim, center_position : Gf.Vec3f): self._prim = prim self._targetCenterWPos = center_position # Calculate world center from bounding box. bbMin, bbMax = CalcWorldBoundingBox(prim) self._centerWPos = (bbMin + bbMax) * 0.5 # Execute process. def do (self): if _checkPrim(self._prim) == False: return self._prevTranslate = self._prim.GetAttribute("xformOp:translate").Get() if self._prevTranslate == None: self._prevTranslate = Gf.Vec3f(0, 0, 0) self._prevPivot = self._prim.GetAttribute("xformOp:translate:pivot").Get() localM = GetWorldMatrix(self._prim).GetInverse() centerPosL = localM.Transform(self._targetCenterWPos) TUtil_SetPivot(self._prim, Gf.Vec3f(centerPosL)) # Calculate world center from bounding box. bbMin, bbMax = CalcWorldBoundingBox(self._prim) bbCenter = (bbMin + bbMax) * 0.5 # Recalculate the center position in world coordinates and correct for any misalignment. ddV = Gf.Vec3f(bbCenter - self._centerWPos) fMin = 1e-6 if abs(ddV[0]) > fMin or abs(ddV[1]) > fMin or abs(ddV[2]) > fMin: parentLocalM = GetWorldMatrix(self._prim.GetParent()).GetInverse() p1 = parentLocalM.Transform(self._centerWPos) p2 = parentLocalM.Transform(bbCenter) transV = self._prim.GetAttribute("xformOp:translate").Get() if transV == None: transV = Gf.Vec3f(0, 0, 0) transV = Gf.Vec3f(transV) + (p1 - p2) TUtil_SetTranslate(self._prim, Gf.Vec3f(transV)) # Undo process. def undo (self): if _checkPrim(self._prim) == False: return TUtil_SetTranslate(self._prim, Gf.Vec3f(self._prevTranslate)) if self._prevPivot != None: TUtil_SetPivot(self._prim, Gf.Vec3f(self._prevPivot)) else: TUtil_DeletePivot(self._prim) # ------------------------------------------------------------------------. class SetOrigin: def __init__(self): pass # Get selected Prim. def _getSelectedPrim (self): # Get stage. stage = omni.usd.get_context().get_stage() # Get selection. selection = omni.usd.get_context().get_selection() paths = selection.get_selected_prim_paths() prim = None for path in paths: prim = stage.GetPrimAtPath(path) break return prim def doCenterOfGeometry (self): prim = self._getSelectedPrim() if _checkPrim(prim) == False: return # Calculate world center from bounding box. bbMin, bbMax = CalcWorldBoundingBox(prim) bbCenter = (bbMin + bbMax) * 0.5 # Register a Class and run it. 
omni.kit.commands.register(ToolSetOrigin) omni.kit.commands.execute("ToolSetOrigin", prim=prim, center_position=bbCenter) def doLowerCenterOfGeometry (self): prim = self._getSelectedPrim() if _checkPrim(prim) == False: return # Calculate world lower center from bounding box. bbMin, bbMax = CalcWorldBoundingBox(prim) bbCenter = Gf.Vec3f((bbMin[0] + bbMax[0]) * 0.5, bbMin[1], (bbMin[2] + bbMax[2]) * 0.5) # Register a Class and run it. omni.kit.commands.register(ToolSetOrigin) omni.kit.commands.execute("ToolSetOrigin", prim=prim, center_position=bbCenter)
4,640
Python
32.388489
96
0.567026
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/docs/CHANGELOG.md
# CHANGELOG ## Ver.0.0.1 (08/11/2022) * Adjustments for Extension Manager ## Ver.0.0.1 (04/28/2022) * First Version
123
Markdown
8.538461
35
0.642276
ft-lab/Omniverse_extension_SetOrigin/exts/ft_lab.Tools.SetOrigin/docs/README.md
# Set Origin [ft_lab.Tools.SetOrigin] Changes the center position of the rotation or scale for the selected Mesh or Xform. https://github.com/ft-lab/Omniverse_extension_SetOrigin ## Usage 1. Activate "ft_lab.Tools.SetOrigin" in the Extension window. 2. Select Mesh or Xform. 3. Select "Tools"-"Set Origin"-"Center of Geometry" from the menu to move the center of the manipulator to the center of the geometry. 4. Select "Tools"-"Set Origin"-"Lower center of Geometry" from the menu to move the center of the manipulator to the lower center of the geometry. ## Operation Description This Set Origin function adjusts the Translate and Pivot of the Prim. Add "ToolSetOrigin" to omni.kit.commands. The argument "prim" specifies Usd.Prim. The argument "center_position" specifies the center position in world coordinates.
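For reference, the command can also be executed from Python (the prim path is illustrative):

```python
import omni.usd
import omni.kit.commands
from pxr import Gf

stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute('ToolSetOrigin',
    prim=stage.GetPrimAtPath("/World/xxx"),
    center_position=Gf.Vec3f(50.0, -50.0, 0.0))
```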
850
Markdown
37.681816
146
0.752941
omnioverflow/kit-extension-path-tracking/README.md
# Vehicle Path Tracking Extension

## 1. About

The Omniverse Vehicle Path Tracking extension allows a physics-enabled vehicle created with the PhysX Vehicle extension (omni.physx.vehicle) to move and automatically track a user-defined path. The user-defined path is represented by an instance of USD BasisCurves, and the path tracking algorithm is inspired by the classic Pure Pursuit algorithm [3].

![Vehicle Path Tracking Preview](exts/ext.path.tracking/data/preview.PNG)

Figure 1. Preview of Vehicle Path Tracking Extension

### System Requirements:

- `Code 2022.1.3+` or `Create 2022.1.5+` (support for Create 2022.3.0 is in progress)
- `Python 3.7+`, `numpy` (this requirement should be satisfied when using Omniverse Kit's embedded `CPython 3.7`)

### Limitations

For the moment, the extension is simple; a number of shortcuts have been taken and a few simplifications applied, including the following:

* The Pure Pursuit tracking algorithm is kinematics-based, so several vehicle dynamics properties, such as tire slip, are not considered when computing the wheel steering angle.
* A vehicle might go off track if given an input path with a physically "impossible" trajectory, or during a high-speed turn.
* Limited unit test coverage; occasional bugs might exist.

### Future Work

* Implement automatic computation of a vehicle path that satisfies certain constraints (waypoints, collision-free path, etc.).
* Add support for different vehicle controller algorithms, including more sophisticated ones (e.g., a PID controller).
* Address the limitations above; fix bugs.

## 2. Installing Extension

### Add a path to a local clone to Omniverse extension search path

1. `git clone -b main $PATH_TO_DIR`
2. `Window` -> `Extension Manager` -> ⚙️ `Gear Icon` -> `Extension Search Path`
3. Add the path to the freshly cloned extension as an extension search path: `$PATH_TO_DIR/exts`

### Omniverse Community Tab

The extension is also available in the community tab of the Extension Manager: just search for path.tracking in the search field.

### Activate extension

Once the extension search path is configured, start the extension:

1. `Window` -> `Extension Manager`
2. Find the Vehicle path tracking extension in the list and enable it (Figure 2)

<img src="exts/ext.path.tracking/data/img/figures/figure_01.png" alt="activating extension" style="height:400px;"/></br>
Figure 2. Activating path tracking extension in extension manager.</br>

---

## 3. Getting Started

### 3.1. Evaluate vehicle path tracking on a preset configuration

The fastest way to evaluate how the vehicle path tracking extension works is to use a preset vehicle and curve (think of it as a `HelloWorld` before importing your own PhysX vehicle and custom paths). To get started with the preset configuration, proceed as follows (Figure 3):

1. Click the `Load a preset scene` button
2. Click the `Start scenario` button

<img src="exts/ext.path.tracking/data/img/figures/figure_02.png" style="width:600px" alt="extension preview"><br/>
Figure 3. Getting started with a preset scene.

The extension also provides a quick way to load a ground plane, a sample physics vehicle, and a sample basis curve. See Figure 4.

<img src="exts/ext.path.tracking/data/img/figures/figure_03.png" style="width:600px" alt="extension controls"/><br/>
Figure 4. Other extension controls.

---

### 3.2. Create your custom vehicle-to-curve attachment setup

The extension supports path tracking for any Omniverse PhysX vehicle.
One could load a template vehicle using the extension UI, or via the conventional method: `Create`->`Physics`->`Vehicle`. It is also straightforward to add a custom mesh and materials to a physics vehicle [2].

You can create a curve for vehicle path tracking using either of the following methods (Figure 5):

- `Create`->`BasisCurves`->`From Bezier`
- `Create`->`BasisCurves`->`From Pencil`

<img src="exts/ext.path.tracking/data/img/figures/figure_04.png" style="height:500px"/> | <img src="exts/ext.path.tracking/data/img/figures/figure_05.png" style="height:500px"/><br/>
Figure 5. Create a custom path to track via USD BasisCurves.

---

Once a physics vehicle and a path to be tracked (defined by USD BasisCurves) are created, select the WizardVehicle and the BasisCurves prims in the stage (via Ctrl-click) and click the `Attach Selected` button. Note that it is very important to select specifically the `WizardVehicle` prim in the scene, not `WizardVehicle/Vehicle` for instance. Please see Figure 6 for the illustration.

<img src="exts/ext.path.tracking/data/img/figures/figure_06.png" style="width:1100px"/><br/>
Figure 6. Attachment of a path (USD BasisCurves) to a physics-enabled vehicle.

If the vehicle-to-curve attachment was successful, it is reflected in the extension UI (Figure 7).

<img src="exts/ext.path.tracking/data/img/figures/figure_07.png" style="width:600px"/><br/>
Figure 7. Successful vehicle-to-curve attachment is shown on the right side.

Once the vehicle-to-curve attachment(s) are created, proceed by clicking the `Start Scenario` button. To get rid of all existing vehicle-to-curve attachments, click `Clear All Attachments` (Figure 8). It is very important to clear vehicle-to-curve attachments when changing vehicles and their corresponding tracked paths.

<img src="exts/ext.path.tracking/data/img/figures/figure_08.png" style="width:600px"/><br/>
Figure 8. Removing existing vehicle-to-curve attachments.

### 3.3. Multiple Vehicles

The extension supports multiple vehicle-to-curve attachments. Note that for an attachment to work, a pair of `WizardVehicle` and `BasisCurve` objects should be selected and then attached. Results of path tracking with multiple vehicles are shown in Figure 9.

<img src="exts/ext.path.tracking/data/img/figures/figure_09_01.png" style="height:300px"/>
<img src="exts/ext.path.tracking/data/img/figures/figure_09_02.png" style="height:300px"/>
<img src="exts/ext.path.tracking/data/img/figures/figure_09_03.png" style="height:300px"/><br/>
Figure 9. Support of multiple vehicle-to-curve attachments.

### Troubleshooting

Note that the extension is in Beta. The following items might help if there are any issues:

- It always takes a few seconds between clicking the `Start scenario` button and the actual start of the simulation, so please be patient.
- On a fresh install, some PhysX warnings/errors might occasionally be reported in the console log; they should not prevent the extension from producing the expected results.
- If path tracking is not working on a custom vehicle and path, please verify that exactly `WizardVehicle1` from omni.physx.vehicle is selected (not the child prim `WizardVehicle1/Vehicle` or some parent prim) along with a prim of type `BasisCurves` (which is to be tracked) before clicking `Attach Selected`.
- Use `Clear All Attachments` if there are any issues.

---

## 4. Results

1. [youtube video] [Vehicle Path Tracking Extension Overview](https://youtu.be/tv-_xrqjzm4)
2.
[youtube video] [Vehicle Dynamics and Vehicle Path Tracking: Forklift Usecase](https://youtu.be/SRibExkL4aE)
3. [youtube video] [OmniPhysX & Vehicle Dynamics Showcase](https://youtu.be/C8tjZWtU6w8)

## 5. References

1. [Omniverse Developer Contest] https://www.nvidia.com/en-us/omniverse/apps/code/developer-contest/
2. [Omniverse Vehicle Dynamics] https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_vehicle-dynamics.html
3. [Coulter 1992] Coulter, R. Craig. Implementation of the pure pursuit path tracking algorithm. Carnegie-Mellon UNIV Pittsburgh PA Robotics INST, 1992. (https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf)
4. Credits for the forklift model: https://sketchfab.com/3d-models/forklift-73d21c990e634589b0c130777751be28 (license: [Creative Commons Attribution](https://creativecommons.org/licenses/by/4.0/))
5. Credits for the Dodge Challenger car model: https://sketchfab.com/3d-models/dodge-challenger-ef40662c84eb4beb85acdfce5ac4f40e (license: [Creative Commons Attribution NonCommercial](https://creativecommons.org/licenses/by-nc/4.0/))
6. Credits for the monster truck (used in the result video): https://sketchfab.com/3d-models/hcr2-monster-truck-811bd567566b497a8cbbb06fd5a267b6 (license: [Creative Commons Attribution](https://creativecommons.org/licenses/by/4.0/))
7. Credits for the race track model (used in the result video): https://sketchfab.com/3d-models/track-5f5e9454fd59436e8d0dd38df9ec83c4 (license: [Creative Commons Attribution NonCommercial](https://creativecommons.org/licenses/by-nc/4.0/))
8,547
Markdown
55.609271
307
0.776881
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/__init__.py
from .scripts.debug_draw import *
from .scripts.extension import *
from .scripts.model import *
from .scripts.path_tracker import *
from .scripts.ui import *
from .scripts.utils import *
from .scripts.vehicle import *
253
Python
30.749996
35
0.770751
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/vehicle.py
import omni.usd

from enum import IntEnum

from pxr import Gf, Usd, UsdGeom, PhysxSchema

import numpy as np

# ======================================================================================================================
#
# Vehicle
#
# ======================================================================================================================

class Axle(IntEnum):
    FRONT = 0
    REAR = 1

class Wheel(IntEnum):
    FRONT_LEFT = 0
    FRONT_RIGHT = 1
    REAR_LEFT = 2
    REAR_RIGHT = 3

# ======================================================================================================================

class Vehicle():
    """
    A wrapper created to help manipulating state of a vehicle prim and its
    dynamic properties, such as acceleration, deceleration, steering, etc.
    """
    def __init__(self, vehicle_prim, max_steer_angle_radians, rear_steering=True):
        self._prim = vehicle_prim
        self._path = self._prim.GetPath()
        self._steer_delta = 0.01
        self._stage = omni.usd.get_context().get_stage()
        self._rear_steering = rear_steering
        self._wheel_prims = {
            Wheel.FRONT_LEFT: self._stage.GetPrimAtPath(f"{self._path}/LeftWheel1References"),
            Wheel.FRONT_RIGHT: self._stage.GetPrimAtPath(f"{self._path}/RightWheel1References"),
            Wheel.REAR_LEFT: self._stage.GetPrimAtPath(f"{self._path}/LeftWheel2References"),
            Wheel.REAR_RIGHT: self._stage.GetPrimAtPath(f"{self._path}/RightWheel2References")
        }
        steering_wheels = [Wheel.FRONT_LEFT, Wheel.FRONT_RIGHT]
        non_steering_wheels = [Wheel.REAR_LEFT, Wheel.REAR_RIGHT]

        # When rear steering is enabled, the rear wheels steer and the front wheels are fixed.
        if self._rear_steering:
            steering_wheels, non_steering_wheels = non_steering_wheels, steering_wheels

        for wheel_prim_key in steering_wheels:
            self._set_max_steer_angle(self._wheel_prims[wheel_prim_key], max_steer_angle_radians)

        for wheel_prim_key in non_steering_wheels:
            self._set_max_steer_angle(self._wheel_prims[wheel_prim_key], 0.0)

        p = self._prim.GetAttribute("xformOp:translate").Get()
        self._p = Gf.Vec4f(p[0], p[1], p[2], 1.0)

    def _set_max_steer_angle(self, wheel_prim, max_steer_angle_radians):
        physx_wheel = PhysxSchema.PhysxVehicleWheelAPI(wheel_prim)
        physx_wheel.GetMaxSteerAngleAttr().Set(max_steer_angle_radians)

    def get_bbox_size(self):
        """Computes size of vehicle's oriented bounding box."""
        purposes = [UsdGeom.Tokens.default_]
        bbox_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
        return bbox_cache.ComputeWorldBound(self._prim).ComputeAlignedRange().GetSize()

    def steer_left(self, value):
        if self._rear_steering:
            self._steer_right_priv(value)
        else:
            self._steer_left_priv(value)

    def steer_right(self, value):
        if self._rear_steering:
            self._steer_left_priv(value)
        else:
            self._steer_right_priv(value)

    def _steer_left_priv(self, value):
        self._prim.GetAttribute("physxVehicleController:steerLeft").Set(value)
        self._prim.GetAttribute("physxVehicleController:steerRight").Set(0.0)

    def _steer_right_priv(self, value):
        self._prim.GetAttribute("physxVehicleController:steerLeft").Set(0.0)
        self._prim.GetAttribute("physxVehicleController:steerRight").Set(value)

    def accelerate(self, value):
        self._vehicle().GetAttribute("physxVehicleController:accelerator").Set(value)

    def brake(self, value):
        self._prim.GetAttribute("physxVehicleController:brake").Set(value)

    def get_velocity(self):
        return self._prim.GetAttribute("physics:velocity").Get()

    def get_speed(self):
        return np.linalg.norm(self.get_velocity())

    def curr_position(self):
        prim = self._vehicle()
        cache = UsdGeom.XformCache()
        T = cache.GetLocalToWorldTransform(prim)
        p = self._p * T
        return Gf.Vec3f(p[0], p[1], p[2])

    def axle_front(self):
        return self.axle_position(Axle.FRONT)
    def axle_rear(self):
        return self.axle_position(Axle.REAR)

    def axle_position(self, type):
        cache = UsdGeom.XformCache()
        T = cache.GetLocalToWorldTransform(self._vehicle())
        if type == Axle.FRONT:
            wheel_fl = self._wheel_prims[Wheel.FRONT_LEFT].GetAttribute("xformOp:translate").Get()
            wheel_fr = self._wheel_prims[Wheel.FRONT_RIGHT].GetAttribute("xformOp:translate").Get()
            wheel_fl[1] = 0.0
            wheel_fr[1] = 0.0
            wheel_fl = Gf.Vec4f(wheel_fl[0], wheel_fl[1], wheel_fl[2], 1.0) * T
            wheel_fr = Gf.Vec4f(wheel_fr[0], wheel_fr[1], wheel_fr[2], 1.0) * T
            wheel_fl = Gf.Vec3f(wheel_fl[0], wheel_fl[1], wheel_fl[2])
            wheel_fr = Gf.Vec3f(wheel_fr[0], wheel_fr[1], wheel_fr[2])
            return (wheel_fl + wheel_fr) / 2
        elif type == Axle.REAR:
            wheel_rl = self._wheel_prims[Wheel.REAR_LEFT].GetAttribute("xformOp:translate").Get()
            wheel_rr = self._wheel_prims[Wheel.REAR_RIGHT].GetAttribute("xformOp:translate").Get()
            wheel_rl[1] = 0.0
            wheel_rr[1] = 0.0
            wheel_rl = Gf.Vec4f(wheel_rl[0], wheel_rl[1], wheel_rl[2], 1.0) * T
            wheel_rr = Gf.Vec4f(wheel_rr[0], wheel_rr[1], wheel_rr[2], 1.0) * T
            wheel_rl = Gf.Vec3f(wheel_rl[0], wheel_rl[1], wheel_rl[2])
            wheel_rr = Gf.Vec3f(wheel_rr[0], wheel_rr[1], wheel_rr[2])
            return (wheel_rl + wheel_rr) / 2
        else:
            return None

    def _wheel_pos(self, type):
        R = self.rotation_matrix()
        wheel_pos = self._wheel_prims[type].GetAttribute("xformOp:translate").Get()
        wheel_pos = Gf.Vec4f(wheel_pos[0], wheel_pos[1], wheel_pos[2], 1.0) * R
        return Gf.Vec3f(wheel_pos[0], wheel_pos[1], wheel_pos[2]) + self.curr_position()

    def wheel_pos_front_left(self):
        return self._wheel_pos(Wheel.FRONT_LEFT)

    def wheel_pos_front_right(self):
        return self._wheel_pos(Wheel.FRONT_RIGHT)

    def wheel_pos_rear_left(self):
        return self._wheel_pos(Wheel.REAR_LEFT)

    def wheel_pos_rear_right(self):
        return self._wheel_pos(Wheel.REAR_RIGHT)

    def rotation_matrix(self):
        """
        Produces vehicle's local-to-world rotation transform.
        """
        cache = UsdGeom.XformCache()
        T = cache.GetLocalToWorldTransform(self._vehicle())
        return Gf.Matrix4d(T.ExtractRotationMatrix(), Gf.Vec3d())

    def forward(self):
        R = self.rotation_matrix()
        f = self._forward_local()
        return Gf.Vec4f(f[0], f[1], f[2], 1.0) * R

    def up(self):
        R = self.rotation_matrix()
        u = self._up_local()
        return Gf.Vec4f(u[0], u[1], u[2], 1.0) * R

    def _forward_local(self):
        return Gf.Vec3f(0.0, 0.0, 1.0)

    def _up_local(self):
        return Gf.Vec3f(0.0, 1.0, 0.0)

    def _vehicle(self):
        return self._stage.GetPrimAtPath(self._path)

    def is_close_to(self, point, lookahead_distance):
        if not point:
            raise Exception("[Vehicle] Point is None")
        curr_vehicle_pos = self.curr_position()
        if not curr_vehicle_pos:
            raise Exception("[Vehicle] Current position is None")
        distance = np.linalg.norm(curr_vehicle_pos - point)
        return (distance, distance < lookahead_distance)
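# ======================================================================================================================
# Example (illustrative sketch): wrapping an existing PhysX vehicle prim.
# The prim path below is hypothetical; it must point to a vehicle created by
# the PhysX Vehicle wizard, with wheel prims named as referenced above.
#
#   import math
#   stage = omni.usd.get_context().get_stage()
#   vehicle = Vehicle(stage.GetPrimAtPath("/World/WizardVehicle/Vehicle"), math.pi / 3)
#   vehicle.accelerate(0.5)
#   print(vehicle.get_speed(), vehicle.curr_position())
# ======================================================================================================================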
7,517
Python
36.402985
120
0.579753
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/stepper.py
import omni.kit import omni.physx import omni.usd import omni.timeline from omni.physx.bindings._physx import SimulationEvent import math import threading """ Based on Nvidia's sample from omni.physx.vehicle Physics extension. """ # ====================================================================================================================== # # Scenario # # ====================================================================================================================== class Scenario: def __init__(self, secondsToRun, timeStep=1.0 / 60.0): self._targetIterationCount = math.ceil(secondsToRun / timeStep) def get_iteration_count(self): return self._targetIterationCount # override in subclass as needed def on_start(self): pass def on_end(self): pass def on_step(self, deltaTime, totalTime): pass # ====================================================================================================================== # # SimStepTracker # # ====================================================================================================================== class SimStepTracker: def __init__(self, scenario, scenarioDoneSignal): self._scenario = scenario self._targetIterationCount = scenario.get_iteration_count() self._scenarioDoneSignal = scenarioDoneSignal self._physx = omni.physx.get_physx_interface() self._physxSimEventSubscription = self._physx.get_simulation_event_stream_v2().create_subscription_to_pop( self._on_simulation_event ) self._hasStarted = False self._resetOnNextResume = False def abort(self): if self._hasStarted: self._on_stop() self._physxSimEventSubscription = None self._physx = ( None ) # should release automatically (note: explicit release call results in double release being reported) self._scenarioDoneSignal.set() def stop(self): self._scenario.on_end() self._scenarioDoneSignal.set() def reset_on_next_resume(self): self._resetOnNextResume = True def _on_stop(self): self._hasStarted = False self._physxStepEventSubscription = None # should unsubscribe automatically self._scenario.on_end() def _on_simulation_event(self, event): if event.type == int(SimulationEvent.RESUMED): if not self._hasStarted: self._scenario.on_start() self._iterationCount = 0 self._totalTime = 0 self._physxStepEventSubscription = self._physx.subscribe_physics_step_events(self._on_physics_step) self._hasStarted = True elif self._resetOnNextResume: self._resetOnNextResume = False # the simulation step callback is still registered and should remain so, thus no unsubscribe self._hasStarted = False self._scenario.on_end() self._scenario.on_start() self._iterationCount = 0 self._totalTime = 0 self._hasStarted = True # elif event.type == int(SimulationEvent.PAUSED): # self._on_pause() elif event.type == int(SimulationEvent.STOPPED): self._on_stop() def _on_physics_step(self, dt): if self._hasStarted: pass if self._iterationCount < self._targetIterationCount: self._scenario.on_step(dt, self._totalTime) self._iterationCount += 1 self._totalTime += dt else: self._scenarioDoneSignal.set() # ====================================================================================================================== # # StageEventListener # # ====================================================================================================================== class StageEventListener: def __init__(self, simStepTracker): self._simStepTracker = simStepTracker self._stageEventSubscription = ( omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self._on_stage_event) ) self._stageIsClosing = 
False
        self.restart_after_stop = False

    def cleanup(self):
        self._stageEventSubscription = None

    def is_stage_closing(self):
        return self._stageIsClosing

    def _on_stage_event(self, event):
        # Check out omni.usd docs for more information regarding
        # omni.usd.StageEventType in particular.
        # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.usd/docs/index.html
        if event.type == int(omni.usd.StageEventType.CLOSING):
            self._stop(stageIsClosing=True)
        elif event.type == int(omni.usd.StageEventType.SIMULATION_STOP_PLAY):
            if self.restart_after_stop:
                omni.timeline.get_timeline_interface().play()
        elif event.type == int(omni.usd.StageEventType.SIMULATION_START_PLAY):
            self.restart_after_stop = False
        elif event.type == int(omni.usd.StageEventType.ANIMATION_STOP_PLAY):
            pass

    def _stop(self, stageIsClosing=False):
        self._stageIsClosing = stageIsClosing
        self._simStepTracker.stop()

# ======================================================================================================================
#
# ScenarioManager
#
# ======================================================================================================================

class ScenarioManager:
    def __init__(self, scenario):
        self._scenario = scenario
        self._setup(scenario)

    def _setup(self, scenario):
        self._init_done = False
        scenarioDoneSignal = threading.Event()
        self._simStepTracker = SimStepTracker(scenario, scenarioDoneSignal)
        self._stageEventListener = StageEventListener(self._simStepTracker)

    def stop_scenario(self):
        self._stageEventListener._stop()

    def cleanup(self):
        self._stageEventListener.cleanup()
        self._simStepTracker.abort()

    @property
    def scenario(self):
        return self._scenario

    @scenario.setter
    def scenario(self, scenario):
        # The setter must reuse the property name so that
        # `manager.scenario = new_scenario` actually swaps the scenario.
        self.stop_scenario()
        self._scenario = scenario
        self._setup(scenario)
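# ======================================================================================================================
# Example (illustrative sketch, not part of the extension): a minimal Scenario
# subclass only needs to override the on_* hooks; ScenarioManager then drives
# it from the PhysX simulation events.
#
#   class LoggingScenario(Scenario):
#       def __init__(self):
#           super().__init__(secondsToRun=5.0)
#
#       def on_step(self, deltaTime, totalTime):
#           print(f"step dt={deltaTime:.4f}s, total={totalTime:.2f}s")
#
#   manager = ScenarioManager(LoggingScenario())
# ======================================================================================================================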
6,373
Python
32.197916
120
0.530206
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/path_tracker.py
import omni.usd

from pxr import Gf, UsdGeom

import math
import numpy as np

from .debug_draw import DebugRenderer
from .stepper import Scenario
from .vehicle import Axle, Vehicle

# ======================================================================================================================
#
# PurePursuitScenario
#
# ======================================================================================================================

class PurePursuitScenario(Scenario):
    def __init__(self, lookahead_distance, vehicle_path, trajectory_prim_path, meters_per_unit, close_loop_flag, enable_rear_steering):
        super().__init__(secondsToRun=10000.0, timeStep=1.0/25.0)
        self._MAX_STEER_ANGLE_RADIANS = math.pi / 3
        self._lookahead_distance = lookahead_distance
        self._METERS_PER_UNIT = meters_per_unit
        self._max_speed = 250.0

        self._stage = omni.usd.get_context().get_stage()
        self._vehicle = Vehicle(
            self._stage.GetPrimAtPath(vehicle_path),
            self._MAX_STEER_ANGLE_RADIANS,
            enable_rear_steering
        )
        self._debug_render = DebugRenderer(self._vehicle.get_bbox_size())
        self._path_tracker = PurePursuitPathTracker(math.pi/4)

        self._dest = None
        self._trajectory_prim_path = trajectory_prim_path
        self._trajectory = Trajectory(trajectory_prim_path, close_loop=close_loop_flag)

        self._stopped = False
        self.draw_track = False
        self._close_loop = close_loop_flag

    def on_start(self):
        self._vehicle.accelerate(1.0)

    def on_end(self):
        self._trajectory.reset()

    def _process(self, forward, up, dest_position, distance=None, is_close_to_dest=False):
        """
        Steering/acceleration vehicle control heuristic.
        """
        if (distance is None):
            distance, is_close_to_dest = self._vehicle.is_close_to(dest_position, self._lookahead_distance)

        curr_vehicle_pos = self._vehicle.curr_position()
        self._debug_render.update_vehicle(self._vehicle)
        self._debug_render.update_path_to_dest(curr_vehicle_pos, dest_position)

        # FIXME: - currently the extension expects Y-up axis which is not flexible.
        # Project onto XZ plane
        curr_vehicle_pos[1] = 0.0
        forward[1] = 0.0
        dest_position[1] = 0.0

        speed = self._vehicle.get_speed() * self._METERS_PER_UNIT

        axle_front = Gf.Vec3f(self._vehicle.axle_position(Axle.FRONT))
        axle_rear = Gf.Vec3f(self._vehicle.axle_position(Axle.REAR))
        axle_front[1] = 0.0
        axle_rear[1] = 0.0

        # self._debug_render.update_path_tracking(axle_front, axle_rear, forward, dest_position)

        steer_angle = self._path_tracker.on_step(
            axle_front,
            axle_rear,
            forward,
            dest_position,
            curr_vehicle_pos
        )

        if steer_angle < 0:
            self._vehicle.steer_left(abs(steer_angle))
        else:
            self._vehicle.steer_right(steer_angle)

        # Accelerate/brake control heuristic
        if abs(steer_angle) > 0.1 and speed > 5.0:
            self._vehicle.brake(1.0)
            self._vehicle.accelerate(0.0)
        else:
            if (speed >= self._max_speed):
                self._vehicle.brake(0.8)
                self._vehicle.accelerate(0.0)
            else:
                self._vehicle.brake(0.0)
                self._vehicle.accelerate(0.7)

    def _full_stop(self):
        self._vehicle.accelerate(0.0)
        self._vehicle.brake(1.0)

    def set_meters_per_unit(self, value):
        self._METERS_PER_UNIT = value

    def teardown(self):
        super().abort()
        self._dest.teardown()
        self._dest = None
        self._stage = None
        self._vehicle = None
        self._debug_render = None
        self._path_tracker = None

    def enable_debug(self, flag):
        self._debug_render.enable(flag)

    def on_step(self, deltaTime, totalTime):
        """
        Updates vehicle control on sim update callback in order to stay on tracked path.
""" forward = self._vehicle.forward() up = self._vehicle.up() if self._trajectory and self.draw_track: self._trajectory.draw() dest_position = self._trajectory.point() is_end_point = self._trajectory.is_at_end_point() # Run vehicle control unless reached the destination if dest_position: distance, is_close_to_dest = self._vehicle.is_close_to(dest_position, self._lookahead_distance) if (is_close_to_dest): dest_position = self._trajectory.next_point() else: # Compute vehicle steering and acceleration self._process(forward, up, dest_position, distance, is_close_to_dest) else: self._stopped = True self._full_stop() def recompute_trajectory(self): self._trajectory = Trajectory(self._trajectory_prim_path, self._close_loop) def set_lookahead_distance(self, distance): self._lookahead_distance = distance def set_close_trajectory_loop(self, flag): self._close_loop = flag self._trajectory.set_close_loop(flag) # ====================================================================================================================== # # PurePursuitPathTracker # # ====================================================================================================================== class PurePursuitPathTracker(): """ Implements path tracking in spirit of Pure Pursuit algorithm. References * Implementation of the Pure Pursuit Path tracking Algorithm, RC Conlter: https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf * https://dingyan89.medium.com/three-methods-of-vehicle-lateral-control-pure-pursuit-stanley-and-mpc-db8cc1d32081 """ def __init__(self, max_steer_angle_radians): self._max_steer_angle_radians = max_steer_angle_radians self._debug_enabled = False def _steer_value_from_angle(self, angle): """ Computes vehicle's steering wheel angle in expected range [-1, 1]. """ return np.clip(angle / self._max_steer_angle_radians, -1.0, 1.0) def on_step(self, front_axle_pos, rear_axle_pos, forward, dest_pos, curr_pos): """ Recomputes vehicle's steering angle on a simulation step. """ front_axle_pos, rear_axle_pos = rear_axle_pos, front_axle_pos # Lookahead points to the next destination point lookahead = dest_pos - rear_axle_pos # Forward vector corrsponds to an axis segment front-to-rear forward = front_axle_pos - rear_axle_pos lookahead_dist = np.linalg.norm(lookahead) forward_dist = np.linalg.norm(forward) if self._debug_enabled: if lookahead_dist == 0.0 or forward_dist == 0.0: raise Exception("Pure pursuit aglorithm: invalid state") lookahead.Normalize() forward.Normalize() # Compute a signed angle alpha between lookahead and forward vectors, # /!\ left-handed rotation assumed. dot = lookahead[0] * forward[0] + lookahead[2] * forward[2] cross = lookahead[0] * forward[2] - lookahead[2] * forward[0] alpha = math.atan2(cross, dot) theta = math.atan(2.0 * forward_dist * math.sin(alpha) / lookahead_dist) steer_angle = self._steer_value_from_angle(theta) return steer_angle # ====================================================================================================================== # # Trajectory # # ====================================================================================================================== class Trajectory(): """ A helper class to access coordinates of points that form a BasisCurve prim. 
""" def __init__(self, prim_path, close_loop=True): stage = omni.usd.get_context().get_stage() basis_curves = UsdGeom.BasisCurves.Get(stage, prim_path) if (basis_curves and basis_curves is not None): curve_prim = stage.GetPrimAtPath(prim_path) self._points = basis_curves.GetPointsAttr().Get() self._num_points = len(self._points) cache = UsdGeom.XformCache() T = cache.GetLocalToWorldTransform(curve_prim) for i in range(self._num_points): p = Gf.Vec4d(self._points[i][0], self._points[i][1], self._points[i][2], 1.0) p_ = p * T self._points[i] = Gf.Vec3f(p_[0], p_[1], p_[2]) else: self._points = None self._num_points = 0 self._pointer = 0 self._close_loop = close_loop def point(self): """ Returns current point. """ return self._points[self._pointer] if self._pointer < len(self._points) else None def next_point(self): """ Next point on the curve. """ if (self._pointer < self._num_points): self._pointer = self._pointer + 1 if self._pointer >= self._num_points and self._close_loop: self._pointer = 0 return self.point() return None def is_at_end_point(self): """ Checks if the current point is the last one. """ return self._pointer == (self._num_points - 1) def reset(self): """ Resets current point to the first one. """ self._pointer = 0 def set_close_loop(self, flag): self._close_loop = flag
9,688
Python
34.752767
120
0.55192
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/extension.py
import omni.ext import omni.kit import omni.usd import carb import asyncio from .model import ExtensionModel from .ui import ExtensionUI # ====================================================================================================================== # # PathTrackingExtension # # ====================================================================================================================== class PathTrackingExtension(omni.ext.IExt): def __init__(self): self._DEFAULT_LOOKAHEAD = 550.0 # Any user-defined changes to the lookahead parameter will be clamped: self._MIN_LOOKAHEAD = 400.0 self._MAX_LOOKAHEAD = 2000.0 def on_startup(self, ext_id): if omni.usd.get_context().get_stage() is None: # Workaround for running within test environment. omni.usd.get_context().new_stage() # Usd listener could be used in the future if we could be interested # in recomputing changes in the vehicle planned trajectory "on the fly". # self._usd_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_usd_change, None) self._stage_event_sub = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop( self._on_stage_event, name="Stage Open/Closing Listening" ) self._model = ExtensionModel( ext_id, default_lookahead_distance=self._DEFAULT_LOOKAHEAD, max_lookahed_distance=self._MAX_LOOKAHEAD, min_lookahed_distance=self._MIN_LOOKAHEAD ) self._ui = ExtensionUI(self) self._ui.build_ui(self._model.get_lookahead_distance(), attachments=[]) def on_shutdown(self): timeline = omni.timeline.get_timeline_interface() if timeline.is_playing(): timeline.stop() self._clear_attachments() self._usd_listener = None self._stage_event_sub = None self._ui.teardown() self._ui = None self._model.teardown() self._model = None def _update_ui(self): self._ui.update_attachment_info(self._model._vehicle_to_curve_attachments.keys()) # ====================================================================================================================== # Callbacks # ====================================================================================================================== def _on_click_start_scenario(self): async def start_scenario(model): timeline = omni.timeline.get_timeline_interface() if timeline.is_playing(): timeline.stop() await omni.kit.app.get_app().next_update_async() lookahead_distance = self._ui.get_lookahead_distance() model.load_simulation(lookahead_distance) omni.timeline.get_timeline_interface().play() run_loop = asyncio.get_event_loop() asyncio.run_coroutine_threadsafe(start_scenario(self._model), loop=run_loop) def _on_click_stop_scenario(self): async def stop_scenario(): timeline = omni.timeline.get_timeline_interface() if timeline.is_playing(): timeline.stop() await omni.kit.app.get_app().next_update_async() run_loop = asyncio.get_event_loop() asyncio.run_coroutine_threadsafe(stop_scenario(), loop=run_loop) def _on_click_load_sample_vehicle(self): self._model.load_sample_vehicle() def _on_click_load_ground_plane(self): self._model.load_ground_plane() def _on_click_load_basis_curve(self): self._model.load_sample_track() def _on_click_load_forklift(self): self._model.load_forklift_rig() def _on_click_attach_selected(self): selected_prim_paths = omni.usd.get_context().get_selection().get_selected_prim_paths() self._model.attach_selected_prims(selected_prim_paths) self._update_ui() def _clear_attachments(self): async def stop_scenario(): timeline = omni.timeline.get_timeline_interface() if timeline.is_playing(): timeline.stop() await omni.kit.app.get_app().next_update_async() 
run_loop = asyncio.get_event_loop() asyncio.run_coroutine_threadsafe(stop_scenario(), loop=run_loop) self._model.clear_attachments() self._update_ui() def _on_click_clear_attachments(self): self._clear_attachments() def _on_click_load_preset_scene(self): self._model.load_preset_scene() self._update_ui() def _on_stage_event(self, event: carb.events.IEvent): """Called on USD Context event""" if event.type == int(omni.usd.StageEventType.CLOSING): self._model.clear_attachments() self._update_ui() def _on_usd_change(self, objects_changed, stage): carb.log_info("_on_usd_change") for resync_path in objects_changed.GetResyncedPaths(): carb.log_info(resync_path) def _changed_enable_debug(self, model): self._model.set_enable_debug(model.as_bool) def _on_lookahead_distance_changed(self, distance): # self._clear_attachments() clamped_lookahead_distance = self._model.update_lookahead_distance(distance) self._ui.set_lookahead_distance(clamped_lookahead_distance) def _on_trajectory_loop_value_changed(self, widget_model): self._model.set_close_trajectory_loop(widget_model.as_bool) def _on_steering_changed(self, model): # First we have to stop current simulation. self._on_click_stop_scenario() self._model.set_enable_rear_steering(model.as_bool)
5,705
Python
35.576923
120
0.57844
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/utils.py
import omni.usd from pxr import UsdGeom, Sdf, Gf, UsdPhysics, PhysxSchema class Utils: @staticmethod def create_mesh_square_axis(stage, path, axis, halfSize): if axis == "X": points = [ Gf.Vec3f(0.0, -halfSize, -halfSize), Gf.Vec3f(0.0, halfSize, -halfSize), Gf.Vec3f(0.0, halfSize, halfSize), Gf.Vec3f(0.0, -halfSize, halfSize), ] normals = [Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(1, 0, 0)] indices = [0, 1, 2, 3] vertexCounts = [4] # Create the mesh return Utils.create_mesh(stage, path, points, normals, indices, vertexCounts) elif axis == "Y": points = [ Gf.Vec3f(-halfSize, 0.0, -halfSize), Gf.Vec3f(halfSize, 0.0, -halfSize), Gf.Vec3f(halfSize, 0.0, halfSize), Gf.Vec3f(-halfSize, 0.0, halfSize), ] normals = [Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0), Gf.Vec3f(0, 1, 0)] indices = [0, 1, 2, 3] vertexCounts = [4] # Create the mesh return Utils.create_mesh(stage, path, points, normals, indices, vertexCounts) points = [ Gf.Vec3f(-halfSize, -halfSize, 0.0), Gf.Vec3f(halfSize, -halfSize, 0.0), Gf.Vec3f(halfSize, halfSize, 0.0), Gf.Vec3f(-halfSize, halfSize, 0.0), ] normals = [Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1), Gf.Vec3f(0, 0, 1)] indices = [0, 1, 2, 3] vertexCounts = [4] # Create the mesh mesh = Utils.create_mesh(stage, path, points, normals, indices, vertexCounts) # text coord texCoords = mesh.CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying) texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)]) return mesh @staticmethod def create_mesh(stage, path, points, normals, indices, vertexCounts): mesh = UsdGeom.Mesh.Define(stage, path) # Fill in VtArrays mesh.CreateFaceVertexCountsAttr().Set(vertexCounts) mesh.CreateFaceVertexIndicesAttr().Set(indices) mesh.CreatePointsAttr().Set(points) mesh.CreateDoubleSidedAttr().Set(False) mesh.CreateNormalsAttr().Set(normals) return mesh @staticmethod def add_ground_plane(stage, planePath, axis, size=3000.0, position=Gf.Vec3f(0.0), color=Gf.Vec3f(0.2, 0.25, 0.25)): # plane xform, so that we dont nest geom prims planePath = omni.usd.get_stage_next_free_path(stage, planePath, True) planeXform = UsdGeom.Xform.Define(stage, planePath) planeXform.AddTranslateOp().Set(position) planeXform.AddOrientOp().Set(Gf.Quatf(1.0)) planeXform.AddScaleOp().Set(Gf.Vec3f(1.0)) # (Graphics) Plane mesh geomPlanePath = planePath + "/CollisionMesh" entityPlane = Utils.create_mesh_square_axis(stage, geomPlanePath, axis, size) entityPlane.CreateDisplayColorAttr().Set([color]) # (Collision) Plane colPlanePath = planePath + "/CollisionPlane" planeGeom = PhysxSchema.Plane.Define(stage, colPlanePath) planeGeom.CreatePurposeAttr().Set("guide") planeGeom.CreateAxisAttr().Set(axis) prim = stage.GetPrimAtPath(colPlanePath) UsdPhysics.CollisionAPI.Apply(prim) return planePath
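# ======================================================================================================================
# Example (illustrative sketch): creating a ground plane on the current stage.
# The path and size below are hypothetical.
#
#   stage = omni.usd.get_context().get_stage()
#   Utils.add_ground_plane(stage, "/World/GroundPlane", "Y", size=1500.0)
# ======================================================================================================================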
3,519
Python
38.111111
104
0.577721
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/model.py
import omni
from pxr import UsdGeom
import omni.kit.commands
from omni.physxvehicle.scripts.wizards import physxVehicleWizard as VehicleWizard
from omni.physxvehicle.scripts.helpers.UnitScale import UnitScale
from omni.physxvehicle.scripts.commands import PhysXVehicleWizardCreateCommand

from .stepper import ScenarioManager
from .path_tracker import PurePursuitScenario
from .utils import Utils

from pxr import UsdPhysics

# ======================================================================================================================
#
# ExtensionModel
#
# ======================================================================================================================
class ExtensionModel:

    ROOT_PATH = "/World"

    def __init__(self, extension_id, default_lookahead_distance, max_lookahed_distance, min_lookahed_distance):
        self._ext_id = extension_id
        self._METADATA_KEY = f"{extension_id.split('-')[0]}.metadata"
        self._lookahead_distance = default_lookahead_distance
        self._min_lookahead_distance = min_lookahed_distance
        self._max_lookahead_distance = max_lookahed_distance
        self.METERS_PER_UNIT = 0.01
        UsdGeom.SetStageMetersPerUnit(omni.usd.get_context().get_stage(), self.METERS_PER_UNIT)
        # Currently the extension expects Y-axis to be up-axis.
        # Conventionally Y-up is often used in graphics, including Kit-apps.
        # TODO: refactor impl to avoid breaking things when changing up-axis settings.
        self._up_axis = "Y"
        self._vehicle_to_curve_attachments = {}
        self._scenario_managers = []
        self._dirty = False
        # Enables debug overlay with additional info regarding current vehicle state.
        self._enable_debug = False
        # Closed trajectory loop
        self._closed_trajectory_loop = False
        self._rear_steering = False

    def teardown(self):
        self.stop_scenarios()
        self._scenario_managers = None

    def attach_vehicle_to_curve(self, wizard_vehicle_path, curve_path):
        """
        Links a vehicle prim (must be a WizardVehicle Xform) to the path
        (BasisCurves) to be tracked by the vehicle.
        Currently we expect two prims to be selected:
        - WizardVehicle
        - BasisCurves (the curve/trajectory the vehicle must track)
        """
        stage = omni.usd.get_context().get_stage()
        prim0 = stage.GetPrimAtPath(wizard_vehicle_path)
        prim1 = stage.GetPrimAtPath(curve_path)
        if prim0.IsA(UsdGeom.BasisCurves):
            # Fix order of selected prims: WizardVehicle should be first
            prim0, prim1 = prim1, prim0
            wizard_vehicle_path, curve_path = curve_path, wizard_vehicle_path
        if prim0.IsA(UsdGeom.Xformable):
            key = wizard_vehicle_path + "/Vehicle"
            self._vehicle_to_curve_attachments[key] = curve_path
        self._dirty = True

    def attach_selected_prims(self, selected_prim_paths):
        """
        Attaches a pair of selected prim paths from the stage: one is treated
        as the vehicle, the other as the path to be tracked.
        The selected prim paths should include a WizardVehicle Xform that
        represents the vehicle, and a BasisCurves prim that represents the
        tracked path.
        """
        if len(selected_prim_paths) == 2:
            self.attach_vehicle_to_curve(
                wizard_vehicle_path=selected_prim_paths[0],
                curve_path=selected_prim_paths[1]
            )

    def attach_preset_metadata(self, metadata):
        """
        Does vehicle-to-curve attachment from the metadata dictionary directly.
        """
        self.attach_vehicle_to_curve(
            wizard_vehicle_path=metadata["WizardVehicle"],
            curve_path=metadata["BasisCurve"]
        )

    def _cleanup_scenario_managers(self):
        """Cleans up scenario managers.
        Often useful when tracked data becomes obsolete."""
        self.stop_scenarios()
        for manager in self._scenario_managers:
            manager.cleanup()
        self._scenario_managers.clear()
        self._dirty = True

    def clear_attachments(self):
        """
        Removes previously added path tracking attachments.
        """
        self._cleanup_scenario_managers()
        self._vehicle_to_curve_attachments.clear()

    def stop_scenarios(self):
        """
        Stops path tracking scenarios.
        """
        for manager in self._scenario_managers:
            manager.stop_scenario()

    def load_simulation(self, lookahead_distance):
        """
        Loads scenarios with vehicle-to-curve attachments.
        Note that multiple vehicles could run at the same time.
        """
        if self._dirty:
            self._cleanup_scenario_managers()
            for vehicle_path in self._vehicle_to_curve_attachments:
                scenario = PurePursuitScenario(
                    lookahead_distance,
                    vehicle_path,
                    self._vehicle_to_curve_attachments[vehicle_path],
                    self.METERS_PER_UNIT,
                    self._closed_trajectory_loop,
                    self._rear_steering
                )
                scenario.enable_debug(self._enable_debug)
                scenario_manager = ScenarioManager(scenario)
                self._scenario_managers.append(scenario_manager)
            self._dirty = False
        self.recompute_trajectories()

    def recompute_trajectories(self):
        """
        Updates tracked trajectories. Needed when the BasisCurves prim defining
        a trajectory in the scene was edited by the user.
        """
        for manager in self._scenario_managers:
            manager.scenario.recompute_trajectory()

    def set_enable_debug(self, flag):
        """
        Enables/disables debug overlay.
        """
        self._enable_debug = flag
        for manager in self._scenario_managers:
            manager.scenario.enable_debug(flag)

    def set_close_trajectory_loop(self, flag):
        """
        Enables closed loop path tracking.
        """
        self._closed_trajectory_loop = flag
        for manager in self._scenario_managers:
            manager.scenario.set_close_trajectory_loop(flag)

    def set_enable_rear_steering(self, flag):
        """
        Enables rear steering for the vehicle.
        """
        self._rear_steering = flag
        # Mark simulation config as dirty in order to re-create vehicle object.
        self._dirty = True

    def load_ground_plane(self):
        """
        Helper to quickly load a preset ground plane prim.
        """
        stage = omni.usd.get_context().get_stage()
        path = omni.usd.get_stage_next_free_path(stage, "/GroundPlane", False)
        Utils.add_ground_plane(stage, path, self._up_axis)

    def get_unit_scale(self, stage):
        metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
        lengthScale = 1.0 / metersPerUnit
        kilogramsPerUnit = UsdPhysics.GetStageKilogramsPerUnit(stage)
        massScale = 1.0 / kilogramsPerUnit
        return UnitScale(lengthScale, massScale)

    def load_sample_vehicle(self):
        """
        Loads a preset vehicle from a USD data provider shipped with the extension.
        """
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        vehicleData = VehicleWizard.VehicleData(self.get_unit_scale(stage),
                                                VehicleWizard.VehicleData.AXIS_Y, VehicleWizard.VehicleData.AXIS_Z)

        root_vehicle_path = self.ROOT_PATH + VehicleWizard.VEHICLE_ROOT_BASE_PATH
        root_vehicle_path = omni.usd.get_stage_next_free_path(stage, root_vehicle_path, True)
        root_shared_path = self.ROOT_PATH + VehicleWizard.SHARED_DATA_ROOT_BASE_PATH
        # Reserve a free path for the shared data root as well.
        root_shared_path = omni.usd.get_stage_next_free_path(stage, root_shared_path, True)

        vehicleData.rootVehiclePath = root_vehicle_path
        vehicleData.rootSharedPath = root_shared_path

        (success, (messageList, scenePath)) = PhysXVehicleWizardCreateCommand.execute(vehicleData)
        assert success
        assert not messageList
        assert scenePath is not None

        return root_vehicle_path

    def load_sample_track(self):
        """
        Loads a sample BasisCurves prim serialized in USD.
""" usd_context = omni.usd.get_context() ext_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id) basis_curve_prim_path = "/BasisCurves" basis_curve_prim_path = omni.usd.get_stage_next_free_path( usd_context.get_stage(), basis_curve_prim_path, True ) basis_curve_usd_path = f"{ext_path}/data/usd/curve.usd" omni.kit.commands.execute( "CreateReferenceCommand", path_to=basis_curve_prim_path, asset_path=basis_curve_usd_path, usd_context=usd_context, ) def load_forklift_rig(self): """Load a forklift model from USD with already exisitng physx vehicle rig.""" usd_context = omni.usd.get_context() ext_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id) forklift_prim_path = "/ForkliftRig" forklift_prim_path = omni.usd.get_stage_next_free_path( usd_context.get_stage(), forklift_prim_path, True ) vehicle_usd_path = f"{ext_path}/data/usd/forklift/forklift_rig.usd" omni.kit.commands.execute( "CreateReferenceCommand", path_to=forklift_prim_path, asset_path=vehicle_usd_path, usd_context=usd_context, ) return forklift_prim_path def load_preset_scene(self): """ Loads a preset scene with vehicle template and predefined curve for path tracking. """ default_prim_path = self.ROOT_PATH stage = omni.usd.get_context().get_stage() if not stage.GetPrimAtPath(default_prim_path): omni.kit.commands.execute( "CreatePrim", prim_path=default_prim_path, prim_type="Xform", select_new_prim=True, attributes={} ) stage.SetDefaultPrim(stage.GetPrimAtPath(default_prim_path)) self.load_ground_plane() vehicle_prim_path = self.load_sample_vehicle() self.load_sample_track() metadata_vehicle_to_curve = self.get_attachment_presets(vehicle_prim_path) self.attach_preset_metadata(metadata_vehicle_to_curve) def get_attachment_presets(self, vehicle_path): """ Prim paths for the preset scene with prim paths for vehicle-to-curve attachment. """ stage = omni.usd.get_context().get_stage() vehicle_prim = stage.GetPrimAtPath(vehicle_path) metadata = vehicle_prim.GetCustomData() # Vehicle-to-Curve attachment of the preset is stored in the metadata. attachment_preset = metadata.get(self._METADATA_KEY) if not attachment_preset or attachment_preset is None: # Fallback to defaults attachment_preset = { "WizardVehicle": vehicle_path, "BasisCurve": "/World/BasisCurves/BasisCurves" } return attachment_preset def get_lookahead_distance(self): return self._lookahead_distance def update_lookahead_distance(self, distance): """Updates the lookahead distance parameter for pure pursuit""" clamped_distance = max( self._min_lookahead_distance, min(self._max_lookahead_distance, distance) ) for scenario_manager in self._scenario_managers: scenario_manager.scenario.set_lookahead_distance(clamped_distance) return clamped_distance
11,903
Python
38.287129
120
0.614971
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/debug_draw.py
import carb
from omni.debugdraw import get_debug_draw_interface

"""
Note: DebugRenderer relies on the `omni.debugdraw` utility to optionally provide
a debug overlay with additional info regarding the current state of the vehicle,
the path tracking destination, etc.
Using omni.ui.scene would be more future-proof, as it would break the dependency
on `omni.debugdraw`, which may change or is not guaranteed to be kept in future
Kit-based apps.
"""


class DebugRenderer:
    def __init__(self, vehicle_bbox_size):
        self._debug_draw = get_debug_draw_interface()
        self._curr_time = 0.0
        self._color = 0x60FF0000
        self._line_thickness = 2.0
        self._size = max(vehicle_bbox_size)
        self._enabled = True
        # update_stream = omni.kit.app.get_app().get_update_event_stream()
        # self._update_sub = update_stream.create_subscription_to_pop(self._on_update, name="omni.physx update")

    def _draw_segment(self, start, end, color, thickness):
        self._debug_draw.draw_line(
            carb.Float3(start[0], start[1], start[2]),
            color, thickness,
            carb.Float3(end[0], end[1], end[2]),
            color, thickness
        )

    def update_path_tracking(self, front_axle_pos, rear_axle_pos, forward, dest_pos):
        if not self._enabled:
            return
        color = 0xFF222222
        thickness = 10.0
        self._draw_segment(rear_axle_pos, dest_pos, color, thickness)
        color = 0xFF00FA9A
        self._draw_segment(rear_axle_pos, front_axle_pos, color, thickness)

    def update_vehicle(self, vehicle):
        if not self._enabled:
            return
        curr_vehicle_pos = vehicle.curr_position()
        forward = vehicle.forward()
        up = vehicle.up()
        t = self._line_thickness * 2
        x = curr_vehicle_pos[0]
        y = curr_vehicle_pos[1]
        z = curr_vehicle_pos[2]
        s = self._size / 2
        # Draw forward
        self._debug_draw.draw_line(
            carb.Float3(x, y, z),
            0xFF0000FF, t,
            carb.Float3(x + s * forward[0], y + s * forward[1], z + s * forward[2]),
            0xFF0000FF, t
        )
        # Draw up
        self._debug_draw.draw_line(
            carb.Float3(x, y, z),
            0xFF00FF00, t,
            carb.Float3(x + s * up[0], y + s * up[1], z + s * up[2]),
            0xFF00FF00, t
        )

        # /!\ Uncomment additional debug overlay drawing below if needed

        # Draw axle axis connecting front to rear
        # af = vehicle.axle_front()
        # ar = vehicle.axle_rear()
        # axle_color = 0xFF8A2BE2
        # self._debug_draw.draw_line(
        #     carb.Float3(af[0], af[1], af[2]),
        #     axle_color, t*4,
        #     carb.Float3(ar[0], ar[1], ar[2]),
        #     axle_color, t*4
        # )

        # Draw front axle
        # fl = vehicle.wheel_pos_front_left()
        # fr = vehicle.wheel_pos_front_right()
        # front_axle_color = 0xFFFF0000
        # self._debug_draw.draw_line(
        #     carb.Float3(fl[0], fl[1], fl[2]),
        #     front_axle_color, t*2,
        #     carb.Float3(fr[0], fr[1], fr[2]),
        #     front_axle_color, t*2
        # )

        # Draw rear axle
        # rl = vehicle.wheel_pos_rear_left()
        # rr = vehicle.wheel_pos_rear_right()
        # rear_axle_color = 0xFFAAAAAA
        # self._debug_draw.draw_line(
        #     carb.Float3(rl[0], rl[1], rl[2]),
        #     rear_axle_color, t*2,
        #     carb.Float3(rr[0], rr[1], rr[2]),
        #     rear_axle_color, t*2
        # )

    def update_path_to_dest(self, vehicle_pos, dest_pos):
        if not self._enabled:
            return
        if dest_pos:
            self._debug_draw.draw_line(
                carb.Float3(vehicle_pos[0], vehicle_pos[1], vehicle_pos[2]),
                self._color, self._line_thickness,
                carb.Float3(dest_pos[0], dest_pos[1], dest_pos[2]),
                self._color, self._line_thickness
            )

    def enable(self, value):
        self._enabled = value
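
# Editorial sketch of the omni.ui.scene alternative mentioned in the note above.
# The wiring of a SceneView into the active viewport is omitted, and the Line
# arguments reflect omni.ui.scene's documented manipulator model; treat this as
# an assumption, not a tested drop-in replacement.
#
#     from omni.ui import scene as sc
#     import omni.ui as ui
#
#     scene_view = sc.SceneView()  # would need to be parented to a viewport window
#     with scene_view.scene:
#         # world-space segment, e.g. from the rear axle towards the destination
#         sc.Line([0, 0, 0], [0, 0, -100], color=ui.color(0.13, 0.98, 0.6), thickness=2)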
4,039
Python
32.666666
112
0.549641
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/scripts/ui.py
import omni.ui as ui
from typing import List

DEFAULT_BTN_HEIGHT = 22
COLLAPSABLE_FRAME_HEIGHT = 32
LINE_HEIGHT = 32
LABEL_WIDTH = 150
LABEL_INNER_WIDTH = 70
ELEM_MARGIN = 4
BTN_WIDTH = 32
VSPACING = ELEM_MARGIN * 2
BORDER_RADIUS = 4

CollapsableFrameStyle = {
    "CollapsableFrame": {
        "background_color": 0xFF333333,
        "secondary_color": 0xFF333333,
        "color": 0xFF00b976,
        "border_radius": BORDER_RADIUS,
        "border_color": 0x0,
        "border_width": 0,
        "font_size": 14,
        "padding": ELEM_MARGIN * 2,
        "margin_width": ELEM_MARGIN,
        "margin_height": ELEM_MARGIN,
    },
    "CollapsableFrame:hovered": {"secondary_color": 0xFF3C3C3C},
    "CollapsableFrame:pressed": {"secondary_color": 0xFF333333},
    "Button": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
    "Button:selected": {"background_color": 0xFF666666},
    "Button.Label:disabled": {"color": 0xFF888888},
    "Slider": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
    "Slider:disabled": {"color": 0xFF888888},
    "ComboBox": {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS},
    "Label": {"margin_height": 0, "margin_width": ELEM_MARGIN},
    "Label:disabled": {"color": 0xFF888888},
}

TREE_VIEW_STYLE = {
    "TreeView:selected": {"background_color": 0x66FFFFFF},
    "TreeView.Item": {"color": 0xFFCCCCCC},
    "TreeView.Item:selected": {"color": 0xFFCCCCCC},
    "TreeView.Header": {"background_color": 0xFF000000},
}

IMPORTANT_BUTTON_STYLE = {
    "Button": {
        "background_color": 0x7000b976
    }
}


class AttachedItem(ui.AbstractItem):
    """Single item of the model"""

    def __init__(self, text):
        super().__init__()
        self.name_model = ui.SimpleStringModel(text)


class AttachmentModel(ui.AbstractItemModel):
    """
    Represents the list of active vehicle-to-curve attachments.
    It is used to make a single-level tree appear like a simple list.
    """

    def __init__(self, items: List[object]):
        super().__init__()
        self.attachments_changed(items)

    def get_item_children(self, item):
        """Returns all the children when the widget asks for them."""
        if item is not None:
            # Since we are doing a flat list, we return the children of root only.
            # If it's not root we return an empty list.
            return []
        return self._attachments

    def get_item_value_model_count(self, item):
        """The number of columns"""
        return 1

    def get_item_value_model(self, item, column_id):
        """
        Return value model.
        It's the object that tracks the specific value.
        In our case we use ui.SimpleStringModel.
""" if item and isinstance(item, AttachedItem): return item.name_model def attachments_changed(self, attachments): self._attachments = [] i = 1 for attachment in attachments: self._attachments.append(AttachedItem(f"[{i}] {attachment}")) i = i + 1 self._item_changed(None) class ExtensionUI(): def __init__(self, controller): self._controller = controller def build_ui(self, lookahead_distance, attachments): self._window = ui.Window("Vehicle Path Tracking Extension (Beta)", width=300, height=300) with self._window.frame: with ui.HStack(): # Column #1 with ui.VStack(): self._settings_frame = ui.CollapsableFrame( "SETTINGS", collapsed=False, height=COLLAPSABLE_FRAME_HEIGHT, style=CollapsableFrameStyle ) with self._settings_frame: with ui.VStack(): width = 64 height = 16 with ui.HStack(width=width, height=height): ui.Label("Enable debug: ") enable_debug_checkbox = ui.CheckBox() enable_debug_checkbox.model.add_value_changed_fn( self._controller._changed_enable_debug ) ui.Spacer(height=LINE_HEIGHT/4) ui.Label("REFERENCE COORDINATE SYSTEM: Up-axis: Y-axis (fixed)") ui.Spacer(height=LINE_HEIGHT/4) with ui.HStack(width=width, height=height): ui.Label("Pure Pursuit look ahead distance: ") self._lookahead_field = ui.FloatField(width=64.0) self._lookahead_field.model.set_value(lookahead_distance) self._lookahead_field.model.add_end_edit_fn(self._notify_lookahead_distance_changed) with ui.HStack(width=width, height=height): ui.Label("Trajectory Loop:") self._checkbox_trajectory_loop = ui.CheckBox(name="TracjectoryLoop") self._checkbox_trajectory_loop.model.set_value(False) self._checkbox_trajectory_loop.model.add_value_changed_fn( self._controller._on_trajectory_loop_value_changed ) # FIXME: Fix regression in rear steering behaviour. # (Issue #13) # with ui.HStack(width=width, height=height): # ui.Label("Enable rear steering:") # self._checkbox_rear_steering = ui.CheckBox(name="RearSteering") # self._checkbox_rear_steering.model.set_value(False) # self._checkbox_rear_steering.model.add_value_changed_fn( # self._controller._on_steering_changed # ) self._controls_frame = ui.CollapsableFrame("CONTROLS", collapsed=False, height=COLLAPSABLE_FRAME_HEIGHT, style=CollapsableFrameStyle ) with self._controls_frame: with ui.HStack(): with ui.VStack(): ui.Button( "Start Scenario", clicked_fn=self._controller._on_click_start_scenario, height=DEFAULT_BTN_HEIGHT, style=IMPORTANT_BUTTON_STYLE ) ui.Spacer(height=LINE_HEIGHT/8) ui.Button( "Stop Scenario", clicked_fn=self._controller._on_click_stop_scenario, height=DEFAULT_BTN_HEIGHT, style=IMPORTANT_BUTTON_STYLE ) ui.Line(height=LINE_HEIGHT/2) ui.Button( "Load a preset scene", clicked_fn=self._controller._on_click_load_preset_scene, height=DEFAULT_BTN_HEIGHT ) ui.Line(height=LINE_HEIGHT/2) ui.Button( "Load a ground plane", clicked_fn=self._controller._on_click_load_ground_plane, height=DEFAULT_BTN_HEIGHT ) ui.Spacer(height=LINE_HEIGHT/8) ui.Button( "Load a sample vehicle template", clicked_fn=self._controller._on_click_load_sample_vehicle, height=DEFAULT_BTN_HEIGHT ) ui.Spacer(height=LINE_HEIGHT/8) ui.Button( "Load a sample BasisCurve", clicked_fn=self._controller._on_click_load_basis_curve, height=DEFAULT_BTN_HEIGHT ) # FIXME: re-enable Forklift once the new updated # meta-data for it will be provided. 
                                # ui.Spacer(height=LINE_HEIGHT/8)
                                # ui.Button(
                                #     "Load a Forklift",
                                #     clicked_fn=self._controller._on_click_load_forklift,
                                #     height=DEFAULT_BTN_HEIGHT
                                # )

                    self._attachments_controls_frame = ui.CollapsableFrame(
                        "VEHICLE-TO-CURVE ATTACHMENTS",
                        collapsed=False,
                        height=COLLAPSABLE_FRAME_HEIGHT,
                        style=CollapsableFrameStyle
                    )
                    with self._attachments_controls_frame:
                        with ui.VStack():
                            ui.Label(
                                "(1) Select WizardVehicle Xform and corresponding BasisCurve;\n(2) Click 'Attach Selected'",
                                width=32
                            )
                            ui.Spacer(height=LINE_HEIGHT/8)
                            ui.Button(
                                "Attach Selected",
                                clicked_fn=self._controller._on_click_attach_selected,
                                height=DEFAULT_BTN_HEIGHT,
                                style=IMPORTANT_BUTTON_STYLE
                            )
                            ui.Spacer(height=LINE_HEIGHT/8)
                            ui.Button(
                                "Clear All Attachments",
                                clicked_fn=self._controller._on_click_clear_attachments
                            )
                # Column #2
                self._attachments_frame = ui.CollapsableFrame(
                    "VEHICLE-TO-CURVE attachments",
                    collapsed=False,
                    height=COLLAPSABLE_FRAME_HEIGHT,
                    style=CollapsableFrameStyle
                )
                with self._attachments_frame:
                    with ui.VStack(direction=ui.Direction.TOP_TO_BOTTOM, height=20, style=CollapsableFrameStyle):
                        if attachments is not None and len(attachments) > 0:
                            self._attachment_label = ui.Label(
                                "Active vehicle-to-curve attachments:",
                                alignment=ui.Alignment.TOP
                            )
                        else:
                            self._attachment_label = ui.Label("No active vehicle-to-curve attachments")
                        self._attachment_model = AttachmentModel(attachments)
                        tree_view = ui.TreeView(
                            self._attachment_model,
                            root_visible=False,
                            header_visible=False,
                            style={"TreeView.Item": {"margin": 4}}
                        )
        # viewport = ui.Workspace.get_window("Viewport")
        # self._window.dock_in(viewport, ui.DockPosition.BOTTOM)
        # Dock extension window alongside 'Property' extension.
        self._window.deferred_dock_in("Property")
        # dock_in_window is deprecated, unfortunately
        # self._window.dock_in_window("Viewport", ui.DockPosition.RIGHT, ratio=0.1)

    def teardown(self):
        self._controller = None
        self._settings_frame = None
        self._controls_frame = None
        self._attachments_controls_frame = None
        self._window = None

    def get_lookahead_distance(self):
        return self._lookahead_field.model.as_float

    def set_lookahead_distance(self, distance):
        self._lookahead_field.model.set_value(distance)

    def _notify_lookahead_distance_changed(self, model):
        self._controller._on_lookahead_distance_changed(model.as_float)

    def update_attachment_info(self, attachments):
        self._attachment_model.attachments_changed(attachments)
        if len(attachments) > 0:
            self._attachment_label.text = "Active vehicle-to-curve attachments:"
        else:
            self._attachment_label.text = "No active vehicle-to-curve attachments"
13,127
Python
45.553191
124
0.475585
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/tests/test_extension_model.py
import omni.kit.app
import omni.kit.commands
import omni.usd

from omni.kit.test import AsyncTestCaseFailOnLogError
# from omni.kit.test_suite.helpers import wait_stage_loading

from ..scripts.model import ExtensionModel


# ======================================================================================================================
class TestExtensionModel(AsyncTestCaseFailOnLogError):

    async def setUp(self):
        usd_context = omni.usd.get_context()
        await usd_context.new_stage_async()

        ext_manager = omni.kit.app.get_app().get_extension_manager()
        self._ext_id = ext_manager.get_enabled_extension_id("ext.path.tracking")
        self._DEFAULT_LOOKAHEAD = 550.0
        self._MAX_LOOKAHEAD = 1200.0
        self._MIN_LOOKAHEAD = 300.0

    async def tearDown(self):
        self._ext_id = None

    async def test_load_preset(self):
        ext_model = ExtensionModel(self._ext_id,
                                   default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
                                   max_lookahed_distance=self._MAX_LOOKAHEAD,
                                   min_lookahed_distance=self._MIN_LOOKAHEAD
                                   )
        ext_model.load_preset_scene()

        stage = omni.usd.get_context().get_stage()
        ground_plane = stage.GetPrimAtPath("/World/GroundPlane")
        vehicle_template = stage.GetPrimAtPath("/World/VehicleTemplate")
        curve = stage.GetPrimAtPath("/World/BasisCurves")

        self.assertTrue(ground_plane is not None)
        self.assertTrue(vehicle_template is not None)
        self.assertTrue(curve is not None)

    async def test_hello(self):
        ext_model = ExtensionModel(self._ext_id,
                                   default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
                                   max_lookahed_distance=self._MAX_LOOKAHEAD,
                                   min_lookahed_distance=self._MIN_LOOKAHEAD
                                   )
        self.assertIsNotNone(ext_model)

    async def test_attachments_preset(self):
        # TODO: provide impl
        self.assertTrue(True)
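
# Editorial sketch for the TODO above (untested): test_attachments_preset could
# verify that loading the preset scene records exactly one vehicle-to-curve
# attachment in the model's internal map. The private attribute name comes from
# ExtensionModel; asserting on it is an assumption about acceptable test style.
#
#     async def test_attachments_preset(self):
#         ext_model = ExtensionModel(self._ext_id,
#                                    default_lookahead_distance=self._DEFAULT_LOOKAHEAD,
#                                    max_lookahed_distance=self._MAX_LOOKAHEAD,
#                                    min_lookahed_distance=self._MIN_LOOKAHEAD)
#         ext_model.load_preset_scene()  # attaches the preset metadata internally
#         self.assertEqual(len(ext_model._vehicle_to_curve_attachments), 1)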
2,139
Python
37.90909
120
0.57223
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/ext/path/tracking/tests/__init__.py
try:
    from .test_extension_model import *
except Exception:
    import carb

    carb.log_error("No tests for this module, check extension settings")
142
Python
22.83333
72
0.711268
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/config/extension.toml
[package]
version = "1.0.2-beta"
title = "Vehicle Path Tracking Extension"
description="Allows omni.physxvehicle to move along a user-defined trajectory via a path tracking algorithm inspired by 'pure pursuit'."
readme = "docs/index.rst"
changelog="docs/CHANGELOG.md"
repository = ""
icon = "data/icon.png"
preview_image="data/preview.png"
keywords = ["kit", "omni.physxvehicle", "animation", "path", "tracking", "vehicle"]

[dependencies]
"omni.usd" = {}
"omni.kit.uiapp" = {}
"omni.physx" = {}
"omni.physx.ui" = {}
"omni.physx.vehicle" = {}
"omni.usdphysics" = {}
"omni.physx.commands" = {}
"omni.kit.test_suite.helpers" = {}

[[python.module]]
name = "ext.path.tracking"

[[test]]
args = [
    "--/renderer/enabled=pxr",
    "--/renderer/active=pxr",
    "--/app/window/dpiScaleOverride=1.0",
    "--/app/window/scaleToMonitor=false",
    "--no-window"
]
dependencies = [
    "omni.hydra.pxr",
    "omni.kit.mainwindow",
    "omni.kit.widget.stage",
    "omni.kit.window.viewport",
    "omni.kit.window.stage",
    "omni.kit.window.console",
    "omni.kit.test_suite.helpers",
]
1,079
TOML
24.714285
131
0.658017
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/docs/CHANGELOG.md
# Changelog

## [1.0.2-beta] - 2023-01-29

### Changes
- Fixed regression in preset vehicle scene after Kit 104 updates;
- Temporarily removed forklift model from simulation templates (Kit 104 regression);
- Temporarily removed the UI control for selecting the rear steering option (Kit 104 regression).

## [1.0.0] - 2022-08-18

### Changes
- Created initial vehicle path tracking extension for the NVIDIA Omniverse Developer Contest
428
Markdown
34.749997
96
0.759346
omnioverflow/kit-extension-path-tracking/exts/ext.path.tracking/docs/index.rst
omni.path.tracking
########################

The Omniverse Vehicle Path Tracking extension allows a physics-enabled vehicle, created
with the PhysX Vehicle extension (omni.physx.vehicle), to move along and automatically
track a user-defined path.
The user-defined path is represented by an instance of USD BasisCurves, and the path
tracking behavior is inspired by the classic pure pursuit algorithm.

The fastest way to evaluate how vehicle path tracking works is to use a preset vehicle
and curve. In order to get started with the preset configuration, proceed as follows:

1. Click the `Load a preset scene` button
2. Click the `Start scenario` button

---

The extension supports path tracking for any Omniverse PhysX Vehicle. One can load a
template vehicle using the extension UI, or via the conventional method:
`Create`->`Physics`->`Vehicle`. It is also straightforward to add a custom mesh and
materials to a physics vehicle.

You can create a curve for vehicle path tracking using either of the following methods:

- `Create`->`BasisCurves`->`From Bezier`
- `Create`->`BasisCurves`->`From Pencil`

---

Once a physics vehicle and a path to be tracked (defined by USD BasisCurves) are created,
select the WizardVehicle and the BasisCurves prims in the stage (via Ctrl-click) and click
the `Attach Selected` button. Note that it is very important to select specifically the
`WizardVehicle` prim in the scene, not `WizardVehicle/Vehicle` for instance.
If the vehicle-to-curve attachment was successful, it is reflected in the extension UI.

When vehicle-to-curve attachment(s) are created, proceed by clicking the `Start Scenario`
button.

To remove all existing vehicle-to-curve attachments, click `Clear All Attachments`.
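
For scripted workflows, the same attachment can be driven programmatically through the
extension's `ExtensionModel`. The sketch below is editorial: the import path follows the
extension's package layout, the prim paths are examples, and the model is normally owned
by the running extension rather than constructed by hand.

.. code-block:: python

    from ext.path.tracking.scripts.model import ExtensionModel

    # Arguments: extension id, then default / max / min lookahead distance
    # (the same values the extension's tests use).
    model = ExtensionModel("ext.path.tracking", 550.0, 1200.0, 300.0)
    model.attach_vehicle_to_curve("/World/VehicleTemplate/WizardVehicle1",
                                  "/World/BasisCurves/BasisCurves")
    model.load_simulation(model.get_lookahead_distance())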
1,731
reStructuredText
47.11111
167
0.783362
ericcraft-mh/omniverse-resources/README.md
## USD Resources
###### Pixar
[USD](https://graphics.pixar.com/usd/release/index.html)<br>
[Universal Scene Description (USD) API](https://graphics.pixar.com/usd/release/api/index.html)

###### NVIDIA Developer
[USD](https://developer.nvidia.com/usd)<br>
[Working with USD Python Libraries](https://developer.nvidia.com/usd/tutorials)<br>
[USD Python API Notes](https://developer.nvidia.com/usd/apinotes)

## Omniverse Resources
###### NVIDIA
[Omniverse Documentation Site](https://docs.omniverse.nvidia.com/)<br>
[Omniverse Utilities](https://docs.omniverse.nvidia.com/prod_utilities/prod_utilities/overview.html) Helpful utilities in the Omniverse.<br>
[Omniverse Workflows](https://docs.omniverse.nvidia.com/prod_workflows/prod_workflows/overview.html) Objective based tutorials using Omniverse.<br>
[Omniverse Kit API](https://docs.omniverse.nvidia.com/py/kit/index.html)<br>
[Frequently Used Python Snippets](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/reference_python_snippets.html)<br>
NVIDIA On-Demand: [Omniverse Video Lists](https://docs.omniverse.nvidia.com/plat_omniverse/common/video-list.html) [^1]<br>
[Omniverse Forums](https://forums.developer.nvidia.com/c/omniverse/300)

###### Third Party
[Official Omniverse Channel](https://discord.com/invite/nvidiaomniverse) (Discord)<br>
[omniverse-kit-extension](https://github.com/topics/omniverse-kit-extension) (GitHub)<br>
[**PHYSICALLY**BASED](https://physicallybased.info/) A database of physically based values for CG artists [^2]<br>
[NVIDIA Omniverse Channel](https://www.youtube.com/c/NVIDIAOmniverse) (YouTube)<br>
NVIDIA Studio: [Omniverse Search](https://www.youtube.com/channel/UCDeQdW6Lt6nhq3mLM4oLGWw/search?query=Omniverse) (YouTube)<br>
NVIDIA: [Omniverse Search](https://www.youtube.com/c/NVIDIA/search?query=Omniverse) (YouTube)<br>
[PathCopyCopy](https://pathcopycopy.github.io/) [^3]

## Visual Studio Code
[Visual Studio Code](https://code.visualstudio.com/)
###### Visual Studio Code Extensions
Fully-featured TOML support: [Even Better TOML](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml)<br>
Pixar USD Language Extension by Animal Logic: [USD Language](https://marketplace.visualstudio.com/items?itemName=AnimalLogic.vscode-usda-syntax)<br>
Material Definition Language by NVIDIA: [vscode-mdl-language](https://marketplace.visualstudio.com/items?itemName=OmerShapira.mdl)<br>

[^1]: NVIDIA Account may be required to access content.
[^2]: Includes Omniverse Engine values.
[^3]: Provides a way to copy Omniverse compliant UNIX paths.
2,580
Markdown
72.742855
149
0.770155
terrylincn/omniverse-tutorials/README.md
# omniverse-tutorials<br>
animatedTop - Pixar's spinning top example program<br>
code_demo_mesh100 - a script that creates and controls 100 spheres from code<br>
kaolin_data_generator_patch - kaolin 2021.2.0 bug fix for dirb_tutorials
163
Markdown
31.799994
70
0.803681
terrylincn/omniverse-tutorials/animatedTop/generate_examples.py
# This is an example script from the USD tutorial, # "Transformations, Time-sampled Animation, and Layer Offsets". # # When run, it will generate a series of usda files in the current # directory that illustrate each of the steps in the tutorial. # from pxr import Usd, UsdGeom, Gf, Sdf def MakeInitialStage(path): stage = Usd.Stage.CreateNew(path) UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z) stage.SetStartTimeCode(0) stage.SetEndTimeCode(192) return stage def Step1(): stage = MakeInitialStage('Step1.usda') stage.SetMetadata('comment', 'Step 1: Start and end time codes') stage.Save() def AddReferenceToGeometry(stage, path): geom = UsdGeom.Xform.Define(stage, path) geom.GetPrim().GetReferences().AddReference('./top.geom.usd') return geom def Step2(): stage = MakeInitialStage('Step2.usda') stage.SetMetadata('comment', 'Step 2: Geometry reference') top = AddReferenceToGeometry(stage, '/Top') stage.Save() def AddSpin(top): spin = top.AddRotateZOp(opSuffix='spin') spin.Set(time=0, value=0) spin.Set(time=192, value=1440) def Step3(): stage = MakeInitialStage('Step3.usda') stage.SetMetadata('comment', 'Step 3: Adding spin animation') top = AddReferenceToGeometry(stage, '/Top') AddSpin(top) stage.Save() def AddTilt(top): tilt = top.AddRotateXOp(opSuffix='tilt') tilt.Set(value=12) def Step4(): stage = MakeInitialStage('Step4.usda') stage.SetMetadata('comment', 'Step 4: Adding tilt') top = AddReferenceToGeometry(stage, '/Top') AddTilt(top) AddSpin(top) stage.Save() def Step4A(): stage = MakeInitialStage('Step4A.usda') stage.SetMetadata('comment', 'Step 4A: Adding spin and tilt') top = AddReferenceToGeometry(stage, '/Top') AddSpin(top) AddTilt(top) stage.Save() def AddOffset(top): top.AddTranslateOp(opSuffix='offset').Set(value=(0, 0.1, 0)) def AddPrecession(top): precess = top.AddRotateZOp(opSuffix='precess') precess.Set(time=0, value=0) precess.Set(time=192, value=360) def Step5(): stage = MakeInitialStage('Step5.usda') stage.SetMetadata('comment', 'Step 5: Adding precession and offset') top = AddReferenceToGeometry(stage, '/Top') AddPrecession(top) AddOffset(top) AddTilt(top) AddSpin(top) stage.Save() def Step6(): # Use animated layer from Step5 anim_layer_path = './Step5.usda' stage = MakeInitialStage('Step6.usda') stage.SetMetadata('comment', 'Step 6: Layer offsets and animation') left = UsdGeom.Xform.Define(stage, '/Left') left_top = UsdGeom.Xform.Define(stage, '/Left/Top') left_top.GetPrim().GetReferences().AddReference( assetPath = anim_layer_path, primPath = '/Top') middle = UsdGeom.Xform.Define(stage, '/Middle') middle.AddTranslateOp().Set(value=(2, 0, 0)) middle_top = UsdGeom.Xform.Define(stage, '/Middle/Top') middle_top.GetPrim().GetReferences().AddReference( assetPath = anim_layer_path, primPath = '/Top', layerOffset = Sdf.LayerOffset(offset=96)) right = UsdGeom.Xform.Define(stage, '/Right') right.AddTranslateOp().Set(value=(4, 0, 0)) right_top = UsdGeom.Xform.Define(stage, '/Right/Top') right_top.GetPrim().GetReferences().AddReference( assetPath = anim_layer_path, primPath = '/Top', layerOffset = Sdf.LayerOffset(scale=0.25)) stage.Save() if __name__ == '__main__': Step1() Step2() Step3() Step4() Step4A() Step5() Step6()
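
# Editorial sketch (not part of the Pixar tutorial script): after running it,
# the effect of the layer offset in Step6.usda can be inspected from Python.
# The attribute name follows the rotate op created with opSuffix='spin' above;
# the 96-frame offset shifts the original samples at frames 0 and 192.
#
#     from pxr import Usd
#
#     stage = Usd.Stage.Open('Step6.usda')
#     top = stage.GetPrimAtPath('/Middle/Top')
#     spin = top.GetAttribute('xformOp:rotateZ:spin')
#     print(spin.GetTimeSamples())  # expected: [96.0, 288.0]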
3,547
Python
29.852174
72
0.666479
terrylincn/omniverse-tutorials/kaolin_data_generator_patch/extension.py
import os
import re
import json
import random
import asyncio
import posixpath
import threading
import webbrowser
from queue import Queue
import glob
from functools import partial
import pathlib

import carb
import omni.ext
# Kit modules referenced below, imported explicitly for clarity (in a running
# Kit app several of these are often already loaded by other extensions).
import omni.client
import omni.kit.app
import omni.kit.ui
import omni.kit.viewport
import omni.kit.widget.filebrowser
import omni.kit.window.filepicker
import omni.timeline
import omni.usd
import omni.syntheticdata as sd
from omni import ui
from carb import settings
from pxr import Usd, UsdGeom, UsdShade, UsdLux, Vt, Gf, Sdf, Tf, Semantics
import numpy as np
from omni.kit.pointcloud_generator import PointCloudGenerator
from kaolin_app.research import utils

from .utils import (
    delete_sublayer,
    omni_shader,
    bottom_to_elevation,
    save_to_log,
    save_numpy_array,
    save_image,
    save_pointcloud,
    wait_for_loaded,
)
from .sensors import _build_ui_sensor_selection
from .ui import build_component_frame
from .dr_components import sample_component

_extension_instance = None
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
CACHE = os.path.join(FILE_DIR, ".cache")
EXTENSION_NAME = "Data Generator"
SCENE_PATH = "/World/visualize"
NUM_WORKERS = 10
VALID_EXTENSIONS = ["*.usd", "*.usda", "*.usdc"]
RENDERERS = ["RaytracedLighting", "PathTracing"]
CAMERAS = ["UniformSampling", "Trajectory"]
TRAJ_OPTIONS = ["Spiral", "CustomJson"]
DEMO_URL = "https://docs.omniverse.nvidia.com/app_kaolin/app_kaolin/user_manual.html#data-generator"
MAX_RESOLUTION = {"width": 7680, "height": 4320}
MIN_RESOLUTION = {"width": 1024, "height": 1024}
DR_COMPONENTS = [
    "LightComponent",
    "MovementComponent",
    "RotationComponent",
    "ColorComponent",
    "TextureComponent",
    "MaterialComponent",
    "VisibilityComponent",
]


class KaolinDataGeneratorError(Exception):
    pass


class IOWorkerPool:
    def __init__(self, num_workers: int):
        self.save_queue = Queue()
        for _ in range(num_workers):
            t = threading.Thread(target=self._do_work)
            t.start()

    def add_to_queue(self, data: object):
        self.save_queue.put(data)

    def _do_work(self):
        while True:
            fn = self.save_queue.get(block=True)
            fn()
            self.save_queue.task_done()


class Extension(omni.ext.IExt):
    def __init__(self):
        self.root_dir = None
        self._ref_idx = 0
        self._filepicker = None
        self._outpicker = None
        self._configpicker = None
        self._jsonpicker = None
        self.camera = None
        self._preset_layer = None
        self.dr_components = {}
        self.asset_list = None
        self._progress_tup = None
        self.option_frame = None
        self.config = {}
        self.start_config = {}

    def get_name(self):
        return EXTENSION_NAME

    def on_startup(self, ext_id: str):
        global _extension_instance
        _extension_instance = self
        self._settings = carb.settings.get_settings()
        self.progress = None
        self._context = omni.usd.get_context()
        self._window = ui.Window(EXTENSION_NAME, width=500, height=500)
        self._menu_entry = omni.kit.ui.get_editor_menu().add_item(
            f"Window/Kaolin/{EXTENSION_NAME}", self._toggle_menu, toggle=True, value=True
        )
        self._preview_window = ui.Window("Preview", width=500, height=500)
        self._preview_window.deferred_dock_in("Property")
        self._preview_window.visible = False
        self._filepicker = omni.kit.window.filepicker.FilePickerDialog(
            "Select Asset(s)",
            click_apply_handler=lambda f, d: self._on_filepick(f, d),
            apply_button_label="Open",
            item_filter_options=["usd", "usda", "usdc"],
        )
        self._filepicker.hide()
        self._outpicker = omni.kit.window.filepicker.FilePickerDialog(
            "Select Output Directory",
            click_apply_handler=lambda _, x: self._on_outpick(x),
            apply_button_label="Select",
            enable_filename_input=False,
        )
        self._outpicker.hide()
        self._configpicker = omni.kit.window.filepicker.FilePickerDialog(
            "Import Preset",
            click_apply_handler=self._on_load_config,
            apply_button_label="Open",
            item_filter_options=["usda"],
        )
        self._configpicker.hide()
        self._jsonpicker = omni.kit.window.filepicker.FilePickerDialog(
            "Import Json trajectory file",
            click_apply_handler=lambda f, d: asyncio.ensure_future(
                self._import_trajectory_from_json(posixpath.join(d, f))
            ),
            apply_button_label="Open",
            item_filter_fn=self._on_filter_json,
        )
        self._jsonpicker.hide()
        self._configsaver = omni.kit.window.filepicker.FilePickerDialog(
            "Save Preset As...",
            click_apply_handler=self._on_save_config,
            apply_button_label="Save",
            item_filter_options=["usda"],
        )
        cache = {}
        if not os.path.exists(CACHE):
            os.makedirs(CACHE, exist_ok=True)
        if posixpath.exists(os.path.join(CACHE, ".log")):
            with open(os.path.join(CACHE, ".log"), "r") as f:
                cache = json.load(f)
        self._cache = cache
        self._hide_filepickers()
        self.start_config = self._set_start_config()
        self.presets = [str(pathlib.Path(p).as_posix()) for p in glob.glob(posixpath.join(FILE_DIR, "presets/*.usda"))]
        self.stage_events_sub = self._context.get_stage_event_stream().create_subscription_to_pop(self._on_stage_event)
        self.sdv = sd.Extension.get_instance()
        self._vp_iface = omni.kit.viewport.get_viewport_interface()
        self.timeline = omni.timeline.get_timeline_interface()
        self._build_ui()

    def on_shutdown(self):
        global _extension_instance
        _extension_instance = None
        if self._preset_layer:
            delete_sublayer(self._preset_layer)
        self.progress = None
        if self._window:
            del self._window
        if self._filepicker:
            self._filepicker = None
        if self._outpicker:
            self._outpicker = None
        if self._configpicker:
            self._configpicker = None
        if self._jsonpicker:
            self._jsonpicker = None

    def _toggle_menu(self, *args):
        self._window.visible = not self._window.visible

    def clear(self):
        if self._preset_layer:
            delete_sublayer(self._preset_layer)
        # reset resolution
        self._settings.set("/app/renderer/resolution/width", self.start_config["width"])
        self._settings.set("/app/renderer/resolution/height", self.start_config["height"])
        # reset rendering mode
        self._settings.set("/rtx/rendermode", self.start_config["renderer"])
        self._settings.set("/rtx-defaults/pathtracing/clampSpp", self.start_config["clampSpp"])
        self._settings.set("/rtx-defaults/pathtracing/totalSpp", self.start_config["totalSpp"])
        self._settings.set("/rtx/post/aa/op", self.start_config["aa"])

    def _on_stage_event(self, e):
        pass

    def _reset(self):
        self._ref_idx = 0
        self.asset_list = None

    def _show_filepicker(self, filepicker, default_dir: str = "", default_file: str = ""):
        cur_dir = filepicker.get_current_directory()
        show_dir = cur_dir if cur_dir else default_dir
        filepicker.show(show_dir)
        filepicker.set_filename(default_file)

    def _hide_filepickers(self):
        # Hide all filepickers and route each dialog's Cancel button to hide() as well.
        self._configsaver.hide()
        self._filepicker._click_cancel_handler = lambda *_: self._filepicker.hide()
        self._filepicker.hide()
        self._outpicker._click_cancel_handler = lambda *_: self._outpicker.hide()
        self._outpicker.hide()
        self._jsonpicker._click_cancel_handler = lambda *_: self._jsonpicker.hide()
        self._jsonpicker.hide()
        self._configpicker._click_cancel_handler = lambda *_: self._configpicker.hide()
        self._configpicker.hide()
        self._configsaver._click_cancel_handler = lambda *_: self._configsaver.hide()

    def _set_start_config(self):
        return {
            "width": self._settings.get("/app/renderer/resolution/width"),
            "height": self._settings.get("/app/renderer/resolution/height"),
            "renderer": self._settings.get("/rtx/rendermode"),
            "clampSpp": self._settings.get("/rtx-defaults/pathtracing/clampSpp"),
            "totalSpp": self._settings.get("/rtx/pathtracing/totalSpp"),
            "aa": self._settings.get("/rtx/post/aa/op"),
        }

    def _on_filter_json(self, item: omni.kit.widget.filebrowser.filesystem_model.FileSystemItem):
        file_exts = ["json", "JSON"]
        for fex in file_exts:
            if item.name.endswith(fex) or item.is_folder:
                return True
        return False

    async def _import_trajectory_from_json(self, path: str):
        """
        Import a trajectory from a JSON file in a predefined format:
        a list of entries, each holding a translation "t" ([x, y, z]),
        a scalar-first quaternion "q" ([w, x, y, z]) and a "time" stamp.
        """
        trajectory = self._on_load_json(path)
        self.config["jsonpath"] = path
        assert isinstance(trajectory, list)
        assert len(trajectory) > 0

        # add trajectory prim
        stage = omni.usd.get_context().get_stage()
        timestamp_prim = stage.DefinePrim(f"{SCENE_PATH}/timestamp", "Xform")
        trajectory_rig = stage.DefinePrim(f"{timestamp_prim.GetPath()}/rig", "Xform")
        UsdGeom.Xformable(trajectory_rig).ClearXformOpOrder()
        UsdGeom.Xformable(trajectory_rig).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble)
        UsdGeom.Xformable(trajectory_rig).AddOrientOp()

        # Set translation and orientation according to trajectory
        origins, scales, orientations = [], [], []
        for idx, entry in enumerate(trajectory):
            # Set camera based on time, translation, quaternion in the json file.
            trans, quaternion, time = entry["t"], entry["q"], entry["time"]
            # The JSON format has different camera coordinate system conventions:
            # +X points right, +Y points down, camera faces in +Z.
            # Compared to Kit's conventions:
            # +X points right, -Y points down, camera faces in -Z.
            # So the Y and Z axes need to be flipped, and orientations need to be
            # rotated around X by 180 degrees for the coordinate systems to match.
            trans[1] = -trans[1]  # Flip Y
            trans[2] = -trans[2]  # Flip Z
            # Set translation and orientations according to time.
            trajectory_rig.GetAttribute("xformOp:translate").Set(Gf.Vec3d(trans), time=time)
            # Both the JSON format and Gf.Quatd use a "scalar first" ordering.
            # Flip Y and Z axes.
            quaternion[2] = -quaternion[2]
            quaternion[3] = -quaternion[3]
            trajectory_rig.GetAttribute("xformOp:orient").Set(Gf.Quatf(*quaternion), time=time)
            # Use prev and curr translation to generate a trajectory vis as PointInstancer
            orientation = Gf.Quath(*quaternion).GetNormalized()
            orientations.append(orientation)
            origins.append(Gf.Vec3d(trans))
            scales.append([1.0, 1.0, 1.0])

        # Define prim for visualization, each component will be a cone (like 3d vector)
        cone_height = 0.03
        proto_prim = stage.DefinePrim(f"{SCENE_PATH}/proto", "Xform")
        proto_prim.GetAttribute("visibility").Set("invisible")
        cone_rig = stage.DefinePrim(f"{proto_prim.GetPath()}/cone", "Xform")
        cone = UsdGeom.Cone.Define(stage, (f"{cone_rig.GetPath()}/cone"))
        cone.GetRadiusAttr().Set(0.01)
        cone.GetHeightAttr().Set(cone_height)
        cone.GetAxisAttr().Set("Z")
        # cone rig
        UsdGeom.Xformable(cone_rig).ClearXformOpOrder()
        UsdGeom.Xformable(cone_rig).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble).Set((0.0, cone_height / 2, 0.0))

        # Setup point instancer
        instancer_prim = stage.DefinePrim(f"{SCENE_PATH}/Viz", "PointInstancer")
        instancer = UsdGeom.PointInstancer(instancer_prim)
        assert instancer
        instancer.CreatePrototypesRel().SetTargets([cone_rig.GetPath()])

        # Populate point instancer with the calculated scales, positions, and orientations
        instancer.GetPositionsAttr().Set(origins)
        instancer.GetScalesAttr().Set(scales)
        indices = [0] * len(origins)
        instancer.GetProtoIndicesAttr().Set(indices)
        instancer.GetOrientationsAttr().Set(orientations)
        await self._preview_trajectory()

    def _move_camera(self, centre: Gf.Vec3d, azimuth: float, elevation: float, distance: float):
        stage = omni.usd.get_context().get_stage()
        rig = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig")
        boom = stage.GetPrimAtPath(f"{rig.GetPath()}/Boom")
        camera = stage.GetPrimAtPath(f"{boom.GetPath()}/Camera")
        UsdGeom.Xformable(rig).ClearXformOpOrder()
        centre_op = UsdGeom.Xformable(rig).AddTranslateOp()
centre_op.Set(tuple(centre)) rig_rotate_op = UsdGeom.Xformable(rig).AddRotateXYZOp() rig_rotate_op.Set((0.0, azimuth, 0.0)) UsdGeom.Xformable(boom).ClearXformOpOrder() boom_rotate_op = UsdGeom.Xformable(boom).AddRotateXYZOp() boom_rotate_op.Set((-elevation, 0.0, 0.0)) # Reset camera UsdGeom.Xformable(camera).ClearXformOpOrder() distance_op = UsdGeom.Xformable(camera).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble) distance_op.Set((0.0, 0.0, distance)) UsdGeom.Xformable(camera).ComputeLocalToWorldTransform(0) def _get_value(self, option, default=None): if option not in self.config: self.config[option] = default if self.config[option]["mode"] == 0: return self.config[option]["fixed"] else: v_min, v_max = self.config[option]["random"] if isinstance(v_min, list): return [random.random() * (v_max_el - v_min_el) + v_min_el for v_min_el, v_max_el in zip(v_min, v_max)] else: return random.random() * (v_max - v_min) + v_min def _set_trajectory_camera_pose(self, cur_frame: int, num_frames: int): """ Calculate the camera pose based on a trajectory, number of frames to generate and current frame """ stage = omni.usd.get_context().get_stage() viz_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Viz") # Match transform of visualization prim tf = UsdGeom.Xformable(viz_prim).ComputeLocalToWorldTransform(0.0) # .GetInverse() camera_rig = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig") UsdGeom.Xformable(camera_rig).ClearXformOpOrder() UsdGeom.Xformable(camera_rig).AddTransformOp().Set(tf) trajectory_rig = stage.GetPrimAtPath(f"{SCENE_PATH}/timestamp/rig") translations = trajectory_rig.GetAttribute("xformOp:translate") time_samples = translations.GetTimeSamples() if num_frames <= 1: cur_time = (time_samples[-1] - time_samples[0]) / 2.0 else: cur_time = (time_samples[-1] - time_samples[0]) / (num_frames - 1) * cur_frame translate = trajectory_rig.GetAttribute("xformOp:translate").Get(time=cur_time) orientation = trajectory_rig.GetAttribute("xformOp:orient").Get(time=cur_time) UsdGeom.Xformable(self.camera).ClearXformOpOrder() UsdGeom.Xformable(self.camera).AddTranslateOp(UsdGeom.XformOp.PrecisionDouble).Set(translate) UsdGeom.Xformable(self.camera).AddOrientOp().Set(orientation) def _get_spiral_camera_pose(self, frame, total_frames): """ Calculate the rotation with respect to X & Y based on the current iteration of all the sampling """ distance = self._get_value("distance") min_ele, max_ele = tuple(self.config["elevation"]["random"]) numrot = self.config["num_rotations"] if total_frames > 1: az_step = 360 * numrot / (total_frames - 1) ele_step = (max_ele - min_ele) / (total_frames - 1) else: az_step = 0 ele_step = 0 az = frame * az_step ele = min_ele + frame * ele_step return az, ele, distance def _normalize(self, prim: Usd.Prim): prim_range = UsdGeom.Imageable(prim).ComputeLocalBound(0, "default").GetRange() range_min = prim_range.GetMin() range_max = prim_range.GetMax() size = prim_range.GetSize() sf = 1.0 / max(size) offset = (range_max + range_min) / 2 * sf UsdGeom.Xformable(prim).AddTranslateOp().Set(-offset) UsdGeom.Xformable(prim).AddScaleOp().Set((sf, sf, sf)) def _change_up_axis(self, model): # TODO type self.config["up_axis"] = model.as_int def add_semantics(self, prim: Usd.Prim, semantic_label: str): if not prim.HasAPI(Semantics.SemanticsAPI): sem = Semantics.SemanticsAPI.Apply(prim, "Semantics") sem.CreateSemanticTypeAttr() sem.CreateSemanticDataAttr() sem.GetSemanticTypeAttr().Set("class") sem.GetSemanticDataAttr().Set(semantic_label) def create_asset_prim(self): stage = 
omni.usd.get_context().get_stage() asset_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset") if not asset_prim: asset_prim = stage.DefinePrim(f"{SCENE_PATH}/Asset", "Xform") rig_prim = stage.GetPrimAtPath(f"{asset_prim.GetPath()}/Rig") if not rig_prim: rig_prim = stage.DefinePrim(f"{asset_prim.GetPath()}/Rig", "Xform") UsdGeom.Xformable(rig_prim).AddTranslateOp() UsdGeom.Xformable(rig_prim).AddRotateXOp() translate_op = rig_prim.GetAttribute("xformOp:translate") if not translate_op: translate_op = UsdGeom.Xformable(rig_prim).AddTranslateOp() translate_op.Set((0.0, 0.0, 0.0)) rotatex_op = rig_prim.GetAttribute("xformOp:rotateX") if not rotatex_op: UsdGeom.Xformable(rig_prim).AddRotateXOp() ref_prim = stage.DefinePrim(f"{SCENE_PATH}/Asset/Rig/Preview") self.add_semantics(ref_prim, "asset") return asset_prim async def _run(self): i = 0 while i < len(self.asset_list): self.progress["bar1"].set_value(i / len(self.asset_list)) if self.progress["stop_signal"]: break load_success = False # If asset fails to load, remove from list and try the next one while not load_success and i < len(self.asset_list): carb.log_info(f"[kaolin_app.research.data_generator] Loading asset {self.asset_list[i]}...") load_success = await self.load_asset(self.asset_list[i], use_cache=True) if not load_success: self.asset_list.pop(i) if self.progress["stop_signal"]: break for j in range(self.config["renders_per_asset"]): self.progress["bar2"].set_value(j / self.config["renders_per_asset"]) if self.progress["stop_signal"]: break app = omni.kit.app.get_app_interface() await app.next_update_async() await self.render_asset(j, self.config["renders_per_asset"]) self._preview_window.visible = False await self._save_gt(i * self.config["renders_per_asset"] + j) i += 1 self._ref_idx += 1 async def run(self): root_layer = omni.usd.get_context().get_stage().GetRootLayer() if len(root_layer.subLayerPaths) == 0 or self._preset_layer != Sdf.Find(root_layer.subLayerPaths[-1]): self._on_preset_changed(self.presets[self._preset_model.get_item_value_model().as_int], update_config=False) if not self.config["out_dir"]: m = self._ui_modal("Output Dir Not Specified", "Please specify an output directory.") # TODO Notification return is_custom_json_mode = ( self.config["cameramode"] == "Trajectory" and self.config["trajectorymode"] == "CustomJson" ) if is_custom_json_mode and not os.path.exists(self.config.get("jsonpath", "")): if not self.config.get("jsonpath"): title = "JSON Path Not Specified" else: title = "Invalid JSON Path Specified" m = self._ui_modal(title, "Please specify a valid path to a trajectory JSON file.") # TODO Notification return # Set small camera near plane cur_clipping_range = self.camera.GetAttribute("clippingRange").Get() self.camera.GetAttribute("clippingRange").Set((0.01, cur_clipping_range[1])) # Hide path visualization if exists if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"): self._set_visible(f"{SCENE_PATH}/Viz", False) # Set SPP per config self._settings.set("/rtx/pathtracing/spp", self.config["spp"]) # Capture scene state cur_sel = omni.usd.get_context().get_selection().get_selected_prim_paths() display_mode = self._settings.get("/persistent/app/viewport/displayOptions") # Clear scene state omni.usd.get_context().get_selection().clear_selected_prim_paths() self._settings.set("/persistent/app/viewport/displayOptions", 0) if self.asset_list is None: self.asset_list = await utils.path.get_usd_files_async(self.root_dir) self._ui_toggle_visible([self.option_frame, self.progress["block"]]) # Reset 
Camera
        if not self.camera.GetAttribute("xformOp:translate"):
            UsdGeom.Xformable(self.camera).AddTranslateOp()
        self.camera.GetAttribute("xformOp:translate").Set((0, 0, 0))
        if not self.camera.GetAttribute("xformOp:rotateXYZ"):
            UsdGeom.Xformable(self.camera).AddRotateXYZOp()
        self.camera.GetAttribute("xformOp:rotateXYZ").Set((0, 0, 0))

        try:
            await self._run()
        finally:
            self.progress["stop_signal"] = False
            self._ui_toggle_visible([self.option_frame, self.progress["block"]])
            # Re-apply scene state
            omni.usd.get_context().get_selection().set_selected_prim_paths(cur_sel, True)
            self._settings.set("/persistent/app/viewport/displayOptions", display_mode)
            self._settings.set("/rtx/pathtracing/spp", 1)
            self.camera.GetAttribute("clippingRange").Set((1.0, cur_clipping_range[1]))
            if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"):
                self._set_visible(f"{SCENE_PATH}/Viz", True)

    async def preview(self):
        root_layer = omni.usd.get_context().get_stage().GetRootLayer()
        if len(root_layer.subLayerPaths) == 0 or self._preset_layer != Sdf.Find(root_layer.subLayerPaths[-1]):
            self._on_preset_changed(self.presets[self._preset_model.get_item_value_model().as_int], update_config=False)
        if self.asset_list is None:
            self.asset_list = await utils.path.get_usd_files_async(self.root_dir)

        # Hide the path visualization if it exists
        if omni.usd.get_context().get_stage().GetPrimAtPath(f"{SCENE_PATH}/Viz"):
            self._set_visible(f"{SCENE_PATH}/Viz", False)

        success = False
        # Draw assets at random; remove invalid assets if detected.
        while not success and len(self.asset_list) > 0:
            sel = random.randrange(len(self.asset_list))
            success = await self.load_asset(self.asset_list[sel], use_cache=False)
            if not success:
                self.asset_list.pop(sel)
        await self.render_asset(random.randrange(100), 100)

        # Ensure material is loaded
        await wait_for_loaded()
        self.sdv.build_visualization_ui(self._preview_window, "Viewport")
        self._preview_window.visible = True

        # Set camera target to facilitate camera control
        viewport = omni.kit.viewport.get_viewport_interface().get_viewport_window()
        viewport.set_camera_target(str(self.camera.GetPath()), 0.0, 0.0, 0.0, True)

    def _add_ref(self, ref_prim, file):
        # Check if the file has a default prim - if not, use the first top-level prim
        ref_prim.GetReferences().ClearReferences()
        file_stage = Usd.Stage.Open(file)
        if file_stage.HasDefaultPrim():
            ref_prim.GetPrim().GetReferences().AddReference(file)
        else:
            top_level_prims = file_stage.GetPseudoRoot().GetChildren()
            if len(top_level_prims) == 0:
                raise KaolinDataGeneratorError(f"Asset at {file} appears to be empty")
            root_prim = top_level_prims[0]
            ref_prim.GetPrim().GetReferences().AddReference(file, str(root_prim.GetPath()))
        return True

    async def load_asset(self, path: str, use_cache: bool = False):  # TODO docstring
        stage = omni.usd.get_context().get_stage()
        ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
        if not ref_prim:
            self.create_asset_prim()
            ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
        self._set_visible(str(ref_prim.GetPath()), True)
        try:
            self._add_ref(ref_prim, path)
        except Tf.ErrorException:
            carb.log_warn(f"Error opening {path}.")
            return False
        except KaolinDataGeneratorError as e:
            carb.log_warn(e.args[0])
            return False

        # Set transforms
        UsdGeom.Xformable(ref_prim).ClearXformOpOrder()
        if self.config.get("up_axis", 0):
            UsdGeom.Xformable(ref_prim).AddRotateXOp().Set(-90.0)  # If Z up, rotate about X axis
        if self.config.get("asset_normalize"):
            self._normalize(ref_prim)
        if self.config["asset_override_bottom_elev"]:
            bottom_to_elevation(ref_prim.GetParent(), 0.0)
        else:
            ref_prim.GetParent().GetAttribute("xformOp:translate").Set((0.0, 0.0, 0.0))

        # Ensure material is loaded
        await asyncio.sleep(1)
        await wait_for_loaded()

        asset_size = UsdGeom.Imageable(ref_prim).ComputeLocalBound(0, "default").GetRange().GetSize()
        if all([s < 1e-10 for s in asset_size]):
            # Stage is empty, skip asset
            carb.log_warn(f"Asset at {path} appears to be empty.")
            # Debug details for the skipped asset
            print(
                asset_size,
                ref_prim,
                ref_prim.GetAttribute("visibility").Get(),
                ref_prim.GetMetadata("references").GetAddedOrExplicitItems()[0].assetPath,
            )
            return False
        return True

    async def render_asset(self, cur_frame: int = 0, num_frames: int = 0) -> None:  # TODO docstring
        self._settings.set("/app/hydraEngine/waitIdle", True)  # Necessary, waitIdle resets itself to false
        stage = omni.usd.get_context().get_stage()
        if not self.camera:
            rig = stage.DefinePrim(f"{SCENE_PATH}/CameraRig", "Xform")
            boom = stage.DefinePrim(f"{rig.GetPath()}/Boom", "Xform")
            self.camera = stage.DefinePrim(f"{boom.GetPath()}/Camera", "Camera")
            self.camera.GetAttribute("clippingRange").Set((1.0, 1000000))
            self._vp_iface.get_viewport_window().set_active_camera(str(self.camera.GetPath()))

        if self.config.get("cameramode") == "Trajectory":
            if self.config["trajectorymode"] == "Spiral":
                centre = self._get_value("centre")
                azimuth, elevation, distance = self._get_spiral_camera_pose(cur_frame, num_frames)
                self._move_camera(centre, azimuth, elevation, distance)
            elif self.config["trajectorymode"] == "CustomJson":
                self._move_camera((0, 0, 0), 0, 0, 0)
                self._set_trajectory_camera_pose(cur_frame, num_frames)
        else:
            centre = self._get_value("centre")
            azimuth = self._get_value("azimuth")
            elevation = self._get_value("elevation")
            distance = self._get_value("distance")
            self._move_camera(centre, azimuth, elevation, distance)

        # Set focal length
        focal_length_defaults = {"fixed": 24.0, "mode": 0, "random": Gf.Vec2f([1.0, 120.0])}
        focal_length = self._get_value("camera_focal_length", focal_length_defaults)
        self.camera.GetAttribute("focalLength").Set(focal_length)

        self.move_asset()
        self.sample_components()
        app = omni.kit.app.get_app_interface()
        await app.next_update_async()  # This next frame await is needed to avoid camera transform remaining in place

    def _get_camera_properties(self):
        width = self._settings.get("/app/renderer/resolution/width")
        height = self._settings.get("/app/renderer/resolution/height")
        tf_mat = np.array(UsdGeom.Xformable(self.camera).ComputeLocalToWorldTransform(0.0).GetInverse()).tolist()
        tf_mat[-1][2] *= 100  # Scale the z translation by 100 (presumably a metres-to-centimetres conversion)
        clippingrange = self.camera.GetAttribute("clippingRange").Get()
        clippingrange[0] = 1  # Clamp the near plane to 1
        cam_props = {
            "resolution": {"width": width, "height": height},
            "clipping_range": tuple(clippingrange),
            "horizontal_aperture": self.camera.GetAttribute("horizontalAperture").Get(),
            "focal_length": self.camera.GetAttribute("focalLength").Get(),
            "tf_mat": tf_mat,
        }
        return cam_props

    def _get_filepath_from_primpath(self, prim_path):
        """Return the file path referenced by the prim at `prim_path`, or "" if there is none."""
        if not prim_path:
            return ""
        prim = omni.usd.get_context().get_stage().GetPrimAtPath(prim_path)
        if prim:
            metadata = prim.GetMetadata("references")
            if metadata:
                return metadata.GetAddedOrExplicitItems()[0].assetPath
        return ""

    def _get_frame_metadata(
        self, bbox_2d_tight: np.ndarray = None, bbox_2d_loose: np.ndarray = None, bbox_3d: np.ndarray = None
    ):
        frame = {"camera_properties": self._get_camera_properties()}
        if bbox_2d_tight is not None:
            frame["bbox_2d_tight"] = self._get_bbox_2d_data(bbox_2d_tight)
        if bbox_2d_loose is not None:
            frame["bbox_2d_loose"] = self._get_bbox_2d_data(bbox_2d_loose)
        if bbox_3d is not None:
            frame["bbox_3d"] = self._get_bbox_3d_data(bbox_3d)
        ref_prim_path = f"{SCENE_PATH}/Asset/Rig/Preview"
        stage = omni.usd.get_context().get_stage()
        ref_prim = stage.GetPrimAtPath(ref_prim_path)
        tf = np.array(UsdGeom.Xformable(ref_prim).ComputeLocalToWorldTransform(0.0)).tolist()
        ref = self._get_filepath_from_primpath(ref_prim_path)
        if os.path.isfile(self.root_dir):
            rel_ref = os.path.basename(ref)
        else:
            rel_ref = posixpath.relpath(ref, self.root_dir)
        frame["asset_transforms"] = [(rel_ref, tf)]
        json_buffer = bytes(json.dumps(frame, indent=4), encoding="utf-8")
        return json_buffer

    def _get_bbox_2d_data(self, bboxes):  # TODO type
        bbox_2d_list = []
        for bb_data in bboxes:
            ref = self._get_filepath_from_primpath(bb_data["name"])
            rel_ref = posixpath.relpath(ref, self.root_dir) if ref else ""
            bb_dict = {
                "file": rel_ref,
                "class": bb_data["semanticLabel"],
                "bbox": {a: bb_data[a].item() for a in ["x_min", "y_min", "x_max", "y_max"]},
            }
            bbox_2d_list.append(bb_dict)
        return bbox_2d_list

    def _get_bbox_3d_data(self, bboxes):  # TODO type
        bbox_3d_list = []
        for bb_data in bboxes:
            ref = self._get_filepath_from_primpath(bb_data["name"])
            rel_ref = posixpath.relpath(ref, self.root_dir) if ref else ""
            bb_dict = {
                "file": rel_ref,
                "class": bb_data["semanticLabel"],
                "bbox": {a: bb_data[a].item() for a in ["x_min", "y_min", "x_max", "y_max", "z_min", "z_max"]},
            }
            bb_dict["transform"] = bb_data["transform"].tolist()
            bbox_3d_list.append(bb_dict)
        return bbox_3d_list

    def move_asset(self):
        stage = omni.usd.get_context().get_stage()
        if self.config["asset_override_bottom_elev"]:
            ref_prim = stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig/Preview")
            bottom_to_elevation(ref_prim.GetParent(), self.config["asset_bottom_elev"])

    async def _save_gt(self, idx: int):
        vp = self._vp_iface.get_viewport_window()
        self._sensors = self.sdv._sensors["Viewport"]
        await sd.sensors.initialize_async(
            vp, [st for _, s in self._sensors.items() if s["enabled"] for st in s["sensors"]]
        )
        io_tasks = []
        img_funcs = {"rgb": partial(sd.sensors.get_rgb, vp), "normals": partial(sd.visualize.get_normals, vp)}
        np_funcs = {
            "depth": partial(sd.sensors.get_depth_linear, vp),
            "instance": partial(sd.sensors.get_instance_segmentation, vp, parsed=self._sensors["instance"]["mode"]),
            "semantic": partial(sd.sensors.get_semantic_segmentation, vp),
        }
        for sensor, write_fn in img_funcs.items():
            if self._sensors[sensor]["enabled"]:
                filepath = posixpath.join(self.config["out_dir"], f"{idx}_{sensor}.png")
                data = write_fn()
                io_tasks.append(save_image(filepath, data))
                carb.log_info(f"[kaolin.data_generator] Saving {sensor} to {filepath}")
        for sensor, write_fn in np_funcs.items():
            if self._sensors[sensor]["enabled"]:
                filepath = posixpath.join(self.config["out_dir"], f"{idx}_{sensor}.npy")
                data = write_fn()
                io_tasks.append(save_numpy_array(filepath, data))
                carb.log_info(f"[kaolin.data_generator] Saving {sensor} to {filepath}")
        bbox_2d_tight, bbox_2d_loose, bbox_3d = None, None, None
        if self._sensors["bbox_2d_tight"]["enabled"]:
            bbox_2d_tight = sd.sensors.get_bounding_box_2d_tight(vp)
        if self._sensors["bbox_2d_loose"]["enabled"]:
            bbox_2d_loose = sd.sensors.get_bounding_box_2d_loose(vp)
        if self._sensors["bbox_3d"]["enabled"]:
            bbox_3d = sd.sensors.get_bounding_box_3d(vp, parsed=self._sensors["bbox_3d"]["mode"])
        if self._sensors["pointcloud"]["enabled"]:
            pc_gen = PointCloudGenerator()
            pc_gen.stage = omni.usd.get_context().get_stage()
            pc_gen.ref = pc_gen.stage.GetPrimAtPath(f"{SCENE_PATH}/Asset/Rig")
            pc_gen.height_resolution = self._sensors["pointcloud"]["sampling_resolution"]
            pc_gen.width_resolution = self._sensors["pointcloud"]["sampling_resolution"]
            pointcloud = await pc_gen.generate_pointcloud()
            filepath = posixpath.join(self.config["out_dir"], f"{idx}_pointcloud.usd")
            up_axis = ["Y", "Z"][self.config.get("up_axis", 0)]
            io_tasks.append(save_pointcloud(filepath, pointcloud, up_axis))
        filepath = posixpath.join(self.config["out_dir"], f"{idx}_metadata.json")
        frame = self._get_frame_metadata(bbox_2d_tight, bbox_2d_loose, bbox_3d)
        # TODO: fix and remove this
        io_tasks.append(omni.client.write_file_async(filepath, frame))
        await asyncio.gather(*io_tasks)

    def sample_components(self):  # TODO docstring
        for _, components in self.dr_components.items():
            for component in components:
                sample_component(component)

    def _set_visible(self, path: str, value: bool):
        opts = ["invisible", "inherited"]
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath(path)
        if prim and prim.GetAttribute("visibility"):
            prim.GetAttribute("visibility").Set(opts[value])

    def _on_value_changed(self, option, value, idx: int = None, idx_opt=None):  # TODO type
        has_mode = isinstance(self.config[option], dict)
        if has_mode:
            mode = ["fixed", "random"][self.config[option]["mode"]]
            if idx is not None and idx_opt is not None:
                self.config[option][mode][idx_opt][idx] = value
            elif idx is not None:
                self.config[option][mode][idx] = value
            else:
                self.config[option][mode] = value
        else:
            if idx is not None and idx_opt is not None:
                self.config[option][idx_opt][idx] = value
            elif idx is not None:
                self.config[option][idx] = value
            else:
                self.config[option] = value

    def _on_mode_changed(self, option, model):  # TODO type
        idx = model.get_item_value_model().get_value_as_int()
        self.config[option]["mode"] = idx
        self._build_ui()

    def _on_filepick(self, filename: str, dirpath: str):
        if dirpath:
            path = posixpath.join(dirpath, filename)
            if utils.path.exists(path):
                self._filepicker.hide()
                save_to_log(CACHE, {"root_dir": dirpath, "root_file": filename})
                self._ui_root_dir.set_value(path)

    def _on_outpick(self, path: str):
        self._outpicker.hide()
        save_to_log(CACHE, {"out_dir": path})
        self._ui_out_dir.set_value(path)

    def _on_load_config(self, filename: str, dirpath: str):
        self._configpicker.hide()
        path = posixpath.join(dirpath, filename)
        assert re.search(r"^.*\.(usd|usda|usdc|USD|USDA|USDC)$", path)  # Confirm path is a valid USD
        assert utils.path.exists(path)  # Ensure path exists
        save_to_log(CACHE, {"config_dir": dirpath})
        if path not in self.presets:
            self.presets.append(path)
            self._preset_model.append_child_item(None, ui.SimpleStringModel(posixpath.splitext(filename)[0]))
        self._preset_model.get_item_value_model().set_value(self.presets.index(path))

    def _on_load_json(self, path: str):
        self._jsonpicker.hide()
        assert re.search(r"^.*\.(json)$", path)  # Confirm path is a valid json file
        assert utils.path.exists(path)  # Ensure path exists
        save_to_log(CACHE, {"json_dir": posixpath.dirname(path)})
        with open(path, "r") as f:
            data = json.load(f)
        return data

    async def _on_root_dir_changed(self, path: str):
        """Root USD directory changed."""
        if utils.path.exists(path):
            self._settings.set("/kaolin/mode", 2)  # Set app in data generation mode
            self._reset()
            self._settings.set("/app/asyncRendering", False)  # Necessary to ensure correct GT output
            self._settings.set("/app/hydraEngine/waitIdle", True)  # Necessary to ensure correct GT output
            omni.usd.get_context().new_stage()
            stage = omni.usd.get_context().get_stage()
            vis_prim = stage.GetPrimAtPath(SCENE_PATH)
            if vis_prim and self._preset_layer is None:
                omni.kit.commands.execute("DeletePrimsCommand", paths=[vis_prim.GetPath()])
            elif vis_prim and stage.GetPrimAtPath(f"{vis_prim.GetPath()}/Asset/Rig"):
                rig = stage.GetPrimAtPath(f"{vis_prim.GetPath()}/Asset/Rig")
                for child in rig.GetChildren():
                    self._set_visible(str(child.GetPath()), False)
            self.root_dir = path
            self.asset_list = await utils.path.get_usd_files_async(self.root_dir)
            if not self.option_frame:
                self._build_ui()
            if self.option_frame:
                self.option_frame.visible = True
            await self.preview()
            self._preview_window.visible = False
        else:
            carb.log_error(f"[kaolin_app.research.data_generator] Directory not found: '{path}'")

    def _set_settings(self, width: int, height: int, renderer: str, **kwargs):
        self._settings.set("/app/renderer/resolution/width", width)
        self._settings.set("/app/renderer/resolution/height", height)
        self._settings.set("/rtx/rendermode", renderer)
        self._settings.set("/app/viewport/grid/enabled", False)
        self._settings.set("/app/viewport/grid/showOrigin", False)

    def _on_save_config(self, filename: str, dirname: str):
        assert utils.path.exists(dirname)
        self._configsaver.hide()
        # Add sensor config to the main config
        self.config["sensors"] = {s: True for s, v in self.sdv._sensors["Viewport"].items() if v["enabled"]}
        save_to_log(CACHE, {"config_dir": dirname})
        if self._preset_layer is None:
            raise ValueError("Something went wrong; unable to save config.")
        # Create new layer
        filename = f"{posixpath.splitext(filename)[0]}.usda"
        new_path = posixpath.join(dirname, filename)
        if Sdf.Find(new_path) == self._preset_layer:
            new_layer = self._preset_layer
        else:
            # Transfer layer content over to new layer
            new_layer = Sdf.Layer.CreateNew(new_path)
            new_layer.TransferContent(self._preset_layer)
        new_layer.customLayerData = {"DataGenerator": self.config}
        new_layer.Save()
        self._on_load_config(filename, dirname)

    def _on_resolution_changed(self, model, option):  # TODO type
        value = model.as_int
        self.config.update({option: value})
        self._settings.set(f"/app/renderer/resolution/{option}", value)
        model.set_value(value)

    def _on_preset_changed(self, path: str, update_config: bool = True) -> None:
        stage = omni.usd.get_context().get_stage()
        root_layer = stage.GetRootLayer()
        if self._preset_layer is not None:
            delete_sublayer(self._preset_layer)
            vis_prim = stage.GetPrimAtPath(SCENE_PATH)
            if vis_prim:
                omni.kit.commands.execute("DeletePrimsCommand", paths=[vis_prim.GetPath()])
        omni.kit.commands.execute(
            "CreateSublayerCommand",
            layer_identifier=root_layer.identifier,
            sublayer_position=-1,
            new_layer_path=path,
            transfer_root_content=False,
            create_or_insert=False,
        )
        self._preset_layer = Sdf.Find(root_layer.subLayerPaths[-1])
        if update_config:
            config = self._preset_layer.customLayerData.get("DataGenerator")
            if config:
                self.config = config
                if "sensors" in self.config:
                    # Enable sensors
                    for s in self.config["sensors"]:
                        self.sdv._sensors["Viewport"][s]["enabled"] = True
        # Set preset as authoring layer
        edit_target = Usd.EditTarget(self._preset_layer)
        stage = omni.usd.get_context().get_stage()
        if not stage.IsLayerMuted(self._preset_layer.identifier):
            stage.SetEditTarget(edit_target)
        self.dr_components = {}
        for prim in stage.Traverse():
            if str(prim.GetTypeName()) in DR_COMPONENTS:
                key = prim.GetParent().GetName()
                self.dr_components.setdefault(key, []).append(prim)
        self.camera = stage.GetPrimAtPath(f"{SCENE_PATH}/CameraRig/Boom/Camera")
        self.create_asset_prim()
        self.option_frame.clear()
        with self.option_frame:
            self._build_ui_options()

    async def _preview_trajectory(self):
        stage = omni.usd.get_context().get_stage()
        trajectory_viz = stage.GetPrimAtPath(f"{SCENE_PATH}/Viz")
        if not trajectory_viz:
            carb.log_warn("Unable to preview trajectory, no trajectory detected.")
            return
        trajectory_viz.GetAttribute("visibility").Set("inherited")
        viewport = omni.kit.viewport.get_viewport_interface()
        omni.usd.get_context().get_selection().set_selected_prim_paths([f"{SCENE_PATH}/Viz"], True)
        await omni.kit.app.get_app_interface().next_update_async()
        viewport.get_viewport_window().focus_on_selected()
        omni.usd.get_context().get_selection().clear_selected_prim_paths()

    def _set_trajectory_preview_visibility(self):
        show_preview = (
            self.config.get("cameramode") == "Trajectory" and self.config.get("trajectorymode") == "CustomJson"
        )
        self._set_visible(f"{SCENE_PATH}/Viz", show_preview)

    def _on_trajectory_mode_changed(self, trajectory_mode_model):
        trajectory_mode = TRAJ_OPTIONS[trajectory_mode_model.get_item_value_model().as_int]
        self.config.update({"trajectorymode": trajectory_mode})
        self._set_trajectory_preview_visibility()

    def _ui_modal(self, title: str, text: str, no_close: bool = False, ok_btn: bool = True):
        """Create a modal window."""
        window_flags = ui.WINDOW_FLAGS_NO_RESIZE
        window_flags |= ui.WINDOW_FLAGS_NO_SCROLLBAR
        window_flags |= ui.WINDOW_FLAGS_MODAL
        if no_close:
            window_flags |= ui.WINDOW_FLAGS_NO_CLOSE
        modal = ui.Window(title, width=400, height=100, flags=window_flags)
        with modal.frame:
            with ui.VStack(spacing=5):
                text = ui.Label(text, word_wrap=True, style={"alignment": ui.Alignment.CENTER})
                if ok_btn:
                    btn = ui.Button("OK")
                    btn.set_clicked_fn(lambda: self._ui_toggle_visible([modal]))
        return modal

    def _ui_create_xyz(self, option, value=(0, 0, 0), idx=None, dtype=float):  # TODO type
        colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
        with ui.HStack():
            for i, (label, colour) in enumerate(colors.items()):
                if i != 0:
                    ui.Spacer(width=4)
                with ui.ZStack(height=14):
                    with ui.ZStack(width=16):
                        ui.Rectangle(name="vector_label", style={"background_color": colour, "border_radius": 3})
                        ui.Label(label, alignment=ui.Alignment.CENTER)
                    with ui.HStack():
                        ui.Spacer(width=14)
                        self._ui_create_value(option, value[i], idx_opt=idx, idx=i, dtype=dtype)
                        ui.Spacer(width=4)

    def _ui_create_value(self, option, value=0.0, idx=None, idx_opt=None, dtype=float):  # TODO type
        if dtype == int:
            widget = ui.IntDrag(min=0, max=int(1e6))
        elif dtype == float:
            widget = ui.FloatDrag(min=-1e6, max=1e6, step=0.1, style={"border_radius": 1})
        elif dtype == bool:
            widget = ui.CheckBox()
        else:
            raise NotImplementedError
        widget.model.set_value(value)
        widget.model.add_value_changed_fn(
            lambda m: self._on_value_changed(option, m.get_value_as_float(), idx=idx, idx_opt=idx_opt)
        )
        return widget

    def _ui_simple_block(self, label, option, is_xyz=False, dtype=float):  # TODO type
        ui_fn = self._ui_create_xyz if is_xyz else self._ui_create_value
        with ui.HStack(spacing=5):
            ui.Label(label, width=120, height=10)
            ui_fn(option, value=self.config[option], dtype=dtype)
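    # NOTE: the option widgets below assume config entries shaped like
    #     {"mode": 0, "fixed": <value>, "random": [<min>, <max>]},
    # where mode 0 reads the fixed value and mode 1 samples between min and max
    # (see `_on_value_changed` above for how the two modes are written back).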
    def _ui_option_block(self, label, option, is_xyz=False, dtype=float):
        """Create an option block on the UI."""
        if option not in self.config:
            return None
        ui_fn = self._ui_create_xyz if is_xyz else self._ui_create_value
        option_block = ui.HStack(spacing=5)
        with option_block:
            ui.Label(label, width=120, height=10)
            model = ui.ComboBox(self.config[option]["mode"], "Fixed", "Random", width=80).model
            # Create option based on "fixed" or "random"
            option_0 = ui.HStack(spacing=5)  # fixed
            option_1 = ui.VStack(spacing=5)  # random
            with option_0:
                ui_fn(option, value=self.config[option]["fixed"], dtype=dtype)
            with option_1:
                for i, m in enumerate(["Min", "Max"]):
                    with ui.HStack(spacing=5):
                        ui.Label(m, width=30)
                        ui_fn(option, value=self.config[option]["random"][i], idx=i, dtype=dtype)
            if self.config[option]["mode"] == 0:
                option_1.visible = False
            else:
                option_0.visible = False
            model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([option_0, option_1]))
            model.add_item_changed_fn(
                lambda m, i: self.config[option].update({"mode": m.get_item_value_model().as_int})
            )
        return option_block

    def _ui_toggle_visible(self, ui_elements):  # TODO type
        for ui_el in ui_elements:
            ui_el.visible = not ui_el.visible

    def _build_run_ui(self):
        with self._window.frame:
            pass

    def _ui_up_axis(self):
        collection = ui.RadioCollection()
        with ui.HStack():
            ui.Label("Up Axis", width=120)
            with ui.HStack():
                ui.RadioButton(text="Y", radio_collection=collection, height=30)
                ui.RadioButton(text="Z", radio_collection=collection, height=30)
        collection.model.add_value_changed_fn(self._change_up_axis)
        collection.model.set_value(self.config.get("up_axis", 0))

    def _build_ui(self):
        with self._window.frame:
            with ui.ScrollingFrame():
                with ui.VStack(spacing=5):
                    with ui.HStack(spacing=5, height=15):
                        ui.Label("Root Dir", width=55)
                        self._ui_root_dir = ui.StringField().model
                        if self.root_dir:
                            self._ui_root_dir.set_value(self.root_dir)
                        self._ui_root_dir.add_value_changed_fn(
                            lambda m: asyncio.ensure_future(self._on_root_dir_changed(m.as_string))
                        )
                        browse = ui.Button(
                            image_url="resources/icons/folder.png",
                            width=30,
                            height=25,
                            style={"Button": {"margin": 0, "padding": 5, "alignment": ui.Alignment.CENTER}},
                        )
                        browse.set_clicked_fn(
                            lambda f=self._filepicker: self._show_filepicker(f, self._cache.get("root_dir", ""))
                        )
                    if self.root_dir:
                        with ui.HStack(height=0):
                            ui.Label("Presets", width=60)
                            self._preset_model = ui.ComboBox(
                                0, *[posixpath.splitext(posixpath.basename(p))[0] for p in self.presets]
                            ).model
                            config_dir = self._cache.get("config_dir", "")
                            config_file = self._cache.get("config_file", "")
                            ui.Button(
                                "Save As...",
                                clicked_fn=lambda f=self._configsaver: self._show_filepicker(
                                    f, config_dir, config_file
                                ),
                            )
                            ui.Button(
                                "Import",
                                clicked_fn=lambda f=self._configpicker: self._show_filepicker(
                                    f, config_dir, config_file
                                ),
                            )
                        self.option_frame = ui.VStack(spacing=5)
                        self.option_frame.visible = False
                        self._preset_model.add_item_changed_fn(
                            lambda m, i: self._on_preset_changed(self.presets[m.get_item_value_model().as_int])
                        )
                        if self.presets and not self._preset_layer:
                            self._on_preset_changed(self.presets[0])
                        self._build_progress_ui()
                    ui.Spacer()
                    ui.Button("Demo", clicked_fn=lambda: webbrowser.open(DEMO_URL), height=60)

    def _build_ui_options(self):
        # Output
        with ui.CollapsableFrame(title="Output", height=10):
            with ui.VStack(spacing=5):
                with ui.HStack(spacing=5, height=10):
                    ui.Label(
                        "Output Dir",
                        width=120,
                        height=10,
                        tooltip="Select directory to save output to. Existing files of the same name will be overwritten.",
                    )
                    self._ui_out_dir = ui.StringField().model
                    self._ui_out_dir.set_value(self.config["out_dir"])
                    self._ui_out_dir.add_value_changed_fn(lambda m: self.config.update({"out_dir": m.as_string}))
                    browse = ui.Button(
                        image_url="resources/icons/folder.png",
                        width=30,
                        height=25,
                        style={"Button": {"margin": 0, "padding": 5, "alignment": ui.Alignment.CENTER}},
                    )
                    browse.set_clicked_fn(
                        lambda f=self._outpicker: self._show_filepicker(f, self._cache.get("out_dir", ""))
                    )
                with ui.HStack(spacing=5, height=10):
                    ui.Label(
                        "Renders per Scene",
                        width=120,
                        height=10,
                        tooltip="Number of randomized scenes to be captured before re-sampling a new scene.",
                    )
                    model = ui.IntDrag(min=1, max=int(1e6)).model
                    model.set_value(self.config["renders_per_asset"])
                    model.add_value_changed_fn(
                        lambda m: self.config.update({"renders_per_asset": m.get_value_as_int()})
                    )
                _build_ui_sensor_selection("Viewport")

        # Assets
        with ui.CollapsableFrame(title="Assets", height=10):
            with ui.VStack(spacing=5):
                self._ui_simple_block("Fix Bottom Elevation", "asset_override_bottom_elev", dtype=bool)
                self._ui_simple_block("Normalize", "asset_normalize", dtype=bool)
                self._ui_up_axis()
                ui.Spacer()

        # Camera
        with ui.CollapsableFrame(title="Camera", height=10):
            with ui.VStack(spacing=5):
                with ui.HStack(spacing=5):
                    ui.Label(
                        "Camera Mode",
                        width=120,
                        height=10,
                        tooltip="Select random camera poses or follow a trajectory.",
                    )
                    cur_camera_idx = CAMERAS.index(self.config.get("cameramode", "UniformSampling"))
                    camera_mode_model = ui.ComboBox(cur_camera_idx, *CAMERAS, width=150).model
                    camera_mode_model.add_item_changed_fn(
                        lambda m, i: self.config.update({"cameramode": CAMERAS[m.get_item_value_model().as_int]})
                    )
                if "camera_focal_length" not in self.config:
                    self.config["camera_focal_length"] = {"fixed": 24.0, "mode": 0, "random": Gf.Vec2f([1.0, 120.0])}
                uniform_options = [
                    self._ui_option_block("Focal Length", "camera_focal_length"),
                    self._ui_option_block("Look-at Position", "centre", is_xyz=True),
                    self._ui_option_block("Distance", "distance"),
                    self._ui_option_block("Elevation", "elevation"),
                    self._ui_option_block("Azimuth", "azimuth"),
                ]
                if cur_camera_idx == 1:
                    self._ui_toggle_visible(uniform_options)
                camera_mode_model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible(uniform_options))
                camera_mode_model.add_item_changed_fn(lambda *_: self._set_trajectory_preview_visibility())
                # An indicator on turning on the trajectory
                traject_block = ui.VStack(spacing=5)
                with traject_block:
                    with ui.HStack(spacing=5):
                        ui.Label("Trajectory Mode", width=120, height=10, tooltip="Trajectory mode")
                        if "trajectorymode" not in self.config:
                            self.config["trajectorymode"] = "Spiral"
                        cur_traj_idx = TRAJ_OPTIONS.index(self.config.get("trajectorymode", "Spiral"))
                        trajmodel = ui.ComboBox(cur_traj_idx, *TRAJ_OPTIONS, width=150).model
                        trajmodel.add_item_changed_fn(lambda m, _: self._on_trajectory_mode_changed(m))
                    # Spiral option
                    spiral_block = ui.VStack(spacing=5)
                    with spiral_block:
                        self._ui_option_block("Distance", "distance")  # distance block
                        with ui.HStack(spacing=5):  # elevation range block
                            ui.Label("Elevation Range", width=120, height=10, tooltip="Elevation range (two numbers)")
                            ui.Spacer(width=10)
                            for i, m in enumerate(["Min", "Max"]):
                                with ui.HStack(spacing=5):
                                    ui.Label(m, width=30)
                                    val = self.config["elevation"]["random"]
                                    self._ui_create_value("elevation", value=val[i], idx=i, dtype=float)
                        with ui.HStack(spacing=5):  # rotation block
                            ui.Label("Number of Rotations", width=120, height=10)
                            self.config.setdefault("num_rotations", 3)
                            n_rot = self.config.get("num_rotations")
                            self._ui_create_value("num_rotations", value=n_rot, dtype=int)
                        ui.Spacer()
                    spiral_block.visible = cur_traj_idx == 0
                    trajmodel.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([spiral_block]))
                    # Json option
                    json_block = ui.VStack(spacing=5)
                    with json_block:
                        with ui.HStack(spacing=5, height=15):
                            ui.Label("Json path", width=55)
                            ui.Button(
                                "Json File",
                                clicked_fn=lambda f=self._jsonpicker: self._show_filepicker(
                                    f, self._cache.get("json_dir", "")
                                ),
                            )
                            if self.config.get("jsonpath") and os.path.exists(self.config["jsonpath"]):
                                asyncio.ensure_future(self._import_trajectory_from_json(self.config["jsonpath"]))
                            ui.Button(
                                "View Trajectory", clicked_fn=lambda: asyncio.ensure_future(self._preview_trajectory())
                            )
                        ui.Spacer()
                    json_block.visible = cur_traj_idx == 1
                    trajmodel.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([json_block]))
                traject_block.visible = cur_camera_idx == 1
                camera_mode_model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([traject_block]))
                ui.Spacer()
            ui.Spacer()

        # Create UI elements for DR Components
        for title, components in self.dr_components.items():
            build_component_frame(title, components)

        # Render
        with ui.CollapsableFrame(title="Render Settings", height=10):
            self._settings.set("/rtx/rendermode", self.config["renderer"])
            self._settings.set("/rtx/pathtracing/totalSpp", self.config["spp"])
            self._settings.set("/rtx/pathtracing/optixDenoiser/enabled", self.config["denoiser"])
            self._settings.set("/rtx/pathtracing/clampSpp", 0)  # Disable spp clamping
            self._settings.set("/rtx/post/aa/op", 2)
            with ui.VStack(spacing=5):
                with ui.HStack(spacing=5):
                    ui.Label("Resolution", width=120)
                    ui.Label("Width", width=40, tooltip="Rendered resolution width, in pixels.")
                    width = ui.IntDrag(min=MIN_RESOLUTION["width"], max=MAX_RESOLUTION["width"]).model
                    width.add_value_changed_fn(lambda m: self._on_resolution_changed(m, "width"))
                    ui.Spacer(width=10)
                    ui.Label("Height", width=40, tooltip="Rendered resolution height, in pixels.")
                    height = ui.IntDrag(min=MIN_RESOLUTION["height"], max=MAX_RESOLUTION["height"]).model
                    height.add_value_changed_fn(lambda m: self._on_resolution_changed(m, "height"))
                    width.set_value(self.config.get("width", self._settings.get("/app/renderer/resolution/width")))
                    height.set_value(self.config.get("height", self._settings.get("/app/renderer/resolution/height")))
                with ui.HStack(spacing=5):
                    ui.Label("Renderer", width=120, tooltip="Render Mode")
                    cur_renderer_idx = RENDERERS.index(self.config["renderer"])
                    model = ui.ComboBox(cur_renderer_idx, *RENDERERS, width=200).model
                    model.add_item_changed_fn(
                        lambda m, i: self.config.update({"renderer": RENDERERS[m.get_item_value_model().as_int]})
                    )
                    model.add_item_changed_fn(
                        lambda m, i: self._settings.set("/rtx/rendermode", RENDERERS[m.get_item_value_model().as_int])
                    )
                pt_block = ui.VStack(spacing=5)
                with pt_block:
                    with ui.HStack(spacing=5):
                        ui.Label(
                            "Samples Per Pixel", width=120, tooltip="Number of samples taken at each pixel, per frame."
                        )
                        spp = ui.IntDrag().model
                        spp.set_value(self.config["spp"])
                        spp.add_value_changed_fn(
                            lambda m: self.config.update({"spp": m.as_int})
                        )  # Only change SPP during run
                        spp.add_value_changed_fn(
                            lambda m: self._settings.set("/rtx/pathtracing/totalSpp", m.as_int)
                        )  # SPP Max
                    with ui.HStack(spacing=5):
                        ui.Label("Denoiser", width=120, tooltip="Toggle denoiser")
                        denoiser = ui.CheckBox().model
                        denoiser.set_value(self.config["denoiser"])
                        denoiser.add_value_changed_fn(lambda m: self.config.update({"denoiser": m.as_bool}))
                        denoiser.add_value_changed_fn(
                            lambda m: self._settings.set("/rtx/pathtracing/optixDenoiser/enabled", m.as_bool)
                        )
                    ui.Spacer()
                pt_block.visible = bool(cur_renderer_idx)
                model.add_item_changed_fn(lambda m, i: self._ui_toggle_visible([pt_block]))
                with ui.HStack():
                    ui.Label("Subdiv", width=120, tooltip="Subdivision Global Refinement Level")
                    with ui.HStack():
                        ui.Label("Refinement Level", width=100, tooltip="Subdivision Global Refinement Level")
                        subdiv = ui.IntDrag(min=0, max=2).model
                        subdiv.add_value_changed_fn(lambda m: self.config.update({"subdiv": m.as_int}))
                        subdiv.add_value_changed_fn(
                            lambda m: self._settings.set("/rtx/hydra/subdivision/refinementLevel", m.as_int)
                        )
                ui.Spacer()

        with ui.HStack(spacing=5):
            btn = ui.Button("Preview", height=40, tooltip="Render a preview with the current settings.")
            btn.set_clicked_fn(lambda: asyncio.ensure_future(self.preview()))
            btn = ui.Button("Run", height=40, tooltip="Generate and save groundtruth with the current settings.")
            btn.set_clicked_fn(lambda: asyncio.ensure_future(self.run()))

    def _build_progress_ui(self):
        self.progress = {"block": ui.VStack(spacing=5), "stop_signal": False}
        self.progress["block"].visible = False
        with self.progress["block"]:
            with ui.HStack(height=0):
                ui.Label(
                    "TOTAL",
                    width=80,
                    style={"font_size": 20.0},
                    tooltip="Render progress of all scenes to be rendered.",
                )
                self.progress["bar1"] = ui.ProgressBar(height=40, style={"font_size": 20.0}).model
            with ui.HStack(height=0):
                ui.Label(
                    "Per Scene",
                    width=80,
                    style={"font_size": 16.0},
                    tooltip="Render progress of the total number of renders for this scene.",
                )
                self.progress["bar2"] = ui.ProgressBar(height=20, style={"font_size": 16.0}).model
            btn = ui.Button("Cancel", height=60)
            btn.set_clicked_fn(lambda: self.progress.update({"stop_signal": True}))

    @staticmethod
    def get_instance():
        return _extension_instance
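
# --- Illustrative usage sketch: not part of the original extension. ---
# Assuming Kit has loaded this module so that `_extension_instance` holds the
# live extension object, a script could kick off a preview render like this
# (`preview()` is a coroutine, hence `ensure_future`).
def _sketch_run_preview():
    ext = _extension_instance
    if ext is not None:
        asyncio.ensure_future(ext.preview())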
66,049
Python
45.612562
127
0.570062
terrylincn/omniverse-tutorials/kaolin_data_generator_patch/README.md
1. Download the Pixar Kitchen Set models from http://graphics.pixar.com/usd/downloads.html and unzip them.
2. Follow this link to install the Kaolin library: https://kaolin.readthedocs.io/en/latest/notes/installation.html
3. Install the Kaolin app from the Omniverse Launcher.
4. Copy `extension.py` to `kaolin_app.research.data_generator/kaolin_app/research/data_generator/` (see the sketch below).
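
For step 4, a minimal Python sketch of the copy is shown below. The install root is an assumption based on a default Omniverse layout, so adjust both paths to your own machine:

```python
import shutil
from pathlib import Path

# Assumed locations -- point these at your own checkout and Kaolin app install.
src = Path("kaolin_data_generator_patch/extension.py")
kaolin_exts = Path.home() / ".local/share/ov/pkg/kaolin-2021.3.0/exts"
dst = (kaolin_exts / "kaolin_app.research.data_generator"
       / "kaolin_app/research/data_generator/extension.py")
shutil.copy(src, dst)
```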
376
Markdown
93.249977
119
0.781915
terrylincn/omniverse-tutorials/code_demo_mesh100/demo.py
import omni
from pxr import Usd, UsdLux, UsdGeom, UsdShade, Sdf, Gf, Vt, UsdPhysics, PhysxSchema
from omni.physx import get_physx_interface
from omni.physx.bindings._physx import SimulationEvent
from omni.physx.scripts.physicsUtils import *
import random

stage = omni.usd.get_context().get_stage()

# Set up axis to z
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(stage, 0.01)

defaultPrimPath = str(stage.GetDefaultPrim().GetPath())

# Light
sphereLight = UsdLux.SphereLight.Define(stage, defaultPrimPath + "/SphereLight")
sphereLight.CreateRadiusAttr(150)
sphereLight.CreateIntensityAttr(30000)
sphereLight.AddTranslateOp().Set(Gf.Vec3f(650.0, 0.0, 1150.0))

# Physics scene
UsdPhysics.Scene.Define(stage, defaultPrimPath + "/physicsScene")

rows = 10
cols = 10
sphereCount = rows * cols
stripSize = 100.0  # Face size; must be defined before the mesh is built below
_colors = []

material_scope_path = defaultPrimPath + "/Looks"
UsdGeom.Scope.Define(stage, material_scope_path)

# Trianglemesh materials, one per face, each with a different restitution
for i in range(rows):
    for j in range(cols):
        mtl_path = material_scope_path + "/OmniPBR" + str(i * cols + j)
        mat_prim = stage.DefinePrim(mtl_path, "Material")
        material_prim = UsdShade.Material.Get(stage, mat_prim.GetPath())
        material = UsdPhysics.MaterialAPI.Apply(material_prim.GetPrim())
        restitution = 0.0 + ((i * cols + j) % sphereCount) * 0.01
        material.CreateRestitutionAttr().Set(restitution)
        if material_prim:
            shader_mtl_path = stage.DefinePrim("{}/Shader".format(mtl_path), "Shader")
            shader_prim = UsdShade.Shader.Get(stage, shader_mtl_path.GetPath())
            if shader_prim:
                shader_out = shader_prim.CreateOutput("out", Sdf.ValueTypeNames.Token)
                material_prim.CreateSurfaceOutput("mdl").ConnectToSource(shader_out)
                material_prim.CreateVolumeOutput("mdl").ConnectToSource(shader_out)
                material_prim.CreateDisplacementOutput("mdl").ConnectToSource(shader_out)
                shader_prim.GetImplementationSourceAttr().Set(UsdShade.Tokens.sourceAsset)
                shader_prim.SetSourceAsset(Sdf.AssetPath("OmniPBR.mdl"), "mdl")
                shader_prim.SetSourceAssetSubIdentifier("OmniPBR", "mdl")
                color = Gf.Vec3f(random.random(), random.random(), random.random())
                shader_prim.GetPrim().CreateAttribute("inputs:diffuse_tint", Sdf.ValueTypeNames.Color3f).Set(color)
                _colors.append(color)

# Triangle mesh with multiple materials
path = defaultPrimPath + "/triangleMesh"
_mesh_path = path
mesh = UsdGeom.Mesh.Define(stage, path)

# Fill in VtArrays
points = []
normals = []
indices = []
vertexCounts = []

for i in range(rows):
    for j in range(cols):
        # One GeomSubset per face so each face can bind its own material
        subset = UsdGeom.Subset.Define(stage, path + "/subset" + str(i * cols + j))
        subset.CreateElementTypeAttr().Set("face")
        subset_indices = [i * cols + j]
        rel = subset.GetPrim().CreateRelationship("material:binding", False)
        rel.SetTargets([Sdf.Path(material_scope_path + "/OmniPBR" + str(i * cols + j))])

        points.append(Gf.Vec3f(-stripSize / 2 + stripSize * i, -stripSize / 2 + stripSize * j, 0.0))
        points.append(Gf.Vec3f(-stripSize / 2 + stripSize * (i + 1), -stripSize / 2 + stripSize * j, 0.0))
        points.append(Gf.Vec3f(-stripSize / 2 + stripSize * (i + 1), -stripSize / 2 + stripSize * (j + 1), 0.0))
        points.append(Gf.Vec3f(-stripSize / 2 + stripSize * i, -stripSize / 2 + stripSize * (j + 1), 0.0))
        for k in range(4):
            normals.append(Gf.Vec3f(0, 0, 1))
            indices.append(k + (i * cols + j) * 4)
        subset.CreateIndicesAttr().Set(subset_indices)
        vertexCounts.append(4)

mesh.CreateFaceVertexCountsAttr().Set(vertexCounts)
mesh.CreateFaceVertexIndicesAttr().Set(indices)
mesh.CreatePointsAttr().Set(points)
mesh.CreateDoubleSidedAttr().Set(False)
mesh.CreateNormalsAttr().Set(normals)
UsdPhysics.CollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI.CreateApproximationAttr().Set("none")

# Sphere material
sphereMaterialpath = defaultPrimPath + "/sphereMaterial"
UsdShade.Material.Define(stage, sphereMaterialpath)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(sphereMaterialpath))
material.CreateRestitutionAttr().Set(0.9)

# Spheres
for i in range(rows):
    for j in range(cols):
        spherePath = "/sphere" + str(i * cols + j)  # One unique path per sphere
        size = 25.0
        position = Gf.Vec3f(i * stripSize, j * stripSize, 250.0)
        sphere_prim = add_rigid_sphere(stage, spherePath, size, position)

        # Add material
        add_physics_material_to_prim(stage, sphere_prim, Sdf.Path(sphereMaterialpath))

        # Apply contact report
        contactReportAPI = PhysxSchema.PhysxContactReportAPI.Apply(sphere_prim)
        contactReportAPI.CreateThresholdAttr().Set(200000)

collider0 = None
collider1 = None


def _on_simulation_event(event):
    global collider0, collider1, _mesh_path, stage, _colors
    if event.type == int(SimulationEvent.CONTACT_DATA):
        if collider1 == _mesh_path:
            # Tint the sphere with the colour of the mesh face it touched
            usdGeom = UsdGeom.Mesh.Get(stage, collider0)
            color = Vt.Vec3fArray([_colors[event.payload['faceIndex1']]])
            usdGeom.GetDisplayColorAttr().Set(color)
    if event.type == int(SimulationEvent.CONTACT_FOUND):
        contactDict = resolveContactEventPaths(event)
        collider0 = contactDict["collider0"]
        collider1 = contactDict["collider1"]
    if event.type == int(SimulationEvent.CONTACT_PERSISTS):
        contactDict = resolveContactEventPaths(event)
        collider0 = contactDict["collider0"]
        collider1 = contactDict["collider1"]


events = get_physx_interface().get_simulation_event_stream()
_simulation_event_sub = events.create_subscription_to_pop(_on_simulation_event)
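
# --- Illustrative cleanup sketch: not part of the original demo. ---
# Following the usual Kit pattern of dropping the subscription reference to
# end the callbacks, call this when you are done with the demo.
def _cleanup_simulation_events():
    global _simulation_event_sub
    _simulation_event_sub = None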
5,922
Python
40.41958
117
0.681189
j3soon/OmniIsaacGymEnvs-DofbotReacher/README.md
# Dofbot Reacher Reinforcement Learning Sim2Real Environment for Omniverse Isaac Gym/Sim

This repository adds a DofbotReacher environment based on [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) (commit [cc1aab0](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/tree/cc1aab0f904ade860fc0761d62edb6e706ab89ec)), and includes Sim2Real code to control a real-world [Dofbot](https://category.yahboom.net/collections/r-robotics-arm/products/dofbot-jetson_nano) with the policy learned by reinforcement learning in Omniverse Isaac Gym/Sim.

- We suggest using [the isaac-sim-2022.1.1 branch](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/tree/isaac-sim-2022.1.1) to prevent any potential issues. The RL code is tested on both Windows and Linux, while the Sim2Real code is tested on Linux and a real Dofbot using Isaac Sim 2022.1.1 and ROS Melodic.
- **WARNING**: The RL code in this branch is only tested on Linux using Isaac Sim 2023.1.0. The Sim2Real code isn't fully tested yet.

This repo is compatible with the following repositories:

- [OmniIsaacGymEnvs-DofbotReacher](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher)
- [OmniIsaacGymEnvs-UR10Reacher](https://github.com/j3soon/OmniIsaacGymEnvs-UR10Reacher)
- [OmniIsaacGymEnvs-KukaReacher](https://github.com/j3soon/OmniIsaacGymEnvs-KukaReacher)
- [OmniIsaacGymEnvs-HiwinReacher](https://github.com/j3soon/OmniIsaacGymEnvs-HiwinReacher)

## Preview

![](docs/media/DofbotReacher-Vectorized.gif)

![](docs/media/DofbotReacher-Sim2Real.gif)

## Installation

Prerequisites:

- Before starting, please make sure your hardware and software meet the [system requirements](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html#system-requirements).
- [Install Omniverse Isaac Sim 2023.1.0](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) (Must setup Cache and Nucleus)
  - You may try out newer versions of Isaac Sim along with [their corresponding patch](https://github.com/j3soon/isaac-extended#conda-issue-on-linux), but it is not guaranteed to work.
  - Double check that Nucleus is correctly installed by [following these steps](https://github.com/j3soon/isaac-extended#nucleus).
- Your computer & GPU should be able to run the Cartpole example in [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs)
- (Optional) [Set up a Dofbot with Jetson Nano](http://www.yahboom.net/study/Dofbot-Jetson_nano) in the real world

Make sure to install Isaac Sim in the default directory and clone this repository to the home directory; otherwise, you will need to modify the commands below accordingly.

We will use Anaconda to manage our virtual environment:

1. Clone this repository and the patches repo:
   - Linux
     ```sh
     cd ~
     git clone https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher.git
     git clone https://github.com/j3soon/isaac-extended.git
     ```
   - Windows
     ```sh
     cd %USERPROFILE%
     git clone https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher.git
     git clone https://github.com/j3soon/isaac-extended.git
     ```
2. Generate [instanceable](https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html) Dofbot assets for training:

   [Launch the Script Editor](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gui_interactive_scripting.html#script-editor) in Isaac Sim. Copy the content in `omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot.py` and execute it inside the Script Editor window. Wait until you see the text `Done!`.
3. [Download and Install Anaconda](https://www.anaconda.com/products/distribution#Downloads).
   ```sh
   # For 64-bit Linux (x86_64/x64/amd64/intel64)
   wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh
   bash Anaconda3-2022.10-Linux-x86_64.sh
   ```
   For Windows users, make sure to use `Anaconda Prompt` instead of `Anaconda Powershell Prompt`, `Command Prompt`, or `Powershell` for the following commands.
4. Patch Isaac Sim 2023.1.0
   - Linux
     ```sh
     export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
     cp $ISAAC_SIM/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh.bak
     cp ~/isaac-extended/isaac_sim-2023.1.0-patch/linux/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh
     ```
   - Windows
     > (To be updated)
5. [Set up conda environment for Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html#advanced-running-with-anaconda)
   - Linux
     ```sh
     # conda remove --name isaac-sim --all
     export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
     cd $ISAAC_SIM
     conda env create -f environment.yml
     conda activate isaac-sim
     cd ~/OmniIsaacGymEnvs-DofbotReacher
     pip install -e .
     ```
   - Windows
     > (To be updated)
6. Activate conda environment
   - Linux
     ```sh
     export ISAAC_SIM="$HOME/.local/share/ov/pkg/isaac_sim-2023.1.0"
     cd $ISAAC_SIM
     conda activate isaac-sim
     source setup_conda_env.sh
     ```
   - Windows
     ```sh
     set ISAAC_SIM="%LOCALAPPDATA%\ov\pkg\isaac_sim-2023.1.0"
     cd %ISAAC_SIM%
     conda activate isaac-sim
     call setup_conda_env.bat
     ```

Please note that you should execute the commands in Step 6 for every new shell. For Windows users, replace `~` with `%USERPROFILE%` for all the following commands.

## Dummy Policy

This is a sample to make sure you have set up the environment correctly. You should see a single Dofbot in Isaac Sim.

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/dummy_dofbot_policy.py task=DofbotReacher test=True num_envs=1
```

Alternatively, you can replace the dummy policy with a random policy with `omniisaacgymenvs/scripts/random_policy.py`.

## Training

You can launch the training in `headless` mode as follows:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True
```

The number of environments is set to 2048 by default. If your GPU has small memory, you can decrease the number of environments by changing the argument `num_envs` as below:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True num_envs=2048
```

You can also skip training by downloading the pre-trained model checkpoint by:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
wget https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/releases/download/v1.1.0/runs.zip
unzip runs.zip
```

The learning curve of the pre-trained model:

![](docs/media/DofbotReacher-Learning-Curve.png)

## Testing

Make sure you have stored the model checkpoints at `~/OmniIsaacGymEnvs-DofbotReacher/runs`; you can check this with the following command:

```sh
ls ~/OmniIsaacGymEnvs-DofbotReacher/runs/DofbotReacher/nn/
```

In order to achieve the highest rewards, you may not want to use the latest checkpoint `./runs/DofbotReacher/nn/DofbotReacher.pth`. Instead, use the checkpoint with the highest rewards, such as `./runs/DofbotReacher/nn/last_DofbotReacher_ep_1000_rew_XXX.pth`. You can replace `DofbotReacher.pth` with the latest checkpoint before following the steps below, or simply modify the commands below to use the latest checkpoint.
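
If you would like to pick that best checkpoint programmatically, here is a small sketch; it assumes the `..._rew_XXX.pth` naming convention shown above and is only illustrative:

```python
import re
from pathlib import Path

# Scan the run directory and keep the checkpoint whose filename reports the
# highest reward (e.g. "last_DofbotReacher_ep_1000_rew_42.5.pth").
ckpt_dir = Path.home() / "OmniIsaacGymEnvs-DofbotReacher/runs/DofbotReacher/nn"
pattern = re.compile(r"_rew_([0-9.]+)\.pth$")
candidates = [
    (float(m.group(1)), p)
    for p in ckpt_dir.glob("*.pth")
    if (m := pattern.search(p.name))
]
if candidates:
    best_reward, best_ckpt = max(candidates)
    print(f"Best checkpoint (reward {best_reward:.2f}): {best_ckpt}")
```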
You can visualize the learned policy by the following command:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=512 checkpoint=./runs/DofbotReacher/nn/DofbotReacher.pth
```

Likewise, you can decrease the number of environments by modifying the parameter `num_envs=512`.

## Using the Official URDF File

The official URDF file in `/thirdparty/dofbot_info` is provided by Yahboom. The details on how to download this file can be found in the commit message of [e866618](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/commit/e86661813cd941133b4dc68da4c20a21efa00a0b).

The only additional step is to generate [instanceable](https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html) Dofbot assets based on the official URDF file:

[Launch the Script Editor](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gui_interactive_scripting.html#script-editor) in Isaac Sim. Copy the content in `omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot_from_urdf.py` and execute it inside the Script Editor window. Wait until you see the text `Done!`.

You can now use the official URDF file by appending the `use_urdf=True` flag to any command above. For example:

- Try out the dummy policy script with the official URDF file:

  ```sh
  cd ~/OmniIsaacGymEnvs-DofbotReacher
  python omniisaacgymenvs/scripts/dummy_dofbot_policy.py task=DofbotReacher test=True num_envs=1 use_urdf=True
  ```
- Or download the pre-trained model checkpoint and run it:

  ```sh
  cd ~/OmniIsaacGymEnvs-DofbotReacher
  wget https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/releases/download/v1.2.0/runs_urdf.zip
  unzip runs_urdf.zip
  ```

  ```sh
  cd ~/OmniIsaacGymEnvs-DofbotReacher
  python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=512 checkpoint=./runs_urdf/DofbotReacher/nn/DofbotReacher.pth use_urdf=True
  ```

Please note that the model trained with the USD file provided by Isaac Sim is not compatible with the official URDF file. Fortunately, we also provide a pre-trained checkpoint for the official URDF file.

The learning curve of the pre-trained model:

![](docs/media/DofbotReacher-URDF-Learning-Curve.png)

## Sim2Real

The learned policy has a very conservative constraint on the joint limits. Therefore, the gripper would not hit the ground under such limits. However, you should still make sure there are no other obstacles within Dofbot's workspace (reachable area). That being said, if things go wrong, press `Ctrl+C` twice in the terminal to kill the process.

> It would be possible to remove the conservative joint limit constraints by utilizing self-collision detection in Isaac Sim. We are currently investigating this feature.

For simplicity, we'll use TCP instead of ROS to control the real-world Dofbot. Copy the server notebook file (`omniisaacgymenvs/sim2real/dofbot-server.ipynb`) to the Jetson Nano on your Dofbot. Launch a Jupyter Notebook on the Jetson Nano and execute the server notebook file.

You should be able to reset the Dofbot's joints by the following script:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/sim2real/dofbot.py
```

Edit `omniisaacgymenvs/cfg/task/DofbotReacher.yaml`. Set `sim2real.enabled` to `True`, and set `sim2real.ip` to the IP of your Dofbot:

```yaml
sim2real:
  enabled: True
  fail_quietely: False
  verbose: False
  ip: <IP_OF_YOUR_DOFBOT>
  port: 65432
```

Now you can control the real-world Dofbot in real-time by the following command:

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher test=True num_envs=1 checkpoint=./runs/DofbotReacher/nn/DofbotReacher.pth
```

## Demo

We provide an interactable demo based on the `DofbotReacher` RL example. In this demo, you can click on any of the Dofbots in the scene to manually control the robot with your keyboard as follows:

- `Q`/`A`: Control Joint 0.
- `W`/`S`: Control Joint 1.
- `E`/`D`: Control Joint 2.
- `R`/`F`: Control Joint 3.
- `T`/`G`: Control Joint 4.
- `Y`/`H`: Control Joint 5.
- `ESC`: Unselect a selected Dofbot and yield manual control.

Launch this demo with the following command. Note that this demo limits the maximum number of Dofbots in the scene to 128.

```sh
cd ~/OmniIsaacGymEnvs-DofbotReacher
python omniisaacgymenvs/scripts/rlgames_demo.py task=DofbotReacher num_envs=64
```

## Running in Docker

If you have an [NVIDIA Enterprise subscription](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/planning.html), you can run all services with Docker Compose. For users without a subscription, you can pull the [Isaac Docker image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim), but should still install Omniverse Nucleus beforehand (only Isaac itself is dockerized).

Follow [this tutorial](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_container.html#isaac-sim-setup-remote-headless-container) to generate your NGC API Key. Please note that you should clone this repository in your home directory and generate instanceable assets beforehand, as mentioned in the [Installation](#installation) section.

We will now set up the docker environment.

1. Build the docker image

   ```sh
   docker pull nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1
   docker build . -t j3soon/isaac-sim
   ```
2. Launch an Isaac Container in Headless mode:

   ```sh
   scripts/run_docker_headless.sh
   ./runheadless.native.sh
   ```

   Alternatively, launch an Isaac Container with GUI (the host machine should include a desktop environment):

   ```sh
   scripts/run_docker.sh
   ./runapp.sh
   ```
3. Install this repository

   ```sh
   cd ~/OmniIsaacGymEnvs-DofbotReacher
   pip install -e .
   ```
4. Run any command in the docker container

   > Make sure to add `headless=True` if the container is launched in headless mode.

   For an example, running the training script:

   ```sh
   cd ~/OmniIsaacGymEnvs-DofbotReacher
   python omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True num_envs=2048
   ```

   You can watch the training progress with:

   ```sh
   docker exec -it isaac-sim /bin/bash
   cd ~/OmniIsaacGymEnvs-DofbotReacher
   tensorboard --logdir=./runs
   ```

## Acknowledgement

This project has been made possible through the support of [ElsaLab][elsalab] and [NVIDIA AI Technology Center (NVAITC)][nvaitc].

For a complete list of contributors to the code of this repository, please visit the [contributor list](https://github.com/j3soon/OmniIsaacGymEnvs-DofbotReacher/graphs/contributors).

[![](docs/media/logos/elsalab.png)][elsalab]
[![](docs/media/logos/nvaitc.png)][nvaitc]

[elsalab]: https://github.com/elsa-lab
[nvaitc]: https://github.com/NVAITC

Disclaimer: this is not an official NVIDIA product.
> **Note**: below is the original README of [OmniIsaacGymEnvs](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs).

# Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim

## About this repository

This repository contains Reinforcement Learning examples that can be run with the latest release of [Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html). RL examples are trained using PPO from the [rl_games](https://github.com/Denys88/rl_games) library, and examples are built on top of Isaac Sim's `omni.isaac.core` and `omni.isaac.gym` frameworks.

Please see [release notes](docs/release_notes.md) for the latest updates.

<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>

<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>

<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>

<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>

<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/>&emsp;<img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>

<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="614" height="307"/>

## Installation

Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release.

*Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2023.1.0, to ensure examples work as expected.*

Note that the 2022.2.1 OmniIsaacGymEnvs release will no longer work with the latest Isaac Sim 2023.1.0 release. Due to a change in USD APIs, line 138 in rl_task.py is no longer valid. To run the previous OIGE release with the latest Isaac Sim release, please comment out lines 137 and 138 in rl_task.py or set `add_distant_light` to `False` in the task config file. No changes are required if running with the latest release of OmniIsaacGymEnvs.

Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim.

To install `omniisaacgymenvs`, first clone this repository:

```bash
git clone https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs.git
```

Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.

To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path.

```
For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh
For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $*
For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh
```

Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`:

```bash
PYTHON_PATH -m pip install -e .
```

The following error may appear during the initial installation. This error is harmless and can be ignored.

```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
```

### Running the examples

*Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.*

To train your first policy, run:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```

An Isaac Sim app window should be launched. Once Isaac Sim initialization completes, the Cartpole scene will be constructed and simulation will start running automatically. The process will terminate once training finishes.

Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar.

To achieve maximum performance, launch training in `headless` mode as follows:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True
```

#### A Note on the Startup Time of the Simulation

Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually be optimized in future releases.

### Extension Workflow

The extension workflow provides a simple user interface for creating and launching RL tasks. To launch Isaac Sim for the extension workflow, run:

```bash
./<isaac_sim_root>/isaac-sim.gym.sh --ext-folder </parent/directory/to/OIGE>
```

Note: `isaac_sim_root` should be located in the same directory as `python.sh`.

The UI window can be activated from `Isaac Examples > RL Examples` by navigating the top menu bar. For more details on the extension workflow, please refer to the [documentation](docs/extension_workflow.md).

### Loading trained models // Checkpoints

Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME` defaults to the task name, but can also be overridden via the `experiment` argument.

To load a trained checkpoint and continue training, use the `checkpoint` argument:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth
```

To load a trained checkpoint and only perform inference (no training), pass `test=True` as an argument, along with the checkpoint name. To avoid rendering overhead, you may also want to run with fewer environments using `num_envs=64`:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64
```

Note that if there are special characters such as `[` or `=` in the checkpoint names, you will need to escape them and put quotes around the string.
For example, `checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`

We provide pre-trained checkpoints on the [Nucleus](https://docs.omniverse.nvidia.com/nucleus/latest/index.html) server under `Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints`. Run the following command to launch inference with a pre-trained checkpoint:

Localhost (To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html)):

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```

Production server:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```

When running with a pre-trained checkpoint for the first time, we will automatically download the checkpoint file to `omniisaacgymenvs/checkpoints`. For subsequent runs, we will re-use the file that has already been downloaded, and will not overwrite existing checkpoints with the same name in the `checkpoints` folder.

## Running from Docker

The latest Isaac Sim Docker image can be found on [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim). A utility script is provided at `docker/run_docker.sh` to help initialize this repository and launch the Isaac Sim docker container. The script can be run with:

```bash
./docker/run_docker.sh
```

Then, training can be launched from the container with:

```bash
/isaac-sim/python.sh scripts/rlgames_train.py headless=True task=Ant
```

To run the Isaac Sim docker with UI, use the following script:

```bash
./docker/run_docker_viewer.sh
```

Then, training can be launched from the container with:

```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant
```

To avoid re-installing OIGE each time a container is launched, we also provide a dockerfile that can be used to build an image with OIGE installed. To build the image, run:

```bash
docker build -t isaac-sim-oige -f docker/dockerfile .
```

Then, start a container with the built image:

```bash
./docker/run_dockerfile.sh
```

Then, training can be launched from the container with:

```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant headless=True
```

## Livestream

OmniIsaacGymEnvs supports livestream through the [Omniverse Streaming Client](https://docs.omniverse.nvidia.com/app_streaming-client/app_streaming-client/overview.html). To enable this feature, add the commandline argument `enable_livestream=True`:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True enable_livestream=True
```

Connect from the Omniverse Streaming Client once the SimulationApp has been created. Note that enabling livestream is equivalent to training with the viewer enabled, thus the speed of training/inferencing will decrease compared to running in headless mode.

## Training Scripts

All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`.

To test out a task without RL in the loop, run the random policy script with:

```bash
PYTHON_PATH scripts/random_policy.py task=Cartpole
```

This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated.

To run a simple form of PPO from `rl_games`, use the single-threaded training script:

```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```

This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations has been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI.

### Configuration and command line arguments

We use [Hydra](https://hydra.cc/docs/intro/) to manage the config.

Common arguments for the training scripts are:

* `task=TASK` - Selects which task to use. Any of `AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `CartpoleCamera`, `Crazyflie`, `FactoryTaskNutBoltPick`, `FactoryTaskNutBoltPlace`, `FactoryTaskNutBoltScrew`, `FrankaCabinet`, `FrankaDeformable`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM` (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task`)
* `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU.
* `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax.
* `multi_gpu=MULTI_GPU` - Whether to train using multiple GPUs. Defaults to `False`. Note that this option is only available with `rlgames_train.py`.
* `test=TEST` - If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `enable_livestream=ENABLE_LIVESTREAM` - Whether to enable Omniverse streaming.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
* `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation).
* `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used.

Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`.
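
If you prefer to experiment with such overrides programmatically, Hydra's compose API accepts the same strings. A minimal sketch (the `cfg` directory and config name mirror the repository layout described above; treat the exact values as illustrative):

```python
from hydra import compose, initialize

# Compose the same config the training scripts use, applying CLI-style
# overrides in Python. Run from OmniIsaacGymEnvs/omniisaacgymenvs so the
# relative config path resolves.
with initialize(config_path="cfg"):
    cfg = compose(
        config_name="config",
        overrides=["task=Cartpole", "train.params.config.minibatch_size=64"],
    )
print(cfg.task.name, cfg.train.params.config.minibatch_size)
```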
#### Hydra Notes

Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file.

The `task` and `train` portions of the config work through the use of config groups. You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/). The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`.

In some places in the config you will find other variables referenced (for example, `num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy. This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).

### Tensorboard

Tensorboard can be launched during training via the following command:

```bash
PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries
```

## WandB support

You can run [WandB](https://wandb.ai/) with OmniIsaacGymEnvs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run with the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating.

## Training with Multiple GPUs

To train with multiple GPUs, use the following command, where `--nproc_per_node` represents the number of available GPUs:

```bash
PYTHON_PATH -m torch.distributed.run --nnodes=1 --nproc_per_node=2 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```

## Multi-Node Training

To train across multiple nodes/machines, it is required to launch an individual process on each node. For the master node, use the following command, where `--nproc_per_node` represents the number of available GPUs, and `--nnodes` represents the number of nodes:

```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```

Note that the port (`5555`) can be replaced with any other available port.

For non-master nodes, use the following command, replacing `--node_rank` with the index of each machine:

```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```

For more details on multi-node training with PyTorch, please visit [here](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, multi-node training may perform worse than running on a single node instance.

## Tasks

Source code for tasks can be found in `omniisaacgymenvs/tasks`. Each task follows the frameworks provided in `omni.isaac.core` and `omni.isaac.gym` in Isaac Sim. Refer to [docs/framework.md](docs/framework.md) for how to create your own tasks; a minimal sketch of the task interface is shown below. Full details on each of the tasks available can be found in the [RL examples documentation](docs/rl_examples.md).
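As a quick orientation, here is a minimal sketch of the task interface, inferred from the task sources bundled later in this file (for example `cartpole_camera.py` and `ball_balance.py`). The class name `MyTask` and the values shown are hypothetical; `docs/framework.md` remains the authoritative guide:

```python
import torch

from omniisaacgymenvs.tasks.base.rl_task import RLTask


class MyTask(RLTask):  # hypothetical task name
    def __init__(self, name, sim_config, env, offset=None) -> None:
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = 500
        # observation/action sizes must be set before RLTask.__init__ allocates buffers
        self._num_observations = 4
        self._num_actions = 1
        RLTask.__init__(self, name, env)

    def set_up_scene(self, scene) -> None:
        # add robots/objects under self.default_zero_env_path, then let RLTask clone them
        super().set_up_scene(scene)

    def get_observations(self) -> dict:
        # fill self.obs_buf from simulation state
        return {"my_task": {"obs_buf": self.obs_buf}}

    def pre_physics_step(self, actions) -> None:
        # apply actions to the articulations before each physics step
        pass

    def calculate_metrics(self) -> None:
        # compute per-env rewards into self.rew_buf
        self.rew_buf[:] = 0.0

    def is_done(self) -> None:
        # flag environments for reset, e.g. on episode timeout
        self.reset_buf[:] = torch.where(
            self.progress_buf >= self._max_episode_length - 1,
            torch.ones_like(self.reset_buf),
            self.reset_buf,
        )
```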
## Demo

We provide an interactive demo based on the `AnymalTerrain` RL example. In this demo, you can click on any of the ANYmals in the scene to go into third-person mode and manually control the robot with your keyboard as follows:

- `Up Arrow`: Forward linear velocity command
- `Down Arrow`: Backward linear velocity command
- `Left Arrow`: Leftward linear velocity command
- `Right Arrow`: Rightward linear velocity command
- `Z`: Counterclockwise yaw angular velocity command
- `X`: Clockwise yaw angular velocity command
- `C`: Toggles camera view between third-person and scene view while maintaining manual control
- `ESC`: Unselects the selected ANYmal and relinquishes manual control

Launch this demo with the following command. Note that this demo limits the maximum number of ANYmals in the scene to 128.

```bash
PYTHON_PATH scripts/rlgames_demo.py task=AnymalTerrain num_envs=64 checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth
```

<img src="https://user-images.githubusercontent.com/34286328/184688654-6e7899b2-5847-4184-8944-2a96b129b1ff.gif" width="600" height="300"/>
32,868
Markdown
50.843849
473
0.771967
j3soon/OmniIsaacGymEnvs-DofbotReacher/config/extension.toml
[gym]
reloadable = true

[package]
version = "0.0.0"
category = "Simulation"
title = "Isaac Gym Envs"
description = "RL environments"
authors = ["Isaac Sim Team"]
repository = "https://gitlab-master.nvidia.com/carbon-gym/omniisaacgymenvs"
keywords = ["isaac"]
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
icon = "data/icon.png"
writeTarget.kit = true

[dependencies]
"omni.isaac.gym" = {}
"omni.isaac.core" = {}
"omni.isaac.cloner" = {}
"omni.isaac.ml_archive" = {} # torch

[[python.module]]
name = "omniisaacgymenvs"
532
TOML
20.319999
75
0.693609
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/extension.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import asyncio import inspect import os import traceback import weakref from abc import abstractmethod import hydra import omni.ext import omni.timeline import omni.ui as ui import omni.usd from hydra import compose, initialize from omegaconf import OmegaConf from omni.isaac.cloner import GridCloner from omni.isaac.core.utils.extensions import disable_extension, enable_extension from omni.isaac.core.utils.torch.maths import set_seed from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.core.world import World from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.rlgames.rlgames_train_mt import RLGTrainer, Trainer from omniisaacgymenvs.utils.task_util import import_tasks, initialize_task from omni.isaac.ui.callbacks import on_open_folder_clicked, on_open_IDE_clicked from omni.isaac.ui.menu import make_menu_item_description from omni.isaac.ui.ui_utils import ( btn_builder, dropdown_builder, get_style, int_builder, multi_btn_builder, multi_cb_builder, scrolling_frame_builder, setup_ui_headers, str_builder, ) from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items from omni.kit.viewport.utility import get_active_viewport, get_viewport_from_window_name from omni.kit.viewport.utility.camera_state import ViewportCameraState from pxr import Gf ext_instance = None class RLExtension(omni.ext.IExt): def on_startup(self, ext_id: str): self._render_modes = ["Full render", "UI only", "None"] self._env = None self._task = None self._ext_id = ext_id ext_manager = omni.kit.app.get_app().get_extension_manager() extension_path = ext_manager.get_extension_path(ext_id) self._ext_path = os.path.dirname(extension_path) if os.path.isfile(extension_path) else extension_path self._ext_file_path = os.path.abspath(__file__) self._initialize_task_list() self.start_extension( "", "", "RL Examples", "RL Examples", "", "A set of reinforcement learning examples.", self._ext_file_path, ) self._task_initialized = False self._task_changed = False self._is_training = False self._render = True self._resume = False self._test = False self._evaluate = False self._checkpoint_path = "" self._timeline = omni.timeline.get_timeline_interface() self._viewport = get_active_viewport() self._viewport.updates_enabled = True global ext_instance ext_instance = self def _initialize_task_list(self): self._task_map, _ = import_tasks() self._task_list = list(self._task_map.keys()) self._task_list.sort() self._task_list.remove("CartpoleCamera") # we cannot run camera-based training from extension workflow for now. it requires a specialized app file. 
self._task_name = self._task_list[0] self._parse_config(self._task_name) self._update_task_file_paths(self._task_name) def _update_task_file_paths(self, task): self._task_file_path = os.path.abspath(inspect.getfile(self._task_map[task])) self._task_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/task/{task}.yaml") self._train_cfg_file_path = os.path.join(os.path.dirname(self._ext_file_path), f"cfg/train/{task}PPO.yaml") def _parse_config(self, task, num_envs=None, overrides=None): hydra.core.global_hydra.GlobalHydra.instance().clear() initialize(version_base=None, config_path="cfg") overrides_list = [f"task={task}"] if overrides is not None: overrides_list += overrides if num_envs is None: self._cfg = compose(config_name="config", overrides=overrides_list) else: self._cfg = compose(config_name="config", overrides=overrides_list + [f"num_envs={num_envs}"]) self._cfg_dict = omegaconf_to_dict(self._cfg) self._sim_config = SimConfig(self._cfg_dict) def start_extension( self, menu_name: str, submenu_name: str, name: str, title: str, doc_link: str, overview: str, file_path: str, number_of_extra_frames=1, window_width=550, keep_window_open=False, ): window = ui.Workspace.get_window("Property") if window: window.visible = False window = ui.Workspace.get_window("Render Settings") if window: window.visible = False menu_items = [make_menu_item_description(self._ext_id, name, lambda a=weakref.proxy(self): a._menu_callback())] if menu_name == "" or menu_name is None: self._menu_items = menu_items elif submenu_name == "" or submenu_name is None: self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)] else: self._menu_items = [ MenuItemDescription( name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)] ) ] add_menu_items(self._menu_items, "Isaac Examples") self._task_dropdown = None self._cbs = None self._build_ui( name=name, title=title, doc_link=doc_link, overview=overview, file_path=file_path, number_of_extra_frames=number_of_extra_frames, window_width=window_width, keep_window_open=keep_window_open, ) return def _build_ui( self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width, keep_window_open ): self._window = omni.ui.Window( name, width=window_width, height=0, visible=keep_window_open, dockPreference=ui.DockPreference.LEFT_BOTTOM ) with self._window.frame: self._main_stack = ui.VStack(spacing=5, height=0) with self._main_stack: setup_ui_headers(self._ext_id, file_path, title, doc_link, overview) self._controls_frame = ui.CollapsableFrame( title="World Controls", width=ui.Fraction(1), height=0, collapsed=False, style=get_style(), horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with self._controls_frame: with ui.VStack(style=get_style(), spacing=5, height=0): with ui.HStack(style=get_style()): with ui.VStack(style=get_style(), width=ui.Fraction(20)): dict = { "label": "Select Task", "type": "dropdown", "default_val": 0, "items": self._task_list, "tooltip": "Select a task", "on_clicked_fn": self._on_task_select, } self._task_dropdown = dropdown_builder(**dict) with ui.Frame(tooltip="Open Source Code"): ui.Button( name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) with ui.Frame(tooltip="Open Task Config"): ui.Button( 
name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._task_cfg_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) with ui.Frame(tooltip="Open Training Config"): ui.Button( name="IconButton", width=20, height=20, clicked_fn=lambda: on_open_IDE_clicked(self._ext_path, self._train_cfg_file_path), style=get_style()["IconButton.Image::OpenConfig"], alignment=ui.Alignment.LEFT_CENTER, tooltip="Open in IDE", ) dict = { "label": "Number of environments", "tooltip": "Enter the number of environments to construct", "min": 0, "max": 8192, "default_val": self._cfg.task.env.numEnvs, } self._num_envs_int = int_builder(**dict) dict = { "label": "Load Environment", "type": "button", "text": "Load", "tooltip": "Load Environment and Task", "on_clicked_fn": self._on_load_world, } self._load_env_button = btn_builder(**dict) dict = { "label": "Rendering Mode", "type": "dropdown", "default_val": 0, "items": self._render_modes, "tooltip": "Select a rendering mode", "on_clicked_fn": self._on_render_mode_select, } self._render_dropdown = dropdown_builder(**dict) dict = { "label": "Configure Training", "count": 3, "text": ["Resume from Checkpoint", "Test", "Evaluate"], "default_val": [False, False, False], "tooltip": [ "", "Resume training from checkpoint", "Play a trained policy", "Evaluate a policy during training", ], "on_clicked_fn": [ self._on_resume_cb_update, self._on_test_cb_update, self._on_evaluate_cb_update, ], } self._cbs = multi_cb_builder(**dict) dict = { "label": "Load Checkpoint", "tooltip": "Enter path to checkpoint file", "on_clicked_fn": self._on_checkpoint_update, } self._checkpoint_str = str_builder(**dict) dict = { "label": "Train/Test", "count": 2, "text": ["Start", "Stop"], "tooltip": [ "", "Launch new training/inference run", "Terminate current training/inference run", ], "on_clicked_fn": [self._on_train, self._on_train_stop], } self._buttons = multi_btn_builder(**dict) return def create_task(self): headless = self._cfg.headless enable_viewport = "enable_cameras" in self._cfg.task.sim and self._cfg.task.sim.enable_cameras self._env = VecEnvRLGamesMT( headless=headless, sim_device=self._cfg.device_id, enable_livestream=self._cfg.enable_livestream, enable_viewport=enable_viewport, launch_simulation_app=False, ) self._task = initialize_task(self._cfg_dict, self._env, init_sim=False) self._task_initialized = True def _on_task_select(self, value): if self._task_initialized and value != self._task_name: self._task_changed = True self._task_initialized = False self._task_name = value self._parse_config(self._task_name) self._num_envs_int.set_value(self._cfg.task.env.numEnvs) self._update_task_file_paths(self._task_name) def _on_render_mode_select(self, value): if value == self._render_modes[0]: self._viewport.updates_enabled = True window = ui.Workspace.get_window("Viewport") window.visible = True if self._env: self._env._update_viewport = True self._env._render_mode = 0 elif value == self._render_modes[1]: self._viewport.updates_enabled = False window = ui.Workspace.get_window("Viewport") window.visible = False if self._env: self._env._update_viewport = False self._env._render_mode = 1 elif value == self._render_modes[2]: self._viewport.updates_enabled = False window = ui.Workspace.get_window("Viewport") window.visible = False if self._env: self._env._update_viewport = False self._env._render_mode = 2 def _on_render_cb_update(self, value): self._render = value print("updates 
enabled", value) self._viewport.updates_enabled = value if self._env: self._env._update_viewport = value if value: window = ui.Workspace.get_window("Viewport") window.visible = True else: window = ui.Workspace.get_window("Viewport") window.visible = False def _on_single_env_cb_update(self, value): visibility = "invisible" if value else "inherited" stage = omni.usd.get_context().get_stage() env_root = stage.GetPrimAtPath("/World/envs") if env_root.IsValid(): for i, p in enumerate(env_root.GetChildren()): p.GetAttribute("visibility").Set(visibility) if value: stage.GetPrimAtPath("/World/envs/env_0").GetAttribute("visibility").Set("inherited") env_pos = self._task._env_pos[0].cpu().numpy().tolist() camera_pos = [env_pos[0] + 10, env_pos[1] + 10, 3] camera_target = [env_pos[0], env_pos[1], env_pos[2]] else: camera_pos = [10, 10, 3] camera_target = [0, 0, 0] camera_state = ViewportCameraState("/OmniverseKit_Persp", get_active_viewport()) camera_state.set_position_world(Gf.Vec3d(*camera_pos), True) camera_state.set_target_world(Gf.Vec3d(*camera_target), True) def _on_test_cb_update(self, value): self._test = value if value is True and self._checkpoint_path.strip() == "": self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth") def _on_resume_cb_update(self, value): self._resume = value if value is True and self._checkpoint_path.strip() == "": self._checkpoint_str.set_value(f"runs/{self._task_name}/nn/{self._task_name}.pth") def _on_evaluate_cb_update(self, value): self._evaluate = value def _on_checkpoint_update(self, value): self._checkpoint_path = value.get_value_as_string() async def _on_load_world_async(self, use_existing_stage): # initialize task if not initialized if not self._task_initialized or not omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid(): self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int()) self.create_task() else: # update config self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int()) self._task.update_config(self._sim_config) # clear scene # self._env._world.scene.clear() self._env._world._sim_params = self._sim_config.get_physics_params() await self._env._world.initialize_simulation_context_async() set_camera_view(eye=[10, 10, 3], target=[0, 0, 0], camera_prim_path="/OmniverseKit_Persp") if not use_existing_stage: # clear scene self._env._world.scene.clear() # clear environments added to world omni.usd.get_context().get_stage().RemovePrim("/World/collisions") omni.usd.get_context().get_stage().RemovePrim("/World/envs") # create scene await self._env._world.reset_async_set_up_scene() # update num_envs in envs self._env.update_task_params() else: self._task.initialize_views(self._env._world.scene) def _on_load_world(self): # stop simulation before updating stage self._timeline.stop() asyncio.ensure_future(self._on_load_world_async(use_existing_stage=False)) def _on_train_stop(self): if self._task_initialized: asyncio.ensure_future(self._env._world.stop_async()) async def _on_train_async(self, overrides=None): try: # initialize task if not initialized print("task initialized:", self._task_initialized) if not self._task_initialized: # if this is the first launch of the extension, we do not want to re-create stage if stage already exists use_existing_stage = False if omni.usd.get_context().get_stage().GetPrimAtPath("/World/envs").IsValid(): use_existing_stage = True print(use_existing_stage) await self._on_load_world_async(use_existing_stage) # update config 
self._parse_config(task=self._task_name, num_envs=self._num_envs_int.get_value_as_int(), overrides=overrides) sim_config = SimConfig(self._cfg_dict) self._task.update_config(sim_config) cfg_dict = omegaconf_to_dict(self._cfg) # sets seed. if seed is -1 will pick a random one self._cfg.seed = set_seed(self._cfg.seed, torch_deterministic=self._cfg.torch_deterministic) cfg_dict["seed"] = self._cfg.seed self._checkpoint_path = self._checkpoint_str.get_value_as_string() if self._resume or self._test: self._cfg.checkpoint = self._checkpoint_path self._cfg.test = self._test self._cfg.evaluation = self._evaluate cfg_dict["checkpoint"] = self._cfg.checkpoint cfg_dict["test"] = self._cfg.test cfg_dict["evaluation"] = self._cfg.evaluation rlg_trainer = RLGTrainer(self._cfg, cfg_dict) if not rlg_trainer._bad_checkpoint: trainer = Trainer(rlg_trainer, self._env) await self._env._world.reset_async_no_set_up_scene() self._env._render_mode = self._render_dropdown.get_item_value_model().as_int await self._env.run(trainer) await omni.kit.app.get_app().next_update_async() except Exception as e: print(traceback.format_exc()) finally: self._is_training = False def _on_train(self): # stop simulation if still running self._timeline.stop() self._on_render_mode_select(self._render_modes[self._render_dropdown.get_item_value_model().as_int]) if not self._is_training: self._is_training = True asyncio.ensure_future(self._on_train_async()) return def _menu_callback(self): self._window.visible = not self._window.visible return def _on_window(self, status): return def on_shutdown(self): self._extra_frames = [] if self._menu_items is not None: self._sample_window_cleanup() self.shutdown_cleanup() global ext_instance ext_instance = None return def shutdown_cleanup(self): return def _sample_window_cleanup(self): remove_menu_items(self._menu_items, "Isaac Examples") self._window = None self._menu_items = None self._buttons = None self._load_env_button = None self._task_dropdown = None self._cbs = None self._checkpoint_str = None return def get_instance(): return ext_instance
22,236
Python
42.262646
155
0.533189
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/envs/vec_env_rlgames_mt.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
import torch
from omni.isaac.gym.vec_env import TaskStopException, VecEnvMT

from .vec_env_rlgames import VecEnvRLGames


# VecEnv Wrapper for RL training
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
    def _parse_data(self, data):
        self._obs = data["obs"]
        self._rew = data["rew"].to(self._task.rl_device)
        self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._resets = data["reset"].to(self._task.rl_device)
        self._extras = data["extras"]

    def step(self, actions):
        if self._stop:
            raise TaskStopException()

        if self._task.randomize_actions:
            actions = self._task._dr_randomizer.apply_actions_randomization(
                actions=actions, reset_buf=self._task.reset_buf
            )

        actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)

        self.send_actions(actions)
        data = self.get_data()

        if self._task.randomize_observations:
            self._obs = self._task._dr_randomizer.apply_observations_randomization(
                observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf
            )

        self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)

        obs_dict = {}
        obs_dict["obs"] = self._obs
        obs_dict["states"] = self._states

        return obs_dict, self._rew, self._resets, self._extras
3,109
Python
42.194444
118
0.705693
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/envs/vec_env_rlgames.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from datetime import datetime

import numpy as np
import torch
from omni.isaac.gym.vec_env import VecEnvBase


# VecEnv Wrapper for RL training
class VecEnvRLGames(VecEnvBase):
    def _process_data(self):
        self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._rew = self._rew.to(self._task.rl_device)
        self._states = torch.clamp(self._states, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._resets = self._resets.to(self._task.rl_device)
        self._extras = self._extras

    def set_task(self, task, backend="numpy", sim_params=None, init_sim=True, rendering_dt=1.0 / 60.0) -> None:
        super().set_task(task, backend, sim_params, init_sim, rendering_dt)

        self.num_states = self._task.num_states
        self.state_space = self._task.state_space

    def step(self, actions):
        if self._task.randomize_actions:
            actions = self._task._dr_randomizer.apply_actions_randomization(
                actions=actions, reset_buf=self._task.reset_buf
            )

        actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)

        self._task.pre_physics_step(actions)

        if (self.sim_frame_count + self._task.control_frequency_inv) % self._task.rendering_interval == 0:
            for _ in range(self._task.control_frequency_inv - 1):
                self._world.step(render=False)
                self.sim_frame_count += 1
            self._world.step(render=self._render)
            self.sim_frame_count += 1
        else:
            for _ in range(self._task.control_frequency_inv):
                self._world.step(render=False)
                self.sim_frame_count += 1

        self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()

        if self._task.randomize_observations:
            self._obs = self._task._dr_randomizer.apply_observations_randomization(
                observations=self._obs.to(device=self._task.rl_device), reset_buf=self._task.reset_buf
            )

        self._states = self._task.get_states()
        self._process_data()

        obs_dict = {"obs": self._obs, "states": self._states}

        return obs_dict, self._rew, self._resets, self._extras

    def reset(self, seed=None, options=None):
        """Resets the task and applies default zero actions to recompute observations and states."""
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"[{now}] Running RL reset")

        self._task.reset()
        actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.rl_device)
        obs_dict, _, _, _ = self.step(actions)

        return obs_dict
4,328
Python
43.628866
116
0.677218
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/allegro_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math

import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask

from omniisaacgymenvs.robots.articulations.allegro_hand import AllegroHand
from omniisaacgymenvs.robots.articulations.views.allegro_hand_view import AllegroHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask


class AllegroHandTask(InHandManipulationTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:
        self.update_config(sim_config)
        InHandManipulationTask.__init__(self, name=name, env=env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self.object_type = self._task_cfg["env"]["objectType"]
        assert self.object_type in ["block"]

        self.obs_type = self._task_cfg["env"]["observationType"]
        if not (self.obs_type in ["full_no_vel", "full"]):
            raise Exception("Unknown type of observations!\nobservationType should be one of: [full_no_vel, full]")
        print("Obs type:", self.obs_type)

        self.num_obs_dict = {
            "full_no_vel": 50,
            "full": 72,
        }

        self.object_scale = torch.tensor([1.0, 1.0, 1.0])

        self._num_observations = self.num_obs_dict[self.obs_type]
        self._num_actions = 16
        self._num_states = 0

        InHandManipulationTask.update_config(self)

    def get_starting_positions(self):
        self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
        self.hand_start_orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782], device=self.device)
        self.pose_dy, self.pose_dz = -0.2, 0.06

    def get_hand(self):
        allegro_hand = AllegroHand(
            prim_path=self.default_zero_env_path + "/allegro_hand",
            name="allegro_hand",
            translation=self.hand_start_translation,
            orientation=self.hand_start_orientation,
        )
        self._sim_config.apply_articulation_settings(
            "allegro_hand",
            get_prim_at_path(allegro_hand.prim_path),
            self._sim_config.parse_actor_config("allegro_hand"),
        )
        allegro_hand_prim = self._stage.GetPrimAtPath(allegro_hand.prim_path)
        allegro_hand.set_allegro_hand_properties(stage=self._stage, allegro_hand_prim=allegro_hand_prim)
        allegro_hand.set_motor_control_mode(
            stage=self._stage, allegro_hand_path=self.default_zero_env_path + "/allegro_hand"
        )

    def get_hand_view(self, scene):
        return AllegroHandView(prim_paths_expr="/World/envs/.*/allegro_hand", name="allegro_hand_view")

    def get_observations(self):
        self.get_object_goal_observations()

        self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
        self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)

        if self.obs_type == "full_no_vel":
            self.compute_full_observations(True)
        elif self.obs_type == "full":
            self.compute_full_observations()
        else:
            print("Unknown observations type!")

        observations = {self._hands.name: {"obs_buf": self.obs_buf}}
        return observations

    def compute_full_observations(self, no_vel=False):
        if no_vel:
            self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
                self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
            )

            self.obs_buf[:, 16:19] = self.object_pos
            self.obs_buf[:, 19:23] = self.object_rot
            self.obs_buf[:, 23:26] = self.goal_pos
            self.obs_buf[:, 26:30] = self.goal_rot
            self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 34:50] = self.actions
        else:
            self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
                self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
            )
            self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel

            self.obs_buf[:, 32:35] = self.object_pos
            self.obs_buf[:, 35:39] = self.object_rot
            self.obs_buf[:, 39:42] = self.object_linvel
            self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel

            self.obs_buf[:, 45:48] = self.goal_pos
            self.obs_buf[:, 48:52] = self.goal_rot
            self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 56:72] = self.actions
6,329
Python
42.655172
115
0.658872
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ball_balance.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.maths import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.balance_bot import BalanceBot from pxr import PhysxSchema class BallBalanceTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 12 + 12 self._num_actions = 3 self.anchored = False RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._dt = self._task_cfg["sim"]["dt"] self._table_position = torch.tensor([0, 0, 0.56]) self._ball_position = torch.tensor([0.0, 0.0, 1.0]) self._ball_radius = 0.1 self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] def set_up_scene(self, scene) -> None: self.get_balance_table() self.add_ball() super().set_up_scene(scene, replicate_physics=False) self.set_up_table_anchors() self._balance_bots = ArticulationView( prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False ) scene.add(self._balance_bots) self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False ) scene.add(self._balls) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("balance_bot_view"): scene.remove_object("balance_bot_view", registry_only=True) if 
scene.object_exists("ball_view"): scene.remove_object("ball_view", registry_only=True) self._balance_bots = ArticulationView( prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False ) scene.add(self._balance_bots) self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False ) scene.add(self._balls) def get_balance_table(self): balance_table = BalanceBot( prim_path=self.default_zero_env_path + "/BalanceBot", name="BalanceBot", translation=self._table_position ) self._sim_config.apply_articulation_settings( "table", get_prim_at_path(balance_table.prim_path), self._sim_config.parse_actor_config("table") ) def add_ball(self): ball = DynamicSphere( prim_path=self.default_zero_env_path + "/Ball/ball", translation=self._ball_position, name="ball_0", radius=self._ball_radius, color=torch.tensor([0.9, 0.6, 0.2]), ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) def set_up_table_anchors(self): from pxr import Gf height = 0.08 stage = get_current_stage() for i in range(self._num_envs): base_path = f"{self.default_base_env_path}/env_{i}/BalanceBot" for j, leg_offset in enumerate([(0.4, 0, height), (-0.2, 0.34641, 0), (-0.2, -0.34641, 0)]): # fix the legs to ground leg_path = f"{base_path}/lower_leg{j}" ground_joint_path = leg_path + "_ground" env_pos = stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}").GetAttribute("xformOp:translate").Get() anchor_pos = env_pos + Gf.Vec3d(*leg_offset) self.fix_to_ground(stage, ground_joint_path, leg_path, anchor_pos) def fix_to_ground(self, stage, joint_path, prim_path, anchor_pos): from pxr import UsdPhysics, Gf # D6 fixed joint d6FixedJoint = UsdPhysics.Joint.Define(stage, joint_path) d6FixedJoint.CreateBody0Rel().SetTargets(["/World/defaultGroundPlane"]) d6FixedJoint.CreateBody1Rel().SetTargets([prim_path]) d6FixedJoint.CreateLocalPos0Attr().Set(anchor_pos) d6FixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0))) d6FixedJoint.CreateLocalPos1Attr().Set(Gf.Vec3f(0, 0, 0.18)) d6FixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0))) # lock all DOF (lock - low is greater than high) d6Prim = stage.GetPrimAtPath(joint_path) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) def get_observations(self) -> dict: ball_positions, ball_orientations = self._balls.get_world_poses(clone=False) ball_positions = ball_positions[:, 0:3] - self._env_pos ball_velocities = self._balls.get_velocities(clone=False) ball_linvels = ball_velocities[:, 0:3] ball_angvels = ball_velocities[:, 3:6] dof_pos = self._balance_bots.get_joint_positions(clone=False) dof_vel = self._balance_bots.get_joint_velocities(clone=False) sensor_force_torques = self._balance_bots.get_measured_joint_forces(joint_indices=self._sensor_indices) # (num_envs, num_sensors, 6) self.obs_buf[..., 0:3] = dof_pos[..., self.actuated_dof_indices] self.obs_buf[..., 3:6] = dof_vel[..., self.actuated_dof_indices] self.obs_buf[..., 6:9] = ball_positions self.obs_buf[..., 9:12] = ball_linvels self.obs_buf[..., 12:15] = sensor_force_torques[..., 0] / 20.0 self.obs_buf[..., 15:18] = sensor_force_torques[..., 3] / 20.0 
self.obs_buf[..., 18:21] = sensor_force_torques[..., 4] / 20.0 self.obs_buf[..., 21:24] = sensor_force_torques[..., 5] / 20.0 self.ball_positions = ball_positions self.ball_linvels = ball_linvels observations = {"ball_balance": {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) # update position targets from actions self.dof_position_targets[..., self.actuated_dof_indices] += ( self._dt * self._action_speed_scale * actions.to(self.device) ) self.dof_position_targets[:] = tensor_clamp( self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits ) # reset position targets for reset envs self.dof_position_targets[reset_env_ids] = 0 self._balance_bots.set_joint_position_targets(self.dof_position_targets) # .clone()) def reset_idx(self, env_ids): num_resets = len(env_ids) env_ids_32 = env_ids.type(torch.int32) env_ids_64 = env_ids.type(torch.int64) min_d = 0.001 # min horizontal dist from origin max_d = 0.4 # max horizontal dist from origin min_height = 1.0 max_height = 2.0 min_horizontal_speed = 0 max_horizontal_speed = 2 dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device) dirs = torch_random_dir_2((num_resets, 1), self._device) hpos = dists * dirs speedscales = (dists - min_d) / (max_d - min_d) hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device) hvels = -speedscales * hspeeds * dirs vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze() ball_pos = self.initial_ball_pos.clone() ball_rot = self.initial_ball_rot.clone() # position ball_pos[env_ids_64, 0:2] += hpos[..., 0:2] ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze() # rotation ball_rot[env_ids_64, 0] = 1 ball_rot[env_ids_64, 1:] = 0 ball_velocities = self.initial_ball_velocities.clone() # linear ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2] ball_velocities[env_ids_64, 2] = vspeeds # angular ball_velocities[env_ids_64, 3:6] = 0 # reset root state for bbots and balls in selected envs self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32) self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32) # reset root pose and velocity self._balance_bots.set_world_poses( self.initial_bot_pos[env_ids_64].clone(), self.initial_bot_rot[env_ids_64].clone(), indices=env_ids_32 ) self._balance_bots.set_velocities(self.initial_bot_velocities[env_ids_64].clone(), indices=env_ids_32) # reset DOF states for bbots in selected envs self._balance_bots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): dof_limits = self._balance_bots.get_dof_limits() self.bbot_dof_lower_limits, self.bbot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device)) self.initial_dof_positions = self._balance_bots.get_joint_positions() self.initial_bot_pos, self.initial_bot_rot = self._balance_bots.get_world_poses() # self.initial_bot_pos[..., 2] = 0.559 # tray_height self.initial_bot_velocities = self._balance_bots.get_velocities() self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses() self.initial_ball_velocities = self._balls.get_velocities() self.dof_position_targets = torch.zeros( 
(self.num_envs, self._balance_bots.num_dof), dtype=torch.float32, device=self._device, requires_grad=False ) actuated_joints = ["lower_leg0", "lower_leg1", "lower_leg2"] self.actuated_dof_indices = torch.tensor( [self._balance_bots._dof_indices[j] for j in actuated_joints], device=self._device, dtype=torch.long ) force_links = ["upper_leg0", "upper_leg1", "upper_leg2"] self._sensor_indices = torch.tensor( [self._balance_bots._body_indices[j] for j in force_links], device=self._device, dtype=torch.long ) def calculate_metrics(self) -> None: ball_dist = torch.sqrt( self.ball_positions[..., 0] * self.ball_positions[..., 0] + (self.ball_positions[..., 2] - 0.7) * (self.ball_positions[..., 2] - 0.7) + (self.ball_positions[..., 1]) * self.ball_positions[..., 1] ) ball_speed = torch.sqrt( self.ball_linvels[..., 0] * self.ball_linvels[..., 0] + self.ball_linvels[..., 1] * self.ball_linvels[..., 1] + self.ball_linvels[..., 2] * self.ball_linvels[..., 2] ) pos_reward = 1.0 / (1.0 + ball_dist) speed_reward = 1.0 / (1.0 + ball_speed) self.rew_buf[:] = pos_reward * speed_reward def is_done(self) -> None: reset = torch.where( self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf ) reset = torch.where( self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset ) self.reset_buf[:] = reset
13,958
Python
44.174757
140
0.630391
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole_camera.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from gym import spaces
import numpy as np
import torch

from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage

from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole


class CartpoleCameraTask(CartpoleTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:

        self.update_config(sim_config)
        self._max_episode_length = 500

        self._num_observations = 4
        self._num_actions = 1

        # use multi-dimensional observation for camera RGB
        self.observation_space = spaces.Box(
            np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * -np.Inf,
            np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * np.Inf)

        RLTask.__init__(self, name, env)

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])

        self._reset_dist = self._task_cfg["env"]["resetDist"]
        self._max_push_effort = self._task_cfg["env"]["maxEffort"]

        self.camera_type = self._task_cfg["env"].get("cameraType", 'rgb')
        self.camera_width = self._task_cfg["env"]["cameraWidth"]
        self.camera_height = self._task_cfg["env"]["cameraHeight"]
        self.camera_channels = 3
        self._export_images = self._task_cfg["env"]["exportImages"]

    def cleanup(self) -> None:
        # initialize remaining buffers
        RLTask.cleanup(self)

        # override observation buffer for camera data
        self.obs_buf = torch.zeros(
            (self.num_envs, self.camera_width, self.camera_height, 3), device=self.device, dtype=torch.float)

    def set_up_scene(self, scene) -> None:
        self.get_cartpole()
        RLTask.set_up_scene(self, scene)

        # start replicator to capture image data
        self.rep.orchestrator._orchestrator._is_started = True

        # set up cameras
        self.render_products = []
        env_pos = self._env_pos.cpu()
        for i in range(self._num_envs):
            camera = self.rep.create.camera(
                position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55))
            render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height))
            self.render_products.append(render_product)

        # initialize pytorch writer for vectorized collection
        self.pytorch_listener = self.PytorchListener()
        self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
        self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
        self.pytorch_writer.attach(self.render_products)

        self._cartpoles = ArticulationView(
            prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
        )
        scene.add(self._cartpoles)
        return

    def get_observations(self) -> dict:
        dof_pos = self._cartpoles.get_joint_positions(clone=False)
        dof_vel = self._cartpoles.get_joint_velocities(clone=False)

        self.cart_pos = dof_pos[:, self._cart_dof_idx]
        self.cart_vel = dof_vel[:, self._cart_dof_idx]
        self.pole_pos = dof_pos[:, self._pole_dof_idx]
        self.pole_vel = dof_vel[:, self._pole_dof_idx]

        # retrieve RGB data from all render products
        images = self.pytorch_listener.get_rgb_data()
        if images is not None:
            if self._export_images:
                from torchvision.utils import save_image, make_grid
                img = images / 255
                # make_grid takes `nrow` (images per row); the original `nrows` keyword is not a valid argument
                save_image(make_grid(img, nrow=2), 'cartpole_export.png')

            self.obs_buf = torch.swapaxes(images, 1, 3).clone().float() / 255.0
        else:
            print("Image tensor is NONE!")

        return self.obs_buf
5,824
Python
41.518248
119
0.67342
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/anymal_terrain.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.anymal import Anymal from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import * from omniisaacgymenvs.utils.terrain_utils.terrain_utils import * from pxr import UsdLux, UsdPhysics class AnymalTerrainTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.height_samples = None self.custom_origins = False self.init_done = False self._env_spacing = 0.0 self._num_observations = 188 self._num_actions = 12 self.update_config(sim_config) RLTask.__init__(self, name, env) self.height_points = self.init_height_points() self.measured_heights = None # joint positions offsets self.default_dof_pos = torch.zeros( (self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False ) # reward episode sums torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = { "lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(), "orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(), "air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros(), } return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config # normalization self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"] 
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"] self.height_meas_scale = self._task_cfg["env"]["learn"]["heightMeasurementScale"] self.action_scale = self._task_cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["termination"] = self._task_cfg["env"]["learn"]["terminalReward"] self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"] self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["ang_vel_xy"] = self._task_cfg["env"]["learn"]["angularVelocityXYRewardScale"] self.rew_scales["orient"] = self._task_cfg["env"]["learn"]["orientationRewardScale"] self.rew_scales["torque"] = self._task_cfg["env"]["learn"]["torqueRewardScale"] self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["base_height"] = self._task_cfg["env"]["learn"]["baseHeightRewardScale"] self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["hip"] = self._task_cfg["env"]["learn"]["hipRewardScale"] self.rew_scales["fallen_over"] = self._task_cfg["env"]["learn"]["fallenOverRewardScale"] # command ranges self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"] # base init state pos = self._task_cfg["env"]["baseInitState"]["pos"] rot = self._task_cfg["env"]["baseInitState"]["rot"] v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"] v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"] self.base_init_state = pos + rot + v_lin + v_ang # default joint positions self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"] # other self.decimation = self._task_cfg["env"]["control"]["decimation"] self.dt = self.decimation * self._task_cfg["sim"]["dt"] self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) self.push_interval = int(self._task_cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5) self.Kp = self._task_cfg["env"]["control"]["stiffness"] self.Kd = self._task_cfg["env"]["control"]["damping"] self.curriculum = self._task_cfg["env"]["terrain"]["curriculum"] self.base_threshold = 0.2 self.knee_threshold = 0.1 for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt self._num_envs = self._task_cfg["env"]["numEnvs"] self._task_cfg["sim"]["default_physics_material"]["static_friction"] = self._task_cfg["env"]["terrain"][ "staticFriction" ] self._task_cfg["sim"]["default_physics_material"]["dynamic_friction"] = self._task_cfg["env"]["terrain"][ "dynamicFriction" ] self._task_cfg["sim"]["default_physics_material"]["restitution"] = self._task_cfg["env"]["terrain"][ "restitution" ] self._task_cfg["sim"]["add_ground_plane"] = False def _get_noise_scale_vec(self, cfg): noise_vec = torch.zeros_like(self.obs_buf[0]) self.add_noise = self._task_cfg["env"]["learn"]["addNoise"] noise_level = self._task_cfg["env"]["learn"]["noiseLevel"] noise_vec[:3] = self._task_cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale noise_vec[3:6] = 
self._task_cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale noise_vec[6:9] = self._task_cfg["env"]["learn"]["gravityNoise"] * noise_level noise_vec[9:12] = 0.0 # commands noise_vec[12:24] = self._task_cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale noise_vec[24:36] = self._task_cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale noise_vec[36:176] = ( self._task_cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale ) noise_vec[176:188] = 0.0 # previous actions return noise_vec def init_height_points(self): # 1mx1.6m rectangle (without center line) y = 0.1 * torch.tensor( [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False ) # 10-50cm on each side x = 0.1 * torch.tensor( [-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False ) # 20-80cm on each side grid_x, grid_y = torch.meshgrid(x, y, indexing='ij') self.num_height_points = grid_x.numel() points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False) points[:, :, 0] = grid_x.flatten() points[:, :, 1] = grid_y.flatten() return points def _create_trimesh(self, create_mesh=True): self.terrain = Terrain(self._task_cfg["env"]["terrain"], num_robots=self.num_envs) vertices = self.terrain.vertices triangles = self.terrain.triangles position = torch.tensor([-self.terrain.border_size, -self.terrain.border_size, 0.0]) if create_mesh: add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position) self.height_samples = ( torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device) ) def set_up_scene(self, scene) -> None: self._stage = get_current_stage() self.get_terrain() self.get_anymal() super().set_up_scene(scene, collision_filter_global_paths=["/World/terrain"]) self._anymals = AnymalView( prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True ) scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) def initialize_views(self, scene): # initialize terrain variables even if we do not need to re-create the terrain mesh self.get_terrain(create_mesh=False) super().initialize_views(scene) if scene.object_exists("anymal_view"): scene.remove_object("anymal_view", registry_only=True) if scene.object_exists("knees_view"): scene.remove_object("knees_view", registry_only=True) if scene.object_exists("base_view"): scene.remove_object("base_view", registry_only=True) self._anymals = AnymalView( prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True ) scene.add(self._anymals) scene.add(self._anymals._knees) scene.add(self._anymals._base) def get_terrain(self, create_mesh=True): self.env_origins = torch.zeros((self.num_envs, 3), device=self.device, requires_grad=False) if not self.curriculum: self._task_cfg["env"]["terrain"]["maxInitMapLevel"] = self._task_cfg["env"]["terrain"]["numLevels"] - 1 self.terrain_levels = torch.randint( 0, self._task_cfg["env"]["terrain"]["maxInitMapLevel"] + 1, (self.num_envs,), device=self.device ) self.terrain_types = torch.randint( 0, self._task_cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device ) self._create_trimesh(create_mesh=create_mesh) self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float) def get_anymal(self): anymal_translation = torch.tensor([0.0, 0.0, 0.66]) 
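        # Quaternions here are scalar-first (w, x, y, z), as used throughout
        # Isaac Sim, so (1.0, 0.0, 0.0, 0.0) below is the identity rotation;
        # the 0.66 m z-offset above spawns the base just above the terrain.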
anymal_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) anymal = Anymal( prim_path=self.default_zero_env_path + "/anymal", name="anymal", translation=anymal_translation, orientation=anymal_orientation, ) self._sim_config.apply_articulation_settings( "anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("anymal") ) anymal.set_anymal_properties(self._stage, anymal.prim) anymal.prepare_contacts(self._stage, anymal.prim) self.dof_names = anymal.dof_names for i in range(self.num_actions): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle def post_reset(self): self.base_init_state = torch.tensor( self.base_init_state, dtype=torch.float, device=self.device, requires_grad=False ) self.timeout_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) # initialize some data used later on self.up_axis_idx = 2 self.common_step_counter = 0 self.extras = {} self.noise_scale_vec = self._get_noise_scale_vec(self._task_cfg) self.commands = torch.zeros( self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False ) # x vel, y vel, yaw vel, heading self.commands_scale = torch.tensor( [self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False, ) self.gravity_vec = torch.tensor( get_axis_params(-1.0, self.up_axis_idx), dtype=torch.float, device=self.device ).repeat((self.num_envs, 1)) self.forward_vec = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float, device=self.device).repeat( (self.num_envs, 1) ) self.torques = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.actions = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.last_actions = torch.zeros( self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False ) self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) self.last_dof_vel = torch.zeros((self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.num_envs): self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]] self.num_dof = self._anymals.num_dof self.dof_pos = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device) self.dof_vel = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device) self.base_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device) self.base_quat = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device) self.base_velocities = torch.zeros((self.num_envs, 6), dtype=torch.float, device=self.device) self.knee_pos = torch.zeros((self.num_envs * 4, 3), dtype=torch.float, device=self.device) self.knee_quat = torch.zeros((self.num_envs * 4, 4), dtype=torch.float, device=self.device) indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) self.init_done = True def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities self.update_terrain_level(env_ids) self.base_pos[env_ids] = self.base_init_state[0:3] 
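        # Reset placement: start from the nominal init pose, then the next two
        # lines shift it to this env's terrain origin and add up to +/-0.5 m of
        # random XY jitter.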
self.base_pos[env_ids, 0:3] += self.env_origins[env_ids] self.base_pos[env_ids, 0:2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device) self.base_quat[env_ids] = self.base_init_state[3:7] self.base_velocities[env_ids] = self.base_init_state[7:] self._anymals.set_world_poses( positions=self.base_pos[env_ids].clone(), orientations=self.base_quat[env_ids].clone(), indices=indices ) self._anymals.set_velocities(velocities=self.base_velocities[env_ids].clone(), indices=indices) self._anymals.set_joint_positions(positions=self.dof_pos[env_ids].clone(), indices=indices) self._anymals.set_joint_velocities(velocities=self.dof_vel[env_ids].clone(), indices=indices) self.commands[env_ids, 0] = torch_rand_float( self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids, 1] = torch_rand_float( self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids, 3] = torch_rand_float( self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device ).squeeze() self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze( 1 ) # set small commands to zero self.last_actions[env_ids] = 0.0 self.last_dof_vel[env_ids] = 0.0 self.feet_air_time[env_ids] = 0.0 self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): self.extras["episode"]["rew_" + key] = ( torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s ) self.episode_sums[key][env_ids] = 0.0 self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float()) def update_terrain_level(self, env_ids): if not self.init_done or not self.curriculum: # do not change on initial reset return root_pos, _ = self._anymals.get_world_poses(clone=False) distance = torch.norm(root_pos[env_ids, :2] - self.env_origins[env_ids, :2], dim=1) self.terrain_levels[env_ids] -= 1 * ( distance < torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s * 0.25 ) self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2) self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]] def refresh_dof_state_tensors(self): self.dof_pos = self._anymals.get_joint_positions(clone=False) self.dof_vel = self._anymals.get_joint_velocities(clone=False) def refresh_body_state_tensors(self): self.base_pos, self.base_quat = self._anymals.get_world_poses(clone=False) self.base_velocities = self._anymals.get_velocities(clone=False) self.knee_pos, self.knee_quat = self._anymals._knees.get_world_poses(clone=False) def pre_physics_step(self, actions): if not self._env._world.is_playing(): return self.actions = actions.clone().to(self.device) for i in range(self.decimation): if self._env._world.is_playing(): torques = torch.clip( self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos) - self.Kd * self.dof_vel, -80.0, 80.0, ) self._anymals.set_joint_efforts(torques) self.torques = torques SimulationContext.step(self._env._world, render=False) self.refresh_dof_state_tensors() def post_physics_step(self): self.progress_buf[:] += 1 if self._env._world.is_playing(): self.refresh_dof_state_tensors() self.refresh_body_state_tensors() self.common_step_counter += 1 if self.common_step_counter % self.push_interval == 
0: self.push_robots() # prepare quantities self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3]) self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6]) self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec) forward = quat_apply(self.base_quat, self.forward_vec) heading = torch.atan2(forward[:, 1], forward[:, 0]) self.commands[:, 2] = torch.clip(0.5 * wrap_to_pi(self.commands[:, 3] - heading), -1.0, 1.0) self.check_termination() self.get_states() self.calculate_metrics() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.get_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] return self.obs_buf, self.rew_buf, self.reset_buf, self.extras def push_robots(self): self.base_velocities[:, 0:2] = torch_rand_float( -1.0, 1.0, (self.num_envs, 2), device=self.device ) # lin vel x/y self._anymals.set_velocities(self.base_velocities) def check_termination(self): self.timeout_buf = torch.where( self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.timeout_buf), torch.zeros_like(self.timeout_buf), ) knee_contact = ( torch.norm(self._anymals._knees.get_net_contact_forces(clone=False).view(self._num_envs, 4, 3), dim=-1) > 1.0 ) self.has_fallen = (torch.norm(self._anymals._base.get_net_contact_forces(clone=False), dim=1) > 1.0) | ( torch.sum(knee_contact, dim=-1) > 1.0 ) self.reset_buf = self.has_fallen.clone() self.reset_buf = torch.where(self.timeout_buf.bool(), torch.ones_like(self.reset_buf), self.reset_buf) def calculate_metrics(self): # velocity tracking reward lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"] # other base velocity penalties rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"] rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"] # orientation penalty rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"] # base height penalty rew_base_height = torch.square(self.base_pos[:, 2] - 0.52) * self.rew_scales["base_height"] # torque penalty rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"] # joint acc penalty rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"] # fallen over penalty rew_fallen_over = self.has_fallen * self.rew_scales["fallen_over"] # action rate penalty rew_action_rate = ( torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"] ) # cosmetic penalty for hip motion rew_hip = ( torch.sum(torch.abs(self.dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["hip"] ) # total reward self.rew_buf = ( rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height + rew_torque + rew_joint_acc + rew_action_rate + rew_hip + rew_fallen_over ) self.rew_buf = torch.clip(self.rew_buf, min=0.0, max=None) # add termination reward self.rew_buf += self.rew_scales["termination"] * self.reset_buf * 
~self.timeout_buf # log episode reward sums self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy self.episode_sums["ang_vel_z"] += rew_ang_vel_z self.episode_sums["lin_vel_z"] += rew_lin_vel_z self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy self.episode_sums["orient"] += rew_orient self.episode_sums["torques"] += rew_torque self.episode_sums["joint_acc"] += rew_joint_acc self.episode_sums["action_rate"] += rew_action_rate self.episode_sums["base_height"] += rew_base_height self.episode_sums["hip"] += rew_hip def get_observations(self): self.measured_heights = self.get_heights() heights = ( torch.clip(self.base_pos[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.0) * self.height_meas_scale ) self.obs_buf = torch.cat( ( self.base_lin_vel * self.lin_vel_scale, self.base_ang_vel * self.ang_vel_scale, self.projected_gravity, self.commands[:, :3] * self.commands_scale, self.dof_pos * self.dof_pos_scale, self.dof_vel * self.dof_vel_scale, heights, self.actions, ), dim=-1, ) def get_ground_heights_below_knees(self): points = self.knee_pos.reshape(self.num_envs, 4, 3) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale def get_ground_heights_below_base(self): points = self.base_pos.reshape(self.num_envs, 1, 3) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale def get_heights(self, env_ids=None): if env_ids: points = quat_apply_yaw( self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids] ) + (self.base_pos[env_ids, 0:3]).unsqueeze(1) else: points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + ( self.base_pos[:, 0:3] ).unsqueeze(1) points += self.terrain.border_size points = (points / self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0] - 2) py = torch.clip(py, 0, self.height_samples.shape[1] - 2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px + 1, py + 1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale @torch.jit.script def quat_apply_yaw(quat, vec): quat_yaw = quat.clone().view(-1, 4) quat_yaw[:, 1:3] = 0.0 quat_yaw = normalize(quat_yaw) return quat_apply(quat_yaw, vec) @torch.jit.script def wrap_to_pi(angles): angles %= 2 * np.pi angles -= 2 * np.pi * (angles > np.pi) return angles def get_axis_params(value, axis_idx, x_value=0.0, dtype=float, n_dims=3): """construct arguments to `Vec` according to axis index.""" zs = np.zeros((n_dims,)) assert axis_idx < n_dims, "the axis dim should be within the vector dimensions" zs[axis_idx] = 1.0 params = np.where(zs == 1.0, value, zs) params[0] = x_value return list(params.astype(dtype))
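

# A minimal, self-contained sanity check of the two module-level helpers above
# (a sketch only: it assumes nothing beyond the torch/numpy imports at the top
# of this file and never runs when the module is imported for training).
if __name__ == "__main__":
    angles = torch.tensor([3.5, -3.5, 0.0])
    print(wrap_to_pi(angles))  # each angle folded into (-pi, pi]
    print(get_axis_params(-1.0, 2))  # [0.0, 0.0, -1.0]: unit gravity along -z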
29337
Python
45.568254
120
0.609128
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shadow_hand.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import torch from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.shadow_hand import ShadowHand from omniisaacgymenvs.robots.articulations.views.shadow_hand_view import ShadowHandView from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask class ShadowHandTask(InHandManipulationTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) InHandManipulationTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self.object_type = self._task_cfg["env"]["objectType"] assert self.object_type in ["block"] self.obs_type = self._task_cfg["env"]["observationType"] if not (self.obs_type in ["openai", "full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]" ) print("Obs type:", self.obs_type) self.num_obs_dict = { "openai": 42, "full_no_vel": 77, "full": 157, "full_state": 187, } self.asymmetric_obs = self._task_cfg["env"]["asymmetric_observations"] self.use_vel_obs = False self.fingertip_obs = True self.fingertips = [ "robot0:ffdistal", "robot0:mfdistal", "robot0:rfdistal", "robot0:lfdistal", "robot0:thdistal", ] self.num_fingertips = len(self.fingertips) self.object_scale = torch.tensor([1.0, 1.0, 1.0]) self.force_torque_obs_scale = 10.0 num_states = 0 if self.asymmetric_obs: num_states = 187 self._num_observations = self.num_obs_dict[self.obs_type] self._num_actions = 20 self._num_states = num_states InHandManipulationTask.update_config(self) def get_starting_positions(self): self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device) self.hand_start_orientation = torch.tensor([1.0, 0.0, 0.0, 
0.0], device=self.device)
        self.pose_dy, self.pose_dz = -0.39, 0.10

    def get_hand(self):
        shadow_hand = ShadowHand(
            prim_path=self.default_zero_env_path + "/shadow_hand",
            name="shadow_hand",
            translation=self.hand_start_translation,
            orientation=self.hand_start_orientation,
        )
        self._sim_config.apply_articulation_settings(
            "shadow_hand",
            get_prim_at_path(shadow_hand.prim_path),
            self._sim_config.parse_actor_config("shadow_hand"),
        )
        shadow_hand.set_shadow_hand_properties(stage=self._stage, shadow_hand_prim=shadow_hand.prim)
        shadow_hand.set_motor_control_mode(stage=self._stage, shadow_hand_path=shadow_hand.prim_path)

    def get_hand_view(self, scene):
        hand_view = ShadowHandView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view")
        scene.add(hand_view._fingers)
        return hand_view

    def get_observations(self):
        self.get_object_goal_observations()

        self.fingertip_pos, self.fingertip_rot = self._hands._fingers.get_world_poses(clone=False)
        self.fingertip_pos -= self._env_pos.repeat((1, self.num_fingertips)).reshape(
            self.num_envs * self.num_fingertips, 3
        )
        self.fingertip_velocities = self._hands._fingers.get_velocities(clone=False)

        self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
        self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)

        if self.obs_type == "full_state" or self.asymmetric_obs:
            self.vec_sensor_tensor = self._hands.get_measured_joint_forces(
                joint_indices=self._hands._sensor_indices
            ).view(self._num_envs, -1)

        if self.obs_type == "openai":
            self.compute_fingertip_observations(True)
        elif self.obs_type == "full_no_vel":
            self.compute_full_observations(True)
        elif self.obs_type == "full":
            self.compute_full_observations()
        elif self.obs_type == "full_state":
            self.compute_full_state(False)
        else:
            print("Unknown observations type!")

        if self.asymmetric_obs:
            self.compute_full_state(True)

        observations = {self._hands.name: {"obs_buf": self.obs_buf}}
        return observations

    def compute_fingertip_observations(self, no_vel=False):
        if no_vel:
            # Per https://arxiv.org/pdf/1808.00177.pdf Table 2
            #   Fingertip positions
            #   Object Position, but not orientation
            #   Relative target orientation

            # 3*self.num_fingertips = 15
            self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15)
            self.obs_buf[:, 15:18] = self.object_pos
            self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 22:42] = self.actions
        else:
            # 13*self.num_fingertips = 65 entries per fingertip block:
            # position (3), rotation (4), linear/angular velocity (6)
            self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
            self.obs_buf[:, 15:35] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
            self.obs_buf[:, 35:65] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)

            self.obs_buf[:, 65:68] = self.object_pos
            self.obs_buf[:, 68:72] = self.object_rot
            self.obs_buf[:, 72:75] = self.object_linvel
            self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel

            self.obs_buf[:, 78:81] = self.goal_pos
            self.obs_buf[:, 81:85] = self.goal_rot
            self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 89:109] = self.actions

    def compute_full_observations(self, no_vel=False):
        if no_vel:
            self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
                self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
            )

            # object pose occupies 24:27 (position) and 27:31 (rotation),
            # directly after the 24 DOF positions
            self.obs_buf[:, 24:27] = self.object_pos
            self.obs_buf[:, 27:31] = self.object_rot
            self.obs_buf[:, 31:34] = self.goal_pos
            self.obs_buf[:, 34:38] = self.goal_rot
            self.obs_buf[:, 38:42] =
quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[:, 57:77] = self.actions else: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel self.obs_buf[:, 48:51] = self.object_pos self.obs_buf[:, 51:55] = self.object_rot self.obs_buf[:, 55:58] = self.object_linvel self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 61:64] = self.goal_pos self.obs_buf[:, 64:68] = self.goal_rot self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # (7+6)*self.num_fingertips = 65 self.obs_buf[:, 72:87] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[:, 87:107] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.obs_buf[:, 107:137] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.obs_buf[:, 137:157] = self.actions def compute_full_state(self, asymm_obs=False): if asymm_obs: self.states_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.states_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel # self.states_buf[:, 2*self.num_hand_dofs:3*self.num_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 2 * self.num_hand_dofs # 48 self.states_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos self.states_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot self.states_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel self.states_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.states_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos self.states_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot self.states_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul( self.object_rot, quat_conjugate(self.goal_rot) ) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 72 self.states_buf[ :, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips ] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips ] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips ] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.states_buf[ :, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques ] = (self.force_torque_obs_scale * self.vec_sensor_tensor) # obs_end = 72 + 65 + 30 = 167 # obs_total = obs_end + num_actions = 187 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.states_buf[:, obs_end : obs_end + self.num_actions] = self.actions else: self.obs_buf[:, 0 : self.num_hand_dofs] = unscale( self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits ) self.obs_buf[:, self.num_hand_dofs : 2 * 
self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel self.obs_buf[:, 2 * self.num_hand_dofs : 3 * self.num_hand_dofs] = ( self.force_torque_obs_scale * self.dof_force_tensor ) obj_obs_start = 3 * self.num_hand_dofs # 48 self.obs_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos self.obs_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot self.obs_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel self.obs_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 61 self.obs_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos self.obs_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot self.obs_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul( self.object_rot, quat_conjugate(self.goal_rot) ) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 72 self.obs_buf[ :, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips ] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips ] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips ] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips) self.obs_buf[ :, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques ] = (self.force_torque_obs_scale * self.vec_sensor_tensor) # obs_end = 96 + 65 + 30 = 167 # obs_total = obs_end + num_actions = 187 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.obs_buf[:, obs_end : obs_end + self.num_actions] = self.actions
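

# A quick offset check for the 187-dim asymmetric "full_state" buffer assembled
# above (a sketch under the dimensions used in this file: 24 hand DOFs,
# 5 fingertips, 20 actions); it only verifies the slice arithmetic in the comments.
if __name__ == "__main__":
    num_hand_dofs, num_fingertips, num_actions = 24, 5, 20
    obj_obs_start = 2 * num_hand_dofs  # 48
    goal_obs_start = obj_obs_start + 13  # 61
    fingertip_obs_start = goal_obs_start + 11  # 72
    obs_end = fingertip_obs_start + 13 * num_fingertips + 6 * num_fingertips  # 167
    print(obs_end + num_actions)  # 187 == num_obs_dict["full_state"]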
15107
Python
48.211726
129
0.609188
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_cabinet.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import math import numpy as np import torch from omni.isaac.cloner import Cloner from omni.isaac.core.objects import DynamicCuboid from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.rotations import * from omni.isaac.core.utils.torch.transformations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.cabinet import Cabinet from omniisaacgymenvs.robots.articulations.franka import Franka from omniisaacgymenvs.robots.articulations.views.cabinet_view import CabinetView from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView from pxr import Usd, UsdGeom class FrankaCabinetTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self.distX_offset = 0.04 self.dt = 1 / 60.0 self._num_observations = 23 self._num_actions = 9 RLTask.__init__(self, name, env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.num_props = self._task_cfg["env"]["numProps"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] def set_up_scene(self, scene) -> None: self.get_franka() self.get_cabinet() if self.num_props > 0: self.get_props() super().set_up_scene(scene, filter_collisions=False) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view") scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._cabinets) scene.add(self._cabinets._drawers) if self.num_props > 0: self._props = RigidPrimView( prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False ) scene.add(self._props) self.init_data() return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("franka_view"): scene.remove_object("franka_view", registry_only=True) if scene.object_exists("hands_view"): 
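            # registry_only=True drops the stale view from the scene registry
            # without deleting its USD prims, so a fresh view over the same
            # prim paths can be re-registered below.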
scene.remove_object("hands_view", registry_only=True) if scene.object_exists("lfingers_view"): scene.remove_object("lfingers_view", registry_only=True) if scene.object_exists("rfingers_view"): scene.remove_object("rfingers_view", registry_only=True) if scene.object_exists("cabinet_view"): scene.remove_object("cabinet_view", registry_only=True) if scene.object_exists("drawers_view"): scene.remove_object("drawers_view", registry_only=True) if scene.object_exists("prop_view"): scene.remove_object("prop_view", registry_only=True) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view") scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._cabinets) scene.add(self._cabinets._drawers) if self.num_props > 0: self._props = RigidPrimView( prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False ) scene.add(self._props) self.init_data() def get_franka(self): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka") self._sim_config.apply_articulation_settings( "franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka") ) def get_cabinet(self): cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet") self._sim_config.apply_articulation_settings( "cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet") ) def get_props(self): prop_cloner = Cloner() drawer_pos = torch.tensor([0.0515, 0.0, 0.7172]) prop_color = torch.tensor([0.2, 0.4, 0.6]) props_per_row = int(math.ceil(math.sqrt(self.num_props))) prop_size = 0.08 prop_spacing = 0.09 xmin = -0.5 * prop_spacing * (props_per_row - 1) zmin = -0.5 * prop_spacing * (props_per_row - 1) prop_count = 0 prop_pos = [] for j in range(props_per_row): prop_up = zmin + j * prop_spacing for k in range(props_per_row): if prop_count >= self.num_props: break propx = xmin + k * prop_spacing prop_pos.append([propx, prop_up, 0.0]) prop_count += 1 prop = DynamicCuboid( prim_path=self.default_zero_env_path + "/prop/prop_0", name="prop", color=prop_color, size=prop_size, density=100.0, ) self._sim_config.apply_articulation_settings( "prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop") ) prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)] prop_cloner.clone( source_prim_path=self.default_zero_env_path + "/prop/prop_0", prim_paths=prop_paths, positions=np.array(prop_pos) + drawer_pos.numpy(), replicate_physics=False, ) def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device, ) lfinger_pose = get_env_local_pose( self._env_pos[0], 
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")),
            self._device,
        )
        rfinger_pose = get_env_local_pose(
            self._env_pos[0],
            UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")),
            self._device,
        )

        finger_pose = torch.zeros(7, device=self._device)
        finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
        finger_pose[3:7] = lfinger_pose[3:7]
        hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3])

        grasp_pose_axis = 1
        franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(
            hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]
        )
        franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
        self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
        self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))

        drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device)
        self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1))
        self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1))

        self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
            (self._num_envs, 1)
        )
        self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat(
            (self._num_envs, 1)
        )
        self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat(
            (self._num_envs, 1)
        )
        self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
            (self._num_envs, 1)
        )

        self.franka_default_dof_pos = torch.tensor(
            [1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
        )

        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)

    def get_observations(self) -> dict:
        hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
        drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False)
        franka_dof_pos = self._frankas.get_joint_positions(clone=False)
        franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
        self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False)
        self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False)
        self.franka_dof_pos = franka_dof_pos

        (
            self.franka_grasp_rot,
            self.franka_grasp_pos,
            self.drawer_grasp_rot,
            self.drawer_grasp_pos,
        ) = self.compute_grasp_transforms(
            hand_rot,
            hand_pos,
            self.franka_local_grasp_rot,
            self.franka_local_grasp_pos,
            drawer_rot,
            drawer_pos,
            self.drawer_local_grasp_rot,
            self.drawer_local_grasp_pos,
        )

        self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        # Right-finger poses come from the dedicated right-finger view.
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)

        dof_pos_scaled = (
            2.0
            * (franka_dof_pos - self.franka_dof_lower_limits)
            / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
            - 1.0
        )
        to_target = self.drawer_grasp_pos - self.franka_grasp_pos
        self.obs_buf = torch.cat(
            (
                dof_pos_scaled,
                franka_dof_vel * self.dof_vel_scale,
                to_target,
                self.cabinet_dof_pos[:, 3].unsqueeze(-1),
                self.cabinet_dof_vel[:, 3].unsqueeze(-1),
            ),
            dim=-1,
        )

        observations = {self._frankas.name: {"obs_buf": self.obs_buf}}
        return observations

    def pre_physics_step(self, actions) -> None:
        if not self._env._world.is_playing():
            return

        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)

        self.actions =
actions.clone().to(self._device) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos # reset cabinet self._cabinets.set_joint_positions( torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices ) self._cabinets.set_joint_velocities( torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices ) # reset props if self.num_props > 0: self._props.set_world_poses( self.default_prop_pos[self.prop_indices[env_ids].flatten()], self.default_prop_rot[self.prop_indices[env_ids].flatten()], self.prop_indices[env_ids].flatten().to(torch.int32), ) self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) if self.num_props > 0: self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses() self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view( self._num_envs, self.num_props ) # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: self.rew_buf[:] = self.compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos, self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot, self.franka_lfinger_pos, self.franka_rfinger_pos, self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis, self._num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale, self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self._max_episode_length, self.franka_dof_pos, self.finger_close_reward_scale, ) def is_done(self) -> None: # reset if drawer is open or max length reached self.reset_buf = 
torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf) self.reset_buf = torch.where( self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf ) def compute_grasp_transforms( self, hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos, drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos, ): global_franka_rot, global_franka_pos = tf_combine( hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos ) global_drawer_rot, global_drawer_pos = tf_combine( drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos ) return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos def compute_franka_reward( self, reset_buf, progress_buf, actions, cabinet_dof_pos, franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot, franka_lfinger_pos, franka_rfinger_pos, gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis, num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale, finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length, joint_positions, finger_close_reward_scale, ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor) -> Tuple[Tensor, Tensor] # distance from hand to the drawer d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1) dist_reward = 1.0 / (1.0 + d**2) dist_reward *= dist_reward dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward) axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis) axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis) dot1 = ( torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) ) # alignment of forward axis for gripper dot2 = ( torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) ) # alignment of up axis for gripper # reward for matching the orientation of the hand to the drawer (fingers wrapped) rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2) # bonus if left finger is above the drawer handle and right below around_handle_reward = torch.zeros_like(rot_reward) around_handle_reward = torch.where( franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where( franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward ), around_handle_reward, ) # reward for distance of each finger from the drawer finger_dist_reward = torch.zeros_like(rot_reward) lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) finger_dist_reward = torch.where( franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where( franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], (0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward, ), finger_dist_reward, ) finger_close_reward = torch.zeros_like(rot_reward) finger_close_reward = torch.where( d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward ) # regularization on the actions (summed for each environment) action_penalty = torch.sum(actions**2, dim=-1) # how far the cabinet has been opened out open_reward = cabinet_dof_pos[:, 3] * 
around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint rewards = ( dist_reward_scale * dist_reward + rot_reward_scale * rot_reward + around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward + finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty + finger_close_reward * finger_close_reward_scale ) # bonus for opening drawer properly rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards) # # prevent bad style in opening drawer # rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, # torch.ones_like(rewards) * -1, rewards) # rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, # torch.ones_like(rewards) * -1, rewards) return rewards
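

# Shape of the hand-to-handle distance reward used in compute_franka_reward,
# shown standalone (a sketch; torch is already imported at the top of this file):
# r = (1 / (1 + d^2))^2, doubled once the gripper is within 2 cm of the handle.
if __name__ == "__main__":
    d = torch.tensor([0.0, 0.02, 0.1, 0.5])
    dist_reward = 1.0 / (1.0 + d**2)
    dist_reward *= dist_reward
    dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
    print(dist_reward)  # ~[2.000, 1.998, 0.980, 0.640]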
22939
Python
41.324723
222
0.599895
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/crazyflie.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import torch from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.torch.rotations import * from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.crazyflie import Crazyflie from omniisaacgymenvs.robots.articulations.views.crazyflie_view import CrazyflieView EPS = 1e-6 # small constant to avoid divisions by 0 and log(0) class CrazyflieTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self.update_config(sim_config) self._num_observations = 18 self._num_actions = 4 self._crazyflie_position = torch.tensor([0, 0, 1.0]) self._ball_position = torch.tensor([0, 0, 1.0]) RLTask.__init__(self, name=name, env=env) return def update_config(self, sim_config): self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"] self.dt = self._task_cfg["sim"]["dt"] # parameters for the crazyflie self.arm_length = 0.05 # parameters for the controller self.motor_damp_time_up = 0.15 self.motor_damp_time_down = 0.15 # I use the multiplier 4, since 4*T ~ time for a step response to finish, where # T is a time constant of the first-order filter self.motor_tau_up = 4 * self.dt / (self.motor_damp_time_up + EPS) self.motor_tau_down = 4 * self.dt / (self.motor_damp_time_down + EPS) # thrust max self.mass = 0.028 self.thrust_to_weight = 1.9 self.motor_assymetry = np.array([1.0, 1.0, 1.0, 1.0]) # re-normalizing to sum-up to 4 self.motor_assymetry = self.motor_assymetry * 4.0 / np.sum(self.motor_assymetry) self.grav_z = -1.0 * self._task_cfg["sim"]["gravity"][2] def set_up_scene(self, scene) -> None: self.get_crazyflie() self.get_target() RLTask.set_up_scene(self, scene) self._copters = 
CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view") scene.add(self._copters) scene.add(self._balls) for i in range(4): scene.add(self._copters.physics_rotors[i]) return def initialize_views(self, scene): super().initialize_views(scene) if scene.object_exists("crazyflie_view"): scene.remove_object("crazyflie_view", registry_only=True) if scene.object_exists("ball_view"): scene.remove_object("ball_view", registry_only=True) for i in range(1, 5): scene.remove_object(f"m{i}_prop_view", registry_only=True) self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view") self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view") scene.add(self._copters) scene.add(self._balls) for i in range(4): scene.add(self._copters.physics_rotors[i]) def get_crazyflie(self): copter = Crazyflie( prim_path=self.default_zero_env_path + "/Crazyflie", name="crazyflie", translation=self._crazyflie_position ) self._sim_config.apply_articulation_settings( "crazyflie", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("crazyflie") ) def get_target(self): radius = 0.2 color = torch.tensor([1, 0, 0]) ball = DynamicSphere( prim_path=self.default_zero_env_path + "/ball", translation=self._ball_position, name="target_0", radius=radius, color=color, ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) ball.set_collision_enabled(False) def get_observations(self) -> dict: self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) root_positions = self.root_pos - self._env_pos root_quats = self.root_rot rot_x = quat_axis(root_quats, 0) rot_y = quat_axis(root_quats, 1) rot_z = quat_axis(root_quats, 2) root_linvels = self.root_velocities[:, :3] root_angvels = self.root_velocities[:, 3:] self.obs_buf[..., 0:3] = self.target_positions - root_positions self.obs_buf[..., 3:6] = rot_x self.obs_buf[..., 6:9] = rot_y self.obs_buf[..., 9:12] = rot_z self.obs_buf[..., 12:15] = root_linvels self.obs_buf[..., 15:18] = root_angvels observations = {self._copters.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1) if len(set_target_ids) > 0: self.set_targets(set_target_ids) actions = actions.clone().to(self._device) self.actions = actions # clamp to [-1.0, 1.0] thrust_cmds = torch.clamp(actions, min=-1.0, max=1.0) # scale to [0.0, 1.0] thrust_cmds = (thrust_cmds + 1.0) / 2.0 # filtering the thruster and adding noise motor_tau = self.motor_tau_up * torch.ones((self._num_envs, 4), dtype=torch.float32, device=self._device) motor_tau[thrust_cmds < self.thrust_cmds_damp] = self.motor_tau_down motor_tau[motor_tau > 1.0] = 1.0 # Since NN commands thrusts we need to convert to rot vel and back thrust_rot = thrust_cmds**0.5 self.thrust_rot_damp = motor_tau * (thrust_rot - self.thrust_rot_damp) + self.thrust_rot_damp self.thrust_cmds_damp = self.thrust_rot_damp**2 ## Adding noise thrust_noise = 0.01 * torch.randn(4, dtype=torch.float32, device=self._device) thrust_noise = thrust_cmds * thrust_noise 
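        # The filter above operates in rotor-speed space: thrust scales with the
        # square of rotor speed, so low-passing sqrt(thrust) with separate
        # spin-up/spin-down time constants approximates the motor dynamics.
        # The multiplicative noise computed above perturbs the damped command next.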
self.thrust_cmds_damp = torch.clamp(self.thrust_cmds_damp + thrust_noise, min=0.0, max=1.0) thrusts = self.thrust_max * self.thrust_cmds_damp # thrusts given rotation root_quats = self.root_rot rot_x = quat_axis(root_quats, 0) rot_y = quat_axis(root_quats, 1) rot_z = quat_axis(root_quats, 2) rot_matrix = torch.cat((rot_x, rot_y, rot_z), 1).reshape(-1, 3, 3) force_x = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device) force_y = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device) force_xy = torch.cat((force_x, force_y), 1).reshape(-1, 4, 2) thrusts = thrusts.reshape(-1, 4, 1) thrusts = torch.cat((force_xy, thrusts), 2) thrusts_0 = thrusts[:, 0] thrusts_0 = thrusts_0[:, :, None] thrusts_1 = thrusts[:, 1] thrusts_1 = thrusts_1[:, :, None] thrusts_2 = thrusts[:, 2] thrusts_2 = thrusts_2[:, :, None] thrusts_3 = thrusts[:, 3] thrusts_3 = thrusts_3[:, :, None] mod_thrusts_0 = torch.matmul(rot_matrix, thrusts_0) mod_thrusts_1 = torch.matmul(rot_matrix, thrusts_1) mod_thrusts_2 = torch.matmul(rot_matrix, thrusts_2) mod_thrusts_3 = torch.matmul(rot_matrix, thrusts_3) self.thrusts[:, 0] = torch.squeeze(mod_thrusts_0) self.thrusts[:, 1] = torch.squeeze(mod_thrusts_1) self.thrusts[:, 2] = torch.squeeze(mod_thrusts_2) self.thrusts[:, 3] = torch.squeeze(mod_thrusts_3) # clear actions for reset envs self.thrusts[reset_env_ids] = 0 # spin spinning rotors prop_rot = self.thrust_cmds_damp * self.prop_max_rot self.dof_vel[:, 0] = prop_rot[:, 0] self.dof_vel[:, 1] = -1.0 * prop_rot[:, 1] self.dof_vel[:, 2] = prop_rot[:, 2] self.dof_vel[:, 3] = -1.0 * prop_rot[:, 3] self._copters.set_joint_velocities(self.dof_vel) # apply actions for i in range(4): self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices) def post_reset(self): thrust_max = self.grav_z * self.mass * self.thrust_to_weight * self.motor_assymetry / 4.0 self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device) self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device) self.thrust_max = torch.tensor(thrust_max, device=self._device, dtype=torch.float32) self.motor_linearity = 1.0 self.prop_max_rot = 433.3 self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32) self.target_positions[:, 2] = 1 self.actions = torch.zeros((self._num_envs, 4), device=self._device, dtype=torch.float32) self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device) # Extra info self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = { "rew_pos": torch_zeros(), "rew_orient": torch_zeros(), "rew_effort": torch_zeros(), "rew_spin": torch_zeros(), "raw_dist": torch_zeros(), "raw_orient": torch_zeros(), "raw_effort": torch_zeros(), "raw_spin": torch_zeros(), } self.root_pos, self.root_rot = self._copters.get_world_poses() self.root_velocities = self._copters.get_velocities() self.dof_pos = self._copters.get_joint_positions() self.dof_vel = self._copters.get_joint_velocities() self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses(clone=False) self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone() # control parameters self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device) 
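        # Low-pass filter state for the motor model (thrust- and sqrt-thrust
        # domain); zeroed so every episode starts with the motors at rest.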
        # control parameters
        self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
        self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
        self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)

        self.set_targets(self.all_indices)

    def set_targets(self, env_ids):
        num_sets = len(env_ids)
        envs_long = env_ids.long()
        # set target position randomly with x, y in (0, 0) and z in (2)
        self.target_positions[envs_long, 0:2] = torch.zeros((num_sets, 2), device=self._device)
        self.target_positions[envs_long, 2] = torch.ones(num_sets, device=self._device) * 2.0

        # shift the target up so it visually aligns better
        ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
        ball_pos[:, 2] += 0.0
        self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)

    def reset_idx(self, env_ids):
        num_resets = len(env_ids)

        self.dof_pos[env_ids, :] = torch_rand_float(-0.0, 0.0, (num_resets, self._copters.num_dof), device=self._device)
        self.dof_vel[env_ids, :] = 0

        root_pos = self.initial_root_pos.clone()
        root_pos[env_ids, 0] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
        root_pos[env_ids, 1] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
        root_pos[env_ids, 2] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
        root_velocities = self.root_velocities.clone()
        root_velocities[env_ids] = 0

        # apply resets
        self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
        self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
        self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
        self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)

        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

        self.thrust_cmds_damp[env_ids] = 0
        self.thrust_rot_damp[env_ids] = 0

        # fill extras
        self.extras["episode"] = {}
        for key in self.episode_sums.keys():
            self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
            self.episode_sums[key][env_ids] = 0.0

    def calculate_metrics(self) -> None:
        root_positions = self.root_pos - self._env_pos
        root_quats = self.root_rot
        root_angvels = self.root_velocities[:, 3:]

        # pos reward
        target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
        pos_reward = 1.0 / (1.0 + target_dist)
        self.target_dist = target_dist
        self.root_positions = root_positions

        # orient reward
        ups = quat_axis(root_quats, 2)
        self.orient_z = ups[..., 2]
        up_reward = torch.clamp(ups[..., 2], min=0.0, max=1.0)

        # effort reward
        effort = torch.square(self.actions).sum(-1)
        effort_reward = 0.05 * torch.exp(-0.5 * effort)

        # spin reward
        spin = torch.square(root_angvels).sum(-1)
        spin_reward = 0.01 * torch.exp(-1.0 * spin)

        # combined reward
        self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spin_reward) - effort_reward

        # log episode reward sums
        self.episode_sums["rew_pos"] += pos_reward
        self.episode_sums["rew_orient"] += up_reward
        self.episode_sums["rew_effort"] += effort_reward
        self.episode_sums["rew_spin"] += spin_reward

        # log raw info
        self.episode_sums["raw_dist"] += target_dist
        self.episode_sums["raw_orient"] += ups[..., 2]
        self.episode_sums["raw_effort"] += effort
        self.episode_sums["raw_spin"] += spin
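    # Early termination mirrors the reward terms: an episode ends as soon as
    # the copter drifts more than 5 m from its target, leaves the 0.5-5.0 m
    # altitude band, or tilts so far that its body z-axis points downward.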
    def is_done(self) -> None:
        # resets due to misbehavior
        ones = torch.ones_like(self.reset_buf)
        die = torch.zeros_like(self.reset_buf)
        die = torch.where(self.target_dist > 5.0, ones, die)

        # z >= 0.5 & z <= 5.0 & up > 0
        die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
        die = torch.where(self.root_positions[..., 2] > 5.0, ones, die)
        die = torch.where(self.orient_z < 0.0, ones, die)

        # resets due to episode length
        self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
16830
Python
41.502525
120
0.61937
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/humanoid.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math

import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema


class HumanoidLocomotionTask(LocomotionTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:
        self.update_config(sim_config)
        self._num_observations = 87
        self._num_actions = 21
        self._humanoid_positions = torch.tensor([0, 0, 1.34])

        LocomotionTask.__init__(self, name=name, env=env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        LocomotionTask.update_config(self)

    def set_up_scene(self, scene) -> None:
        self.get_humanoid()
        RLTask.set_up_scene(self, scene)
        self._humanoids = ArticulationView(
            prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
        )
        scene.add(self._humanoids)
        return

    def initialize_views(self, scene):
        RLTask.initialize_views(self, scene)
        if scene.object_exists("humanoid_view"):
            scene.remove_object("humanoid_view", registry_only=True)
        self._humanoids = ArticulationView(
            prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
        )
        scene.add(self._humanoids)

    def get_humanoid(self):
        humanoid = Humanoid(
            prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions
        )
        self._sim_config.apply_articulation_settings(
            "Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid")
        )

    def get_robot(self):
        return self._humanoids

    def post_reset(self):
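        # joint_gears scales each normalized action into an applied torque in
        # the shared LocomotionTask; the values below give the thighs and knees
        # the most authority and the feet the least. The motor_effort_ratio
        # derived from it weights the dof-at-limit penalty, so stronger joints
        # are penalized more when driven against their limits.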
        self.joint_gears = torch.tensor(
            [
                67.5000,  # lower_waist
                67.5000,  # lower_waist
                67.5000,  # right_upper_arm
                67.5000,  # right_upper_arm
                67.5000,  # left_upper_arm
                67.5000,  # left_upper_arm
                67.5000,  # pelvis
                45.0000,  # right_lower_arm
                45.0000,  # left_lower_arm
                45.0000,  # right_thigh: x
                135.0000,  # right_thigh: y
                45.0000,  # right_thigh: z
                45.0000,  # left_thigh: x
                135.0000,  # left_thigh: y
                45.0000,  # left_thigh: z
                90.0000,  # right_knee
                90.0000,  # left_knee
                22.5,  # right_foot
                22.5,  # right_foot
                22.5,  # left_foot
                22.5,  # left_foot
            ],
            device=self._device,
        )
        self.max_motor_effort = torch.max(self.joint_gears)
        self.motor_effort_ratio = self.joint_gears / self.max_motor_effort
        dof_limits = self._humanoids.get_dof_limits()
        self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
        self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)

        force_links = ["left_foot", "right_foot"]
        self._sensor_indices = torch.tensor(
            [self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
        )

        LocomotionTask.post_reset(self)

    def get_dof_at_limit_cost(self):
        return get_dof_at_limit_cost(self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale)


@torch.jit.script
def get_dof_at_limit_cost(obs_buf, motor_effort_ratio, joints_at_limit_cost_scale):
    # type: (Tensor, Tensor, float) -> Tensor
    scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
    dof_at_limit_cost = torch.sum(
        (torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1
    )
    return dof_at_limit_cost
5980
Python
41.119718
117
0.651003
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_deformable.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import carb

from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView

from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *

import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim
from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView
from omni.physx.scripts import deformableUtils, physicsUtils

import numpy as np
import torch
import math

from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema


class FrankaDeformableTask(RLTask):
    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        self.update_config(sim_config)

        self.dt = 1/60.

        self._num_observations = 39
        self._num_actions = 9

        RLTask.__init__(self, name, env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]

        self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self.action_scale = self._task_cfg["env"]["actionScale"]

    def set_up_scene(self, scene) -> None:
        self.stage = get_current_stage()
        self.assets_root_path = get_assets_root_path()
        if self.assets_root_path is None:
            carb.log_error("Could not find Isaac Sim assets folder")

        self.get_franka()
        self.get_beaker()
        self.get_deformable_tube()

        super().set_up_scene(scene=scene, replicate_physics=False)

        self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
        self.deformableView = DeformablePrimView(
            prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
        )

        scene.add(self.deformableView)
        scene.add(self._frankas)
        scene.add(self._frankas._hands)
        scene.add(self._frankas._lfingers)
        scene.add(self._frankas._rfingers)

        return

    def initialize_views(self, scene):
        super().initialize_views(scene)
        if scene.object_exists("franka_view"):
            scene.remove_object("franka_view", registry_only=True)
        if scene.object_exists("hands_view"):
            scene.remove_object("hands_view", registry_only=True)
        if scene.object_exists("lfingers_view"):
            scene.remove_object("lfingers_view", registry_only=True)
        if scene.object_exists("rfingers_view"):
            scene.remove_object("rfingers_view", registry_only=True)
        if scene.object_exists("deformabletube_view"):
            scene.remove_object("deformabletube_view", registry_only=True)

        self._frankas = FrankaView(
            prim_paths_expr="/World/envs/.*/franka", name="franka_view"
        )
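        # The DeformablePrimView exposes the tube's simulation mesh at node
        # level; get_simulation_mesh_nodal_positions()/velocities() on this
        # view are what the observation and reset code below rely on.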
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view" ) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self.deformableView) def get_franka(self): franka = Franka( prim_path=self.default_zero_env_path + "/franka", name="franka", orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]), translation=torch.tensor([0.0, 0.0, 0.0]), ) self._sim_config.apply_articulation_settings( "franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka") ) franka.set_franka_properties(stage=self.stage, prim=franka.prim) def get_beaker(self): _usd_path = self.assets_root_path + "/Isaac/Props/Beaker/beaker_500ml.usd" mesh_path = self.default_zero_env_path + "/beaker" add_reference_to_stage(_usd_path, mesh_path) beaker = RigidPrim( prim_path=mesh_path+"/beaker", name="beaker", position=torch.tensor([0.5, 0.2, 0.095]), ) self._sim_config.apply_articulation_settings("beaker", beaker.prim, self._sim_config.parse_actor_config("beaker")) def get_deformable_tube(self): _usd_path = self.assets_root_path + "/Isaac/Props/DeformableTube/tube.usd" mesh_path = self.default_zero_env_path + "/deformableTube/tube" add_reference_to_stage(_usd_path, mesh_path) skin_mesh = get_prim_at_path(mesh_path) physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh) physicsUtils.set_or_add_translate_op(skin_mesh, (0.6, 0.0, 0.005)) physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([0, 0, 1]), 90).GetQuat()) def get_observations(self) -> dict: franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False) self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False) self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos tube_positions = self.deformableView.get_simulation_mesh_nodal_positions(clone=False) tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities(clone=False) self.tube_front_positions = tube_positions[:, 200, :] - self._env_pos self.tube_front_velocities = tube_velocities[:, 200, :] self.tube_back_positions = tube_positions[:, -1, :] - self._env_pos self.tube_back_velocities = tube_velocities[:, -1, :] front_to_gripper = self.tube_front_positions - self.gripper_site_pos to_front_goal = self.front_goal_pos - self.tube_front_positions to_back_goal = self.back_goal_pos - self.tube_back_positions self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, front_to_gripper, to_front_goal, to_back_goal, self.tube_front_positions, self.tube_front_velocities, self.tube_back_positions, self.tube_back_velocities, ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, 
        self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
        self.franka_dof_targets[:, -1] = self.franka_dof_targets[:, -2]
        env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)

        self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)

    def reset_idx(self, env_ids):
        indices = env_ids.to(dtype=torch.int32)
        num_indices = len(indices)

        pos = self.franka_default_dof_pos
        dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_pos[:, :] = pos
        self.franka_dof_targets[env_ids, :] = pos
        self.franka_dof_pos[env_ids, :] = pos

        self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
        self._frankas.set_joint_positions(dof_pos, indices=indices)
        self._frankas.set_joint_velocities(dof_vel, indices=indices)

        self.deformableView.set_simulation_mesh_nodal_positions(self.initial_tube_positions[env_ids], indices)
        self.deformableView.set_simulation_mesh_nodal_velocities(self.initial_tube_velocities[env_ids], indices)

        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def post_reset(self):
        self.franka_default_dof_pos = torch.tensor(
            [0.00, 0.63, 0.00, -2.15, 0.00, 2.76, 0.75, 0.02, 0.02], device=self._device
        )
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)

        self.front_goal_pos = torch.tensor([0.36, 0.0, 0.23], device=self._device).repeat((self._num_envs, 1))
        self.back_goal_pos = torch.tensor([0.5, 0.2, 0.0], device=self._device).repeat((self._num_envs, 1))

        self.goal_hand_rot = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self._device).repeat((self.num_envs, 1))

        self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
        self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
        self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos

        self.initial_tube_positions = self.deformableView.get_simulation_mesh_nodal_positions()
        self.initial_tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities()

        self.tube_front_positions = self.initial_tube_positions[:, 0, :] - self._env_pos
        self.tube_front_velocities = self.initial_tube_velocities[:, 0, :]
        self.tube_back_positions = self.initial_tube_positions[:, -1, :] - self._env_pos
        self.tube_back_velocities = self.initial_tube_velocities[:, -1, :]

        self.num_franka_dofs = self._frankas.num_dof
        self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
        dof_limits = self._frankas.get_dof_limits()
        self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
        self.franka_dof_targets = torch.zeros(
            (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
        )

        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)

    def calculate_metrics(self) -> None:
        goal_distance_error = torch.norm(self.tube_back_positions[:, 0:2] - self.back_goal_pos[:, 0:2], p = 2, dim = -1)
        goal_dist_reward = 1.0 / (5*goal_distance_error + .025)

        current_z_level = self.tube_back_positions[:, 2:3]
        z_lift_level = torch.where(
            goal_distance_error < 0.07,
            torch.zeros_like(current_z_level),
            torch.ones_like(current_z_level)*0.18
        )
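        # Both terms use the same inverse-distance shaping, 1 / (5 * err + 0.025),
        # which peaks at 40 for zero error. The lift target drops to 0 once the
        # tube end is within 7 cm of the goal in the xy-plane, so the policy is
        # encouraged to lift first and then place.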
        front_lift_error = torch.norm(current_z_level - z_lift_level, p = 2, dim = -1)
        front_lift_reward = 1.0 / (5*front_lift_error + .025)

        rewards = goal_dist_reward + 4*front_lift_reward

        self.rew_buf[:] = rewards

    def is_done(self) -> None:
        self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.tube_front_positions[:, 0] < 0, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.tube_front_positions[:, 0] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.tube_front_positions[:, 1] < -1.0, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.tube_front_positions[:, 1] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
13322
Python
42.825658
136
0.641045
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ant.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math

import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema


class AntLocomotionTask(LocomotionTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:
        self.update_config(sim_config)
        LocomotionTask.__init__(self, name=name, env=env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        self._num_observations = 60
        self._num_actions = 8
        self._ant_positions = torch.tensor([0, 0, 0.5])
        LocomotionTask.update_config(self)

    def set_up_scene(self, scene) -> None:
        self.get_ant()
        RLTask.set_up_scene(self, scene)
        self._ants = ArticulationView(
            prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
        )
        scene.add(self._ants)
        return

    def initialize_views(self, scene):
        RLTask.initialize_views(self, scene)
        if scene.object_exists("ant_view"):
            scene.remove_object("ant_view", registry_only=True)
        self._ants = ArticulationView(
            prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
        )
        scene.add(self._ants)

    def get_ant(self):
        ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
        self._sim_config.apply_articulation_settings(
            "Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant")
        )

    def get_robot(self):
        return self._ants

    def post_reset(self):
        self.joint_gears = torch.tensor([15, 15, 15, 15, 15, 15, 15, 15], dtype=torch.float32, device=self._device)
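        # All eight leg joints share the same gear value, so the effort ratio
        # below is uniform; unlike the humanoid task, no joint is weighted more
        # heavily in the dof-at-limit cost.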
        dof_limits = self._ants.get_dof_limits()
        self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
        self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
        self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self._device)

        force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
        self._sensor_indices = torch.tensor(
            [self._ants._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
        )

        LocomotionTask.post_reset(self)

    def get_dof_at_limit_cost(self):
        return get_dof_at_limit_cost(self.obs_buf, self._ants.num_dof)


@torch.jit.script
def get_dof_at_limit_cost(obs_buf, num_dof):
    # type: (Tensor, int) -> Tensor
    return torch.sum(obs_buf[:, 12 : 12 + num_dof] > 0.99, dim=-1)
4691
Python
41.654545
115
0.69708
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math

import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path

from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole


class CartpoleTask(RLTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:

        self.update_config(sim_config)
        self._max_episode_length = 500

        self._num_observations = 4
        self._num_actions = 1

        RLTask.__init__(self, name, env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])

        self._reset_dist = self._task_cfg["env"]["resetDist"]
        self._max_push_effort = self._task_cfg["env"]["maxEffort"]

    def set_up_scene(self, scene) -> None:
        self.get_cartpole()
        super().set_up_scene(scene)
        self._cartpoles = ArticulationView(
            prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
        )
        scene.add(self._cartpoles)
        return

    def initialize_views(self, scene):
        super().initialize_views(scene)
        if scene.object_exists("cartpole_view"):
            scene.remove_object("cartpole_view", registry_only=True)
        self._cartpoles = ArticulationView(
            prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
        )
        scene.add(self._cartpoles)

    def get_cartpole(self):
        cartpole = Cartpole(
            prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions
        )
        # applies articulation settings from the task configuration yaml file
        self._sim_config.apply_articulation_settings(
            "Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole")
        )
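    # The observation is the minimal 4-dimensional state
    # [cart_pos, cart_vel, pole_pos, pole_vel], and the single action is mapped
    # to a horizontal force on the cart in pre_physics_step.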
    def get_observations(self) -> dict:
        dof_pos = self._cartpoles.get_joint_positions(clone=False)
        dof_vel = self._cartpoles.get_joint_velocities(clone=False)

        self.cart_pos = dof_pos[:, self._cart_dof_idx]
        self.cart_vel = dof_vel[:, self._cart_dof_idx]
        self.pole_pos = dof_pos[:, self._pole_dof_idx]
        self.pole_vel = dof_vel[:, self._pole_dof_idx]

        self.obs_buf[:, 0] = self.cart_pos
        self.obs_buf[:, 1] = self.cart_vel
        self.obs_buf[:, 2] = self.pole_pos
        self.obs_buf[:, 3] = self.pole_vel

        observations = {self._cartpoles.name: {"obs_buf": self.obs_buf}}
        return observations

    def pre_physics_step(self, actions) -> None:
        if not self._env._world.is_playing():
            return

        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)

        actions = actions.to(self._device)

        forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device)
        forces[:, self._cart_dof_idx] = self._max_push_effort * actions[:, 0]

        indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device)
        self._cartpoles.set_joint_efforts(forces, indices=indices)

    def reset_idx(self, env_ids):
        num_resets = len(env_ids)

        # randomize DOF positions
        dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
        dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
        dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))

        # randomize DOF velocities
        dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
        dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
        dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))

        # apply resets
        indices = env_ids.to(dtype=torch.int32)
        self._cartpoles.set_joint_positions(dof_pos, indices=indices)
        self._cartpoles.set_joint_velocities(dof_vel, indices=indices)

        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def post_reset(self):
        self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
        self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
        # randomize all envs
        indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)

    def calculate_metrics(self) -> None:
        reward = 1.0 - self.pole_pos * self.pole_pos - 0.01 * torch.abs(self.cart_vel) - 0.005 * torch.abs(self.pole_vel)
        reward = torch.where(torch.abs(self.cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
        reward = torch.where(torch.abs(self.pole_pos) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)

        self.rew_buf[:] = reward

    def is_done(self) -> None:
        resets = torch.where(torch.abs(self.cart_pos) > self._reset_dist, 1, 0)
        resets = torch.where(torch.abs(self.pole_pos) > math.pi / 2, 1, resets)
        resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets)

        self.reset_buf[:] = resets
7256
Python
42.981818
121
0.659179
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/dofbot_reacher.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Ref: /omniisaacgymenvs/tasks/shadow_hand.py

import math

import numpy as np
import torch

from omniisaacgymenvs.sim2real.dofbot import RealWorldDofbot
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.robots.articulations.views.dofbot_view import DofbotView
from omniisaacgymenvs.robots.articulations.dofbot import Dofbot
from omniisaacgymenvs.tasks.shared.reacher import ReacherTask

from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omni.isaac.gym.vec_env import VecEnvBase


class DofbotReacherTask(ReacherTask):
    def __init__(
        self,
        name: str,
        sim_config: SimConfig,
        env: VecEnvBase,
        offset=None
    ) -> None:
        self.update_config(sim_config)
        ReacherTask.__init__(self, name=name, env=env)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self.obs_type = self._task_cfg["env"]["observationType"]
        if not (self.obs_type in ["full"]):
            raise Exception(
                "Unknown type of observations!\nobservationType should be one of: [full]")
        print("Obs type:", self.obs_type)

        self.num_obs_dict = {
            "full": 29,
            # 6: dofbot joints position (action space)
            # 6: dofbot joints velocity
            # 3: goal position
            # 4: goal rotation
            # 4: goal relative rotation
            # 6: previous action
        }

        self.object_scale = torch.tensor([0.1] * 3)
        self.goal_scale = torch.tensor([0.5] * 3)

        self._num_observations = self.num_obs_dict[self.obs_type]
        self._num_actions = 6
        self._num_states = 0

        pi = math.pi
        # For actions
        self._dof_limits = torch.tensor([[
            [-pi/2, pi/2],
            [-pi/4, pi/4],
            [-pi/4, pi/4],
            [-pi/4, pi/4],
            [-pi/2, pi/2],
            [-0.1, 0.1],  # The gripper joint will be ignored, since it is not used in the Reacher task
        ]], dtype=torch.float32, device=self._cfg["sim_device"])
        # The last action space cannot be [0, 0]
        # It will introduce the following error:
        # ValueError: Expected parameter loc (Tensor of shape (2048, 6)) of distribution Normal(loc: torch.Size([2048, 6]), scale: torch.Size([2048, 6])) to satisfy the constraint Real(), but found invalid values
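        # The ranges above are narrower than the physical joint limits,
        # presumably to keep sampled goals within easy reach; the last row
        # keeps the unused gripper joint non-degenerate for the reason given
        # in the error message above.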
        self.useURDF = self._task_cfg["env"]["useURDF"]

        # Setup Sim2Real
        sim2real_config = self._task_cfg['sim2real']
        if sim2real_config['enabled'] and self.test and self.num_envs == 1:
            self.real_world_dofbot = RealWorldDofbot(
                sim2real_config['ip'],
                sim2real_config['port'],
                sim2real_config['fail_quietely'],
                sim2real_config['verbose']
            )
        ReacherTask.update_config(self)

    def get_num_dof(self):
        # assert self._arms.num_dof == 11
        return min(self._arms.num_dof, 6)

    def get_arm(self):
        if not self.useURDF:
            usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_instanceable.usd"
        else:
            usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf_instanceable.usd"
        dofbot = Dofbot(
            prim_path=self.default_zero_env_path + "/Dofbot",
            name="Dofbot",
            usd_path=usd_path
        )
        self._sim_config.apply_articulation_settings(
            "dofbot",
            get_prim_at_path(dofbot.prim_path),
            self._sim_config.parse_actor_config("dofbot"),
        )

    def get_arm_view(self, scene):
        if not self.useURDF:
            end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5/Wrist_Twist"
        else:
            end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5"
        arm_view = DofbotView(
            prim_paths_expr="/World/envs/.*/Dofbot",
            end_effector_prim_paths_expr=end_effector_prim_paths_expr,
            name="dofbot_view"
        )
        scene.add(arm_view._end_effectors)
        return arm_view

    def get_object_displacement_tensor(self):
        return torch.tensor([0.0, 0.015, 0.1], device=self.device).repeat((self.num_envs, 1))

    def get_observations(self):
        self.arm_dof_pos = self._arms.get_joint_positions()
        self.arm_dof_vel = self._arms.get_joint_velocities()

        if self.obs_type == "full_no_vel":
            self.compute_full_observations(True)
        elif self.obs_type == "full":
            self.compute_full_observations()
        else:
            print("Unknown observations type!")

        observations = {self._arms.name: {"obs_buf": self.obs_buf}}
        return observations

    def get_reset_target_new_pos(self, n_reset_envs):
        # Randomly generate goal positions, although the resulting goal may still not be reachable.
        new_pos = torch_rand_float(-1, 1, (n_reset_envs, 3), device=self.device)
        new_pos[:, 0] = new_pos[:, 0] * 0.05 + 0.15 * torch.sign(new_pos[:, 0])
        new_pos[:, 1] = new_pos[:, 1] * 0.05 + 0.15 * torch.sign(new_pos[:, 1])
        new_pos[:, 2] = torch.abs(new_pos[:, 2] * 0.2) + 0.15
        return new_pos

    def compute_full_observations(self, no_vel=False):
        if no_vel:
            raise NotImplementedError()
        else:
            # There is a lot of redundant information for the simple Reacher task, but we'll keep it for now.
            self.obs_buf[:, 0:self.num_arm_dofs] = unscale(self.arm_dof_pos[:, :self.num_arm_dofs],
                self.arm_dof_lower_limits, self.arm_dof_upper_limits)
            self.obs_buf[:, self.num_arm_dofs:2*self.num_arm_dofs] = self.vel_obs_scale * self.arm_dof_vel[:, :self.num_arm_dofs]
            base = 2 * self.num_arm_dofs
            self.obs_buf[:, base+0:base+3] = self.goal_pos
            self.obs_buf[:, base+3:base+7] = self.goal_rot
            self.obs_buf[:, base+7:base+11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, base+11:base+17] = self.actions

    def send_joint_pos(self, joint_pos):
        self.real_world_dofbot.send_joint_pos(joint_pos)
7961
Python
41.57754
212
0.633589
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/quadcopter.py
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math

import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *

from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.quadcopter import Quadcopter
from omniisaacgymenvs.robots.articulations.views.quadcopter_view import QuadcopterView


class QuadcopterTask(RLTask):
    def __init__(self, name, sim_config, env, offset=None) -> None:

        self.update_config(sim_config)

        self._num_observations = 21
        self._num_actions = 12

        self._copter_position = torch.tensor([0, 0, 1.0])

        RLTask.__init__(self, name=name, env=env)

        max_thrust = 2.0
        self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
        self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)

        self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
        return

    def update_config(self, sim_config):
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]

        self.dt = self._task_cfg["sim"]["dt"]

    def set_up_scene(self, scene) -> None:
        self.get_copter()
        self.get_target()
        RLTask.set_up_scene(self, scene)
        self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
        self._balls = RigidPrimView(
            prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
        )
        self._balls._non_root_link = True  # do not set states for kinematics
        scene.add(self._copters)
        scene.add(self._copters.rotors)
        scene.add(self._balls)
        return
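    # The 12-dimensional action is split in pre_physics_step: the first 8
    # entries drive the arm joint position targets and the last 4 are
    # integrated into per-rotor thrust forces.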
scene.remove_object("quadcopter_view", registry_only=True) if scene.object_exists("rotors_view"): scene.remove_object("rotors_view", registry_only=True) if scene.object_exists("targets_view"): scene.remove_object("targets_view", registry_only=True) self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view") self._balls = RigidPrimView( prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False ) scene.add(self._copters) scene.add(self._copters.rotors) scene.add(self._balls) def get_copter(self): copter = Quadcopter( prim_path=self.default_zero_env_path + "/Quadcopter", name="quadcopter", translation=self._copter_position ) self._sim_config.apply_articulation_settings( "copter", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("copter") ) def get_target(self): radius = 0.05 color = torch.tensor([1, 0, 0]) ball = DynamicSphere( prim_path=self.default_zero_env_path + "/ball", name="target_0", radius=radius, color=color, ) self._sim_config.apply_articulation_settings( "ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball") ) ball.set_collision_enabled(False) def get_observations(self) -> dict: self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False) self.root_velocities = self._copters.get_velocities(clone=False) self.dof_pos = self._copters.get_joint_positions(clone=False) root_positions = self.root_pos - self._env_pos root_quats = self.root_rot root_linvels = self.root_velocities[:, :3] root_angvels = self.root_velocities[:, 3:] self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3 self.obs_buf[..., 3:7] = root_quats self.obs_buf[..., 7:10] = root_linvels / 2 self.obs_buf[..., 10:13] = root_angvels / math.pi self.obs_buf[..., 13:21] = self.dof_pos observations = {self._copters.name: {"obs_buf": self.obs_buf}} return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = actions.clone().to(self._device) dof_action_speed_scale = 8 * math.pi self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8] self.dof_position_targets[:] = tensor_clamp( self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits ) thrust_action_speed_scale = 100 self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12] self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits) self.forces[:, 0, 2] = self.thrusts[:, 0] self.forces[:, 1, 2] = self.thrusts[:, 1] self.forces[:, 2, 2] = self.thrusts[:, 2] self.forces[:, 3, 2] = self.thrusts[:, 3] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids] # apply actions self._copters.set_joint_position_targets(self.dof_position_targets) self._copters.rotors.apply_forces(self.forces, is_global=False) def post_reset(self): # control tensors self.dof_position_targets = torch.zeros( (self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False ) self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False) self.forces = torch.zeros( (self._num_envs, self._copters.rotors.count // self._num_envs, 3), dtype=torch.float32, device=self._device, requires_grad=False, ) 
        self.target_positions = torch.zeros((self._num_envs, 3), device=self._device)
        self.target_positions[:, 2] = 1.0

        self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
        self.root_velocities = self._copters.get_velocities(clone=False)
        self.dof_pos = self._copters.get_joint_positions(clone=False)
        self.dof_vel = self._copters.get_joint_velocities(clone=False)

        self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()

        dof_limits = self._copters.get_dof_limits()
        self.dof_lower_limits = dof_limits[0][:, 0].to(device=self._device)
        self.dof_upper_limits = dof_limits[0][:, 1].to(device=self._device)

    def reset_idx(self, env_ids):
        num_resets = len(env_ids)

        self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device)
        self.dof_vel[env_ids, :] = 0

        root_pos = self.initial_root_pos.clone()
        root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
        root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
        root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1)
        root_velocities = self.root_velocities.clone()
        root_velocities[env_ids] = 0

        # apply resets
        self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
        self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
        self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
        self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)

        self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos)

        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def calculate_metrics(self) -> None:
        root_positions = self.root_pos - self._env_pos
        root_quats = self.root_rot
        root_angvels = self.root_velocities[:, 3:]

        # distance to target
        target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
        pos_reward = 1.0 / (1.0 + 3 * target_dist * target_dist)  # 2
        self.target_dist = target_dist
        self.root_positions = root_positions

        # uprightness
        ups = quat_axis(root_quats, 2)
        tiltage = torch.abs(1 - ups[..., 2])
        up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage)

        # spinning
        spinnage = torch.abs(root_angvels[..., 2])
        spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage)

        rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1 / 400))
        rew = torch.clip(rew, 0.0, None)
        self.rew_buf[:] = rew

    def is_done(self) -> None:
        # resets due to misbehavior
        ones = torch.ones_like(self.reset_buf)
        die = torch.zeros_like(self.reset_buf)
        die = torch.where(self.target_dist > 3.0, ones, die)
        die = torch.where(self.root_positions[..., 2] < 0.3, ones, die)

        # resets due to episode length
        self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
11498
Python
42.889313
120
0.640633