file_path
stringlengths 21
202
| content
stringlengths 13
1.02M
| size
int64 13
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 5.43
98.5
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.91
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/ui.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
from typing import *
import bpy
from bpy.utils import previews
from omni_audio2face.operators import (
OMNI_OT_PrepareScene,
OMNI_OT_MarkExportMesh,
OMNI_OT_ChooseUSDFile,
OMNI_OT_ChooseAnimCache,
OMNI_OT_ExportPreparedScene,
OMNI_OT_ImportRigFile,
OMNI_OT_TransferShapeData,
OMNI_OT_ImportAnimation,
)
## ======================================================================
def preload_icons() -> previews.ImagePreviewCollection:
    """Load the add-on's bundled icon files into a fresh preview collection.

    :return: Preview collection with every interface icon registered by key.
    """
    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons")
    ## key -> file name inside the "icons" directory
    icon_files = {
        "AUDIO2FACE": "omni_audio2face.png",
    }
    collection = previews.new()
    for icon_key, file_name in icon_files.items():
        collection.load(icon_key, os.path.join(base_dir, file_name), "IMAGE")
    return collection
## ======================================================================
class OBJECT_PT_Audio2FacePanel(bpy.types.Panel):
    """Viewport N-panel driving the Audio2Face export / import workflow."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Audio2Face"
    bl_options = {"DEFAULT_CLOSED"}

    # NOTE(review): placeholder version string -- presumably overwritten at
    # registration time from the add-on metadata; confirm before relying on it.
    version = "0.0.0"

    # Icon previews are loaded once, at class-definition time.
    icons = preload_icons()

    def draw_header(self, context):
        # Audio2Face logo next to the panel title.
        self.layout.label(text="", icon_value=self.icons["AUDIO2FACE"].icon_id)

    # draw the panel
    def draw(self, context):
        """Draw the export-prep section, then the shapes/animation import section."""
        use_face_selection = context.scene.audio2face.use_face_selection
        # mesh_select_mode[2] is face-select mode; face selection is only
        # meaningful while editing polygons.
        is_poly_edit_mode = context.tool_settings.mesh_select_mode[2] and context.mode == "EDIT_MESH"

        # Marker collections created by the export operators (may not exist yet).
        a2f_export_static = bpy.data.collections.get("A2F Export Static", None)
        a2f_export_dynamic = bpy.data.collections.get("A2F Export Dynamic", None)

        layout = self.layout
        layout.label(text="Face Prep and Export", icon="EXPORT")

        # -- Mark meshes for export, either static or dynamic ----------------
        row = layout.row(align=True)
        op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Static")
        op.is_dynamic = False
        op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Dynamic")
        op.is_dynamic = True

        row = layout.row(align=True)
        row.prop(context.scene.audio2face, "use_face_selection", text="")
        if use_face_selection and not is_poly_edit_mode:
            row.label(text="Use Faces: Must be in Polygon Edit Mode!", icon="ERROR")
        else:
            row.label(text="Use Face Selection?")

        ## mesh selections
        col = layout.column(align=True)
        if a2f_export_dynamic:
            col.prop_search(context.scene.audio2face, "mesh_skin", a2f_export_dynamic, "objects", text="Skin Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_tongue", a2f_export_dynamic, "objects", text="Tongue Mesh: ")
        else:
            col.label(text="Dynamic Meshes are required to set Skin and Tongue", icon="ERROR")
            col.label(text=" ")
        if a2f_export_static:
            col.prop_search(context.scene.audio2face, "mesh_eye_left", a2f_export_static, "objects", text="Left Eye Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_eye_right", a2f_export_static, "objects", text="Right Eye Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_gums_lower", a2f_export_static, "objects", text="Lower Gums Mesh: ")
        else:
            col.label(text="Static Meshes are required to set Eyes", icon="ERROR")
            col.label(text=" ")

        # -- Export path and the export button --------------------------------
        col = layout.column(align=True)
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "export_filepath", text="Export Path: ")
        op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER")
        op.operation = "EXPORT"
        col.prop(context.scene.audio2face, "export_project", text="Export With Project File")
        row = col.row(align=True)
        collection = bpy.data.collections.get("A2F Export", None)
        child_count = len(collection.all_objects) if collection else 0
        # The button label doubles as the "nothing to export" message.
        args = {
            "text": "Export Face USD" if child_count else "No meshes available for Export",
        }
        op = row.operator(OMNI_OT_ExportPreparedScene.bl_idname, **args)

        ## Import Side -- after Audio2Face has transferred the shapes
        layout.separator()
        layout.label(text="Face Shapes Import", icon="IMPORT")
        col = layout.column(align=True)
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "import_filepath", text="Shapes Import Path")
        op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER")
        op.operation = "IMPORT"
        col = layout.column(align=True)
        col.operator(OMNI_OT_ImportRigFile.bl_idname)
        row = col.row(align=True)
        op = row.operator(OMNI_OT_TransferShapeData.bl_idname)
        op.apply_fix = context.scene.audio2face.transfer_apply_fix
        row.prop(context.scene.audio2face, "transfer_apply_fix", icon="MODIFIER", text="")

        # -- Animation cache import -------------------------------------------
        col = layout.column(align=True)
        col.label(text="Anim Cache Path")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "import_anim_path", text="")
        row.operator(OMNI_OT_ChooseAnimCache.bl_idname, text="", icon="FILE_FOLDER")
        # JSON caches carry no frame rate of their own, so ask the user for one.
        if context.scene.audio2face.import_anim_path.lower().endswith(".json"):
            col.prop(context.scene.audio2face, "anim_frame_rate", text="Source Framerate")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "anim_start_type", text="Start Frame")
        if context.scene.audio2face.anim_start_type == "CUSTOM":
            row.prop(context.scene.audio2face, "anim_start_frame", text="")
        col.prop(context.scene.audio2face, "anim_load_to", text="Load To")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "anim_apply_scale", text="Apply Clip Scale")
        if context.scene.audio2face.anim_load_to == "CLIP":
            row.prop(context.scene.audio2face, "anim_overwrite")
        # Pick the clearest button label for the current mode/selection state.
        op_label = ("Please change to Object Mode" if not context.mode == "OBJECT"
                    else ("Import Animation Clip" if OMNI_OT_ImportAnimation.poll(context)
                          else "Please Select Target Mesh"))
        # Mirror the scene-level settings onto the operator's properties.
        op = col.operator(OMNI_OT_ImportAnimation.bl_idname, text=op_label)
        op.start_type = context.scene.audio2face.anim_start_type
        op.frame_rate = context.scene.audio2face.anim_frame_rate
        op.start_frame = context.scene.audio2face.anim_start_frame
        op.set_range = context.scene.audio2face.anim_set_range
        op.load_to = context.scene.audio2face.anim_load_to
        op.overwrite = context.scene.audio2face.anim_overwrite
        op.apply_scale = context.scene.audio2face.anim_apply_scale
| 6,105 | Python | 36.00606 | 119 | 0.702867 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/ui.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from typing import *
import bpy
from bpy.types import (Context, Object, Material, Scene)
from . particle_bake.operators import *
from . material_bake.background_bake import bgbake_ops
# from .material_bake_complex import OBJECT_OT_omni_material_bake
from os.path import join, dirname
import bpy.utils.previews
from .material_bake import baker
## ======================================================================
def get_icons_directory():
    """Return the absolute path of this add-on's bundled "icons" directory."""
    return join(dirname(__file__), "icons")
## ======================================================================
def _get_bake_types(scene: "Scene") -> str:
    """Build the comma-separated list of bake pass names enabled in the scene.

    NOTE: the original annotation claimed ``List[str]``, but the function has
    always returned a joined string -- the annotation is corrected to match.

    :param scene: Scene whose bake-selection properties are read.
    :return: Comma-joined Cycles pass names (e.g. ``"DIFFUSE,NORMAL"``);
        empty string when nothing is selected and "all maps" is off.
    """
    bake_all = scene.all_maps
    # (scene flag, Cycles bake pass name) pairs, preserved in UI order.
    standard_passes = [
        (scene.selected_col, "DIFFUSE"),
        (scene.selected_normal, "NORMAL"),
        (scene.selected_emission, "EMIT"),
        (scene.selected_specular, "GLOSSY"),
        (scene.selected_rough, "ROUGHNESS"),
        (scene.selected_trans, "TRANSMISSION"),
    ]
    result = [name for flag, name in standard_passes if flag or bake_all]
    ## special types
    if scene.omni_bake.bake_metallic or bake_all:
        result.append("METALLIC")
    return ",".join(result)
## ======================================================================
class OBJECT_PT_omni_panel(bpy.types.Panel):
    """Top-level Omniverse panel: particle conversion and material templates."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "NVIDIA Omniverse"
    bl_options = {"DEFAULT_CLOSED"}

    # NOTE(review): placeholder version string -- confirm where it is updated.
    version = "0.0.0"

    #retrieve icons
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')

    def draw_header(self, context):
        # Omniverse logo in the panel header.
        self.layout.label(text="", icon_value=self.icons["OMNI"].icon_id)

    def draw(self, context):
        """Draw particle-instancing controls and material conversion buttons."""
        layout = self.layout
        scene = context.scene

        # --------Particle Collection Instancing-------------------
        particleOptions = scene.particle_options
        particleCol = self.layout.column(align=True)
        particleCol.label(text="Omni Particles",
                          icon='PARTICLES')
        box = particleCol.box()
        column = box.column(align=True)
        column.prop(particleOptions, "deletePSystemAfterBake")
        row = column.row()
        row.prop(particleOptions, "animateData")
        if particleOptions.animateData:
            row = column.row(align=True)
            row.prop(particleOptions, "selectedStartFrame")
            row.prop(particleOptions, "selectedEndFrame")
            # Disabled row used purely as a warning label.
            row = column.row()
            row.enabled = False
            row.label(text="Increased Calculation Time", icon='ERROR')
        row = column.row()
        row.scale_y = 1.5
        row.operator('omni.hair_bake',
                     text='Convert',
                     icon='MOD_PARTICLE_INSTANCE')

        # Material conversion is only offered for an active, selected mesh.
        if len(bpy.context.selected_objects) != 0 and bpy.context.active_object != None:
            if bpy.context.active_object.select_get() and bpy.context.active_object.type == "MESH":
                layout.separator()
                column = layout.column(align=True)
                column.label(text="Convert Material to:", icon='SHADING_RENDERED')
                box = column.box()
                materialCol = box.column(align=True)
                materialCol.operator('universalmaterialmap.create_template_omnipbr',
                                     text='OmniPBR')
                materialCol.operator('universalmaterialmap.create_template_omniglass',
                                     text='OmniGlass')
## ======================================================================
class OBJECT_PT_omni_bake_panel(bpy.types.Panel):
    """Material-baking panel: pass selection, texture/UV options, bake button."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Material Baking"
    bl_options = {"DEFAULT_CLOSED"}

    # NOTE(review): placeholder version string -- confirm where it is updated.
    version = "0.0.0"

    #retrieve icons
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
    icons.load("BAKE",join(icons_directory, "Oven.png"), 'IMAGE')

    def draw_header(self, context):
        self.layout.label(text="", icon="UV_DATA")

    def draw(self, context):
        """Draw bake-pass checkboxes, texture/UV settings, and the bake button."""
        layout = self.layout
        scene = context.scene
        box = layout.box()

        #--------PBR Bake Settings-------------------
        row = box.row()
        # Swap the checkbox icon manually so "all maps" reads as a toggle.
        if scene.all_maps == True:
            row.prop(scene, "all_maps", icon = 'CHECKBOX_HLT')
        else:
            row.prop(scene, "all_maps", icon = 'CHECKBOX_DEHLT')

        column = box.column(align= True)
        row = column.row()
        row.prop(scene, "selected_col")
        row.prop(scene, "selected_normal")
        row = column.row()
        row.prop(scene, "selected_rough")
        row.prop(scene, "selected_specular", text="Gloss")
        row = column.row()
        row.prop(scene, "selected_trans")
        row.prop(scene, "selected_emission")
        row = column.row()
        row.label(text="Special Maps")
        row = column.row()
        row.prop(scene.omni_bake, "bake_metallic")
        row.label(text=" ")

        #--------Texture Settings-------------------
        row = box.row()
        row.label(text="Texture Resolution:")
        row.scale_y = 0.5
        row = box.row()
        row.prop(scene, "texture_res", expand=True)
        row.scale_y = 1
        if scene.texture_res == "8k" or scene.texture_res == "4k":
            # Disabled row used purely as a warning label.
            row = box.row()
            row.enabled = False
            row.label(text="Long Bake Times", icon= 'ERROR')

        #--------UV Settings-------------------
        column = box.column(align = True)
        row = column.row()
        row.prop(scene, "newUVoption")
        row.prop(scene, "unwrapmargin")

        #--------Other Settings-------------------
        column= box.column(align=True)
        row = column.row()
        if scene.bgbake == "fg":
            text = "Copy objects and apply bakes"
        else:
            text = "Copy objects and apply bakes (after import)"
        row.prop(scene, "prepmesh", text=text)
        if scene.prepmesh == True:
            if scene.bgbake == "fg":
                text = "Hide source objects after bake"
            else:
                text = "Hide source objects after bake (after import)"
            row = column.row()
            row.prop(scene, "hidesourceobjects", text=text)

        #-------------Buttons-------------------------
        row = box.row()
        try:
            row.prop(scene.cycles, "device", text="Device")
        except:
            # Cycles settings may be unavailable (e.g. add-on disabled).
            pass
        row = box.row()
        row.scale_y = 1.5
        op = row.operator("omni.bake_maps", icon_value=self.icons["BAKE"].icon_id)
        op.unwrap = scene.newUVoption
        op.bake_types = _get_bake_types(scene)
        op.merge_textures = scene.omni_bake.merge_textures
        op.hide_original = scene.hidesourceobjects
        # Resolution label -> square pixel size for both dimensions.
        op.width = op.height = {
            "0.5k": 512,
            "1k": 1024,
            "2k": 2048,
            "4k": 4096,
            "8k": 8192,
        }[scene.texture_res]

        # Explain, inline, why baking is currently unavailable (if it is).
        can_bake_poll, error_data = baker.omni_bake_maps_poll(context)
        can_bake_poll_result = {
            -1: f"Cannot bake objects in collection {baker.COLLECTION_NAME}",
            -2: f"Material cannot be baked:",
            -3: "Cycles Renderer Add-on not loaded!"
        }
        if can_bake_poll < 0:
            row = box.row()
            row.label(text=can_bake_poll_result[can_bake_poll], icon="ERROR")
            if can_bake_poll == -2:
                # error_data is [mesh_name, material_name] for this code path.
                mesh_name, material_name = error_data
                row = box.row()
                row.label(text=f"{material_name} on {mesh_name}")

        row = column.row()
        row.scale_y = 1
        ##!TODO: Restore background baking
        # row.prop(context.scene, "bgbake", expand=True)
        if scene.bgbake == "bg":
            row = column.row(align= True)

            # - BG status button
            col = row.column()
            if len(bgbake_ops.bgops_list) == 0:
                enable = False
                icon = "TIME"
            else:
                enable = True
                icon = "TIME"
            col.operator("object.omni_bake_bgbake_status", text="", icon=icon)
            col.enabled = enable

            # - BG import button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "IMPORT"
            else:
                enable = False
                icon = "IMPORT"
            col.operator("object.omni_bake_bgbake_import", text="", icon=icon)
            col.enabled = enable

            #BG erase button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "TRASH"
            else:
                enable = False
                icon = "TRASH"
            col.operator("object.omni_bake_bgbake_clear", text="", icon=icon)
            col.enabled = enable

            row.alignment = 'CENTER'
            row.label(text=f"Running {len(bgbake_ops.bgops_list)} | Finished {len(bgbake_ops.bgops_list_finished)}")
## ======================================================================
class OmniBakePreferences(bpy.types.AddonPreferences):
    """Add-on preferences: baked-image naming format and texture-name aliases."""
    # this must match the add-on name, use '__package__'
    # when defining this in a submodule of a python package.
    bl_idname = __package__

    # Template used to build baked image names; tokens are substituted per bake.
    img_name_format: bpy.props.StringProperty(name="Image format string",
                                              default="%OBJ%_%BATCH%_%BAKEMODE%_%BAKETYPE%")

    #Aliases
    diffuse_alias: bpy.props.StringProperty(name="Diffuse", default="diffuse")
    metal_alias: bpy.props.StringProperty(name="Metal", default="metalness")
    roughness_alias: bpy.props.StringProperty(name="Roughness", default="roughness")
    glossy_alias: bpy.props.StringProperty(name="Glossy", default="glossy")
    normal_alias: bpy.props.StringProperty(name="Normal", default="normal")
    transmission_alias: bpy.props.StringProperty(name="Transmission", default="transparency")
    transmissionrough_alias: bpy.props.StringProperty(name="Transmission Roughness", default="transparencyroughness")
    # Fixed UI-label typo: was "Clearcost".
    clearcoat_alias: bpy.props.StringProperty(name="Clearcoat", default="clearcoat")
    clearcoatrough_alias: bpy.props.StringProperty(name="Clearcoat Roughness", default="clearcoatroughness")
    emission_alias: bpy.props.StringProperty(name="Emission", default="emission")
    specular_alias: bpy.props.StringProperty(name="Specular", default="specular")
    alpha_alias: bpy.props.StringProperty(name="Alpha", default="alpha")
    sss_alias: bpy.props.StringProperty(name="SSS", default="sss")
    ssscol_alias: bpy.props.StringProperty(name="SSS Colour", default="ssscol")

    @classmethod
    def reset_img_string(cls):
        """Restore `img_name_format` to its default and save user preferences."""
        # `cls` (was misleadingly named `self`) is unused; the live preferences
        # instance is fetched from the current Blender context instead.
        prefs = bpy.context.preferences.addons[__package__].preferences
        prefs.property_unset("img_name_format")
        bpy.ops.wm.save_userpref()
| 12,271 | Python | 34.98827 | 117 | 0.557412 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/workflow/usd_kind.py | from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
## ======================================================================
## EnumProperty item tuples: (identifier, UI name, description).
## NOTE: this was previously a set literal, whose iteration order is
## arbitrary -- a list keeps the enum order stable and predictable in the UI.
usd_kind_items = [
    ('COMPONENT', 'component', 'kind: component'),
    ('GROUP', 'group', 'kind: group'),
    ('ASSEMBLY', 'assembly', 'kind: assembly'),
    ('CUSTOM', 'custom', 'kind: custom'),
]
## ======================================================================
def get_plural_count(items) -> Tuple[str, int]:
    """Return a pluralization suffix and the element count for *items*.

    NOTE: annotation corrected from the bare tuple ``(str, int)`` to the
    proper ``Tuple[str, int]`` form.

    :param items: Any sized container.
    :return: ("" when there is exactly one item, otherwise "s", count).
    """
    count = len(items)
    plural = '' if count == 1 else 's'
    return plural, count
## ======================================================================
class OBJECT_OT_omni_set_usd_kind(bpy.types.Operator):
    """Sets the USD Kind value on the selected objects."""
    bl_idname = "omni.set_usd_kind"
    bl_label = "Set USD Kind"
    bl_options = {"REGISTER", "UNDO"}

    # Kind to author; "CUSTOM" defers to `custom_kind`.
    kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items)
    custom_kind: StringProperty(default="")
    verbose: BoolProperty(default=False)

    @property ## read-only
    def value(self) -> str:
        """The kind string that will actually be authored."""
        return self.custom_kind if self.kind == "CUSTOM" else self.kind.lower()

    @classmethod
    def poll(cls, context:Context) -> bool:
        # Available only when at least one object is selected.
        return bool(len(context.selected_objects))

    def execute(self, context:Context) -> Set[str]:
        # NOTE(review): `kind` is an enum with no "NONE" item in
        # usd_kind_items, so this guard looks unreachable -- confirm.
        if self.kind == "NONE":
            self.report({"WARNING"}, "No kind specified-- nothing authored.")
            return {"CANCELLED"}

        for item in context.selected_objects:
            # Store the kind as an ID custom property named "usdkind",
            # updating its UI metadata (default + description) as well.
            props = item.id_properties_ensure()
            props["usdkind"] = self.value
            props_ui = item.id_properties_ui("usdkind")
            props_ui.update(default=self.value, description="USD Kind")

        if self.verbose:
            plural, count = get_plural_count(context.selected_objects)
            self.report({"INFO"}, f"Set USD Kind to {self.value} for {count} object{plural}.")

        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_set_usd_kind_auto(bpy.types.Operator):
    """Sets the USD Kind value on scene objects, automatically."""
    bl_idname = "omni.set_usd_kind_auto"
    bl_label = "Set USD Kind Auto"
    bl_options = {"REGISTER", "UNDO"}

    verbose: BoolProperty(default=False)

    def execute(self, context:Context) -> Set[str]:
        # NOTE(review): active/selected are captured but never restored after
        # the deselect below, so the user's selection is lost -- confirm intent.
        active = context.active_object
        selected = list(context.selected_objects)
        bpy.ops.object.select_all(action='DESELECT')

        ## heuristics
        ## First, assign "component" to all unparented empties
        unparented = [x for x in context.scene.collection.all_objects if not x.parent and x.type == "EMPTY"]
        for item in unparented:
            # Select one object at a time so omni.set_usd_kind affects only it.
            item.select_set(True)
            bpy.ops.omni.set_usd_kind(kind="COMPONENT")
            item.select_set(False)

        if self.verbose:
            plural, count = get_plural_count(unparented)
            self.report({"INFO"}, f"Set USD Kind Automatically on {count} object{plural}.")

        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_clear_usd_kind(bpy.types.Operator):
    """Clear USD Kind values on the selected objects."""
    bl_idname = "omni.clear_usd_kind"
    bl_label = "Clear USD Kind"
    bl_options = {"REGISTER", "UNDO"}

    verbose: BoolProperty(default=False)

    @classmethod
    def poll(cls, context:Context) -> bool:
        # Available only when at least one object is selected.
        return bool(len(context.selected_objects))

    def execute(self, context:Context) -> Set[str]:
        # Imported locally; rna_prop_ui is a Blender-bundled module.
        from rna_prop_ui import rna_idprop_ui_prop_update

        total = 0
        for item in context.selected_objects:
            if "usdkind" in item:
                # Refresh the property's UI metadata before deleting it.
                rna_idprop_ui_prop_update(item, "usdkind")
                del item["usdkind"]
                total += 1

        if self.verbose:
            plural, count = get_plural_count(range(total))
            self.report({"INFO"}, f"Cleared USD Kind from {count} object{plural}.")

        return {"FINISHED"}
## ======================================================================
class OBJECT_PT_omni_usd_kind_panel(bpy.types.Panel):
    """N-panel for authoring the "usdkind" custom property on objects."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "USD Kind"

    def draw(self, context:Context):
        """Draw the kind selector plus the set / clear / auto buttons."""
        layout = self.layout
        scene = context.scene
        layout.label(text="USD Kind")

        row = layout.row()
        row.prop(scene.omni_usd_kind, "kind", text="Kind")
        # The free-form field only applies when the enum is set to CUSTOM.
        if scene.omni_usd_kind.kind == "CUSTOM":
            row = layout.row()
            row.prop(scene.omni_usd_kind, "custom_kind", text="Custom Kind")

        # Each operator gets the panel's current settings; verbose=True so the
        # result is reported in the status bar.
        col = layout.column(align=True)
        op = col.operator(OBJECT_OT_omni_set_usd_kind.bl_idname, icon="PLUS")
        op.kind = scene.omni_usd_kind.kind
        op.custom_kind = scene.omni_usd_kind.custom_kind
        op.verbose = True

        op = col.operator(OBJECT_OT_omni_clear_usd_kind.bl_idname, icon="X")
        op.verbose = True

        op = col.operator(OBJECT_OT_omni_set_usd_kind_auto.bl_idname, icon="BRUSH_DATA")
        op.verbose = True
## ======================================================================
class USDKindProperites(bpy.types.PropertyGroup):
    """Scene-level storage for the USD Kind panel's current settings."""
    # NOTE(review): class name misspells "Properties"; it is referenced by name
    # in `classes` and `register`, so any rename must be coordinated there.
    kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items)
    custom_kind: StringProperty(default="")
## ======================================================================
## All classes registered/unregistered together; order is preserved
## (register iterates forward, unregister in reverse).
classes = [
    OBJECT_OT_omni_set_usd_kind,
    OBJECT_OT_omni_set_usd_kind_auto,
    OBJECT_OT_omni_clear_usd_kind,
    OBJECT_PT_omni_usd_kind_panel,
    USDKindProperites,
]
def unregister():
    """Unregister all add-on classes and remove the scene pointer property."""
    for registered in reversed(classes):
        try:
            bpy.utils.unregister_class(registered)
        except (ValueError, RuntimeError):
            ## Class was never registered (e.g. fresh install) -- skip it.
            continue
    try:
        del bpy.types.Scene.omni_usd_kind
    except AttributeError:
        ## Property was never attached -- nothing to remove.
        pass
def register():
    """Register every add-on class and attach the scene property group."""
    ## Clean up first so reloading the add-on never double-registers.
    unregister()
    for item in classes:
        bpy.utils.register_class(item)
    bpy.types.Scene.omni_usd_kind = bpy.props.PointerProperty(type=USDKindProperites)
| 5,618 | Python | 27.668367 | 102 | 0.620862 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/material_bake/baker.py | from tempfile import NamedTemporaryFile
from typing import *
import addon_utils
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
from omni_panel.material_bake import material_setup
## Name of the Collection that receives all baked duplicate meshes.
COLLECTION_NAME = "OmniBake_Bakes"
def get_material_output(tree:NodeTree, engine:str="CYCLES") -> Optional[Node]:
    """
    Locate the Material Output node targeting a given render engine.

    :param tree: The NodeTree to search.
    :param engine: Render engine name; one of CYCLES, EEVEE or ALL.
    :return: The first matching Material Output node, or None when absent.
    """
    supported_engines = {"CYCLES", "EEVEE", "ALL"}
    assert engine in supported_engines, f"Only the following engines are supported: {','.join(supported_engines)}"
    ## Nodes targeting "ALL" count for every engine.
    matches = [node for node in tree.nodes
               if node.type == "OUTPUT_MATERIAL" and node.target in {"ALL", engine}]
    return matches[0] if matches else None
def prepare_collection(scene:Scene) -> Collection:
    """
    Make sure the bake Collection exists and is linked into the scene.

    :param scene: The scene that should contain the bake Collection.
    :return: The bake Collection.
    """
    bake_collection = (bpy.data.collections.get(COLLECTION_NAME, None)
                       or bpy.data.collections.new(COLLECTION_NAME))
    ## Link it under the scene's master collection if it isn't there yet.
    if COLLECTION_NAME not in scene.collection.children:
        scene.collection.children.link(bake_collection)
    return bake_collection
def select_only(ob:Object):
    """
    Ensure that only the specified object is selected.
    :param ob: Object to select
    """
    bpy.ops.object.select_all(action="DESELECT")
    ob.select_set(state=True)
    # Selection alone does not change the active object; set it explicitly.
    bpy.context.view_layer.objects.active = ob
def smart_unwrap_object(ob:Object, name:str="OmniBake"):
    """
    Use Blenders built-in smart unwrap functionality to generate a new UV map.
    :param ob: Mesh Object to unwrap.
    :param name: Name of the UV layer that is (re)created.
    """
    bpy.ops.object.mode_set(mode="EDIT", toggle=False)

    # Unhide any geo that's hidden in edit mode or it'll cause issues.
    bpy.ops.mesh.reveal()
    bpy.ops.mesh.select_all(action="SELECT")
    bpy.ops.mesh.reveal()

    # Replace any previous bake UV layer of the same name.
    if name in ob.data.uv_layers:
        ob.data.uv_layers.remove(ob.data.uv_layers[name])

    uv_layer = ob.data.uv_layers.new(name=name)
    uv_layer.active = True

    # Project with no island margin, then return to object mode.
    bpy.ops.uv.select_all(action="SELECT")
    bpy.ops.uv.smart_project(island_margin=0.0)
    bpy.ops.object.mode_set(mode="OBJECT", toggle=False)
def prepare_mesh(ob:Object, collection: Collection, unwrap=False) -> Object:
    """
    Duplicate the specified Object, also duplicating all its materials.
    :param ob: The object to duplicate.
    :param collection: After duplication, the object will be inserted into this Collection
    :param unwrap: If True, also smart unwrap the object's UVs.
    :return: The newly created duplicate object.
    """
    assert not ob.name in collection.all_objects, f"{ob.name} is a baked mesh (cannot be used)"

    # Names are truncated to 56 chars before appending "_baked" -- presumably
    # to stay within Blender's name-length limit; TODO confirm. Any previous
    # bake result with the same name is removed first.
    new_mesh_name = ob.data.name[:56] + "_baked"
    if new_mesh_name in bpy.data.meshes:
        bpy.data.meshes.remove(bpy.data.meshes[new_mesh_name])

    new_mesh = ob.data.copy()
    new_mesh.name = new_mesh_name

    new_name = ob.name[:56] + "_baked"
    if new_name in bpy.data.objects:
        bpy.data.objects.remove(bpy.data.objects[new_name])

    new_object = bpy.data.objects.new(new_name, new_mesh)
    collection.objects.link(new_object)
    select_only(new_object)
    # Keep the duplicate exactly where the source object is.
    new_object.matrix_world = ob.matrix_world.copy()

    if unwrap:
        smart_unwrap_object(new_object)

    # Duplicate every material as well, replacing stale "_baked" copies.
    for index, material in enumerate([x.material for x in new_object.material_slots]):
        new_material_name = material.name[:56] + "_baked"
        if new_material_name in bpy.data.materials:
            bpy.data.materials.remove(bpy.data.materials[new_material_name])
        new_material = material.copy()
        new_material.name = new_material_name
        new_object.material_slots[index].material = new_material

    # Hide the source so only the baked duplicate is visible in the viewport.
    ob.hide_viewport = True
    return new_object
##!<--- TODO: Fix these
def find_node_from_label(label:str, nodes:List[Node]) -> Node:
    """Return the first node carrying *label*, or False when none matches.

    NOTE: callers compare the result with ``== False``, so the False sentinel
    (rather than None) is part of the contract and is preserved here.
    """
    return next((candidate for candidate in nodes if candidate.label == label), False)
def find_isocket_from_identifier(idname:str, node:Node) -> NodeSocket:
    """Return the input socket of *node* whose identifier is *idname*.

    Returns False (sentinel expected by callers) when no socket matches.
    """
    return next((sock for sock in node.inputs if sock.identifier == idname), False)
def find_osocket_from_identifier(idname, node):
    """Return the output socket of *node* whose identifier is *idname*.

    Returns False (sentinel expected by callers) when no socket matches.
    """
    return next((sock for sock in node.outputs if sock.identifier == idname), False)
def make_link(f_node_label, f_node_ident, to_node_label, to_node_ident, nodetree):
    """Connect two sockets in *nodetree*, locating nodes by label and
    sockets by identifier.

    :return: True when the link was created, False when either node is missing.
    """
    source_node = find_node_from_label(f_node_label, nodetree.nodes)
    if (source_node == False):
        return False
    source_socket = find_osocket_from_identifier(f_node_ident, source_node)

    dest_node = find_node_from_label(to_node_label, nodetree.nodes)
    if (dest_node == False):
        return False
    dest_socket = find_isocket_from_identifier(to_node_ident, dest_node)

    nodetree.links.new(source_socket, dest_socket)
    return True
## --->
## ======================================================================
##!TODO: Shader type identification and bake setup
def _nodes_for_type(node_tree:NodeTree, node_type:str) -> List[Node]:
    """Return all nodes of *node_type* that participate in at least one link."""
    candidates = [node for node in node_tree.nodes if node.type == node_type]
    ## skip unconnected nodes -- anything with no links at all is inactive
    linked = ({link.from_node for link in node_tree.links}
              | {link.to_node for link in node_tree.links})
    return [node for node in candidates if node in linked]
def output_nodes_for_engine(node_tree:NodeTree, engine:str) -> List[Node]:
    """Return all connected Material Output nodes in the tree."""
    # NOTE(review): `engine` is currently unused -- every connected output
    # node is returned regardless of its target engine. Confirm intent.
    nodes = _nodes_for_type(node_tree, "OUTPUT_MATERIAL")
    return nodes
def get_principled_nodes(node_tree:NodeTree) -> List[Node]:
    """Return all connected Principled BSDF nodes in the tree."""
    return _nodes_for_type(node_tree, "BSDF_PRINCIPLED")
def identify_shader_type(node_tree:NodeTree) -> str:
    """Classify the shader network feeding the material output (unfinished)."""
    # NOTE(review): this function is visibly incomplete -- the node lists
    # below are gathered but never used, and it falls through returning None
    # despite the `-> str` annotation. Left untouched pending the TODO above.
    principled_nodes = get_principled_nodes(node_tree)
    emission_nodes = _nodes_for_type(node_tree, "EMISSION")
    mix_nodes = _nodes_for_type(node_tree, "MIX_SHADER")
    outputs = output_nodes_for_engine(node_tree, "CYCLES")
    total_shader_nodes = principled_nodes + emission_nodes + mix_nodes
    ## first type: principled straight into the output

## ----------------------------------------------------------------------
def create_principled_setup(material:Material, images:Dict[str,Image]):
    """
    Creates a new shader setup in the tree of the specified
    material using the baked images, removing all old shader nodes.
    :param material: The material to change.
    :param images: The baked Images dictionary, name:Image pairs.
    """
    node_tree = material.node_tree
    nodes = node_tree.nodes
    material.cycles.displacement_method = 'BOTH'

    # Keep any existing Principled BSDF nodes; delete everything else.
    principled_nodes = get_principled_nodes(node_tree)
    for node in filter(lambda x: not x in principled_nodes, nodes):
        nodes.remove(node)

    # Node Frame
    # NOTE: the frame is later addressed as nodes["Frame"] -- presumably the
    # default name Blender assigns to a fresh NodeFrame; confirm.
    frame = nodes.new("NodeFrame")
    frame.location = (0, 0)
    frame.use_custom_color = True
    frame.color = (0.149763, 0.214035, 0.0590617)

    ## reuse the old BSDF if it exists to make sure the non-textured constant inputs are correct
    pnode = principled_nodes[0] if len(principled_nodes) else nodes.new("ShaderNodeBsdfPrincipled")
    pnode.location = (-25, 335)
    pnode.label = "pnode"
    pnode.use_custom_color = True
    pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467)
    pnode.parent = nodes["Frame"]

    # And the output node
    node = nodes.new("ShaderNodeOutputMaterial")
    node.location = (500, 200)
    node.label = "monode"
    node.show_options = False
    node.parent = nodes["Frame"]

    # Links are made by node label / socket identifier (see make_link).
    make_link("pnode", "BSDF", "monode", "Surface", node_tree)

    # -----------------------------------------------------------------
    # 'COMBINED', 'AO', 'SHADOW', 'POSITION', 'NORMAL', 'UV', 'ROUGHNESS',
    # 'EMIT', 'ENVIRONMENT', 'DIFFUSE', 'GLOSSY', 'TRANSMISSION'
    ## These are the currently supported types.
    ## More could be supported at a future date.

    # One hidden image-texture node per baked pass, wired to the matching
    # Principled BSDF input.
    if "DIFFUSE" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 250)
        node.label = "col_tex"
        node.image = images["DIFFUSE"]
        node.parent = nodes["Frame"]
        make_link("col_tex", "Color", "pnode", "Base Color", node_tree)

    if "METALLIC" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 140)
        node.label = "metallic_tex"
        node.image = images["METALLIC"]
        node.parent = nodes["Frame"]
        make_link("metallic_tex", "Color", "pnode", "Metallic", node_tree)

    if "GLOSSY" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 90)
        node.label = "specular_tex"
        node.image = images["GLOSSY"]
        node.parent = nodes["Frame"]
        make_link("specular_tex", "Color", "pnode", "Specular", node_tree)

    if "ROUGHNESS" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 50)
        node.label = "roughness_tex"
        node.image = images["ROUGHNESS"]
        node.parent = nodes["Frame"]
        make_link("roughness_tex", "Color", "pnode", "Roughness", node_tree)

    if "TRANSMISSION" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -90)
        node.label = "transmission_tex"
        node.image = images["TRANSMISSION"]
        node.parent = nodes["Frame"]
        make_link("transmission_tex", "Color", "pnode", "Transmission", node_tree)

    if "EMIT" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -170)
        node.label = "emission_tex"
        node.image = images["EMIT"]
        node.parent = nodes["Frame"]
        make_link("emission_tex", "Color", "pnode", "Emission", node_tree)

    if "NORMAL" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -318.7)
        node.label = "normal_tex"
        image = images["NORMAL"]
        node.image = image
        node.parent = nodes["Frame"]

        # Additional normal map node for normal socket
        node = nodes.new("ShaderNodeNormalMap")
        node.location = (-220, -240)
        node.label = "normalmap"
        node.show_options = False
        node.parent = nodes["Frame"]
        make_link("normal_tex", "Color", "normalmap", "Color", node_tree)
        make_link("normalmap", "Normal", "pnode", "Normal", node_tree)

    # -----------------------------------------------------------------
    ## wipe all labels
    for item in nodes:
        item.label = ""

    node = nodes["Frame"]
    node.label = "OMNI PBR"

    # Color data (diffuse/emission) stays sRGB; data maps must be Non-Color.
    for type, image in images.items():
        if type in {"DIFFUSE", "EMIT"}:
            image.colorspace_settings.name = "sRGB"
        else:
            image.colorspace_settings.name = "Non-Color"
## ======================================================================
def _selected_meshes(context:Context) -> List[Mesh]:
    """
    :return: All currently selected mesh objects in the active Blender scene.
    """
    return list(filter(lambda ob: ob.type == "MESH", context.selected_objects))
def _material_can_be_baked(material:Material) -> bool:
    """A material is bakeable when exactly one Cycles output exists and its
    Surface input is fed directly by a Principled BSDF node."""
    outputs = output_nodes_for_engine(material.node_tree, "CYCLES")
    if len(outputs) != 1:
        return False
    try:
        shader_node = outputs[0].inputs["Surface"].links[0].from_node
    except IndexError:
        ## Surface input is unconnected.
        return False
    ##!TODO: Support one level of mix with principled inputs
    return shader_node.type == "BSDF_PRINCIPLED"
def omni_bake_maps_poll(context:Context) -> (int, Any):
	"""
	Poll the scene for bake readiness.
	:return: 1 if we can bake
			0 if no meshes are selected
			-1 if any selected meshes are already in the bake collection
			-2 if mesh contains non-bakeable materials (second item is [mesh, material] names)
			-3 if Cycles renderer isn't loaded
	"""
	## Bail immediately when the Cycles renderer add-on is not enabled.
	_, cycles_loaded = addon_utils.check("cycles")
	if not cycles_loaded:
		return (-3, None)
	meshes = _selected_meshes(context)
	if not meshes:
		return (0, None)
	for candidate in meshes:
		for slot in candidate.material_slots:
			if not _material_can_be_baked(slot.material):
				return (-2, [candidate.name, slot.material.name])
	bake_collection = bpy.data.collections.get(COLLECTION_NAME, None)
	if bake_collection is None:
		## We have selected meshes but no collection-- early out
		return (1, None)
	if any(x.name in bake_collection.all_objects for x in meshes):
		return (-1, None)
	return (1, None)
## ======================================================================
class OmniBakerProperties(bpy.types.PropertyGroup):
	"""Scene-level settings consumed by the Omni bake operators."""

	## Whether the METALLIC special pass should be baked.
	bake_metallic: BoolProperty(name="Metallic", default=True)

	## Collapse all of an object's materials onto one texture per pass.
	merge_textures: BoolProperty(name="Merge Textures",
	                             description="Bake all materials for each object onto a single map",
	                             default=True)
## ======================================================================
class OBJECT_OT_omni_bake_maps(bpy.types.Operator):
	"""Bake specified passes on the selected Mesh object."""
	bl_idname = "omni.bake_maps"
	bl_label = "Bake Maps"
	bl_options = {"REGISTER", "UNDO"}

	## Passes Cycles can bake natively.
	base_bake_types = {
		##!TODO: Possibly support these at a later date?
		# "COMBINED", "AO", "SHADOW", "POSITION", "UV", "ENVIRONMENT",
		"DIFFUSE",
		"NORMAL",
		"EMIT",
		"GLOSSY",
		"ROUGHNESS",
		"TRANSMISSION",
	}
	## Passes faked by routing a Principled input through an emission shader.
	## Maps bake type -> Principled BSDF input name.
	special_bake_types = {
		"METALLIC": "Metallic",
	}

	unwrap: BoolProperty(default=False, description="Unwrap")
	hide_original: BoolProperty(default=False, description="Hide Original")
	width: IntProperty(default=1024, min=128, max=8192, description="Width")
	height: IntProperty(default=1024, min=128, max=8192, description="Height")
	## Comma-separated list of bake type tokens, validated in execute().
	bake_types: StringProperty(default="DIFFUSE")
	merge_textures: BoolProperty(default=True, description="Merge Textures")

	@classmethod
	def poll(cls, context:Context) -> bool:
		return omni_bake_maps_poll(context)[0] == 1

	def draw(self, context:Context):
		"""Empty draw to disable the Operator Props Panel."""
		pass

	def _get_bake_emission_target(self, node_tree:NodeTree) -> Node:
		"""Find or create the shared emission node used for 'special' pass baking."""
		bake_emission_name = "OmniBake_Emission"
		if not bake_emission_name in node_tree.nodes:
			node = node_tree.nodes.new("ShaderNodeEmission")
			node.name = bake_emission_name
			output = get_material_output(node_tree, "CYCLES")
			node.location = output.location + Vector((-200.0, -100.0))
		return node_tree.nodes[bake_emission_name]

	def _copy_connection(self, material:Material, bsdf:Node, bake_type:str, target_socket:NodeSocket) -> bool:
		"""
		Route the Principled input that backs a 'special' bake type into
		`target_socket` (the emission Color). A linked input is re-linked;
		an unlinked input has its constant default copied over.
		:return: True when a value or link was transferred.
		"""
		if not bake_type in self.special_bake_types:
			return False
		orig_socket = bsdf.inputs[self.special_bake_types[bake_type]]
		if not len(orig_socket.links):
			## copy over the constant value and return
			if orig_socket.type == "VALUE":
				## FIX: scalar inputs such as Metallic are socket type "VALUE";
				## the previous check for "VECTOR" never matched, so constant
				## metallic values were silently dropped (the method returned
				## False). Broadcast the float into RGB with opaque alpha.
				for index in range(3):
					target_socket.default_value[index] = orig_socket.default_value
				target_socket.default_value[3] = 1.0
			elif orig_socket.type in {"VECTOR", "RGBA"}:
				for index in range(3):
					target_socket.default_value[index] = orig_socket.default_value[index]
				target_socket.default_value[3] = 1.0
			else:
				## should never arrive here
				return False
		else:
			input_socket = orig_socket.links[0].from_socket
			material.node_tree.links.new(input_socket, target_socket)
		return True

	def _create_bake_texture_names(self, ob:Object, bake_types:List[str]) -> List[str]:
		"""
		Build the ordered list of image names, one per (material, bake_type)
		pair. With merge_textures on, all materials share one name per type,
		so the list contains repeats by design (indexed in bake order).
		"""
		result = []
		for material in [x.material for x in ob.material_slots]:
			material_name = material.name.rpartition('_baked')[0]
			for bake_type in bake_types:
				if self.merge_textures:
					image_name = f"{ob.name}__{bake_type}"
				else:
					image_name = f"{ob.name}_{material_name}_{bake_type}"
				result.append(image_name)
		return result

	def report(self, type:Set[str], message:str):
		"""Echo report messages to stdout as well (visible in headless runs)."""
		print(message)
		super(OBJECT_OT_omni_bake_maps, self).report(type, message)

	def execute(self, context:Context) -> Set[str]:
		"""Bake every requested pass for every selected mesh, then rebuild materials."""
		wm = context.window_manager
		scene = context.scene

		## Stash renderer state so it can be restored on every exit path.
		scene_engine = scene.render.engine
		scene.render.engine = "CYCLES"
		scene_use_clear = scene.render.bake.use_clear
		scene.render.bake.use_clear = False

		collection = prepare_collection(scene)
		all_bake_types = self.base_bake_types | self.special_bake_types.keys()
		valid_types_str = "Valid types are: " + ", ".join(all_bake_types)
		self.report({"INFO"}, f"Bake types: {self.bake_types}")

		## FIX: ''.split(',') yields [''], so the old empty check could never
		## trigger and also fell through without cancelling. Strip tokens and
		## filter empties, and restore renderer state before cancelling.
		bake_types = [x.strip() for x in self.bake_types.split(",") if x.strip()]
		if not len(bake_types):
			self.report({"ERROR"}, "No bake type specified. " + valid_types_str)
			scene.render.engine = scene_engine
			scene.render.bake.use_clear = scene_use_clear
			return {"CANCELLED"}
		for bake_type in bake_types:
			if not bake_type in all_bake_types:
				self.report({"ERROR"}, f"Bake type '{bake_type}' is not valid. " + valid_types_str)
				scene.render.engine = scene_engine
				scene.render.bake.use_clear = scene_use_clear
				return {"CANCELLED"}

		selected_meshes = _selected_meshes(context)
		count = 0
		total = 0
		for mesh in selected_meshes:
			count += len(mesh.material_slots) * len(bake_types)
		wm.progress_begin(total, count)

		bpy.ops.object.mode_set(mode="OBJECT")

		for mesh_object in _selected_meshes(context):
			mesh_object.hide_select = mesh_object.hide_render = mesh_object.hide_viewport = False
			baked_ob = prepare_mesh(mesh_object, collection, unwrap=self.unwrap)
			uv_layer = "OmniBake" if self.unwrap else baked_ob.data.uv_layers.active.name

			bpy.ops.object.select_all(action="DESELECT")
			baked_ob.select_set(True)
			context.view_layer.objects.active = baked_ob
			self.report({"INFO"}, f"Baking Object {baked_ob.name}")

			baked_materials = []
			## Because of merge_textures, we have to create the names now and clear them
			## before the whole bake process starts
			bake_image_names = self._create_bake_texture_names(baked_ob, bake_types)
			## if merge_textures is on there'll be some repeats
			for image_name in set(bake_image_names):
				if image_name in bpy.data.images:
					bpy.data.images.remove(bpy.data.images[image_name])
				image = bpy.data.images.new(image_name, self.width, self.height,
											float_buffer=(image_name.endswith(("NORMAL", "EMIT"))))
				## Color space is set per-pass right before each bake below.
				image.colorspace_settings.name = "Raw"
				if self.merge_textures:
					## NOTE(review): `bake_type` here is the stale variable from
					## the validation loop above; it only affects the temp file
					## prefix, but confirm intent before changing.
					temp_file = NamedTemporaryFile(prefix=bake_type, suffix=".png", delete=False)
					image.filepath = temp_file.name

			image_index = 0
			for material_index, material in enumerate([x.material for x in baked_ob.material_slots]):
				self.report({"INFO"}, f" => Material: {material.name}")
				tree = material.node_tree
				baked_ob.active_material_index = material_index
				for node in tree.nodes:
					node.select = False

				output = get_material_output(tree)
				bsdf = output.inputs["Surface"].links[0].from_node

				if "OmniBakeImage" in tree.nodes:
					tree.nodes.remove(tree.nodes["OmniBakeImage"])

				## The active image texture node is the bake target.
				bake_image_node = tree.nodes.new("ShaderNodeTexImage")
				bake_image_node.name = "OmniBakeImage"
				bake_image_node.location = output.location.copy()
				bake_image_node.location.x += 200.0
				bake_image_node.select = True
				tree.nodes.active = bake_image_node

				## for special cases
				bake_emission = self._get_bake_emission_target(tree)
				original_link = output.inputs["Surface"].links[0]
				original_from, original_to = original_link.from_socket, original_link.to_socket

				baked_images = {}
				for bake_type in bake_types:
					image_name = bake_image_names[image_index]
					image = bpy.data.images[image_name]
					bake_image_node.image = image.original if image.original else image
					self.report({"INFO"}, f"====> Baking {material.name} pass {bake_type}...")

					kwargs = {}
					if bake_type in {"DIFFUSE"}:
						## ensure no black due to bad direct / indirect lighting
						kwargs["pass_filter"] = {"COLOR"}
						scene.render.bake.use_pass_indirect = False
						scene.render.bake.use_pass_direct = False

					if bake_type in self.special_bake_types:
						## cheat by running the bake through emit after reconnecting
						real_bake_type = "EMIT"
						tree.links.new(bake_emission.outputs["Emission"], original_to)
						self._copy_connection(material, bsdf, bake_type, bake_emission.inputs["Color"])
					else:
						real_bake_type = bake_type
						tree.links.new(original_from, original_to)

					## have to do this every pass?
					if bake_type in {"DIFFUSE", "EMIT"}:
						image.colorspace_settings.name = "sRGB"
					else:
						image.colorspace_settings.name = "Non-Color"

					bpy.ops.object.bake(type=real_bake_type, width=self.width, height=self.height, uv_layer=uv_layer,
										use_clear=False, margin=1, **kwargs)

					if self.merge_textures:
						## I know this seems weird, but if you don't save the file here
						## post-bake when merging, the texture gets corrupted and you end
						## up with a texture that's taking up ram, but can't be loaded
						## for rendering (comes up pink in Cycles)
						image.save()

					self.report({"INFO"}, "... Done.")
					baked_images[bake_type] = image
					total += 1
					image_index += 1
					wm.progress_update(total)
					wm.update_tag()

				## Clean the temporary bake nodes and restore the original shader link.
				for node in bake_image_node, bake_emission:
					tree.nodes.remove(node)
				tree.links.new(original_from, original_to)
				baked_materials.append((material, baked_images))

			for material, images in baked_materials:
				## Perform conversion after all images are baked
				## If this is not done, then errors can arise despite not
				## replacing shader indices.
				create_principled_setup(material, images)

			for image in [bpy.data.images[x] for x in bake_image_names]:
				image.pack()

			## Set new UV map as active if it exists
			if "OmniBake" in baked_ob.data.uv_layers:
				baked_ob.data.uv_layers["OmniBake"].active_render = True

			if self.hide_original:
				mesh_object.hide_set(True)

		wm.progress_end()

		## Restore scene render state.
		scene.render.engine = scene_engine
		scene.render.bake.use_clear = scene_use_clear
		return {"FINISHED"}
## ======================================================================
## Classes registered by this module. register() walks this list in order
## and unregister() walks it reversed; OmniBakerProperties must be
## registered before register() creates the Scene.omni_bake PointerProperty.
module_classes = [
	OBJECT_OT_omni_bake_maps,
	OmniBakerProperties,
]
def register():
	"""Register the bake classes and attach the Scene-level settings pointer."""
	for item in module_classes:
		bpy.utils.register_class(item)
	bpy.types.Scene.omni_bake = bpy.props.PointerProperty(type=OmniBakerProperties)
def unregister():
	"""Unregister classes in reverse order, then drop the Scene pointer."""
	for item in reversed(module_classes):
		bpy.utils.unregister_class(item)
	try:
		del bpy.types.Scene.omni_bake
	except (AttributeError, RuntimeError):
		## Pointer was never attached or Blender already cleaned it up.
		pass
| 21,781 | Python | 30.659884 | 111 | 0.678573 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
## Add-on metadata consumed by Blender's add-on manager. Must remain a
## top-level dict literal named bl_info (Blender parses it statically).
bl_info = {
	"name": "Omni Scene Optimization Panel",
	"author": "Nvidia",
	"description": "",
	"blender": (3, 4, 0),       # minimum supported Blender version
	"version": (2, 0, 0),       # add-on version
	"location": "View3D > Toolbar > Omniverse",
	"warning": "",
	"category": "Omniverse"
}
from . import (operators, ui)
def register():
	"""Hook up the optimization operators first, then the panel UI that exposes them."""
	operators.register()
	ui.register()
def unregister():
	"""Tear down both submodules; mirrors register()."""
	operators.unregister()
	ui.unregister()
| 1,274 | Python | 27.333333 | 74 | 0.678964 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/operators.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import subprocess
import time
from typing import *
from importlib import reload
import bpy
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty)
from bpy.types import (Context, Event, Object, Modifier, NodeTree, Scene)
from mathutils import Vector
from .properties import (OmniSceneOptChopPropertiesMixin, chopProperties)
## ======================================================================
## (identifier, label, description) triples for the EnumProperty that picks
## the decimate symmetry axis.
symmetry_axis_items = [
	("X", "X", "X"),
	("Y", "Y", "Y"),
	("Z", "Z", "Z")
]

## Geometry kinds the Generate operator can produce.
generate_type_items = [
	("CONVEX_HULL", "Convex Hull", "Convex Hull"),
	("BOUNDING_BOX", "Bounding Box", "Bounding Box")
]

## Shared name for the temporary Generate modifier and its node group.
generate_name = "OmniSceneOptGenerate"
## ======================================================================
def selected_meshes(scene:Scene) -> List[Object]:
	"""Every selected mesh object reachable from the scene's master collection."""
	return [ob for ob in scene.collection.all_objects
			if ob.type == "MESH" and ob.select_get()]
def get_plural_count(items) -> (str, int):
	"""Return a pluralization suffix ('' or 's') and the item count, for report messages."""
	total = len(items)
	if total == 1:
		suffix = ''
	else:
		suffix = 's'
	return suffix, total
## ======================================================================
def preserve_selection(func, *args, **kwargs):
def wrapper(*args, **kwargs):
selection = [x.name for x in bpy.context.selected_objects]
active = bpy.context.active_object.name if bpy.context.active_object else None
result = func(*args, **kwargs)
scene_objects = bpy.context.scene.objects
to_select = [ scene_objects[x] for x in selection if x in scene_objects ]
if active:
active = scene_objects[active] if active in scene_objects else (to_select[-1] if len(to_select) else None)
bpy.ops.object.select_all(action="DESELECT")
for item in to_select:
item.select_set(True)
bpy.context.view_layer.objects.active = active
return result
return wrapper
## ======================================================================
class OmniSceneOptPropertiesMixin:
	"""
	Blender Properties that are shared between the in-scene preferences pointer
	and the various operators.
	"""
	## General behavior toggles.
	verbose: BoolProperty(name="Verbose",
						  description="Print information while running",
						  default=False)
	selected: BoolProperty(name="Selected",
						   description="Run on Selected Objects (if False, run on whole Scene)",
						   default=False)

	## export options
	export_textures: BoolProperty(name="Export Textures",
								  description="Export textures when doing a background export",
								  default=True)

	## these are deliberate copies from ui.OmniYes.Properties
	validate: BoolProperty(name="Validate Meshes",
						   description="Attempt to remove invalid geometry",
						   default=True)

	## Weld pass: merge vertices closer than weld_distance.
	weld: BoolProperty(name="Weld Verts",
					   description="Weld loose vertices",
					   default=False)
	weld_distance: FloatProperty(name="Weld Distance",
								 description="Distance threshold for welds",
								 default=0.0001,
								 min=0.00001,
								 step=0.00001)

	## UV pass: Smart-Project unwrap with island margin.
	unwrap: BoolProperty(name="Unwrap Mesh UVs",
						 description="Use the Smart Unwrap feature to add new UVs",
						 default=False)
	unwrap_margin: FloatProperty(name="Margin",
								 description="Distance between UV islands",
								 default=0.00,
								 min=0.0,
								 step=0.01)

	## Decimate pass: collapse-style polygon reduction via the Decimate modifier.
	decimate: BoolProperty(name="Decimate",
						   description="Reduce polygon and vertex counts on meshes",
						   default=False)
	decimate_ratio: IntProperty(name="Ratio",
								subtype="PERCENTAGE",
								description="Reduce face count to this percentage of original",
								default=50,
								min=10, max=100,
								step=5)
	decimate_use_symmetry: BoolProperty(name="Use Symmetry",
										description="Decimate with Symmetry across an axis",
										default=False)
	decimate_symmetry_axis: EnumProperty(name="Symmetry Axis",
										 description="Axis for symmetry",
										 items=symmetry_axis_items,
										 default="X")
	decimate_min_face_count: IntProperty(name="Minimum Face Count",
										 description="Do not decimate objects with less faces",
										 default=500,
										 min=100,
										 step=10)
	decimate_remove_shape_keys: BoolProperty(name="Remove Shape Keys",
											 description="Remove shape keys to allow meshes with shapes to be decimated",
											 default=False)

	## Chop pass: spatial subdivision of large meshes (see scripts.chop).
	chop: BoolProperty(name="Chop Meshes",
					   description="Physically divide meshes based on size and point count",
					   default=False)

	## Generate pass: convex hull / bounding box proxies.
	generate: BoolProperty(name="Generate",
						   description="Generate convex hulls or bounding boxes",
						   default=False)

	## Export-time option.
	merge: BoolProperty(name="Merge Selected",
						description="On Export, merge selected meshes into a single object",
						default=False)
## ======================================================================
class OmniSceneOptGeneratePropertiesMixin:
	"""Properties shared by the Generate operator and the optimize/export entry points."""

	## When enabled, the generated hull/box is a new object; otherwise the
	## original mesh is replaced in place.
	generate_duplicate: BoolProperty(name="Create Duplicate",
	                                 description="Generate a new object instead of replacing the original",
	                                 default=True)

	## Which geometry-nodes processor to apply.
	generate_type: EnumProperty(name="Generate Type",
	                            description="Type of geometry to generate",
	                            items=generate_type_items,
	                            default="CONVEX_HULL")
## ======================================================================
"""
This is a weird one.
The decimate modifier was failing on multiple objects in order, but
wrapping it in an Operator seems to fix the issues with making sure
the correct things are selected in the Context.
"""
class OBJECT_OT_omni_sceneopt_decimate(bpy.types.Operator, OmniSceneOptPropertiesMixin):
	"""Decimates the selected object using the Decimation modifier."""
	bl_idname = "omni_sceneopt.decimate"
	bl_label = "Omni Scene Optimization: Decimate"
	bl_options = {"REGISTER", "UNDO"}

	ratio: IntProperty(name="Ratio",
					   subtype="PERCENTAGE",
					   description="Reduce face count to this percentage of original",
					   default=50,
					   min=10, max=100,
					   step=5)
	use_symmetry: BoolProperty(name="Use Symmetry",
							   description="Decimate with Symmetry across an axis",
							   default=True)
	symmetry_axis: EnumProperty(name="Symmetry Axis",
								description="Axis for symmetry",
								items=symmetry_axis_items,
								default="X")
	min_face_count: IntProperty(name="Minimum Face Count",
								description="Do not decimate objects with less faces",
								default=500,
								min=100,
								step=10)

	@classmethod
	def poll(cls, context:Context) -> bool:
		## Needs an active object to operate on.
		return bool(context.active_object)

	def execute(self, context:Context) -> Set[str]:
		"""Decimate the active object in place (no duplicate is created)."""
		from .batch import lod
		## FIX: the return value was bound to an unused local; the operator
		## always reports FINISHED regardless, so drop the dead assignment.
		lod.decimate_object(context.active_object,
							ratio=self.ratio / 100.0,
							use_symmetry=self.use_symmetry,
							symmetry_axis=self.symmetry_axis,
							min_face_count=self.min_face_count,
							create_duplicate=False)
		return {"FINISHED"}
## ======================================================================
class OmniOverrideMixin:
	"""Helpers for building bpy temp_override() contexts around explicit object lists."""

	def set_active(self, ob:Object):
		"""Make `ob` the active object, tolerating objects that cannot be activated."""
		try:
			bpy.context.view_layer.objects.active = ob
		except RuntimeError as e:
			## FIX: the original f-string was missing its closing parenthesis.
			print(f"-- unable to set active: {ob.name} ({e})")

	def override(self, objects:List[Object], single=False):
		"""
		Context manager: a temp_override whose selection/visibility lists are
		exactly `objects` (first item active). `single` trims to one object.
		"""
		assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple"
		assert len(objects), "'objects' cannot be empty"

		## filter out objects not in current view layer
		objects = list(filter(lambda x: x.name in bpy.context.view_layer.objects, objects))

		if single:
			objects = objects[0:1]

		override = {
			'active_object': objects[0],
			'edit_object': None,
			'editable_objects': objects,
			'object': objects[0],
			'objects_in_mode': [],
			'objects_in_mode_unique_data': [],
			'selectable_objects': objects,
			'selected_editable_objects': objects,
			'selected_objects': objects,
			'visible_objects': objects,
		}

		self.set_active(objects[0])
		return bpy.context.temp_override(**override)

	def edit_override(self, objects:List[Object], single=False):
		"""
		Same as override(), but the first object is also presented as the
		edit object / objects-in-mode so edit-mode operators can run.
		"""
		assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple"
		assert len(objects), "'objects' cannot be empty"

		if single:
			objects = objects[0:1]

		override = {
			'active_object': objects[0],
			'edit_object': objects[0],
			'editable_objects': objects,
			'object': objects[0],
			'objects_in_mode': objects,
			'objects_in_mode_unique_data': objects,
			'selectable_objects': objects,
			'selected_editable_objects': objects,
			'selected_objects': objects,
			'visible_objects': objects,
		}

		self.set_active(objects[0])
		return bpy.context.temp_override(**override)
## ======================================================================
class OBJECT_OT_omni_sceneopt_optimize(bpy.types.Operator,
									   OmniSceneOptPropertiesMixin,
									   OmniSceneOptChopPropertiesMixin,
									   OmniSceneOptGeneratePropertiesMixin,
									   OmniOverrideMixin):
	"""Run specified optimizations on the scene or on selected objects."""
	bl_idname = "omni_sceneopt.optimize"
	bl_label = "Omni Scene Optimization: Optimize Scene"
	bl_options = {"REGISTER", "UNDO"}

	# def draw(self, context:Context):
	# 	"""Empty draw to disable the Operator Props Panel."""
	# 	pass

	def _object_mode(self):
		"""Ensure Blender is in Object mode."""
		if not bpy.context.mode == "OBJECT":
			bpy.ops.object.mode_set(mode="OBJECT")

	def _edit_mode(self):
		"""Ensure Blender is in mesh Edit mode."""
		if not bpy.context.mode == "EDIT_MESH":
			bpy.ops.object.mode_set(mode="EDIT")

	@staticmethod
	def _remove_shape_keys(ob:Object):
		"""Strip all shape keys from a mesh object so it can be decimated."""
		assert ob.type == "MESH", "Cannot be run on non-Mesh Objects."
		## Reversed because we want to remove Basis last, or we will end up
		## with garbage baked in.
		for key in reversed(ob.data.shape_keys.key_blocks):
			ob.shape_key_remove(key)

	@staticmethod
	def _select_one(ob:Object):
		"""Make `ob` the sole selected and active object."""
		bpy.ops.object.select_all(action="DESELECT")
		ob.select_set(True)
		bpy.context.view_layer.objects.active = ob

	@staticmethod
	def _select_objects(objects:List[Object]):
		"""Select all `objects`; the last becomes active."""
		bpy.ops.object.select_all(action="DESELECT")
		for item in objects:
			item.select_set(True)
		bpy.context.view_layer.objects.active = objects[-1]

	@staticmethod
	def _get_evaluated(objects:List[Object]) -> List[Object]:
		"""Originals of the depsgraph-evaluated copies of `objects`."""
		deps = bpy.context.evaluated_depsgraph_get()
		return [x.evaluated_get(deps).original for x in objects]

	@staticmethod
	def _total_vertex_count(target_objects:List[Object]):
		"""Sum of post-modifier (evaluated) vertex counts across `target_objects`."""
		deps = bpy.context.evaluated_depsgraph_get()
		eval_objs = [x.evaluated_get(deps) for x in target_objects]
		return sum([len(x.data.vertices) for x in eval_objs])

	def do_validate(self, target_objects:List[Object]) -> List[Object]:
		"""Expects to be run in Edit Mode with all meshes selected"""
		## (Removed two unused vertex-count locals; the report below never used them.)
		bpy.ops.mesh.select_all(action="SELECT")
		bpy.ops.mesh.dissolve_degenerate()
		if self.verbose:
			plural, obj_count = get_plural_count(target_objects)
			message = f"Validated {obj_count} object{plural}."
			self.report({"INFO"}, message)
		return target_objects

	def do_weld(self, target_objects:List[Object]) -> List[Object]:
		"""Expects to be run in Edit Mode with all meshes selected"""
		bpy.ops.mesh.remove_doubles(threshold=self.weld_distance, use_unselected=True)
		bpy.ops.mesh.normals_make_consistent(inside=False)
		return target_objects

	def do_unwrap(self, target_objects:List[Object]) -> List[Object]:
		"""Smart-Project unwrap each object individually (object-by-object edit mode)."""
		bpy.ops.object.select_all(action="DESELECT")
		start = time.time()
		for item in target_objects:
			with self.edit_override([item]):
				bpy.ops.object.mode_set(mode="EDIT")
				bpy.ops.mesh.select_all(action="SELECT")
				## NOTE(review): unwrap_margin is not applied here; the
				## pack_islands call that would use it is commented out — confirm intent.
				bpy.ops.uv.smart_project(island_margin=0.0)
				bpy.ops.uv.select_all(action="SELECT")
				# bpy.ops.uv.average_islands_scale()
				# bpy.ops.uv.pack_islands(margin=self.unwrap_margin)
				bpy.ops.object.mode_set(mode="OBJECT")
		end = time.time()
		if self.verbose:
			plural, obj_count = get_plural_count(target_objects)
			message = f"Unwrapped {obj_count} object{plural} ({end-start:.02f} seconds)."
			self.report({"INFO"}, message)
		return target_objects

	def do_decimate(self, target_objects:List[Object]) -> List[Object]:
		"""Collapse-decimate each object via a temporary Decimate modifier."""
		assert bpy.context.mode == "OBJECT", "Decimate must be run in object mode."
		total_orig = self._total_vertex_count(target_objects)
		total_result = 0
		start = time.time()
		for item in target_objects:
			if item.data.shape_keys and len(item.data.shape_keys.key_blocks):
				if not self.decimate_remove_shape_keys:
					self.report({"WARNING"}, f"[ Decimate ] Skipping {item.name} because it has shape keys.")
					continue
				else:
					self._remove_shape_keys(item)
			if len(item.data.polygons) < self.decimate_min_face_count:
				self.report({"INFO"}, f"{item.name} is under face count-- not decimating.")
				continue
			## We're going to use the decimate modifier
			mod = item.modifiers.new("OmniLOD", type="DECIMATE")
			mod.decimate_type = "COLLAPSE"
			mod.ratio = self.decimate_ratio / 100.0
			mod.use_collapse_triangulate = True
			mod.use_symmetry = self.decimate_use_symmetry
			mod.symmetry_axis = self.decimate_symmetry_axis
			## we don't need a full context override here
			self.set_active(item)
			bpy.ops.object.modifier_apply(modifier=mod.name)
			total_result += len(item.data.vertices)
		end = time.time()
		if self.verbose:
			plural, obj_count = get_plural_count(target_objects)
			message = f"Decimated {obj_count} object{plural}. Vertex count original {total_orig} to {total_result} ({end-start:.02f} seconds)."
			self.report({"INFO"}, message)
		return target_objects

	def do_chop(self, target_objects:List[Object]):
		"""
		Assumes all objects are selected and that we are in Object mode
		"""
		assert bpy.context.mode == "OBJECT", "Chop must be run in object mode."
		scene = bpy.context.scene
		attributes = scene.omni_sceneopt_chop.attributes()
		attributes["selected_only"] = self.selected
		bpy.ops.omni_sceneopt.chop(**attributes)
		return target_objects

	def do_generate(self, target_objects:List[Object]):
		"""Delegate hull/box generation to the Generate operator under an override."""
		with self.override(target_objects):
			bpy.ops.omni_sceneopt.generate(generate_type=self.generate_type,
										   generate_duplicate=self.generate_duplicate)
		return target_objects

	def execute(self, context:Context) -> Set[str]:
		"""Run the enabled passes in order: validate/weld, unwrap, decimate, chop, generate."""
		start = time.time()
		active = context.active_object
		if self.selected:
			targets = selected_meshes(context.scene)
		else:
			targets = [x for x in context.scene.collection.all_objects if x.type == "MESH"]

		bpy.ops.object.select_all(action="DESELECT")
		for item in targets:
			item.select_set(True)
		if active:
			self.set_active(active)

		if not len(targets):
			## FIX: was self.info(...), which is not an Operator method and
			## raised AttributeError instead of reporting the error.
			self.report({"ERROR"}, "No targets specified.")
			return {"CANCELLED"}

		self._object_mode()

		## Have to do vertex counts outside edit mode!
		total_orig = self._total_vertex_count(targets)

		if self.validate or self.weld:
			with self.edit_override(targets):
				bpy.ops.object.mode_set(mode="EDIT")
				## We can run these two operations together because they don't collide
				## or cause issues between each other.
				if self.validate:
					self.do_validate(targets)
				if self.weld:
					self.do_weld(targets)

		## Unfortunately, the rest are object-by-object operations
		self._object_mode()

		total_result = self._total_vertex_count(targets)
		if self.verbose and self.weld:
			plural, obj_count = get_plural_count(targets)
			message = f"Welded {obj_count} object{plural}. Vertex count original {total_orig} to {total_result}."
			self.report({"INFO"}, message)

		if self.unwrap:
			self.do_unwrap(targets)
		if self.decimate:
			self.do_decimate(targets)
		if self.chop:
			self.do_chop(targets)
		if self.generate:
			self.do_generate(targets)

		end = time.time()
		if self.verbose:
			self.report({"INFO"}, f"Optimization complete-- process took {end-start:.02f} seconds")

		return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_chop(bpy.types.Operator, OmniSceneOptChopPropertiesMixin):
	"""Chop the specified object into a grid of smaller ones"""
	bl_idname = "omni_sceneopt.chop"
	bl_label = "Omni Scene Optimizer: Chop"
	bl_options = {"REGISTER", "UNDO"}

	# def draw(self, context:Context):
	# 	"""Empty draw to disable the Operator Props Panel."""
	# 	pass

	def execute(self, context:Context) -> Set[str]:
		"""
		Run the Chop process driven by this operator's property values.

		FIX: the original built a local `attributes` dict by hand and then
		ignored it, passing self.attributes() (from the mixin) instead; the
		dead dict has been removed.
		"""
		from .scripts.chop import Chop
		chopper = Chop()
		chopper.execute(self.attributes())
		return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_generate(bpy.types.Operator, OmniSceneOptGeneratePropertiesMixin, OmniOverrideMixin):
	"""Generate geometry based on selected objects. Currently supported: Bounding Box, Convex Hull"""
	bl_idname = "omni_sceneopt.generate"
	bl_label = "Omni Scene Optimizer: Generate"
	bl_options = {"REGISTER", "UNDO"}

	# def draw(self, context:Context):
	# 	"""Empty draw to disable the Operator Props Panel."""
	# 	pass

	def create_geometry_nodes_group(self, group:NodeTree):
		"""Wire a convex-hull or bounding-box processor into `group` and return the shared group."""
		node_type = {
			"CONVEX_HULL": "GeometryNodeConvexHull",
			"BOUNDING_BOX": "GeometryNodeBoundBox",
		}[self.generate_type]
		geometry_input = group.nodes["Group Input"]
		geometry_input.location = Vector((-1.5 * geometry_input.width, 0))
		group_output = group.nodes["Group Output"]
		group_output.location = Vector((1.5 * group_output.width, 0))
		node = group.nodes.new(node_type)
		node.name = "Processor"
		group.links.new(geometry_input.outputs['Geometry'], node.inputs['Geometry'])
		group.links.new(node.outputs[0], group_output.inputs['Geometry'])
		return bpy.data.node_groups[generate_name]

	def create_geometry_nodes_modifier(self, ob:Object) -> Modifier:
		"""Attach a fresh Geometry Nodes modifier (and shared node group) to `ob`."""
		## Clear any stale modifier / node group from a previous run.
		if generate_name in ob.modifiers:
			ob.modifiers.remove(ob.modifiers[generate_name])
		if generate_name in bpy.data.node_groups:
			bpy.data.node_groups.remove(bpy.data.node_groups[generate_name])
		mod = ob.modifiers.new(name=generate_name, type="NODES")
		bpy.ops.node.new_geometry_node_group_assign()
		mod.node_group.name = generate_name
		self.create_geometry_nodes_group(mod.node_group)
		return mod

	def create_duplicate(self, ob:Object, token:str) -> Object:
		"""Duplicate `ob` with `token` appended to its name (no welding)."""
		from .batch import lod
		duplicate = lod.duplicate_object(ob, token, weld=False)
		return duplicate

	@preserve_selection
	def apply_modifiers(self, target_objects:List[Object]):
		"""Apply the generate modifier to each target (or a duplicate). Returns the number processed."""
		count = 0
		for item in target_objects:
			if self.generate_duplicate:
				token = self.generate_type.rpartition("_")[-1]
				duplicate = self.create_duplicate(item, token=token)
				duplicate.parent = item.parent
				duplicate.matrix_world = item.matrix_world.copy()
				## Move the duplicate out of the scene root into the source's collections.
				bpy.context.scene.collection.objects.unlink(duplicate)
				for collection in item.users_collection:
					collection.objects.link(duplicate)
				item = duplicate
			with self.override([item]):
				mod = self.create_geometry_nodes_modifier(item)
				bpy.context.view_layer.objects.active = item
				item.select_set(True)
				bpy.ops.object.modifier_apply(modifier=mod.name)
				count += 1
		## FIX: the count was computed but never returned, so execute()'s
		## cleanup branch could never run.
		return count

	def execute(self, context:Context) -> Set[str]:
		changed = self.apply_modifiers(context.selected_objects)
		if changed:
			## FIX: cleanup previously indexed a node group name that is never
			## created ("OMNI_SCENEOPT_GENERATE"); remove the real shared
			## group (generate_name), guarded in case it was already applied away.
			group = bpy.data.node_groups.get(generate_name, None)
			if group is not None:
				bpy.data.node_groups.remove(group)
		return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_progress(bpy.types.Operator):
	"""Modal operator showing a wait cursor until Scene.omni_progress_active is cleared."""
	bl_idname = "omni.progress"
	bl_label = "Export Optimized USD"
	bl_options = {"REGISTER", "UNDO"}

	message: StringProperty(name="message",
							description="Message to print upon completion.",
							default="")

	## Modal timer handle; created in invoke(), released in modal().
	_timer = None

	def modal(self, context:Context, event:Event) -> Set[str]:
		if context.scene.omni_progress_active is False:
			## FIX: the timer was never removed, leaking it and leaving the
			## event loop firing; also restore the cursor on completion.
			if self._timer is not None:
				context.window_manager.event_timer_remove(self._timer)
				self._timer = None
			context.window.cursor_set("DEFAULT")
			message = self.message.strip()
			if len(message):
				self.report({"INFO"}, message)
			return {"FINISHED"}
		context.area.tag_redraw()
		context.window.cursor_set("WAIT")
		return {"RUNNING_MODAL"}

	def invoke(self, context:Context, event:Event) -> Set[str]:
		"""Flag the scene busy and start polling on a 0.1s timer."""
		context.scene.omni_progress_active = True
		self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
		context.window_manager.modal_handler_add(self)
		context.window.cursor_set("WAIT")
		return {"RUNNING_MODAL"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_export(bpy.types.Operator,
									 OmniSceneOptPropertiesMixin,
									 OmniSceneOptChopPropertiesMixin,
									 OmniSceneOptGeneratePropertiesMixin):
	"""Runs specified optimizations on the scene before running a USD Export"""
	bl_idname = "omni_sceneopt.export"
	bl_label = "Export USD"
	bl_options = {"REGISTER", "UNDO"}

	filepath: StringProperty(subtype="FILE_PATH")
	filter_glob: StringProperty(default="*.usd;*.usda;*.usdc", options={"HIDDEN"})
	check_existing: BoolProperty(default=True, options={"HIDDEN"})

	def draw(self, context:Context):
		"""Empty draw to disable the Operator Props Panel."""
		pass

	def invoke(self, context:Context, event:Event) -> Set[str]:
		"""Open the file browser, defaulting to untitled.usdc."""
		if len(self.filepath.strip()) == 0:
			self.filepath = "untitled.usdc"
		context.window_manager.fileselect_add(self)
		return {"RUNNING_MODAL"}

	def execute(self, context:Context) -> Set[str]:
		"""Save the current file, then run the optimize+export script in a background Blender."""
		output_path = bpy.path.abspath(self.filepath)
		## Use os.path.join for platform-correct path construction.
		script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
								   "batch", "optimize_export.py")
		bpy.ops.omni.progress(message=f"Finished background write to {output_path}")
		bpy.ops.wm.save_mainfile()
		## FIX: build an argument list and run without a shell. The previous
		## hand-quoted string with shell=True broke on paths containing quotes
		## and was vulnerable to shell injection via file names.
		command = [
			bpy.app.binary_path,
			"--background",
			bpy.data.filepath,
			"--python",
			script_path,
			"--",
			output_path,
		]
		print(command)
		subprocess.check_output(command, shell=False)
		context.scene.omni_progress_active = False
		if self.verbose:
			self.report({"INFO"}, f"Exported optimized scene to: {output_path}")
		return {"FINISHED"}
## ======================================================================
## Classes registered by this module. Order matters: chopProperties must be
## registered before register() creates the Scene.omni_sceneopt_chop
## PointerProperty that references it; unregister() walks this list reversed.
classes = [
	OBJECT_OT_omni_sceneopt_decimate,
	OBJECT_OT_omni_sceneopt_chop,
	OBJECT_OT_omni_sceneopt_generate,
	OBJECT_OT_omni_sceneopt_optimize,
	OBJECT_OT_omni_progress,
	OBJECT_OT_omni_sceneopt_export,
	chopProperties
]
def unregister():
	"""Remove the Scene-level properties and unregister all classes (idempotent)."""
	for prop_name in ("omni_sceneopt_chop", "omni_progress_active"):
		try:
			delattr(bpy.types.Scene, prop_name)
		except AttributeError:
			## already removed (e.g. add-on reload) -- nothing to do
			pass

	for cls in reversed(classes):
		try:
			bpy.utils.unregister_class(cls)
		except (ValueError, AttributeError, RuntimeError):
			## class was never registered or is already gone; keep going
			pass
def register():
	"""Register all add-on classes, then attach the Scene-level settings."""
	for item in classes:
		bpy.utils.register_class(item)

	scene_type = bpy.types.Scene
	scene_type.omni_sceneopt_chop = bpy.props.PointerProperty(type=chopProperties)
	scene_type.omni_progress_active = bpy.props.BoolProperty(default=False)
| 23,131 | Python | 30.687671 | 134 | 0.67001 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/panel.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.types import Panel
from os.path import join, dirname
import bpy.utils.previews
#---------------Custom ICONs----------------------
def get_icons_directory():
	"""Return the path of this add-on's bundled ``icons`` folder."""
	return join(dirname(__file__), "icons")
class OPTIMIZE_PT_Panel(Panel):
	"""3D-View N-panel ("Omniverse" tab) that launches the optimize.scene
	operator and exposes the settings for the chosen operation."""
	bl_space_type = "VIEW_3D"
	bl_region_type = "UI"
	bl_label = "OPTIMIZE SCENE"
	bl_category = "Omniverse"

	# retrieve icons -- loaded once, at class-definition time
	icons = bpy.utils.previews.new()
	icons_directory = get_icons_directory()
	icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
	icons.load("GEAR", join(icons_directory, "gear.png"), 'IMAGE')

	def draw(self, context):
		"""Draw the launch button, the operation selector, and the property
		set matching the currently-selected operation."""
		layout = self.layout
		layout.label(text="Omniverse", icon_value=self.icons["OMNI"].icon_id)

		## property groups this add-on registers on the Scene
		optimizeOptions = context.scene.optimize_options
		modifyOptions = context.scene.modify_options
		uvOptions = context.scene.uv_options
		chopOptions = context.scene.chop_options

		# OPERATOR SETTINGS
		box = layout.box()
		col = box.column(align= True)
		row = col.row(align=True)
		row.scale_y = 1.5
		row.operator("optimize.scene", text = "Optimize Scene", icon_value=self.icons["GEAR"].icon_id)
		col.separator()
		row2 = col.row(align=True)
		row2.scale_y = 1.3
		row2.prop(optimizeOptions, "operation", text="Operation")
		col.separator()
		col.prop(optimizeOptions, "print_attributes", expand= True)

		box2 = layout.box()
		box2.label(text= "OPERATION PROPERTIES:")
		col2 = box2.column(align= True)

		# MODIFY SETTINGS
		if optimizeOptions.operation == 'modify':
			row = col2.row(align= True)
			row.prop(modifyOptions, "modifier", text="Modifier")
			row2 = col2.row(align= True)
			row3 = col2.row(align= True)
			#DECIMATE -- parameter row depends on the decimation sub-type
			if modifyOptions.modifier == 'DECIMATE':
				row2.prop(modifyOptions, "decimate_type", expand= True)
				if modifyOptions.decimate_type == 'COLLAPSE':
					row3.prop(modifyOptions, "ratio", expand= True)
				elif modifyOptions.decimate_type == 'UNSUBDIV':
					row3.prop(modifyOptions, "iterations", expand= True)
				elif modifyOptions.decimate_type == 'DISSOLVE':
					row3.prop(modifyOptions, "angle", expand= True)
			#REMESH -- octree depth for all modes except VOXEL, which uses voxel size
			elif modifyOptions.modifier == 'REMESH':
				row2.prop(modifyOptions, "remesh_type", expand= True)
				if modifyOptions.remesh_type == 'BLOCKS':
					row3.prop(modifyOptions, "oDepth", expand= True)
				if modifyOptions.remesh_type == 'SMOOTH':
					row3.prop(modifyOptions, "oDepth", expand= True)
				if modifyOptions.remesh_type == 'SHARP':
					row3.prop(modifyOptions, "oDepth", expand= True)
				if modifyOptions.remesh_type == 'VOXEL':
					row3.prop(modifyOptions, "voxel_size", expand= True)
			#NODES -- attribute row only applies to the subdivision-surface tree
			elif modifyOptions.modifier == 'NODES':
				row2.prop(modifyOptions, "geo_type")
				if modifyOptions.geo_type == "GeometryNodeSubdivisionSurface":
					row2.prop(modifyOptions, "geo_attribute", expand= True)
			col2.prop(modifyOptions, "selected_only", expand= True)
			col2.prop(modifyOptions, "apply_mod", expand= True)
			## optional mesh-repair passes run before the modifier
			box3 = col2.box()
			col3 = box3.column(align=True)
			col3.label(text="FIX MESH BEFORE MODIFY")
			col3.prop(modifyOptions, "fix_bad_mesh", expand= True)
			if modifyOptions.fix_bad_mesh:
				col3.prop(modifyOptions, "dissolve_threshold", expand= True)
			col3.prop(modifyOptions, "merge_vertex", expand= True)
			if modifyOptions.merge_vertex:
				col3.prop(modifyOptions, "merge_threshold", expand= True)
			if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
				col3.prop(modifyOptions, "remove_existing_sharp", expand= True)
			col3.prop(modifyOptions, "fix_normals", expand= True)
			if modifyOptions.fix_normals:
				col3.prop(modifyOptions, "create_new_custom_normals", expand= True)
			# use_modifier_stack= modifyOptions.use_modifier_stack,
			# modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],
		# FIX MESH SETTINGS -- same repair passes as above, run standalone
		elif optimizeOptions.operation == 'fixMesh':
			col2.prop(modifyOptions, "selected_only", expand= True)
			col3 = col2.column(align=True)
			col3.prop(modifyOptions, "fix_bad_mesh", expand= True)
			if modifyOptions.fix_bad_mesh:
				col3.prop(modifyOptions, "dissolve_threshold", expand= True)
			col3.prop(modifyOptions, "merge_vertex", expand= True)
			if modifyOptions.merge_vertex:
				col3.prop(modifyOptions, "merge_threshold", expand= True)
			if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
				col3.prop(modifyOptions, "remove_existing_sharp", expand= True)
			col3.prop(modifyOptions, "fix_normals", expand= True)
			if modifyOptions.fix_normals:
				col3.prop(modifyOptions, "create_new_custom_normals", expand= True)
		# UV SETTINGS
		elif optimizeOptions.operation == 'uv':
			if uvOptions.unwrap_type == 'Smart':
				col2.label(text= "SMART UV CAN BE SLOW", icon='ERROR')
			else:
				col2.label(text= "Unwrap Type")
			col2.prop(uvOptions, "unwrap_type", expand= True)
			col2.prop(uvOptions, "selected_only", expand= True)
			col2.prop(uvOptions, "scale_to_bounds", expand= True)
			col2.prop(uvOptions, "clip_to_bounds", expand= True)
			col2.prop(uvOptions, "use_set_size", expand= True)
			if uvOptions.use_set_size:
				col2.prop(uvOptions, "set_size", expand= True)
			col2.prop(uvOptions, "print_updated_results", expand= True)
		# CHOP SETTINGS
		elif optimizeOptions.operation == 'chop':
			col2.prop(chopOptions, "selected_only", expand= True)
			col2.prop(chopOptions, "cut_meshes", expand= True)
			col2.prop(chopOptions, "max_vertices", expand= True)
			col2.prop(chopOptions, "min_box_size", expand= True)
			col2.prop(chopOptions, "max_depth", expand= True)
			col2.prop(chopOptions, "merge", expand= True)
			col2.prop(chopOptions, "create_bounds", expand= True)
			col2.prop(chopOptions, "print_updated_results", expand= True)
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/properties.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from typing import *
from bpy.props import *
import bpy
class optimizeProperties(bpy.types.PropertyGroup):
	"""Top-level settings for the Optimize Scene operator: which pass to run
	and whether to echo the active settings to the console first."""

	# PROPERTIES
	operation: EnumProperty(
		name="Operation",
		items= [ ('modify', 'MODIFY', 'run modify'),
				('fixMesh', 'FIX MESH', 'run fix Mesh'),
				('uv', 'UV UNWRAP', "run uv"),
				('chop', 'CHOP', 'run chop')],
		description= "Choose the operation to run on the scene",
		default = 'modify'
	)

	print_attributes: BoolProperty(
		name ="Print Attributes",
		## typo fix: "begging" -> "beginning"
		description = "Print attributes used at the beginning of operation",
		default = False
	)
class modProperties(bpy.types.PropertyGroup):
	"""Settings for the "modify" and "fix mesh" operations: which modifier to
	add, its per-type parameters, and the optional repair passes run first."""

	# PROPERTIES
	selected_only: BoolProperty(
		name ="Use Selected Only",
		description = "Operate on selected objects only",
		default = False
	)
	apply_mod: BoolProperty(
		name ="Apply Modifier",
		description = "Apply modifier after adding",
		default = True
	)
	fix_bad_mesh: BoolProperty(
		name ="Fix Bad Mesh",
		description = "Remove zero area faces and zero length edges",
		default = False
	)
	dissolve_threshold: FloatProperty(
		name="Dissolve Threshold",
		description = "Threshold value used with Fix Bad Mesh",
		default=0.08,
		min=0,
		max=50
	)
	merge_vertex: BoolProperty(
		name ="Merge Vertex",
		description = "Merge vertices by distance",
		default = False
	)
	merge_threshold: FloatProperty(
		name="Merge Threshold",
		description = "Distance value used with merge vertex",
		default=0.01,
		min=0,
		max=50
	)
	remove_existing_sharp: BoolProperty(
		name ="Remove Existing Sharp",
		description = "Remove existing sharp edges from meshes. This helps sometimes after fixing bad meshes",
		default = True
	)
	fix_normals: BoolProperty(
		name ="Fix Normals",
		description = "Remove existing custom split normals",
		default = False
	)
	create_new_custom_normals: BoolProperty(
		name ="Create New Custom Normals",
		description = "Create new custom split normals",
		default = False
	)
	# Some common modifier names for reference:'DECIMATE''REMESH''NODES''SUBSURF''SOLIDIFY''ARRAY''BEVEL'
	modifier: EnumProperty(
		name="Modifier",
		items= [ ('DECIMATE', 'Decimate', 'decimate geometry'),
				('REMESH', 'Remesh', 'remesh geometry'),
				('NODES', 'Nodes', 'add geometry node mod'),
				('FIX', 'Fix Mesh', "fix mesh")],
		description= "Choose the modifier to apply to geometry",
		default = 'DECIMATE'
	)
	# TODO: Implement this modifier stack properly. would allow for multiple modifiers to be queued and run at once
	# use_modifier_stack: BoolProperty(
	#     name ="Use Modifier Stack",
	#     description = "use stack of modifiers instead of a single modifier",
	#     default = False
	# )
	# modifier_stack: CollectionProperty(
	#     type= optimizeProperties,
	#     name="Modifiers",
	#     description= "list of modifiers to be used",
	#     default = [["DECIMATE", "COLLAPSE", 0.5]]
	# )
	decimate_type: EnumProperty(
		items= [ ('COLLAPSE','collapse',"collapse geometry"),
				('UNSUBDIV','unSubdivide',"un subdivide geometry"),
				('DISSOLVE','planar',"dissolve geometry")],
		description = "Choose which type of decimation to perform.",
		default = "COLLAPSE"
	)
	ratio: FloatProperty(
		name="Ratio",
		default=0.5,
		min=0.0,
		max=1.0
	)
	iterations: IntProperty(
		name="Iterations",
		default=2,
		min=0,
		max=50
	)
	angle: FloatProperty(
		name="Angle",
		default=15.0,
		min=0.0,
		max=180.0
	)
	## Fix: the item descriptions below were copy-pasted from decimate_type
	## ("collapse geometry", "un subdivide geometry", ...); they now describe
	## the actual remesh modes shown in the UI tooltips.
	remesh_type: EnumProperty(
		items= [ ('BLOCKS','blocks',"blocky approximation remesh"),
				('SMOOTH','smooth',"smooth-surface remesh"),
				('SHARP','sharp',"sharp-edge preserving remesh"),
				('VOXEL','voxel',"volume-based voxel remesh")],
		description = "Choose which type of remesh to perform.",
		default = "VOXEL"
	)
	oDepth: IntProperty(
		name="Octree Depth",
		default=4,
		min=1,
		max=8
	)
	voxel_size: FloatProperty(
		name="Voxel Size",
		default=0.1,
		min=0.01,
		max=2.0
	)
	geo_type: EnumProperty(
		items= [ ('GeometryNodeConvexHull','convex hull',"basic convex hull"),
				('GeometryNodeBoundBox','bounding box',"basic bounding box"),
				('GeometryNodeSubdivisionSurface','subdiv',"subdivide geometry")],
		description = "Choose which type of geo node tree to add",
		default = "GeometryNodeBoundBox"
	)
	geo_attribute: IntProperty(
		name="Attribute",
		description = "Additional attribute used for certain geo nodes",
		default=2,
		min=0,
		max=8
	)
class uvProperties(bpy.types.PropertyGroup):
	"""Settings for the UV-unwrap operation."""

	# PROPERTIES
	selected_only: BoolProperty(
		name ="Use Selected Only",
		description = "Operate on selected objects only",
		default = False
	)
	## Fix: the item descriptions below were copy-pasted from unrelated enums
	## ("basic convex hull", "subdivide geometry", ...); they now describe the
	## actual projection types shown in the UI tooltips.
	unwrap_type: EnumProperty(
		items= [ ('Cube','cube project',"cube projection unwrap"),
				('Sphere','sphere project',"sphere projection unwrap"),
				('Cylinder','cylinder project',"cylinder projection unwrap"),
				('Smart','smart project',"smart angle-based projection unwrap")],
		description = "Choose which type of unwrap process to use.",
		default = "Cube"
	)
	scale_to_bounds: BoolProperty(
		name ="Scale To Bounds",
		description = "Scale UVs to 2D bounds",
		default = False
	)
	clip_to_bounds: BoolProperty(
		name ="Clip To Bounds",
		description = "Clip UVs to 2D bounds",
		default = False
	)
	use_set_size: BoolProperty(
		name ="Use Set Size",
		description = "Use a defined UV size for all objects",
		default = False
	)
	set_size : FloatProperty(
		name="Set Size",
		default=2.0,
		min=0.01,
		max=100.0
	)
	print_updated_results: BoolProperty(
		name ="Print Updated Results",
		description = "Print updated results to console",
		default = True
	)
class OmniSceneOptChopPropertiesMixin:
	"""Property set for the "chop" (scene splitting) pass. Mixed into both the
	scene-registered PropertyGroup below and the operators that consume it."""
	selected_only: BoolProperty(
		name="Split Selected Only",
		description="Operate on selected objects only",
		default=False
	)
	print_updated_results: BoolProperty(
		name="Print Updated Results",
		description="Print updated results to console",
		default=True
	)
	cut_meshes: BoolProperty(
		name="Cut Meshes",
		description="Cut meshes",
		default=True
	)
	merge: BoolProperty(
		name="Merge",
		description="Merge split chunks after splitting is complete",
		default=False
	)
	create_bounds: BoolProperty(
		name="Create Boundary Objects",
		description="Add generated boundary objects to scene",
		default=False
	)
	max_depth: IntProperty(
		name="Max Depth",
		description="Maximum recursion depth",
		default=8,
		min=0,
		max=32
	)
	max_vertices: IntProperty(
		name="Max Vertices",
		description="Maximum vertices allowed per block",
		default=10000,
		min=0,
		max=1000000
	)
	min_box_size: FloatProperty(
		name="Min Box Size",
		description="Minimum dimension for a chunk to be created",
		default=1,
		min=0,
		max=10000
	)

	def attributes(self) -> Dict:
		"""Return the chop settings as a plain dict (keyword arguments for the splitter)."""
		return dict(
			merge=self.merge,
			cut_meshes=self.cut_meshes,
			max_vertices=self.max_vertices,
			min_box_size=self.min_box_size,
			max_depth=self.max_depth,
			print_updated_results=self.print_updated_results,
			create_bounds=self.create_bounds,
			selected_only=self.selected_only
		)

	def set_attributes(self, attributes:Dict):
		"""Apply values from *attributes* onto this group.

		:raises ValueError: on any key that is not a property of this mixin.
		"""
		for attr, value in attributes.items():
			if hasattr(self, attr):
				setattr(self, attr, value)
			else:
				raise ValueError(f"OmniSceneOptChopPropertiesMixin: invalid attribute for set {attr}")
class chopProperties(bpy.types.PropertyGroup, OmniSceneOptChopPropertiesMixin):
	"""Scene-registered PropertyGroup carrying the chop settings from the mixin."""
	pass
| 9,344 | Python | 27.842593 | 115 | 0.601241 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/ui.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
from typing import *
import bpy
from bpy.utils import previews
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty)
from bpy.types import (Context, Object, Operator, Scene)
from .operators import (
OBJECT_OT_omni_sceneopt_optimize,
OBJECT_OT_omni_sceneopt_export,
OmniSceneOptPropertiesMixin,
OmniSceneOptGeneratePropertiesMixin,
selected_meshes,
symmetry_axis_items
)
## ======================================================================
def preload_icons() -> previews.ImagePreviewCollection:
	"""Load the add-on's icon images from the bundled ``icons`` folder into a
	new preview collection and return it."""
	icons_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons")
	collection = previews.new()
	for icon_name, icon_file in (("GEAR", "gear.png"), ("ICON", "ICON.png")):
		collection.load(icon_name, os.path.join(icons_root, icon_file), "IMAGE")
	return collection
## ======================================================================
class OmniSceneOptProperties(bpy.types.PropertyGroup,
							 OmniSceneOptPropertiesMixin,
							 OmniSceneOptGeneratePropertiesMixin):
	"""We're only here to register the mixins through the PropertyGroup"""
	## No body needed: all properties come from the two mixins above.
	pass
## ======================================================================
def can_run_optimization(scene:Scene) -> bool:
	"""Return True when at least one optimization pass is enabled and, in
	selected-only mode, at least one mesh is actually selected."""
	settings = scene.omni_sceneopt
	if settings.selected and not len(selected_meshes(scene)):
		return False

	## enabled when any single pass is switched on
	return any((
		settings.validate,
		settings.weld,
		settings.decimate,
		settings.unwrap,
		settings.chop,
		settings.generate,
	))
## ======================================================================
class OBJECT_PT_OmniOptimizationPanel(bpy.types.Panel):
	"""3D-View N-panel ("Omniverse" tab) for the Scene Optimizer: per-pass
	settings plus the optimize and export launch buttons."""
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'UI'
	bl_category = "Omniverse"
	bl_label = "Scene Optimizer"
	bl_options = {"DEFAULT_CLOSED"}

	## shared preview collection, loaded once at class-definition time
	icons = preload_icons()

	@staticmethod
	def _apply_parameters(settings, op:Operator):
		"""Copy parameters from the scene-level settings blob to an operator"""
		## skip RNA bookkeeping names and private attributes
		invalid = {"bl_rna", "name", "rna_type"}
		for property_name in filter(lambda x: not x[0] == '_' and not x in invalid, dir(settings)):
			if hasattr(op, property_name):
				value = getattr(settings, property_name)
				setattr(op, property_name, value)
		op.verbose = True

	def draw_validate(self, layout, scene: Scene):
		"""Checkbox for the validation pass."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "validate")

	def draw_weld(self, layout, scene: Scene):
		"""Weld toggle plus its distance parameter."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "weld")
		if not scene.omni_sceneopt.weld:
			return
		box.prop(scene.omni_sceneopt, "weld_distance")

	def draw_decimate(self, layout, scene: Scene):
		"""Decimate toggle plus ratio/face-count/symmetry parameters."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "decimate")
		if not scene.omni_sceneopt.decimate:
			return
		box.prop(scene.omni_sceneopt, "decimate_ratio")
		box.prop(scene.omni_sceneopt, "decimate_min_face_count")
		row = box.row()
		row.prop(scene.omni_sceneopt, "decimate_use_symmetry")
		row = row.row()
		row.prop(scene.omni_sceneopt, "decimate_symmetry_axis", text="")
		## axis picker only makes sense when symmetry is on
		row.enabled = scene.omni_sceneopt.decimate_use_symmetry
		box.prop(scene.omni_sceneopt, "decimate_remove_shape_keys")

	def draw_unwrap(self, layout, scene: Scene):
		"""UV unwrap toggle plus island margin."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "unwrap")
		if not scene.omni_sceneopt.unwrap:
			return
		box.prop(scene.omni_sceneopt, "unwrap_margin")

	def draw_chop(self, layout, scene: Scene):
		"""Chop (scene splitting) toggle plus the chop property group settings."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "chop")
		if not scene.omni_sceneopt.chop:
			return
		col = box.column(align=True)
		col.prop(scene.omni_sceneopt_chop, "max_vertices")
		col.prop(scene.omni_sceneopt_chop, "min_box_size")
		col.prop(scene.omni_sceneopt_chop, "max_depth")
		box.prop(scene.omni_sceneopt_chop, "create_bounds")

	def draw_generate(self, layout, scene: Scene):
		"""Bounding-mesh generation toggle plus its type/duplicate options."""
		box = layout.box()
		box.prop(scene.omni_sceneopt, "generate", text="Generate Bounding Mesh")
		if not scene.omni_sceneopt.generate:
			return
		col = box.column(align=True)
		col.prop(scene.omni_sceneopt, "generate_type")
		col.prop(scene.omni_sceneopt, "generate_duplicate")

	def draw_operators(self, layout, context:Context, scene:Scene):
		"""Draw the optimize and export buttons, copying the panel settings
		onto each operator instance before it runs."""
		layout.label(text="")
		row = layout.row(align=True)
		row.label(text="Run Operations", icon="PLAY")
		row.prop(scene.omni_sceneopt, "selected", text="Selected Meshes Only")

		run_text = f"{'Selected' if scene.omni_sceneopt.selected else 'Scene'}"

		col = layout.column(align=True)
		op = col.operator(OBJECT_OT_omni_sceneopt_optimize.bl_idname,
						  text=f"Optimize {run_text}",
						  icon_value=self.icons["GEAR"].icon_id)
		self._apply_parameters(scene.omni_sceneopt, op)
		## grey the button out when no pass is enabled (or nothing is selected)
		col.enabled = can_run_optimization(scene)

		col = layout.column(align=True)
		op = col.operator(OBJECT_OT_omni_sceneopt_export.bl_idname,
						  text=f"Export Optimized Scene to USD",
						  icon='EXPORT')
		self._apply_parameters(scene.omni_sceneopt, op)

		col.label(text="Export Options")
		row = col.row(align=True)
		row.prop(scene.omni_sceneopt, "merge")
		row.prop(scene.omni_sceneopt, "export_textures")

	def draw(self, context:Context):
		"""Draw each pass section in pipeline order, then the launch buttons."""
		scene = context.scene
		layout = self.layout
		self.draw_validate(layout, scene=scene)
		self.draw_weld(layout, scene=scene)
		self.draw_unwrap(layout, scene=scene)
		self.draw_decimate(layout, scene=scene)
		self.draw_chop(layout, scene=scene)
		self.draw_generate(layout, scene=scene)
		self.draw_operators(layout, context, scene=scene)
## ======================================================================
## Classes registered/unregistered by register()/unregister() below.
classes = [
	OBJECT_PT_OmniOptimizationPanel,
	OmniSceneOptProperties,
]
def unregister():
	"""Remove the Scene settings pointer and unregister all classes (idempotent)."""
	try:
		delattr(bpy.types.Scene, "omni_sceneopt")
	except (ValueError, AttributeError, RuntimeError):
		## property already removed (e.g. add-on reload)
		pass

	for cls in reversed(classes):
		try:
			bpy.utils.unregister_class(cls)
		except (ValueError, AttributeError, RuntimeError):
			## class never registered or already gone; keep going
			pass
def register():
	"""Register panel/property classes, then attach the settings group to the Scene."""
	for item in classes:
		bpy.utils.register_class(item)

	bpy.types.Scene.omni_sceneopt = bpy.props.PointerProperty(type=OmniSceneOptProperties)
| 6,169 | Python | 27.302752 | 94 | 0.677906 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/lod.py | import argparse
import os
import sys
from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
## ======================================================================
def select_only(ob:Object):
	"""
	Ensure that only the specified object is selected.
	:param ob: Object to select
	"""
	bpy.ops.object.select_all(action="DESELECT")
	ob.select_set(state=True)
	## many operators act on the active object, so make it match the selection
	bpy.context.view_layer.objects.active = ob
## --------------------------------------------------------------------------------
def _selected_meshes(context:Context, use_instancing=True) -> List[Mesh]:
	"""
	Collect the selected mesh objects; with ``use_instancing``, meshes inside
	instanced collections on selected empties are included as well.

	:return: List[Mesh] (de-duplicated) of selected mesh objects.
	"""
	found = {ob for ob in context.selected_objects if ob.type == "MESH"}

	if use_instancing:
		## instances support: pull meshes out of instanced collections
		for empty in (ob for ob in context.selected_objects
					  if ob.type == "EMPTY" and ob.instance_collection):
			found.update(ob for ob in empty.instance_collection.all_objects
						 if ob.type == "MESH")

	return list(found)
## --------------------------------------------------------------------------------
def copy_object_parenting(source_ob:Object, target_ob:Object):
	"""
	Copy parenting and Collection membership from a source object.
	"""
	## unlink from current collections first (snapshot: unlink mutates users_collection)
	for collection in list(target_ob.users_collection):
		collection.objects.unlink(target_ob)

	for collection in source_ob.users_collection:
		collection.objects.link(target_ob)

	target_ob.parent = source_ob.parent
## --------------------------------------------------------------------------------
def find_unique_name(name:str, library:Iterable) -> str:
	"""
	Given a Blender library, find a unique name that does
	not exist in it, using Blender's ``name.###`` suffix style.
	"""
	if name not in library:
		return name

	index = 0
	candidate = f"{name}.{index:03d}"
	while candidate in library:
		index += 1
		candidate = f"{name}.{index:03d}"

	print(f"Unique Name: {candidate}")
	return candidate
## --------------------------------------------------------------------------------
def duplicate_object(ob:Object, token:str="D", weld=True) -> Object:
	"""
	Duplicates the specified object, maintaining the same parenting
	and collection memberships.

	:param ob: Mesh object to duplicate.
	:param token: Suffix appended after "__" to the copy's object/data names.
	:param weld: If True, merge coincident vertices on the duplicate.
	:return: The newly-created Object.
	"""
	## strip any previous "__token" suffix before appending the new one
	base_name = "__".join((ob.name.rpartition("__")[0] if "__" in ob.name else ob.name, token))
	base_data = "__".join((ob.data.name.rpartition("__")[0] if "__" in ob.data.name else ob.data.name, token))

	if base_name in bpy.data.objects:
		base_name = find_unique_name(base_name, bpy.data.objects)
	## Fix: mesh data names live in bpy.data.meshes -- the uniqueness check
	## previously (and incorrectly) looked in bpy.data.objects.
	if base_data in bpy.data.meshes:
		base_data = find_unique_name(base_data, bpy.data.meshes)

	data = ob.data.copy()
	data.name = base_data
	duplicate = bpy.data.objects.new(base_name, data)

	## Ensure scene collection membership
	## Prototypes might not have this or be in the view layer
	if not duplicate.name in bpy.context.scene.collection.all_objects:
		bpy.context.scene.collection.objects.link(duplicate)

	select_only(duplicate)

	## decimate doesn't work on unwelded triangle soups
	if weld:
		bpy.ops.object.mode_set(mode="EDIT")
		bpy.ops.mesh.select_all(action="SELECT")
		bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True)
		bpy.ops.object.mode_set(mode="OBJECT")

	return duplicate
## --------------------------------------------------------------------------------
def delete_mesh_object(ob:Object):
	"""
	Removes object from the Blender library.
	"""
	## capture both names up front: the object reference is invalid after remove()
	base_name = ob.name
	data_name = ob.data.name
	bpy.data.objects.remove(bpy.data.objects[base_name])
	## also remove the now-orphaned mesh datablock
	bpy.data.meshes.remove(bpy.data.meshes[data_name])
## --------------------------------------------------------------------------------
def decimate_object(ob:Object, token:str=None, ratio:float=0.5,
					use_symmetry:bool=False, symmetry_axis="X",
					min_face_count:int=3,
					create_duplicate=True):
	"""
	Collapse-decimate an object with the Decimate modifier and apply the result.

	:param ob: Mesh object to decimate.
	:param token: Name suffix for the duplicate (defaults to "DCM").
	:param ratio: Collapse ratio, 0.0-1.0.
	:param use_symmetry: Maintain symmetry across ``symmetry_axis``.
	:param symmetry_axis: "X", "Y" or "Z".
	:param min_face_count: Objects with fewer faces are returned untouched.
	:param create_duplicate: Decimate a copy rather than the object itself.
	:return: The decimated Object (duplicate or original).
	"""
	## Removed unused locals (old_mode, scene) that were never read.
	token = token or "DCM"

	if create_duplicate:
		target = duplicate_object(ob, token=token)
	else:
		target = ob

	if len(target.data.polygons) < min_face_count:
		print(f"{target.name} is under face count-- not decimating.")
		return target

	## We're going to use the decimate modifier
	mod = target.modifiers.new("OmniLOD", type="DECIMATE")
	mod.decimate_type = "COLLAPSE"
	mod.ratio = ratio
	mod.use_collapse_triangulate = True
	mod.use_symmetry = use_symmetry
	mod.symmetry_axis = symmetry_axis

	## modifier_apply acts on the active object, so select only the target
	bpy.ops.object.select_all(action="DESELECT")
	target.select_set(True)
	bpy.context.view_layer.objects.active = target
	bpy.ops.object.modifier_apply(modifier=mod.name)

	return target
## --------------------------------------------------------------------------------
def decimate_selected(ratios:List[float]=[0.5], min_face_count=3, use_symmetry:bool=False, symmetry_axis="X", use_instancing=True):
	"""
	Generate one decimated "LOD#" duplicate per ratio for every selected mesh.

	:param ratios: List of collapse ratios (each 0.1-1.0); one LOD per entry.
	:param min_face_count: Meshes under this face count are copied, not decimated.
	:param use_symmetry: Maintain symmetry across ``symmetry_axis`` while decimating.
	:param symmetry_axis: "X", "Y" or "Z".
	:param use_instancing: Also process meshes inside instanced collections.
	"""
	assert isinstance(ratios, (list, tuple)), "Ratio should be a list of floats from 0.1 to 1.0"
	for value in ratios:
		assert 0.1 <= value <= 1.0, f"Invalid ratio value {value} -- should be between 0.1 and 1.0"

	selected_objects = list(bpy.context.selected_objects)
	active = bpy.context.view_layer.objects.active

	selected_meshes = _selected_meshes(bpy.context, use_instancing=use_instancing)

	total = len(selected_meshes) * len(ratios)
	count = 1

	print(f"\n\n[ Generating {total} decimated LOD meshes (minimum face count: {min_face_count}]")

	for mesh in selected_meshes:
		## decimate a welded copy; each LOD is cut from this duplicate
		welded_duplicate = duplicate_object(mesh, token="welded")

		for index, ratio in enumerate(ratios):
			## padd: zero-padding width so the progress counter lines up
			padd = len(str(total)) - len(str(count))
			token = f"LOD{index}"
			orig_count = len(welded_duplicate.data.vertices)
			lod_duplicate = decimate_object(welded_duplicate, ratio=ratio, token=token, use_symmetry=use_symmetry,
											symmetry_axis=symmetry_axis, min_face_count=min_face_count)
			print(f"[{'0'*padd}{count}/{total}] Decimating {mesh.name} to {ratio} ({orig_count} >> {len(lod_duplicate.data.vertices)}) ...")
			copy_object_parenting(mesh, lod_duplicate)
			count += 1

		delete_mesh_object(welded_duplicate)

	print(f"\n[ Decimation complete ]\n\n")
## --------------------------------------------------------------------------------
def import_usd_file(filepath:str, root_prim:Optional[str]=None, visible_only:bool=False, use_instancing:bool=True):
	"""
	Clear the current scene, then import a USD file (meshes + materials only).

	:param filepath: Path of the USD file to import.
	:param root_prim: Optional prim path mask; only this subtree is imported.
	:param visible_only: Skip prims that are hidden in the USD stage.
	:param use_instancing: Import instanced prims as collection instances.
	"""
	## wipe everything currently in the scene so the import is the only content
	all_objects = bpy.context.scene.collection.all_objects
	names = [x.name for x in all_objects]

	try:
		bpy.ops.object.mode_set(mode="OBJECT")
	except RuntimeError:
		## already in object mode (or no active object) -- safe to ignore
		pass

	for name in names:
		ob = bpy.data.objects[name]
		bpy.data.objects.remove(ob)

	kwargs = {
		"filepath":filepath,
		"import_cameras": False,
		"import_curves": False,
		"import_lights": False,
		"import_materials": True,
		"import_blendshapes": False,
		"import_volumes": False,
		"import_skeletons": False,
		"import_shapes": False,
		"import_instance_proxies": True,
		"import_visible_only": visible_only,
		"read_mesh_uvs": True,
		"read_mesh_colors": False,
		"use_instancing": use_instancing,
		"validate_meshes": True,
	}

	if root_prim:
		## if you end with a slash it fails
		kwargs["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim

	bpy.ops.wm.usd_import(**kwargs)
	print(f"Imported USD file: {filepath}")
## --------------------------------------------------------------------------------
def export_usd_file(filepath:str, use_instancing:bool=True):
	"""Export the whole scene to USD (materials + UVs), rooted at /World."""
	bpy.ops.wm.usd_export(
		filepath=filepath,
		visible_objects_only=False,
		default_prim_path="/World",
		root_prim_path="/World",
		generate_preview_surface=True,
		export_materials=True,
		export_uvmaps=True,
		merge_transform_and_shape=True,
		use_instancing=use_instancing,
	)
	print(f"Wrote USD file with UVs: {filepath}")
## ======================================================================
if __name__ == "__main__":
	## Blender passes script args after a "--" separator; everything before it
	## belongs to Blender itself.
	real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []

	parser = argparse.ArgumentParser()
	parser.add_argument('--input', type=str, required=True, help="Path to input USD file")
	parser.add_argument('--output', type=str, help="Path to output USD file (default is input_LOD.usd)")
	parser.add_argument('--ratios', type=str, required=True, help='Ratios to use as a space-separated string, ex: "0.5 0.2"')
	parser.add_argument('--use_symmetry', action="store_true", default=False, help="Decimate with symmetry enabled.")
	parser.add_argument('--symmetry_axis', default="X", help="Symmetry axis to use (X, Y, or Z)")
	parser.add_argument('--visible_only', action="store_true", default=False, help="Only import visible prims from the input USD file.")
	parser.add_argument('--min_face_count', type=int, default=3, help="Minimum number of faces for decimation.")
	parser.add_argument('--no_instancing', action="store_false", help="Process the prototype meshes of instanced prims.")
	parser.add_argument('--root_prim', type=str, default=None,
						help="Root Prim to import. If unspecified, the whole file will be imported.")

	if not len(real_args):
		parser.print_help()
		sys.exit(1)

	args = parser.parse_args(real_args)

	input_file = os.path.abspath(args.input)
	## default output name: insert "_LOD" before the extension
	split = input_file.rpartition(".")
	output_path = args.output or (split[0] + "_LOD." + split[-1])

	## parse "--ratios" as either a single float or a space-separated list
	ratios = args.ratios
	if not " " in ratios:
		ratios = [float(ratios)]
	else:
		ratios = list(map(lambda x: float(x), ratios.split(" ")))

	use_instancing = not args.no_instancing

	import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only, use_instancing=use_instancing)
	bpy.ops.object.select_all(action="SELECT")
	decimate_selected(ratios=ratios, min_face_count=args.min_face_count, use_symmetry=args.use_symmetry, symmetry_axis=args.symmetry_axis, use_instancing=use_instancing)
	export_usd_file(output_path, use_instancing=use_instancing)
	sys.exit(0)
| 9,912 | Python | 32.94863 | 166 | 0.64659 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/optimize_export.py | import os
import sys
import time
import bpy
from omni_optimization_panel.operators import OmniOverrideMixin
omniover = OmniOverrideMixin()
## ======================================================================
def perform_scene_merge():
    """
    Combine all selected mesh objects into a single mesh.

    Builds (or reuses) a "MergeCollection"/"MergeScene" pair, links the selected
    meshes (plus the children of any instancers) into it, realizes instances,
    applies shape keys and modifiers, and finally joins everything into one object.
    Side effects: switches the active window to the merge scene and deletes the
    original instancer source objects.
    """
    # NOTE(review): orig_scene is captured but never used below — the original
    # scene is not restored after the window switch; confirm this is intended.
    orig_scene = bpy.context.scene
    selected = [x for x in bpy.context.selected_objects if x.type == "MESH"]
    if not len(selected):
        print("-- No objects selected for merge.")
        return
    # Reuse the merge containers if they already exist from a previous run.
    merge_collection = bpy.data.collections.new("MergeCollection") if not "MergeCollection" in bpy.data.collections else bpy.data.collections["MergeCollection"]
    merge_scene = bpy.data.scenes.new("MergeScene") if not "MergeScene" in bpy.data.scenes else bpy.data.scenes["MergeScene"]
    # Empty out both containers so stale links from earlier runs don't leak in.
    for child in merge_scene.collection.children:
        merge_scene.collection.children.unlink(child)
    for ob in merge_collection.all_objects:
        merge_collection.objects.unlink(ob)
    to_merge = set()   # every object that should end up in the merged result
    sources = set()    # children of instancers; deleted after realization
    for item in selected:
        to_merge.add(item)
        merge_collection.objects.link(item)
        if not item.instance_type == "NONE":
            # Instancers must be renderable for duplicates_make_real to work on them.
            item.show_instancer_for_render = True
            child_set = set(item.children)
            to_merge |= child_set
            sources |= child_set
    merge_scene.collection.children.link(merge_collection)
    bpy.context.window.scene = merge_scene
    for item in to_merge:
        try:
            merge_collection.objects.link(item)
        except RuntimeError:
            # Object was already linked above; safe to skip.
            continue
    ## make sure to remove shape keys and merge modifiers for all merge_collection objects
    for item in merge_collection.all_objects:
        with omniover.override([item], single=True):
            if item.data.shape_keys:
                bpy.ops.object.shape_key_remove(all=True, apply_mix=True)
            for mod in item.modifiers:
                bpy.ops.object.modifier_apply(modifier=mod.name, single_user=True)
    ## turns out the make_duplis_real function swaps selection for you, and
    ## leaves non-dupli objects selected
    bpy.ops.object.select_all(action="SELECT")
    bpy.ops.object.duplicates_make_real()
    ## this invert and delete is removing the old instancer objects
    bpy.ops.object.select_all(action="INVERT")
    for item in sources:
        item.select_set(True)
    bpy.ops.object.delete(use_global=False)
    bpy.ops.object.select_all(action="SELECT")
    ## need an active object for join poll()
    bpy.context.view_layer.objects.active = bpy.context.selected_objects[0]
    bpy.ops.object.join()
## ======================================================================
if __name__ == "__main__":
    ## Blender forwards script arguments after the "--" separator in argv.
    real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    if not len(real_args):
        print("-- No output path name.")
        sys.exit(-1)
    # The last (only) positional argument is the output USD path.
    output_file = real_args[-1]
    ## make sure the add-on is properly loaded
    bpy.ops.preferences.addon_enable(module="omni_optimization_panel")
    start_time = time.time()
    ## pull all attribute names from all mixins for passing on to the optimizer
    sceneopts = bpy.context.scene.omni_sceneopt
    chopopts = bpy.context.scene.omni_sceneopt_chop
    # Property-group bookkeeping attributes that must not be forwarded.
    skips = {"bl_rna", "name", "rna_type"}
    optimize_kwargs = {}
    for item in sceneopts, chopopts:
        for key in filter(lambda x: not x.startswith("__") and not x in skips, dir(item)):
            optimize_kwargs[key] = getattr(item, key)
    print(f"optimize kwargs: {optimize_kwargs}")
    if sceneopts.merge:
        ## merge before because of the possibility of objects getting created
        perform_scene_merge()
    # Save a .blend snapshot next to the output for debugging the merge result.
    bpy.ops.wm.save_as_mainfile(filepath=output_file.rpartition(".")[0]+".blend")
    ## always export whole scene
    optimize_kwargs["selected"] = False
    optimize_kwargs["verbose"] = True
    bpy.ops.omni_sceneopt.optimize(**optimize_kwargs)
    optimize_time = time.time()
    print(f"Optimization time: {(optimize_time - start_time):.2f} seconds.")
    # Fixed export configuration: whole scene rooted at /World, with instancing.
    export_kwargs = {
        "filepath": output_file,
        "visible_objects_only": False,
        "default_prim_path": "/World",
        "root_prim_path": "/World",
        "material_prim_path": "/World/materials",
        "generate_preview_surface": True,
        "export_materials": True,
        "export_uvmaps": True,
        "merge_transform_and_shape": True,
        "use_instancing": True,
        "export_textures": sceneopts.export_textures,
    }
    bpy.ops.wm.usd_export(**export_kwargs)
    export_time = time.time()
    print(f"Wrote optimized USD file: {output_file}")
    print(f"Export time: {(export_time - optimize_time):.2f} seconds.")
    print(f"Total time: {(export_time - start_time):.2f} seconds.")
    sys.exit(0)
| 4,378 | Python | 30.278571 | 157 | 0.693011 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/uv.py | import argparse
import os
import sys
from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
## ======================================================================
OMNI_MATERIAL_NAME = "OmniUVTestMaterial"
## ======================================================================
def select_only(ob:Object):
    """Deselect everything, then make *ob* the sole selected and active object."""
    view_layer = bpy.context.view_layer
    # Clear the existing selection first so only *ob* remains selected.
    bpy.ops.object.select_all(action="DESELECT")
    ob.select_set(state=True)
    view_layer.objects.active = ob
## --------------------------------------------------------------------------------
def _selected_meshes(context:Context) -> List[Mesh]:
    """Return every selected object of type 'MESH' in the given context."""
    meshes = []
    for candidate in context.selected_objects:
        if candidate.type == "MESH":
            meshes.append(candidate)
    return meshes
## --------------------------------------------------------------------------------
def get_test_material() -> Material:
    """
    Return the shared UV-test material, creating the 4K color-grid image and
    the material's node graph on first use.
    """
    image_name = "OmniUVGrid"
    if image_name not in bpy.data.images:
        bpy.ops.image.new(generated_type="COLOR_GRID", width=4096, height=4096,
                          name=image_name, alpha=False)
    if OMNI_MATERIAL_NAME not in bpy.data.materials:
        grid = bpy.data.images[image_name]
        material = bpy.data.materials.new(name=OMNI_MATERIAL_NAME)
        ## this creates the new graph
        material.use_nodes = True
        tree = material.node_tree
        bsdf = tree.nodes['Principled BSDF']
        # Feed the grid texture into the shader's base color.
        tex = tree.nodes.new("ShaderNodeTexImage")
        tex.location = [-300, 300]
        tree.links.new(tex.outputs['Color'], bsdf.inputs['Base Color'])
        tex.image = grid
    return bpy.data.materials[OMNI_MATERIAL_NAME]
## --------------------------------------------------------------------------------
def apply_test_material(ob:Object):
    """Strip all material slots from *ob* and assign the shared UV-test material."""
    ##!TODO: Generate it
    select_only(ob)
    # Pop every existing slot; the operator acts on the active object.
    while ob.material_slots:
        bpy.ops.object.material_slot_remove()
    bpy.ops.object.material_slot_add()
    ob.material_slots[0].material = get_test_material()
## --------------------------------------------------------------------------------
def unwrap_object(ob:Object, uv_layer_name="OmniUV", apply_material=False, margin=0.0):
    """
    Unwraps the target object by creating a fixed duplicate and copying the UVs over
    to the original.

    :param ob: Mesh object to unwrap; modified in place.
    :param uv_layer_name: NOTE(review): accepted but never used in this body — confirm intent.
    :param apply_material: When True, assign the shared UV-test material afterwards.
    :param margin: Island margin passed to smart_project.
    """
    old_mode = bpy.context.mode
    scene = bpy.context.scene
    # Operators below require object mode.
    if not old_mode == "OBJECT":
        bpy.ops.object.mode_set(mode="OBJECT")
    select_only(ob)
    # Drop all existing UV layers so the projection starts from a clean slate.
    uv_layers = list(ob.data.uv_layers)
    for layer in uv_layers:
        ob.data.uv_layers.remove(layer)
    # Seed the original with a cube projection (placeholder UVs).
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.uv.cube_project()
    bpy.ops.object.mode_set(mode="OBJECT")
    # Work on a duplicate so the original topology is untouched by the cleanup.
    duplicate = ob.copy()
    duplicate.data = ob.data.copy()
    scene.collection.objects.link(duplicate)
    ## if the two objects are sitting on each other it gets silly,
    ## so move the dupe over by double it's Y bounds size
    bound_size = Vector(duplicate.bound_box[0]) - Vector(duplicate.bound_box[-1])
    duplicate.location.y += bound_size.y
    # Clean the duplicate's mesh, then do the real unwrap on it.
    select_only(duplicate)
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True)
    bpy.ops.mesh.normals_make_consistent(inside=True)
    bpy.ops.object.mode_set(mode="OBJECT")
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.uv.select_all(action='SELECT')
    bpy.ops.uv.smart_project(island_margin=margin)
    bpy.ops.uv.average_islands_scale()
    bpy.ops.uv.pack_islands(margin=0)
    bpy.ops.object.mode_set(mode="OBJECT")
    ## copies from ACTIVE to all other SELECTED
    select_only(ob)
    ## This is incredibly broken
    # bpy.ops.object.data_transfer(data_type="UV")
    ## snap back now that good UVs exist; the two meshes need to be in the same
    ## position in space for the modifier to behave correctly.
    duplicate.matrix_world = ob.matrix_world.copy()
    # Use a Data Transfer modifier to copy the duplicate's UV loops back onto
    # the original by nearest face normal.
    modifier = ob.modifiers.new(type="DATA_TRANSFER", name="OmniBake_Transfer")
    modifier.object = duplicate
    modifier.use_loop_data = True
    modifier.data_types_loops = {'UV'}
    modifier.loop_mapping = 'NEAREST_NORMAL'
    select_only(ob)
    bpy.ops.object.modifier_apply(modifier=modifier.name)
    if apply_material:
        apply_test_material(ob)
    # The helper duplicate is no longer needed.
    bpy.data.objects.remove(duplicate)
## --------------------------------------------------------------------------------
def unwrap_selected(uv_layer_name="OmniUV", apply_material=False, margin=0.0):
    """
    Unwrap every selected mesh object, printing progress, then restore the
    original selection, active object, and edit mode.

    :param uv_layer_name: Forwarded to unwrap_object.
    :param apply_material: When True, assign the UV-test material to each mesh.
    :param margin: Island margin forwarded to unwrap_object's smart_project.
    """
    old_mode = bpy.context.mode
    selected_objects = list(bpy.context.selected_objects)
    active = bpy.context.view_layer.objects.active
    selected_meshes = _selected_meshes(bpy.context)
    total = len(selected_meshes)
    count = 1
    print(f"\n\n[ Unwrapping {total} meshes ]")
    for mesh in selected_meshes:
        padd = len(str(total)) - len(str(count))
        print(f"[{'0'*padd}{count}/{total}] Unwrapping {mesh.name}...")
        # BUGFIX: forward the apply_material *flag* and the margin value.
        # Previously the function object `apply_test_material` was passed as
        # the flag (always truthy, so the material was always applied) and
        # the margin argument was silently dropped.
        unwrap_object(mesh, uv_layer_name=uv_layer_name,
                      apply_material=apply_material, margin=margin)
        count += 1
    print(f"\n[ Unwrapping complete ]\n\n")
    # Restore the original selection (guard against an empty selection, which
    # previously raised IndexError).
    if selected_objects:
        select_only(selected_objects[0])
        for item in selected_objects[1:]:
            item.select_set(True)
    bpy.context.view_layer.objects.active = active
    if old_mode == "EDIT_MESH":
        bpy.ops.object.mode_set(mode="EDIT")
## --------------------------------------------------------------------------------
def import_usd_file(filepath:str, root_prim=None, visible_only=False):
    """
    Empty the current scene, then import *filepath* through Blender's USD
    importer with geometry-only options (no cameras/lights/materials/etc.).
    """
    # Snapshot names first: removing while iterating the collection is unsafe.
    existing = [x.name for x in bpy.context.scene.collection.all_objects]
    try:
        bpy.ops.object.mode_set(mode="OBJECT")
    except RuntimeError:
        pass  # already in object mode or no active object
    for name in existing:
        bpy.data.objects.remove(bpy.data.objects[name])
    options = dict(
        filepath=filepath,
        import_cameras=False,
        import_curves=False,
        import_lights=False,
        import_materials=False,
        import_blendshapes=False,
        import_volumes=False,
        import_skeletons=False,
        import_shapes=False,
        import_instance_proxies=True,
        import_visible_only=visible_only,
        read_mesh_uvs=False,
        read_mesh_colors=False,
    )
    if root_prim:
        ## if you end with a slash it fails
        options["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim
    bpy.ops.wm.usd_import(**options)
    print(f"Imported USD file: {filepath}")
## --------------------------------------------------------------------------------
def export_usd_file(filepath:str):
    """Export the whole scene (including hidden objects) to *filepath* as USD."""
    bpy.ops.wm.usd_export(
        filepath=filepath,
        visible_objects_only=False,
        default_prim_path="/World",
        root_prim_path="/World",
        # "generate_preview_surface": False,
        # "generate_mdl": False,
        merge_transform_and_shape=True,
    )
    print(f"Wrote USD file with UVs: {filepath}")
## ======================================================================
if __name__ == "__main__":
    ## Blender forwards script arguments after the "--" separator in argv.
    real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True, help="Path to input USD file")
    parser.add_argument('--output', type=str, help="Path to output USD file (default is input_UV.usd)")
    ## BUGFIX: the help text claimed a default of 0.01, but the fallback below
    ## has always been 0.0 — the text now matches the actual behavior.
    parser.add_argument('--margin', type=float, default=None, help="Island margin (default is 0.0)")
    parser.add_argument('--root_prim', type=str, default=None,
                        help="Root Prim to import. If unspecified, the whole file will be imported.")
    parser.add_argument('--add_test_material', action="store_true")
    parser.add_argument('--visible_only', action="store_true", default=False)
    if not len(real_args):
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args(real_args)
    input_file = os.path.abspath(args.input)
    # Derive the default output path by inserting "_UV" before the extension.
    split = input_file.rpartition(".")
    output_path = args.output or (split[0] + "_UV." + split[-1])
    # Explicit None check so a user-supplied 0.0 is treated the same as before.
    margin = args.margin if args.margin is not None else 0.0
    import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only)
    bpy.ops.object.select_all(action="SELECT")
    unwrap_selected(apply_material=args.add_test_material, margin=margin)
    export_usd_file(output_path)
    sys.exit(0)
| 8,005 | Python | 29.674329 | 103 | 0.639975 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/geo_nodes.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from mathutils import Vector
# the type of geometry node tree to create:
# geometry nodes is currently under development, so feature set is not yet at a stage to be fully utilized
# this puts in place a framework for more customizable and easily implementable optimizations in the future
# geometry nodes is a modifier, but unlike "DECIMATE" or "REMESH", geometry nodes can be customized with a wide array of options.
# similar to other modifiers, if there are multiple objects with the same geo node modifier, the calculations are done independently for each object.
# currently this setup can be used for generating convex hulls, creating bounding box meshes, and subdividing geometry.
# (GeometryNodeConvexHull, GeometryNodeBoundBox, GeometryNodeSubdivisionSurface)
# as the nodes options in blender expand, A lot more can be done wit it.
# more on geometry nodes: https://docs.blender.org/manual/en/latest/modeling/geometry_nodes/index.html#geometry-nodes
def new_GeometryNodes_group():
    """
    Create a minimal GeometryNodes node tree: a group-input node wired
    straight into a group-output node via their Geometry sockets.

    On its own this tree is a no-op; callers insert additional nodes between
    the input and output to actually modify geometry.
    """
    tree = bpy.data.node_groups.new('GeometryNodes', 'GeometryNodeTree')
    # Input side: provides the incoming geometry.
    group_in = tree.nodes.new('NodeGroupInput')
    group_in.outputs.new('NodeSocketGeometry', 'Geometry')
    # Output side: receives the (eventually modified) geometry.
    group_out = tree.nodes.new('NodeGroupOutput')
    group_out.inputs.new('NodeSocketGeometry', 'Geometry')
    # Pass-through link between the two Geometry sockets.
    tree.links.new(group_in.outputs['Geometry'], group_out.inputs['Geometry'])
    # Spread the nodes apart so the graph is readable in the node editor.
    group_in.location = Vector((-1.5*group_in.width, 0))
    group_out.location = Vector((1.5*group_out.width, 0))
    return tree
def geoTreeBasic(geo_tree, nodes, group_in, group_out, geo_type, attribute):
    """
    Insert a node of *geo_type* (e.g. convex hull, bounding box, subdivision
    surface) between the tree's group input and group output nodes.
    """
    inserted = nodes.new(geo_type)
    # Rewire input -> new node -> output.
    geo_tree.links.new(group_in.outputs['Geometry'], inserted.inputs[0])
    geo_tree.links.new(inserted.outputs[0], group_out.inputs['Geometry'])
    # The subdivision-surface node additionally needs its level input set.
    if geo_type == 'GeometryNodeSubdivisionSurface':
        geo_tree.nodes["Subdivision Surface"].inputs[1].default_value = attribute
def geoNodes(objects, geo_type, attribute):
    """
    Build one shared GeometryNodes tree of *geo_type* and attach it as a
    NODES modifier to every object in *objects*.
    """
    # TODO: When Geo Nodes develops further, hopefully all other modifier ops
    # can be done through nodes (currently does not support decimate/remesh).
    modifier = 'NODES'
    # One tree is shared by all objects; each still evaluates independently.
    geo_tree = new_GeometryNodes_group()
    for obj in objects:
        mod = obj.modifiers.new(name=modifier, type=modifier)
        mod.node_group = geo_tree
    # Insert the requested operation between the tree's input and output.
    nodes = geo_tree.nodes
    geoTreeBasic(geo_tree, nodes, nodes.get('Group Input'),
                 nodes.get('Group Output'), geo_type, attribute)
| 5,272 | Python | 63.304877 | 149 | 0.744499 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/run_ops_wo_update.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.ops import _BPyOpsSubModOp
view_layer_update = _BPyOpsSubModOp._view_layer_update
def open_update():
    """
    Temporarily suppress Blender's per-operator view-layer update.

    Every bpy operator call normally refreshes the scene, which is expensive
    for large scenes. There is no official switch for this, so we monkey-patch
    the internal update hook with a no-op until close_update() restores it.
    """
    def _noop_update(context):
        # Pretend the scene was refreshed without doing any work.
        pass
    _BPyOpsSubModOp._view_layer_update = _noop_update
def close_update():
    """Restore Blender's real view-layer update hook after open_update()."""
    # The scene still needs a refresh eventually; reinstating the saved hook
    # lets the next operator call perform it.
    _BPyOpsSubModOp._view_layer_update = view_layer_update
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/chop.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy, bmesh
from mathutils import Vector
import time
from . import blender_class, run_ops_wo_update, select_mesh, bounds, utils, fix_mesh
class Chop(blender_class.BlenderClass):
    # Operator metadata — used by the GUI version of the add-on only.
    bl_idname = "chop.scene"
    bl_label = "Chop Scene"
    bl_description = "Recursively split scene in half until reaches a desired threshold"
    bl_options = {"REGISTER", "UNDO"}
    # Class-level flag; execute() overwrites it from the
    # "print_updated_results" attribute before each run.
    print_results = True
def __init__(self):
self._default_attributes = dict(
merge= True, # optionally merge meshes in each split chunk after split recursion is complete
cut_meshes=True, # split all meshes intersecting each cut plane
# Cannot set this very low since split creates new triangles(if quads...)
max_vertices= 100000, # a vertex threshold value, that once a chunk is below, the splitting terminates
min_box_size= 1, # a size threshold that once a chunk is smaller than, the splitting terminates
max_depth= 16, # a recursion depth threshold that once is reached, the splitting terminates
print_updated_results= True, # print progress to console
create_bounds = False, # create new bounds objects for displaying the cut boundaries. Mostly useful for GUI
selected_only = False # uses only objects selected in scene. For GUI version only
)
    def execute(self, in_attributes=None):
        """
        Entry point: resolve attributes, select the target meshes, and run the
        recursive split, printing timing statistics along the way.

        :param in_attributes: Optional dict of overrides merged over the defaults.
        :return: {'FINISHED'} (Blender operator convention).
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        Chop.print_results = attributes["print_updated_results"]
        Stats.resetValues()
        Stats.startTime = time.time()
        then = Stats.startTime
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected):  # run only if there are selected mesh objects in the scene
            # NOTE(review): self.split is defined outside this view — presumably
            # it kicks off the recursive split; confirm in the full class.
            self.split(context, selected, attributes)
            now = time.time()  # time after it finished
            Stats.printTermination()
            if attributes['merge']:
                Stats.printMerge()
            print("TIME FOR SPLIT: ", round(now-then, 3))
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}
def getSplitPlane(self, obj_details): # the cut plane used in split. Aligned perpendicular to the longest dimension of the bounds
# find longest side
var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
max_dim = var.get(max(var)) # get the axis name of maximum of the three dims
# adjust the plane normal depending on the axis with the largest dimension
if max_dim == "x":
normal = [1,0,0,0]
axis = "x"
elif max_dim == "y":
normal = [0,1,0,0]
axis = "y"
else:
normal = [0,0,1,0]
axis = "z"
# get data for sub-boxes
midPt = [obj_details.x.mid,obj_details.y.mid,obj_details.z.mid] # get center of bounds to be able to create the next set of bounds
return midPt, normal, axis
    def getSplitBoxes(self, obj_details, attributes):
        """
        Split the current bounds into two halves along its longest axis and
        return their bounds descriptors for the next level of recursion.

        :param obj_details: Bounds info with .x/.y/.z exposing .min/.max/.mid/.distance.
        :param attributes: Attribute dict; "create_bounds" optionally spawns display objects.
        :return: (box_0, box_1) — bounds for the upper and lower half respectively.
        """
        # find longest side
        var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
        mx = var.get(max(var))  # axis name of the largest of the three dims
        mid_0 = [obj_details.x.max, obj_details.y.max, obj_details.z.max]  # longest-axis value replaced with the midpoint below
        high = mid_0.copy()  # maximum corner of the original bounds
        mid_1 = [obj_details.x.min, obj_details.y.min, obj_details.z.min]  # longest-axis value replaced with the midpoint below
        low = mid_1.copy()  # minimum corner of the original bounds
        midPt = [obj_details.x.mid, obj_details.y.mid, obj_details.z.mid]  # center of the original bounds
        # Replace the longest-axis component with the midpoint to form the split.
        if mx == "x":
            mid_0[0] = midPt[0]
            mid_1[0] = midPt[0]
        elif mx == "y":
            mid_0[1] = midPt[1]
            mid_1[1] = midPt[1]
        else:
            mid_0[2] = midPt[2]
            mid_1[2] = midPt[2]
        # Each half needs only two corner points to define its bounds:
        # the extreme corner (high/low) plus the adjusted midpoint corner.
        coords_1 = [high[:], mid_1[:]]
        box_0 = bounds.bounds(coords_1)  # upper half (max corner side)
        coords_0 = [low[:], mid_0[:]]
        box_1 = bounds.bounds(coords_0)  # lower half (min corner side)
        if attributes["create_bounds"]:  # optionally create display objects for viewing bounds
            bounds.boundsObj(coords_1)
            bounds.boundsObj(coords_0)
        return box_0, box_1
def boxTooSmall(self, obj_details, attributes): # returns whether bounds of current occurrences is too small
# find longest sides
dims = [obj_details.x.distance, obj_details.y.distance, obj_details.z.distance] # get the dimensions of each axis of the bounds
if max(dims) < attributes["min_box_size"]: # if the maximum of the three dims is less than the specified min_box_size
return True # continue recursion
return False # end recursion
def parentEmpty(self, part, children): # for parenting new created objects from split
parent_name = part.name # part is the original object that was split. keep track of its name
parent_col = part.users_collection[0] # track the collection of the part as well
parent_parent = part.parent # if the part object has an existing parent track that too
bpy.data.objects.remove(part, do_unlink=True) # now that that info is stored, part can be deleted and removed from the scene
# an empty will take the place of the original part
obj = bpy.data.objects.new(parent_name, None) # create an empty object that will inherit the name of part
parent_col.objects.link(obj) # connect this object to part's collection
obj.parent = parent_parent # make this empty the child of part's parent
for child in children: # make the newly created objects from the split operation children of the empty
child.parent = obj
def newObj(self, bm, parent): # create a new object for each half of a split
obj = parent.copy() # parent is the original mesh being split. this contains data such as material,
# so it is easiest to start with a copy of the object
obj.data = parent.data.copy() # need to copy the object mesh data separately
# TODO: obj.animation_data = sibling.animation_data.copy() # not sure if animation data should be copied. This would do that.
parent.users_collection[0].objects.link(obj)
# apply bmesh to new mesh
bm.to_mesh(obj.data) # Once the new object is formed, bmesh data created during the split process can be transferred to the new obj
bm.free() # always do this when finished with a bmesh
return obj
def checkIntersect(self, obj, axis, center): # for checking cut plane intersection while splitting
# intersection is checked by testing the objects bounds rather than each vertex individually
obj_details = bounds.bounds([obj.matrix_world @ Vector(v) for v in obj.bound_box])
tolerance = .01 # a tolerance value for intersection to prevent cutting a mesh that is in line with cut plane
# TODO: may need to have user control over this tolerance, or define it relative to total scene size.
# check for intersection depending on the direction of the cutting
# boolean is created for both sides of cut plane.
# rather than a single boolean checking for intersection, return if mesh is on one or both sides of cut plane.
if axis == "x":
intersect_0 = obj_details.x.max > center[0] + tolerance
intersect_1 = obj_details.x.min < center[0] - tolerance
elif axis == "y":
intersect_0 = obj_details.y.max > center[1] + tolerance
intersect_1 = obj_details.y.min < center[1] - tolerance
elif axis == "z":
intersect_0 = obj_details.z.max > center[2] + tolerance
intersect_1 = obj_details.z.min < center[2] - tolerance
return intersect_0, intersect_1
    def doSplit(self, partsToSplit, planeOrigin, planeNormal, axis):
        """
        Perform the actual split: bisect every part that straddles the cut
        plane, and sort everything into the two resulting half-lists.

        :param partsToSplit: Objects (occurrences) to test against the plane.
        :param planeOrigin: World-space point on the cut plane.
        :param planeNormal: Homogeneous plane normal (4 components).
        :param axis: "x", "y" or "z" — the axis the plane is perpendicular to.
        :return: (occurrences_0, occurrences_1) — objects on the positive and
                 negative side of the plane respectively, with empty results removed.
        """
        occurrences_0 = []
        occurrences_1 = []
        for part in partsToSplit:
            # Only split objects whose bounds actually cross the plane.
            intersect_0, intersect_1 = self.checkIntersect(part, axis, planeOrigin)
            if intersect_0 and intersect_1:  # vertices on both sides of the plane
                Stats.printPart(part)
                # bisect_plane works in object space, so transform the plane
                # origin and normal into each object's local frame.
                co = part.matrix_world.inverted() @ Vector(planeOrigin)
                normDir = part.matrix_world.transposed() @ Vector(planeNormal)
                # A bmesh holds the editable mesh; two copies are needed since
                # bisect_plane can only keep one side of the cut at a time.
                bmi = bmesh.new()
                bmi.from_mesh(part.data)
                bmo = bmi.copy()
                # Keep the inner (negative) side in bmi.
                bmesh.ops.bisect_plane(bmi,
                    geom=bmi.verts[:]+bmi.edges[:]+bmi.faces[:],
                    dist=0.0001,  # vertex-to-plane proximity threshold
                    # TODO: may need user control, or scale relative to scene size.
                    plane_co=co,
                    plane_no=(normDir.x,normDir.y,normDir.z),
                    clear_inner=True,
                    clear_outer=False)
                # Keep the outer (positive) side in bmo.
                bmesh.ops.bisect_plane(bmo,
                    geom=bmo.verts[:]+bmo.edges[:]+bmo.faces[:],
                    dist=0.0001,
                    plane_co=co,
                    plane_no=(normDir.x,normDir.y,normDir.z),
                    clear_inner=False,
                    clear_outer=True)
                # Turn each bmesh half into a real object and record it.
                children = []
                obj = self.newObj(bmi, part)
                occurrences_0.append(obj)
                children.append(obj)
                obj2 = self.newObj(bmo, part)
                occurrences_1.append(obj2)
                children.append(obj2)
                # Replace the original with an empty parenting both halves.
                self.parentEmpty(part, children)
                if Chop.print_results:
                    utils.printClearLine()  # clear last printed line before continuing
            # Entirely on one side: nothing to cut, just sort the object.
            elif intersect_0:
                occurrences_0.append(part)
                part.select_set(False)
            else:
                occurrences_1.append(part)
                part.select_set(False)
        # bisect_plane can leave empty objects / zero-vert meshes; drop them.
        occurrences_0 = fix_mesh.deleteEmptyXforms(occurrences_0)
        occurrences_1 = fix_mesh.deleteEmptyXforms(occurrences_1)
        return occurrences_0, occurrences_1
    def doMerge(self, partsToMerge):
        """
        Join all meshes of a finished chunk into a single object, accumulating
        the time spent into Stats.mergeTime.

        :param partsToMerge: Mesh objects of one chunk; no-op if fewer than two.
        """
        if len(partsToMerge) > 1:  # nothing to merge for zero or one mesh
            then = time.time()
            # A context copy lets us hand the join operator a temporary
            # selection/active object without touching the real selection.
            # NOTE(review): passing an override dict to an operator call was
            # removed in newer Blender versions (temp_override is the
            # replacement) — confirm the supported Blender version.
            ctx = bpy.context.copy()
            ctx['selected_editable_objects'] = partsToMerge
            ctx['active_object'] = partsToMerge[0]  # join requires an active object
            # NOTE(review): parents is collected but not used in this method —
            # possibly a leftover; confirm before removing.
            parents = []
            for merge in partsToMerge:
                parents.append(merge.parent)
            run_ops_wo_update.open_update()  # suppress per-operator scene updates
            bpy.ops.object.join(ctx)  # merges all parts into one
            run_ops_wo_update.close_update()  # must always pair with open_update
            now = time.time()
            Stats.mergeTime += (now-then)  # accumulate total merge time for reporting
def recursiveSplit(self, occurrences, attributes, obj_details, depth):
    """Recursively bisect *occurrences* until a termination condition hits.

    Termination: empty branch, max recursion depth reached, vertex count
    below threshold, or bounding box smaller than the minimum size. When
    attributes["merge"] is set, each finished chunk is merged into one object.
    """
    if not occurrences:  # empty branch: end recursion
        Stats.printPercent(depth, True)  # count this branch as empty volume
        return
    # Terminate on maximum recursive depth (a value of 0 disables the check).
    if attributes["max_depth"] != 0 and depth >= attributes["max_depth"]:
        Stats.chunks += 1  # a completed recursive branch yields one chunk
        Stats.printMsg_maxDepth += 1  # "REACHED MAX DEPTH"
        Stats.printPercent(depth)
        if attributes["merge"]:
            self.doMerge(occurrences)
        return
    # Terminate on vertex-count threshold or minimum bounding-box size.
    vertices = utils.getVertexCount(occurrences)
    if self.boxTooSmall(obj_details, attributes) or vertices < attributes["max_vertices"]:
        Stats.chunks += 1
        if vertices < attributes["max_vertices"]:
            Stats.printMsg_vertexGoal += 1  # "REACHED VERTEX GOAL"
        elif self.boxTooSmall(obj_details, attributes):
            Stats.printMsg_boxSize += 1  # "BOX TOO SMALL"
        Stats.printPercent(depth)
        if attributes["merge"]:
            self.doMerge(occurrences)
        return
    # Keep subdividing: compute the cut plane from the current bounds.
    planeOrigin, planeNormal, axis = self.getSplitPlane(obj_details)
    # FIX: occurrences_0/occurrences_1 were previously unbound (NameError at
    # the recursive calls below) whenever attributes["cut_meshes"] was False.
    # Default to empty halves so the recursion terminates cleanly instead.
    occurrences_0, occurrences_1 = [], []
    if attributes["cut_meshes"]:  # bisect meshes along the plane into two halves
        occurrences_0, occurrences_1 = self.doSplit(occurrences, planeOrigin, planeNormal, axis)
        depth += 1  # a split took place, so the recursion goes one level deeper
    # Recurse into both halves with their own bounding boxes.
    box_0, box_1 = self.getSplitBoxes(obj_details, attributes)
    self.recursiveSplit(occurrences_0, attributes, box_0, depth)
    self.recursiveSplit(occurrences_1, attributes, box_1, depth)
def split(self, context, selected, attributes):
    """Prepare bounds for the selection and kick off the recursive split."""
    parts = selected  # first recursion level operates on the caller's selection
    # One combined bounding box around everything that will be split.
    combined_corners = bounds.boundingBox(parts)
    details = bounds.bounds(combined_corners)  # per-axis min/max/size/center
    if attributes["create_bounds"]:
        # Optional debug visualization: keep the bounds objects in their own
        # collection so the scene stays organized.
        debug_collection = bpy.data.collections.new("BOUNDARIES")
        context.scene.collection.children.link(debug_collection)
        bounds.boundsObj(combined_corners)
    initial_depth = 0  # recursion starts at depth zero
    print("-----SPLIT HAS BEGUN-----")
    Stats.printPercent(initial_depth)  # seed the progress readout
    self.recursiveSplit(parts, attributes, details, initial_depth)
class Stats():
    """Class-level accumulator for split progress and diagnostics.

    All state lives on the class itself; methods are invoked as
    Stats.method(...) without creating an instance (now made explicit with
    @staticmethod).
    """
    startTime = 0             # wall-clock start of the operation
    printMsg_vertexGoal = 0   # branches terminated because the vertex goal was reached
    printMsg_boxSize = 0      # branches terminated because the box was too small
    printMsg_maxDepth = 0     # branches terminated because max depth was exceeded
    percent_worked = 0        # share of scene volume processed (occupied chunks)
    percent_empty = 0         # share of scene volume found to be empty
    chunks = 0                # chunks produced by the recursive split
    mergeTime = 0             # cumulative seconds spent merging chunks

    @staticmethod
    def resetValues():
        """Reset every counter before a new run."""
        Stats.startTime = 0
        Stats.printMsg_vertexGoal = 0
        Stats.printMsg_boxSize = 0
        Stats.printMsg_maxDepth = 0
        Stats.percent_worked = 0
        Stats.percent_empty = 0
        Stats.chunks = 0
        Stats.mergeTime = 0

    @staticmethod
    def printTermination():
        """Print why recursion branches terminated, plus total chunk count."""
        print("Reached Vertex Goal: ", Stats.printMsg_vertexGoal,
              " Box Too Small: ", Stats.printMsg_boxSize,
              " Exceeded Max Depth: ", Stats.printMsg_maxDepth)
        print("chunks: ", Stats.chunks)

    @staticmethod
    def printMerge():
        """Print the total time spent merging."""
        print("merge time: ", Stats.mergeTime)

    @staticmethod
    def printPart(part):
        """Print the latest part being split (breadcrumb for crash debugging)."""
        if Chop.print_results:
            print("current part being split: ", part)

    @staticmethod
    def printPercent(depth, empty=False):
        """Print a progress/ETA estimate for the recursive split.

        Progress is measured as a fraction of total bounds volume: each chunk
        at recursion depth d represents 100/2^d percent. The ETA uses only
        the occupied volume, because empty chunks cost virtually no time.
        """
        if not Chop.print_results:
            return
        if depth == 0:
            # Two placeholder lines that later updates will overwrite in place.
            print()
            print()
            return
        share = 100 / pow(2, depth)  # volume share of one chunk at this depth
        if empty:  # generated chunk contains no geometry
            Stats.percent_empty += share
        else:
            Stats.percent_worked += share
        total = Stats.percent_empty + Stats.percent_worked  # volume covered so far
        occupied = 100 - Stats.percent_empty  # share of volume holding geometry
        # FIX: guard the previous ZeroDivisionError when the entire scene
        # turned out to be empty (occupied == 0); report 100% done instead.
        percent_real = (Stats.percent_worked / occupied * 100) if occupied > 0 else 100.0
        now = time.time()  # current elapsed time in the operation
        if percent_real > 0:  # at least one occupied chunk processed
            # Extrapolate remaining time from the work already done.
            est_comp_time = f"{((now - Stats.startTime) / percent_real * 100 - (now - Stats.startTime)):1.0f}"
        else:
            est_comp_time = "Unknown"
        utils.printClearLine()
        utils.printClearLine()
        # Rewrite the two progress lines in place.
        print("\033[93m" + "Percent_empty: ", f"{Stats.percent_empty:.1f}", "%, Percent_worked: ", f"{Stats.percent_worked:.1f}",
              "%, Total: ", f"{total:.1f}", "%, Real: ", f"{percent_real:.1f}", "%")
        print("Estimated time remaining: ", est_comp_time, "s, Depth: ", depth, "\033[0m")
| 23,807 | Python | 59.580153 | 168 | 0.656278 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/fix_mesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import bmesh
import time
from functools import reduce
from . import blender_class, run_ops_wo_update, select_mesh, utils
class FixMesh(blender_class.BlenderClass):
    """Repair bad mesh data on scene objects.

    Removes degenerate geometry (zero-area faces, zero-length edges), merges
    vertices by distance, clears sharp-edge flags, and optionally rebuilds
    custom split normals.
    """
    # settings for GUI version only
    bl_idname = "fix.mesh"
    bl_label = "Fix Mesh"
    bl_description = "fix bad meshes in the scene"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=False,  # uses only objects selected in scene. For GUI version only
            fix_bad_mesh = True,  # remove zero-area faces and zero-length edges based on the 'dissolve_threshold'
            dissolve_threshold = 0.08,  # threshold value for 'fix_bad_mesh'
            merge_vertex = False,  # merge connected and disconnected vertices of a mesh by a distance threshold
            merge_threshold = 0.01,  # distance value to use for merge_vertex
            remove_existing_sharp = True,  # removing zero-area faces can corrupt edge data, causing bad normals; this helps minimize that
            fix_normals = True,  # optionally fix normals; useful after 'fix_bad_mesh' to repair the normals as well
            create_new_custom_normals = True  # auto-generate new sharp edges (based on angle)
        )

    def execute(self, in_attributes=None):
        """Entry point: select mesh objects, then fix geometry and normals.

        Returns {'FINISHED'} (Blender operator convention).
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution
        if context.mode != 'OBJECT':  # must be in object mode to perform the rest of the operations
            bpy.ops.object.mode_set(mode='OBJECT')
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected):  # run only if there are selected mesh objects in the scene
            # if removing zero-area-faces/zero-length-edges or merging vertices by distance:
            if attributes["fix_bad_mesh"] or attributes["merge_vertex"]:
                self.fixBadMesh(
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]:  # optionally fix bad normals (can often arise after fixing bad mesh)
                self.fixNormals(selected, attributes["create_new_custom_normals"])
        else:
            utils.do_print_error("NO MESH OBJECTS")
        now = time.time()  # time after it finished
        print("TIME FOR FIX MESH: ", round(now-then, 3))
        return {'FINISHED'}

    def fixBadMesh(self, selected, dissolveThreshold = 0.08, fixBadMesh = False, mergeVertex = False, mergeThreshold = 0.1, removeExistingSharp = True):
        """Remove degenerate geometry and/or merge vertices on each object.

        NOTE(review): the mergeThreshold signature default (0.1) differs from
        the _default_attributes merge_threshold (0.01); execute() always
        passes an explicit value, so the signature default is normally unused.
        """
        # once a degenerate-dissolve geometry node exists (needs to be developed by Blender), replace this with a GN setup;
        # that would go towards producing non-destructive workflows, which is a goal for the GUI version
        # for printing vertex and face data
        startingVerts = utils.getVertexCount(selected)
        startingFaces = utils.getFaceCount(selected)
        bm = bmesh.new()  # 'bmesh' in Blender is a data type that contains the 'edit mesh' for an object
        # It allows for much greater control over mesh properties and operations
        for object in selected:  # loop through each selected object
            utils.printPart(object)  # print the current part being fixed
            mesh = object.data  # the mesh data is what must be altered, not the object itself
            bm.from_mesh(mesh)  # attach the mesh to the bmesh container so that changes can be made
            if fixBadMesh:
                bmesh.ops.dissolve_degenerate(  # for removing zero-area faces and zero-length edges
                    bm,
                    dist=dissolveThreshold,
                    edges=bm.edges
                )
            if mergeVertex:
                bmesh.ops.remove_doubles(
                    bm,
                    verts=bm.verts,
                    dist=mergeThreshold
                )
            # Clear sharp state for all edges. This step reduces problems that arise from bad normals
            if removeExistingSharp:
                for edge in bm.edges:
                    edge.smooth = True  # smooth is the opposite of sharp, so setting to smooth is the same as removing sharp
            bm.to_mesh(mesh)  # transfer the altered bmesh data back to the original mesh
            bm.clear()  # always clear a bmesh after use
            utils.printClearLine()  # remove last print, so that printPart can be updated
        # print vertex and face data
        endingVerts = utils.getVertexCount(selected)
        endingFaces = utils.getFaceCount(selected)
        vertsRemoved = startingVerts-endingVerts
        facesRemoved = startingFaces-endingFaces
        print("Fix Mesh Statistics:")
        utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
        utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))

    def fixNormals(self, selected, createNewCustomNormals):
        """Clear (and optionally regenerate) custom split normals per object."""
        run_ops_wo_update.open_update()  # allows operators to be run without updating the scene,
        # important especially when working with loops
        for o in selected:
            if o.type != 'MESH':
                continue
            bpy.context.view_layer.objects.active = o  # operators below act on the active object
            mesh = o.data
            if mesh.has_custom_normals:
                bpy.ops.mesh.customdata_custom_splitnormals_clear()
            if createNewCustomNormals:
                bpy.ops.mesh.customdata_custom_splitnormals_add()
        run_ops_wo_update.close_update()  # must always call close_update if open_update is called
def deleteEmptyXforms(occurrences):
    """Delete objects whose mesh has no vertices; return the survivors.

    Objects are partitioned into those with geometry and those without,
    the empty ones are unlinked and removed from the blend data, and the
    non-empty ones are returned in their original order.
    """
    kept = []
    doomed = []
    for obj in occurrences:
        if len(obj.data.vertices):
            kept.append(obj)   # has geometry: keep it
        else:
            doomed.append(obj)  # zero vertices: schedule for deletion
    for obj in doomed:
        bpy.data.objects.remove(obj, do_unlink=True)
    return kept
| 7,637 | Python | 48.597402 | 153 | 0.647506 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/bounds.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy, bmesh
from mathutils import Vector
import collections
def boundsObj(points):
    """Create a bounds-display object whose vertices are *points*.

    The object is linked into the most recently added child collection of
    the scene collection (the one the split step just created) and drawn
    as a bounding box only.
    """
    mesh_data = bpy.data.meshes.new("mesh")
    obj = bpy.data.objects.new("MyObject", mesh_data)
    # The newest child collection is the one created during split, hence
    # the len-1 index.
    children = bpy.context.scene.collection.children
    children[len(children) - 1].objects.link(obj)
    obj.display_type = 'BOUNDS'  # viewport draws only the bounding box
    bm = bmesh.new()  # edit-mesh container giving vertex-level control
    for corner in points:
        bm.verts.new(corner)  # one vertex per bounds corner
    bm.to_mesh(obj.data)  # push the bmesh data onto the object's mesh
    bm.free()  # always free a bmesh when finished with it
    return obj
def boundingBox(objects):
    """Return the world-space bound-box corners of *objects* as a flat list.

    Accepts either a single object or a list of objects.
    """
    if not isinstance(objects, list):
        objects = [objects]  # normalize a single object to a one-item list
    # Corners must be transformed into world space via each object's matrix.
    return [obj.matrix_world @ Vector(corner)
            for obj in objects
            for corner in obj.bound_box]
def bounds(coords):
    """Compute per-axis bounding statistics for a list of 3D coordinates.

    Returns a namedtuple with fields x, y, z; each field carries max, min,
    distance (extent) and mid (center) for that axis.

    FIX: replaced the `info = lambda: None` attribute-stuffing hack with a
    proper namedtuple; the returned attribute interface is unchanged.
    """
    AxisInfo = collections.namedtuple('axis_info', ['max', 'min', 'distance', 'mid'])
    axes = {}
    # zip(*coords) transposes the point list into per-axis value sequences.
    for axis_name, values in zip('xyz', zip(*coords)):
        hi = max(values)  # maximum bound on this axis
        lo = min(values)  # minimum bound on this axis
        axes[axis_name] = AxisInfo(max=hi, min=lo, distance=hi - lo, mid=(hi + lo) / 2)
    o_details = collections.namedtuple('object_details', ['x', 'y', 'z'])
    return o_details(**axes)
| 3,481 | Python | 47.36111 | 119 | 0.703533 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/remesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Remeshing reconstructs a mesh to produce clean/uniform geometry, but removes all UV mappings from an object
# There are four different remesh methods. (BLOCKS, SMOOTH, SHARP, VOXEL)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/remesh.html#remesh-modifier
def remesh(objects, remesh_type, prop):
    """Add a configured Remesh modifier to every object in *objects*.

    remesh_type: 'BLOCKS', 'SMOOTH' or 'SHARP' (octree-based; *prop* is the
    octree depth, higher => more geometry, 2^x) or 'VOXEL' (*prop* is the
    voxel size, lower => more geometry). Raises TypeError for any other mode.
    """
    OCTREE_MODES = ('BLOCKS', 'SMOOTH', 'SHARP')  # modes sharing octree_depth
    for obj in objects:
        mod = obj.modifiers.new(name='REMESH', type='REMESH')
        mod.mode = remesh_type
        if remesh_type in OCTREE_MODES:
            # BLOCKS: no smoothing; SMOOTH: smooth surface; SHARP: smooth but
            # preserves sharp edges and corners. All use octree resolution.
            mod.octree_depth = prop
        elif remesh_type == 'VOXEL':
            # OpenVDB-based manifold remesh that tries to preserve volume.
            mod.voxel_size = prop
        else:
            raise TypeError('Invalid Remesh Type')
    return
| 2,657 | Python | 54.374999 | 132 | 0.703049 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/process_attributes.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.types import Operator
from . import modify, fix_mesh, chop, uv, utils
class OPTIMIZE_OT_Scene(Operator):
    """Operator that gathers parameters from the scene's option property
    groups and dispatches the selected optimization operation."""
    bl_idname = "optimize.scene"
    bl_label = "Optimize Scene"
    bl_description = "Optimize scene based on operation and set parameters"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        """Blender operator entry point."""
        self.get_attributes(context)
        return {'FINISHED'}

    def get_attributes(self, context):
        """Build the attribute dict for the active operation, then run it."""
        optimizeOptions = context.scene.optimize_options
        modifyOptions = context.scene.modify_options
        uvOptions = context.scene.uv_options
        chopOptions = context.scene.chop_options
        # FIX: start from an empty dict so operations that define no
        # attributes (e.g. "noop") no longer raise NameError below.
        attributes = dict()
        if optimizeOptions.operation == "modify":
            attributes = dict(
                selected_only= modifyOptions.selected_only,
                apply_mod= modifyOptions.apply_mod,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals,
                modifier= modifyOptions.modifier,
                # use_modifier_stack= modifyOptions.use_modifier_stack,
                # modifier_stack= modifyOptions.modifier_stack,
                decimate_type= modifyOptions.decimate_type,
                ratio= modifyOptions.ratio,
                iterations= modifyOptions.iterations,
                angle= modifyOptions.angle,
                remesh_type= modifyOptions.remesh_type,
                oDepth= modifyOptions.oDepth,
                voxel_size= modifyOptions.voxel_size,
                geo_type= modifyOptions.geo_type,
                geo_attribute= modifyOptions.geo_attribute
            )
        elif optimizeOptions.operation == "fixMesh":
            attributes = dict(
                selected_only=modifyOptions.selected_only,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals
            )
        elif optimizeOptions.operation == "uv":
            attributes = dict(
                selected_only= uvOptions.selected_only,
                scale_to_bounds = uvOptions.scale_to_bounds,
                clip_to_bounds = uvOptions.clip_to_bounds,
                unwrap_type = uvOptions.unwrap_type,
                use_set_size = uvOptions.use_set_size,
                set_size = uvOptions.set_size,
                print_updated_results= uvOptions.print_updated_results
            )
        elif optimizeOptions.operation == "chop":
            attributes = dict(
                merge= chopOptions.merge,
                cut_meshes= chopOptions.cut_meshes,
                max_vertices= chopOptions.max_vertices,
                min_box_size= chopOptions.min_box_size,
                max_depth= chopOptions.max_depth,
                print_updated_results= chopOptions.print_updated_results,
                create_bounds = chopOptions.create_bounds,
                selected_only = chopOptions.selected_only
            )
        if optimizeOptions.print_attributes:
            print(attributes)
        self.process_operation(optimizeOptions.operation, attributes)

    def process_operation(self, operation, attributes):
        """Map *operation* to its implementation class and execute it."""
        start = utils.start_time()
        blender_cmd = None
        if operation == 'modify':
            # Modify Scene
            blender_cmd = modify.Modify()
        elif operation == 'fixMesh':
            # Clean Scene
            blender_cmd = fix_mesh.FixMesh()
        elif operation == 'chop':
            # Chop Scene
            blender_cmd = chop.Chop()
        elif operation == 'uv':
            # Unwrap scene
            blender_cmd = uv.uvUnwrap()
        elif operation == "noop":
            # Runs the load/save USD round trip without modifying the scene.
            utils.do_print("No-op for this scene")
            return
        else:
            utils.do_print_error("Unknown operation: " + operation + " - add function call to process_file in process.py")
            return
        # Run the command
        if blender_cmd:
            blender_cmd.execute(attributes)
        else:
            utils.do_print_error("No Blender class found to run")
        utils.report_time(start, "operation")
| 5,736 | Python | 40.273381 | 122 | 0.61175 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/utils.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Generic utility functions for Blender
import json
import sys
from timeit import default_timer as timer
import bpy
def do_print(msg):
    """Print *msg* highlighted in yellow, flushing so it appears immediately."""
    highlighted = "\033[93m" + msg + "\033[0m"
    print(highlighted, flush=True)
def do_print_error(msg):
    """Print *msg* highlighted in red, flushing so it appears immediately."""
    highlighted = "\033[91m" + msg + "\033[0m"
    print(highlighted, flush=True)
def start_time():
    """Return a high-resolution timestamp; pair with report_time()."""
    return timer()
def report_time(start, msg):
    """Print the seconds elapsed since *start*, labeled with *msg*."""
    elapsed = timer() - start
    do_print("Elapsed time for {}: {:.3f}".format(msg, elapsed))
def print_python_version():
    """Report the running interpreter's major.minor Python version."""
    version = sys.version_info
    do_print("Python version: %s.%s" % (version.major, version.minor))
def open_file(inputPath):
    """Import *inputPath* (USD or FBX) into the current scene.

    Returns True on success, False for an unrecognized extension; loading
    clears any previously loaded file and prints elapsed timing.
    """
    t0 = timer()
    if inputPath.endswith((".usd", ".usda", ".usdc")):
        do_print("Load file: " + inputPath)
        bpy.ops.wm.usd_import(filepath=inputPath)
    elif inputPath.endswith(".fbx"):
        bpy.ops.import_scene.fbx(filepath=inputPath)
    else:
        do_print_error("Unrecognized file, not loaded: " + inputPath)
        return False
    t1 = timer()
    do_print("Elapsed time to load file: " + "{:.3f}".format(t1 - t0))
    return True
def save_file(outputPath):
    """Export the scene to *outputPath* as USD and print elapsed timing.

    Only diffs are written, so this is faster than a full export.
    """
    t0 = timer()
    do_print("Save file: " + outputPath)
    bpy.ops.wm.usd_export(filepath=outputPath)
    t1 = timer()
    do_print("Elapsed time to save file: " + "{:.3f}".format(t1 - t0))
    return True
def clear_scene():
    """Reset Blender to an empty factory scene.

    NOTE(review): fully clearing a scene is described as difficult in
    Blender; this factory-reset approach is noted as only partially working.
    """
    # This seems to be difficult with Blender. Partially working code:
    bpy.ops.wm.read_factory_settings(use_empty=True)
def process_json_config(operation):
    """Parse a JSON operation string; return None for empty/None input."""
    if not operation:
        return None
    return json.loads(operation)
def getVertexCount(occurrences):
    """Total vertex count across *occurrences* (used as a recursion threshold)."""
    return sum(len(obj.data.vertices) for obj in occurrences)
def getFaceCount(occurrences):
    """Total face count across *occurrences* (used as a recursion threshold)."""
    return sum(len(obj.data.polygons) for obj in occurrences)
def printPart(part):
    """Log which object is currently being operated on."""
    print("current part being operated on: ", part.name)
def printClearLine():
    """Move the console cursor up one line and erase it (ANSI escapes), so
    progress output can be rewritten in place instead of scrolling."""
    cursor_up = '\033[1A'    # ANSI: move cursor up one line
    erase_line = '\x1b[2K'   # ANSI: clear the current line
    print(cursor_up, end=erase_line)
| 3,371 | Python | 33.408163 | 125 | 0.69119 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/select_mesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# for selecting only mesh objects in the scene. To be used by multiple other files.
def setSelected(context, selectedOnly = False, deselectAll = True):
    """Collect the mesh objects to operate on and manage their selection.

    Considers only visible objects (processing fails otherwise). With
    selectedOnly (GUI version) the pool is the current selection; otherwise
    it is every visible object. With deselectAll every pooled object is
    deselected; otherwise exactly the mesh objects end up selected.
    Returns the list of mesh objects found.
    """
    visible = [ob for ob in context.view_layer.objects if ob.visible_get()]
    if selectedOnly:
        # GUI-only path: operate on the current selection (possibly empty).
        pool = context.selected_objects if len(context.selected_objects) else []
    else:
        pool = visible
    mesh_objects = []
    for obj in pool:
        if obj.type == 'MESH':  # ignore lights/cameras/curves/etc.
            mesh_objects.append(obj)
        # Either clear every selection flag, or select exactly the meshes.
        obj.select_set(False if deselectAll else obj.type == 'MESH')
    return mesh_objects
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/blender_class.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from abc import ABC, abstractmethod
import json
from . import utils
class BlenderClass(ABC):
    """Abstract base for the optimizer's Blender operations.

    Subclasses populate _default_attributes and implement execute();
    callers may pass a partial attribute dict that is overlaid on the
    defaults via get_attributes().
    """

    def __init__(self):
        self._default_attributes = dict()

    def get_attributes(self, in_attributes):
        """Merge *in_attributes* over the defaults and return the result.

        FIX: tolerate in_attributes=None (the common execute() default)
        instead of raising TypeError when unpacking None below.
        """
        in_attributes = in_attributes or {}
        attributes = {**self._default_attributes, **in_attributes}
        # utils.do_print("Attributes: " + json.dumps(attributes, indent=4, sort_keys=False))
        return attributes

    @abstractmethod
    def execute(self, in_attributes=None):
        """Run the operation; must be overridden by subclasses."""
        pass
| 1,332 | Python | 32.324999 | 92 | 0.705706 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/decimate.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Decimation reduces geometry while maintaining form and UVs
# There are three different decimation methods. Each method produces different results, with its own pros/cons)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/decimate.html#decimate-modifier
def decimate(objects, decimate_type, prop):
    """Add a Decimate modifier of the given type to every mesh in ``objects``.

    ``prop`` is interpreted per decimation type:
      COLLAPSE -> ratio of total faces kept (x/1)
      UNSUBDIV -> number of un-subdivision iterations (1/2^x geometry remaining)
      DISSOLVE -> angle limit between faces (radians); also delimits by UV seams

    Raises TypeError for an unknown ``decimate_type``.
    See https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/decimate.html#decimate-modifier
    """
    mod_kind = 'DECIMATE'
    for obj in objects:
        # Decimation cannot be performed on meshes with 3 or fewer faces.
        if len(obj.data.polygons) <= 3:
            continue
        mod = obj.modifiers.new(name=mod_kind, type=mod_kind)  # modifier named after its type
        mod.decimate_type = decimate_type
        if decimate_type == 'COLLAPSE':
            # "Merges vertices together progressively, taking the shape of the mesh into account."
            mod.ratio = prop
        elif decimate_type == 'UNSUBDIV':
            # "Intended for meshes with a mainly grid-based topology (without giving uneven geometry)."
            mod.iterations = prop
        elif decimate_type == 'DISSOLVE':
            # "Reduces details on forms comprised of mainly flat surfaces."
            mod.angle_limit = prop
            mod.delimit = {'UV'}
        else:
            raise TypeError('Invalid Decimate Type')
    return
| 2,515 | Python | 54.91111 | 142 | 0.702982 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/modify.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import time
import math
from . import blender_class, select_mesh, fix_mesh, decimate, remesh, geo_nodes, utils
# Master Class for all modifiers
class Modify(blender_class.BlenderClass):
    """Master class for all mesh-modifying operations (decimate, remesh, geometry nodes).

    BUGFIX: removed a stray unconditional ``raise RuntimeError`` that aborted
    ``execute()`` immediately after running the modifier, before modifiers
    could be applied and statistics printed.
    """

    # settings for GUI version only
    bl_idname = "modify.scene"
    bl_label = "Modify Scene"
    bl_description = "Modify the scene based on set parameters"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=True,  # uses only objects selected in scene. For GUI version only
            apply_mod=True,  # applies the generated modifiers. Should always be true for command line running
            fix_bad_mesh=True,  # remove zero-area faces and zero-length edges based on 'dissolve_threshold'
            dissolve_threshold=.08,  # threshold value for 'fix_bad_mesh'
            merge_vertex=False,  # merge connected and disconnected vertices of a mesh by a distance threshold
            merge_threshold=0.01,  # distance value to use for merge_vertex
            remove_existing_sharp=True,  # removing zero-area faces can corrupt edge data / normals; this helps minimize that
            fix_normals=True,  # optionally fix normals (useful after 'fix_bad_mesh')
            create_new_custom_normals=True,  # useful after 'fix_bad_mesh' to fix the normals as well
            modifier="DECIMATE",  # modifier type to use when 'use_modifier_stack' is False (DECIMATE, REMESH, NODES, or SUBSURF)
            use_modifier_stack=False,  # allows use of more than one modifier sequentially
            modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],  # modifier(s) to use when 'use_modifier_stack' is True
            decimate_type="COLLAPSE",  # decimation mode (COLLAPSE, UNSUBDIV, or DISSOLVE)
            # COLLAPSE: merges vertices progressively, taking the mesh shape into account
            # UNSUBDIV: intended for meshes with a mainly grid-based topology
            # DISSOLVE: reduces details on forms comprised of mainly flat surfaces
            ratio=0.5,  # ratio value used for collapse decimation
            iterations=2,  # number of un-subdivisions performed
            angle=15.0,  # angle limit (degrees) used for dissolve decimation
            remesh_type="VOXEL",  # remesh mode (BLOCKS, SMOOTH, SHARP, VOXEL)
            # NOTE: remeshing removes all UV mappings from an object.
            # https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/remesh.html#remesh-modifier
            oDepth=4,  # octree depth; controls the resolution of most of the remesh modes
            voxel_size=0.1,  # used for voxel remesh to control resolution
            geo_type="GeometryNodeBoundBox",  # geometry node tree type to create:
            # (GeometryNodeConvexHull, GeometryNodeBoundBox, GeometryNodeSubdivisionSurface)
            # https://docs.blender.org/manual/en/latest/modeling/geometry_nodes/index.html#geometry-nodes
            geo_attribute=2  # generic attribute variable usable by the different geo node types
        )

    def execute(self, in_attributes=None):
        """Run the configured mesh cleanup + modifier pass over the (selected) meshes.

        Returns {'FINISHED'} per the Blender operator convention.
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution

        # shorthands for multi-used attributes
        modifier = attributes["modifier"]
        decimate_type = attributes["decimate_type"]
        angle = attributes["angle"]
        remesh_type = attributes["remesh_type"]

        if context.mode != 'OBJECT':  # must be in object mode to perform the rest of the operations
            bpy.ops.object.mode_set(mode='OBJECT')

        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll=False)

        if len(selected):  # run only if there are selected mesh objects in the scene
            if attributes["fix_bad_mesh"]:  # optionally fix bad meshes. Can also be done separately beforehand
                fix_mesh.FixMesh.fixBadMesh(
                    self,
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]:  # optionally fix bad normals (can often arise after fixing bad mesh)
                fix_mesh.FixMesh.fixNormals(self, selected, attributes["create_new_custom_normals"])

            # for printing vertex and face data
            startingVerts = utils.getVertexCount(selected)
            startingFaces = utils.getFaceCount(selected)

            if attributes["use_modifier_stack"]:
                for mod in attributes["modifier_stack"]:
                    self.run_modifier(selected, mod[0], mod[1], mod[2])
            else:
                # Decimate
                if modifier == 'DECIMATE':
                    sub_mod = decimate_type
                    if decimate_type == 'COLLAPSE':
                        prop = attributes["ratio"]
                    elif decimate_type == 'UNSUBDIV':
                        prop = attributes["iterations"]
                    elif decimate_type == 'DISSOLVE':
                        angle = math.radians(angle)  # modifier expects the angle in radians
                        prop = angle
                # Remesh
                elif modifier == 'REMESH':
                    sub_mod = remesh_type
                    if remesh_type == 'BLOCKS' or remesh_type == 'SMOOTH' or remesh_type == 'SHARP':
                        prop = attributes["oDepth"]
                    if remesh_type == 'VOXEL':
                        prop = attributes["voxel_size"]
                # Geometry Nodes
                elif modifier == 'NODES':
                    sub_mod = attributes["geo_type"]
                    prop = attributes["geo_attribute"]
                else:
                    sub_mod = None
                    prop = None
                self.run_modifier(selected, modifier, sub_mod, prop)

            # apply modifiers once above loop is complete
            if attributes["apply_mod"]:
                # need to set one of the selected objects as the active object
                # (there can only be one AO, but multiple SO); required for applying modifiers.
                context.view_layer.objects.active = selected[0]
                bpy.ops.object.convert(target='MESH')  # applies all modifiers of each selected mesh; preps scene for export

            # print vertex and face data
            endingVerts = utils.getVertexCount(selected)
            endingFaces = utils.getFaceCount(selected)
            vertsRemoved = startingVerts - endingVerts
            facesRemoved = startingFaces - endingFaces
            print("Modify Mesh Statistics:")
            utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
            utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))
        else:
            utils.do_print_error("NO MESH OBJECTS")

        now = time.time()  # time after it finished
        print("TIME FOR MODIFY: ", round(now - then, 3))
        return {'FINISHED'}  # how Blender understands that an operator call is complete

    def run_modifier(self, objects, modifier, sub_mod=None, prop=None):
        """Dispatch to the matching modifier helper; each takes different inputs."""
        # Decimate
        if modifier == 'DECIMATE':
            decimate.decimate(objects, sub_mod, prop)
        # Remesh
        elif modifier == 'REMESH':
            remesh.remesh(objects, sub_mod, prop)
        # Geometry Nodes
        elif modifier == 'NODES':
            geo_nodes.geoNodes(objects, sub_mod, prop)
| 10,769 | Python | 58.175824 | 155 | 0.626613 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/uv.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import time
import contextlib
from . import blender_class, run_ops_wo_update, select_mesh, utils
class uvUnwrap(blender_class.BlenderClass):
    """Batch UV-unwraps every (selected) mesh object using the chosen projection.

    Fixes: corrected user-facing typo "UNKOWN" -> "UNKNOWN"; renamed the loop
    variable that shadowed the builtin ``object``.
    """

    # settings for GUI version only
    bl_idname = "uv.unwrap_batch"
    bl_label = "Batch UV Unwrap"
    bl_description = "batch uv unwrap objects"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=False,  # uses only objects selected in scene. For GUI version only
            scale_to_bounds=False,  # determines if the unwrapped map gets scaled to the square uv image bounds
            clip_to_bounds=False,  # if unwrapping exceeds bounds, it will be clipped off
            unwrap_type='Cube',  # the method for unwrapping (cube, sphere, cylinder, or smart)
            use_set_size=False,  # for cube and cylinder project, use specified projection size for all objects.
            # Overrides scale_to_bounds to False
            set_size=2,  # projection size for cube and cylinder project
            print_updated_results=True  # print progress to console
        )

    def execute(self, in_attributes=None):
        """Entry point: unwrap all target meshes, timing the whole operation."""
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution

        # blender operates in modes/contexts; certain operations can only be performed in certain contexts
        if bpy.context.mode != 'OBJECT':  # make sure context is object mode
            bpy.ops.object.mode_set(mode='OBJECT')

        run_ops_wo_update.open_update()  # allows operators to run without updating scene between calls
        self.unwrap(context, attributes)
        run_ops_wo_update.close_update()  # must always be called if open_update is called

        now = time.time()  # time after it finished
        print("TIME FOR UNWRAP: ", round(now - then, 3))
        return {"FINISHED"}

    def unwrap(self, context, attributes):
        """Unwrap each mesh object one at a time with the configured projection."""
        scaleBounds = attributes["scale_to_bounds"]
        clipBounds = attributes["clip_to_bounds"]
        unwrapType = attributes["unwrap_type"]
        use_set_size = attributes["use_set_size"]
        set_size = attributes["set_size"]
        print_updated_results = attributes["print_updated_results"]

        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll=True)

        if len(selected):  # run only if there are mesh objects in the 'selected' array
            LINE_UP = '\033[1A'  # ANSI: move up one line in the console
            LINE_CLEAR = '\x1b[2K'  # ANSI: clear current line in the console
            count = 0  # counter for which object is being calculated
            then = time.time()  # start time of loop execution
            for obj in selected:  # unwrap each object separately
                obj.select_set(True)  # this is now the only selected object
                context.view_layer.objects.active = obj  # Blender needs the active object to be the selected one
                bpy.ops.object.mode_set(mode='EDIT')  # context switching is object dependent; must follow selection
                bpy.ops.mesh.select_all(action='SELECT')  # only selected vertices are uv unwrapped
                # for smart UV projection
                if unwrapType == "Smart":
                    # smart UV can take a long time, so print a progress estimate
                    if count and print_updated_results:  # first object already done and results wanted
                        with contextlib.redirect_stdout(None):  # suppress smart_project's own console output
                            self.smartUV(scaleBounds)
                        now = time.time()
                        timeElapsed = now - then
                        remaining = len(selected) - count  # number of remaining objects
                        timeLeft = timeElapsed / count * remaining  # estimate of remaining time
                        # overwrite the previously printed progress lines instead of scrolling
                        print(LINE_UP, end=LINE_CLEAR)
                        print(LINE_UP, end=LINE_CLEAR)
                        print("Object Count = ", count, " Objects Remaining = ", remaining)
                        print(" Elapsed Time = ", round(timeElapsed, 3), " Time Remaining = ", round(timeLeft, 3))
                    else:  # first object, or not printing results
                        self.smartUV(scaleBounds)
                        if print_updated_results:
                            print("Object Count = 0")
                            print("Time Remaining = UNKNOWN")
                # for cube projection
                elif unwrapType == "Cube":
                    self.cubeUV(scaleBounds, clipBounds, use_set_size, set_size)
                # for sphere projection
                elif unwrapType == "Sphere":
                    self.sphereUV(scaleBounds, clipBounds)
                # for cylinder projection
                elif unwrapType == "Cylinder":
                    self.cylinderUV(scaleBounds, clipBounds, use_set_size, set_size)
                bpy.ops.object.mode_set(mode='OBJECT')  # must be in object mode to select the next object
                obj.select_set(False)  # deselect; again no objects selected
                count += 1
            for obj in selected:  # reselect all originally selected meshes
                obj.select_set(True)
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}

    # methods for running each type of uv projection
    def smartUV(self, scale):
        bpy.ops.uv.smart_project(correct_aspect=True, scale_to_bounds=scale)

    def cubeUV(self, scale, clip, use_set_size, size):
        if use_set_size:  # user sets cube_size value of cube projection
            bpy.ops.uv.cube_project(scale_to_bounds=False, clip_to_bounds=clip, cube_size=size)
        else:
            bpy.ops.uv.cube_project(scale_to_bounds=scale, clip_to_bounds=clip)

    def sphereUV(self, scale, clip):
        # 'ALIGN_TO_OBJECT' keeps the projection direction consistent regardless of view position/direction
        bpy.ops.uv.sphere_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)

    def cylinderUV(self, scale, clip, use_set_size, size):
        if use_set_size:  # user sets radius value of cylinder projection
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=False, clip_to_bounds=clip, radius=size)
        else:
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)
NVIDIA-Omniverse/blender_omniverse_addons/omni/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""
To invoke in Blender script editor:
import bpy
bpy.ops.universalmaterialmap.generator()
bpy.ops.universalmaterialmap.converter()
INFO_HT_header
Header
VIEW3D_HT_tool_header
Info Header: INFO_HT_HEADER
3D View Header: VIEW3D_HT_HEADER
Timeline Header: TIME_HT_HEADER
Outliner Header: OUTLINER_HT_HEADER
Properties Header: PROPERTIES_HT_HEADER, etc.
"""
"""
Menu location problem
https://blender.stackexchange.com/questions/3393/add-custom-menu-at-specific-location-in-the-header#:~:text=Blender%20has%20a%20built%20in,%3EPython%2D%3EUI%20Menu.
"""
# Blender add-on registration metadata; read by Blender's add-on manager.
bl_info = {
    'name': 'Universal Material Map',
    'author': 'NVIDIA Corporation',
    'description': 'A Blender AddOn based on the Universal Material Map framework.',
    'blender': (3, 1, 0),
    'location': 'View3D',
    'warning': '',
    'category': 'Omniverse'
}

import sys
import importlib

import bpy

from .universalmaterialmap.blender import developer_mode

if developer_mode:
    print('UMM DEBUG: Initializing "{0}"'.format(__file__))
    # Dependency-ordered list of UMM submodules; reloaded in this order so a
    # module is always reloaded after the modules it imports from.
    ordered_module_names = [
        'omni.universalmaterialmap',
        'omni.universalmaterialmap.core',
        'omni.universalmaterialmap.core.feature',
        'omni.universalmaterialmap.core.singleton',
        'omni.universalmaterialmap.core.data',
        'omni.universalmaterialmap.core.util',
        'omni.universalmaterialmap.core.operator',
        'omni.universalmaterialmap.core.service',
        'omni.universalmaterialmap.core.service.core',
        'omni.universalmaterialmap.core.service.delegate',
        'omni.universalmaterialmap.core.service.resources',
        'omni.universalmaterialmap.core.service.store',
        'omni.universalmaterialmap.core.converter',
        'omni.universalmaterialmap.core.converter.core',
        'omni.universalmaterialmap.core.converter.util',
        'omni.universalmaterialmap.core.generator',
        'omni.universalmaterialmap.core.generator.core',
        'omni.universalmaterialmap.core.generator.util',
        'omni.universalmaterialmap.blender',
        'omni.universalmaterialmap.blender.menu',
        'omni.universalmaterialmap.blender.converter',
        'omni.universalmaterialmap.blender.generator',
        'omni.universalmaterialmap.blender.material',
    ]
    # Sanity check: every already-imported 'omni.' module must be accounted for
    # in the ordered list, otherwise the reload below would miss it.
    for module_name in sys.modules:
        if 'omni.' not in module_name:
            continue
        if module_name not in ordered_module_names:
            raise Exception('Unexpected module name in sys.modules: {0}'.format(module_name))
    # Reload all previously imported UMM modules so code edits take effect
    # without restarting Blender (developer workflow only).
    for module_name in ordered_module_names:
        if module_name in sys.modules:
            print('UMM reloading: {0}'.format(module_name))
            importlib.reload(sys.modules.get(module_name))

# Developer builds expose the full converter/generator operator set;
# release builds expose only the template operators and the menu.
if developer_mode:
    from .universalmaterialmap.blender.converter import OT_InstanceToDataConverter, OT_DataToInstanceConverter, OT_DataToDataConverter, OT_ApplyDataToInstance, OT_DescribeShaderGraph
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu
    from .universalmaterialmap.blender.generator import OT_Generator
else:
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu
def draw_item(self, context):
    """Draw callback appended to the node editor header; adds the UMM menu entry."""
    layout = self.layout
    layout.menu(UniversalMaterialMapMenu.bl_idname)
def register():
    """Register all add-on classes with Blender and attach the UMM menu to the node editor header."""
    classes = [OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass]
    if developer_mode:
        classes.extend([
            OT_DataToInstanceConverter,
            OT_DataToDataConverter,
            OT_ApplyDataToInstance,
            OT_InstanceToDataConverter,
            OT_DescribeShaderGraph,
            OT_Generator,
        ])
    classes.append(UniversalMaterialMapMenu)
    for cls in classes:
        bpy.utils.register_class(cls)
    # lets add ourselves to the main header
    bpy.types.NODE_HT_header.append(draw_item)
def unregister():
    """Remove the UMM menu from the node editor header and unregister all add-on classes."""
    classes = [OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass]
    if developer_mode:
        classes.extend([
            OT_DataToInstanceConverter,
            OT_DataToDataConverter,
            OT_ApplyDataToInstance,
            OT_InstanceToDataConverter,
            OT_DescribeShaderGraph,
            OT_Generator,
        ])
    classes.append(UniversalMaterialMapMenu)
    for cls in classes:
        bpy.utils.unregister_class(cls)
    bpy.types.NODE_HT_header.remove(draw_item)
# Allow running the add-on directly from Blender's script editor.
if __name__ == "__main__":
    register()

# The menu can also be called from scripts
# bpy.ops.wm.call_menu(name=UniversalMaterialMapMenu.bl_idname)
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/util.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import sys
from .data import Plug
def to_plug_value_type(value: typing.Any, assumed_value_type: str) -> str:
    """Returns matching :class:`omni.universalmaterialmap.core.data.Plug` value type.

    Scalars map directly (str/bool/int/float); iterables are classified as
    vector2/3/4 when their elements are floats (or when ``assumed_value_type``
    already names a vector of matching length), otherwise as a generic list.
    """
    if sys.version_info.major < 3:
        if isinstance(value, basestring):
            return Plug.VALUE_TYPE_STRING
    else:
        if isinstance(value, str):
            return Plug.VALUE_TYPE_STRING
    # bool must be tested with type() because bool is a subclass of int
    if type(value) == bool:
        return Plug.VALUE_TYPE_BOOLEAN
    if isinstance(value, int):
        return Plug.VALUE_TYPE_INTEGER
    if isinstance(value, float):
        return Plug.VALUE_TYPE_FLOAT

    try:
        iter(value)
        is_iterable = True
    except TypeError:
        is_iterable = False

    if is_iterable:
        if assumed_value_type == Plug.VALUE_TYPE_LIST:
            return Plug.VALUE_TYPE_LIST
        num_booleans = 0
        num_integers = 0
        num_floats = 0
        num_strings = 0
        for o in value:
            # BUGFIX: these element-type checks previously tested `value` (the
            # container) instead of the element `o`, so string/boolean elements
            # were never counted.
            if sys.version_info.major < 3:
                if isinstance(o, basestring):
                    num_strings += 1
                    continue
            else:
                if isinstance(o, str):
                    num_strings += 1
                    continue
            if type(o) == bool:
                num_booleans += 1
                continue
            if isinstance(o, int):
                num_integers += 1
                continue
            if isinstance(o, float):
                num_floats += 1
        # Float-bearing sequences of length 2/3/4 are treated as vectors.
        if num_floats > 0:
            if len(value) == 2:
                return Plug.VALUE_TYPE_VECTOR2
            if len(value) == 3:
                return Plug.VALUE_TYPE_VECTOR3
            if len(value) == 4:
                return Plug.VALUE_TYPE_VECTOR4
        # Otherwise trust the assumed type when the length matches.
        if len(value) == 2 and assumed_value_type == Plug.VALUE_TYPE_VECTOR2:
            return assumed_value_type
        if len(value) == 3 and assumed_value_type == Plug.VALUE_TYPE_VECTOR3:
            return assumed_value_type
        if len(value) == 4 and assumed_value_type == Plug.VALUE_TYPE_VECTOR4:
            return assumed_value_type
        return Plug.VALUE_TYPE_LIST
    return Plug.VALUE_TYPE_ANY
def get_extension_from_image_file_format(format: str, base_name: str) -> str:
    """
    For image formats that have multiple possible extensions,
    determine if we should stick with the current format specifier
    or use the one from the filename itself.
    """
    fmt = format.lower()
    tail = base_name.rpartition(".")[-1]
    ext = tail.lower() if len(tail) else None
    if fmt == "open_exr":
        return "exr"
    if fmt == "jpeg":
        return ext if ext in {"jpeg", "jpg"} else "jpg"
    if fmt == "tiff":
        return ext if ext in {"tiff", "tif"} else "tif"
    if fmt == "targa_raw":
        return "tga"
    return fmt
| 3,780 | Python | 31.042373 | 88 | 0.598677 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/data.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import uuid
import sys
import importlib
from .service.core import IDelegate
class ChangeNotification(object):
    """Immutable record describing a single property change on an observed object."""

    def __init__(self, item: object, property_name: str, old_value: typing.Any, new_value: typing.Any):
        super().__init__()
        # Snapshot of the change, exposed through the read-only properties below.
        self._item, self._property_name = item, property_name
        self._old_value, self._new_value = old_value, new_value

    @property
    def item(self) -> object:
        """The object whose property changed."""
        return self._item

    @property
    def property_name(self) -> str:
        """Name of the property that changed."""
        return self._property_name

    @property
    def old_value(self) -> typing.Any:
        """Value before the change."""
        return self._old_value

    @property
    def new_value(self) -> typing.Any:
        """Value after the change."""
        return self._new_value
class Notifying(object):
    """Base class providing change notification capability"""

    def __init__(self):
        super().__init__()
        # Maps callback id -> callback so callers can unregister later.
        self._changed_callbacks: typing.Dict[uuid.uuid4, typing.Callable[[ChangeNotification], typing.NoReturn]] = dict()

    def add_changed_fn(self, callback: typing.Callable[[ChangeNotification], typing.NoReturn]) -> uuid.uuid4:
        """Register *callback* and return its id; re-registering returns the existing id."""
        for existing_id, existing_callback in self._changed_callbacks.items():
            if existing_callback == callback:
                return existing_id
        new_id = uuid.uuid4()
        self._changed_callbacks[new_id] = callback
        return new_id

    def remove_changed_fn(self, callback_id: uuid.uuid4) -> None:
        """Unregister the callback with *callback_id*; unknown ids are ignored."""
        self._changed_callbacks.pop(callback_id, None)

    def _notify(self, notification: ChangeNotification):
        """Invoke every registered callback with *notification*."""
        for callback in self._changed_callbacks.values():
            callback(notification)

    def destroy(self):
        """Drop all registered callbacks; the instance must not be used afterwards."""
        self._changed_callbacks = None
class Subscribing(Notifying):
    """Notifying object that can itself subscribe to other Notifying objects."""

    def __init__(self):
        super(Subscribing, self).__init__()
        # Maps observed object -> the callback id it handed back on subscription.
        self._subscriptions: typing.Dict[Notifying, uuid.uuid4] = dict()

    def _subscribe(self, notifying: Notifying) -> uuid.uuid4:
        """Subscribe to *notifying*; returns the callback id (existing or newly created).

        BUGFIX: previously fell off the end and returned None for a new
        subscription despite the annotated uuid return type.
        """
        if notifying in self._subscriptions.keys():
            return self._subscriptions[notifying]
        self._subscriptions[notifying] = notifying.add_changed_fn(self._on_notification)
        return self._subscriptions[notifying]

    def _unsubscribe(self, notifying: Notifying) -> None:
        """Remove a previously created subscription, if any."""
        if notifying in self._subscriptions.keys():
            callback_id = self._subscriptions[notifying]
            del self._subscriptions[notifying]
            notifying.remove_changed_fn(callback_id=callback_id)

    def _on_notification(self, notification: ChangeNotification) -> None:
        """Hook for subclasses; called when any subscribed object changes."""
        pass
class ManagedListInsert(object):
    """Pairs a newly managed item with the index at which it was inserted."""

    def __init__(self, notifying: Notifying, index: int):
        super().__init__()
        self._notifying, self._index = notifying, index

    @property
    def notifying(self) -> Notifying:
        """The item that was inserted."""
        return self._notifying

    @property
    def index(self) -> int:
        """Position of the item within the managed list."""
        return self._index
class ManagedListRemove(object):
    """Pairs a removed item with the index it previously occupied."""

    def __init__(self, notifying: Notifying, index: int):
        super().__init__()
        self._notifying, self._index = notifying, index

    @property
    def notifying(self) -> Notifying:
        """The item that was removed."""
        return self._notifying

    @property
    def index(self) -> int:
        """Index the item occupied before removal."""
        return self._index
class ManagedListNotification(object):
    """Describes one batch of changes (inserts, updates, or removals) to a ManagedList."""

    ADDED_ITEMS: int = 0
    UPDATED_ITEMS: int = 1
    REMOVED_ITEMS: int = 2

    def __init__(self, managed_list: 'ManagedList', items: typing.List[typing.Union[ManagedListInsert, ChangeNotification, ManagedListRemove]]):
        super().__init__()
        self._managed_list: ManagedList = managed_list
        self._inserted_items: typing.List[ManagedListInsert] = []
        self._change_notifications: typing.List[ChangeNotification] = []
        self._removed_items: typing.List[ManagedListRemove] = []
        self._kind: int = -1
        # The batch kind is inferred from the first element; a single
        # notification never mixes kinds.
        sample = items[0]
        if isinstance(sample, ManagedListInsert):
            self._kind = ManagedListNotification.ADDED_ITEMS
            self._inserted_items = typing.cast(typing.List[ManagedListInsert], items)
        elif isinstance(sample, ChangeNotification):
            self._kind = ManagedListNotification.UPDATED_ITEMS
            self._change_notifications = typing.cast(typing.List[ChangeNotification], items)
        elif isinstance(sample, ManagedListRemove):
            self._kind = ManagedListNotification.REMOVED_ITEMS
            self._removed_items = typing.cast(typing.List[ManagedListRemove], items)
        else:
            raise Exception('Unexpected object: "{0}" of type "{1}".'.format(items[0], type(items[0])))

    @property
    def managed_list(self) -> 'ManagedList':
        """The list this notification originated from."""
        return self._managed_list

    @property
    def kind(self) -> int:
        """One of ADDED_ITEMS, UPDATED_ITEMS, or REMOVED_ITEMS."""
        return self._kind

    @property
    def inserted_items(self) -> typing.List[ManagedListInsert]:
        """Items inserted (populated only when kind == ADDED_ITEMS)."""
        return self._inserted_items

    @property
    def change_notifications(self) -> typing.List[ChangeNotification]:
        """Item change records (populated only when kind == UPDATED_ITEMS)."""
        return self._change_notifications

    @property
    def removed_items(self) -> typing.List[ManagedListRemove]:
        """Items removed (populated only when kind == REMOVED_ITEMS)."""
        return self._removed_items
class ManagedList(object):
    """Observable list of Notifying items.

    Subscribes to each managed item's change notifications and re-broadcasts
    them - along with insert/remove events - to registered callbacks as
    ManagedListNotification instances.
    """

    def __init__(self, items: typing.List[Notifying] = None):
        super(ManagedList, self).__init__()
        # Maps each managed item to the callback id returned by its add_changed_fn().
        self._subscriptions: typing.Dict[Notifying, uuid.uuid4] = dict()
        self._changed_callbacks: typing.Dict[uuid.uuid4, typing.Callable[[ManagedListNotification], typing.NoReturn]] = dict()
        self._managed_items: typing.List[Notifying] = []
        if items:
            for o in items:
                self._manage_item(notifying=o)

    def __iter__(self):
        return iter(self._managed_items)

    def _manage_item(self, notifying: Notifying) -> typing.Union[Notifying, None]:
        """ Subscribes to managed item. Returns item only if it became managed. """
        if notifying in self._managed_items:
            return None
        self._managed_items.append(notifying)
        self._subscriptions[notifying] = notifying.add_changed_fn(self._on_notification)
        return notifying

    def _unmanage_item(self, notifying: Notifying) -> typing.Union[typing.Tuple[Notifying, int], typing.Tuple[None, int]]:
        """ Unsubscribes to managed item. Returns item only if it became unmanaged. """
        if notifying not in self._managed_items:
            return None, -1
        index = self._managed_items.index(notifying)
        self._managed_items.remove(notifying)
        callback_id = self._subscriptions[notifying]
        del self._subscriptions[notifying]
        notifying.remove_changed_fn(callback_id=callback_id)
        return notifying, index

    def _on_notification(self, notification: ChangeNotification) -> None:
        # Wrap a single item-level change and re-broadcast it as a list notification.
        self._notify(
            notification=ManagedListNotification(
                managed_list=self,
                items=[notification]
            )
        )

    def _notify(self, notification: ManagedListNotification):
        for callback in self._changed_callbacks.values():
            callback(notification)

    def add_changed_fn(self, callback: typing.Callable[[ManagedListNotification], typing.NoReturn]) -> uuid.uuid4:
        """Registers a callback and returns its id. Re-registering the same callback returns the existing id."""
        for key, value in self._changed_callbacks.items():
            if value == callback:
                return key
        key = uuid.uuid4()
        self._changed_callbacks[key] = callback
        return key

    def remove_changed_fn(self, callback_id: uuid.uuid4) -> None:
        """Unregisters the callback previously registered under *callback_id*."""
        if callback_id in self._changed_callbacks.keys():
            del self._changed_callbacks[callback_id]

    def append(self, notifying: Notifying) -> None:
        """Appends *notifying* if not already managed and notifies listeners."""
        if self._manage_item(notifying=notifying) is not None:
            self._notify(
                ManagedListNotification(
                    managed_list=self,
                    items=[ManagedListInsert(notifying=notifying, index=self.index(notifying=notifying))]
                )
            )

    def extend(self, notifying: typing.List[Notifying]) -> None:
        """Appends each item in *notifying*; emits a single notification for all newly managed items."""
        added = []
        for o in notifying:
            o = self._manage_item(notifying=o)
            if o:
                added.append(o)
        if len(added) == 0:
            return
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=[ManagedListInsert(notifying=o, index=self.index(notifying=o)) for o in added]
            )
        )

    def remove(self, notifying: Notifying) -> None:
        """Removes *notifying* if managed and notifies listeners."""
        notifying, index = self._unmanage_item(notifying=notifying)
        if notifying:
            self._notify(
                ManagedListNotification(
                    managed_list=self,
                    items=[ManagedListRemove(notifying=notifying, index=index)]
                )
            )

    def remove_all(self) -> None:
        """Removes every item, unsubscribing from each, and emits one notification."""
        items = [ManagedListRemove(notifying=o, index=i) for i, o in enumerate(self._managed_items)]
        # FIX: _subscriptions maps item -> callback_id; the original unpacked
        # items() in the reverse order and invoked remove_changed_fn on the
        # UUID instead of on the managed item, raising AttributeError.
        for notifying, callback_id in self._subscriptions.items():
            notifying.remove_changed_fn(callback_id=callback_id)
        self._subscriptions = dict()
        self._managed_items = []
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=items
            )
        )

    def pop(self, index: int = 0) -> Notifying:
        """Removes and returns the item at *index* (default first), notifying listeners."""
        notifying, index = self._unmanage_item(self._managed_items[index])
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=[ManagedListRemove(notifying=notifying, index=index)]
            )
        )
        return notifying

    def index(self, notifying: Notifying) -> int:
        """Returns the position of *notifying*, or -1 if it is not managed."""
        if notifying in self._managed_items:
            return self._managed_items.index(notifying)
        return -1
class Serializable(Subscribing):
    """Template base class for objects that round-trip through a plain dict."""

    def __init__(self):
        super(Serializable, self).__init__()

    def serialize(self) -> dict:
        """Returns a dict of serializable state; subclasses extend the result."""
        return {}

    def deserialize(self, data: dict) -> None:
        """Restores state from *data*; subclasses extend this. Base is a no-op."""
        pass
class Base(Serializable):
    """Serializable object identified by a persistent UUID string."""

    @classmethod
    def Create(cls) -> 'Base':
        """Alternate constructor used by factory code paths."""
        return cls()

    def __init__(self):
        super(Base, self).__init__()
        self._id: str = str(uuid.uuid4())

    def serialize(self) -> dict:
        """Adds the object id to the serialized state."""
        data = super(Base, self).serialize()
        data['_id'] = self._id
        return data

    def deserialize(self, data: dict) -> None:
        """Restores the id, generating a fresh UUID when '_id' is absent."""
        super(Base, self).deserialize(data=data)
        self._id = data.get('_id', str(uuid.uuid4()))

    @property
    def id(self) -> str:
        """Unique identifier of this object."""
        return self._id
class DagNode(Base):
    """Base class providing input and outputs of :class:`omni.universalmaterialmap.core.data.Plug` """

    def __init__(self):
        super(DagNode, self).__init__()
        self._inputs: typing.List[Plug] = []
        self._outputs: typing.List[Plug] = []
        # Re-entrancy guard so compute() cannot recurse into itself.
        self._computing: bool = False

    def serialize(self) -> dict:
        """Serializes base state plus the input and output plugs."""
        output = super(DagNode, self).serialize()
        output['_inputs'] = [plug.serialize() for plug in self.inputs]
        output['_outputs'] = [plug.serialize() for plug in self.outputs]
        return output

    def deserialize(self, data: dict) -> None:
        """Rebuilds plugs from *data* and notifies listeners when either list changed."""
        super(DagNode, self).deserialize(data=data)
        old_inputs = self._inputs[:]
        old_outputs = self._outputs[:]
        # Unsubscribe from, and drop, every current plug before rebuilding.
        while len(self._inputs):
            self._unsubscribe(notifying=self._inputs.pop())
        while len(self._outputs):
            self._unsubscribe(notifying=self._outputs.pop())
        plugs = []
        if '_inputs' in data.keys():
            for o in data['_inputs']:
                plug = Plug(parent=self)
                plug.deserialize(data=o)
                plugs.append(plug)
        self._inputs = plugs
        plugs = []
        if '_outputs' in data.keys():
            for o in data['_outputs']:
                plug = Plug(parent=self)
                plug.deserialize(data=o)
                plugs.append(plug)
        self._outputs = plugs
        for o in self._inputs:
            self._subscribe(notifying=o)
        for o in self._outputs:
            self._subscribe(notifying=o)
        if not old_inputs == self._inputs:
            self._notify(
                ChangeNotification(
                    item=self,
                    property_name='inputs',
                    old_value=old_inputs,
                    new_value=self._inputs[:]
                )
            )
        # FIX: original compared old_inputs against the new outputs, which could
        # both miss a real outputs change and emit a spurious notification.
        if not old_outputs == self._outputs:
            self._notify(
                ChangeNotification(
                    item=self,
                    property_name='outputs',
                    old_value=old_outputs,
                    new_value=self._outputs[:]
                )
            )

    def _on_notification(self, notification: ChangeNotification) -> None:
        if notification.item == self:
            return
        # Re-broadcast notification
        self._notify(notification=notification)

    def invalidate(self, plug: 'Plug'):
        """Hook invoked when a child plug invalidates; base is a no-op."""
        pass

    def compute(self) -> None:
        """Computes input plugs then output plugs, guarded against re-entry."""
        if self._computing:
            return
        self._computing = True
        self._compute_inputs(input_plugs=self._inputs)
        self._compute_outputs(output_plugs=self._outputs)
        self._computing = False

    def _compute_inputs(self, input_plugs: typing.List['Plug']):
        # Compute dependencies
        for plug in input_plugs:
            if not plug.input:
                continue
            if not plug.input.parent:
                continue
            if not plug.input.is_invalid:
                continue
            plug.input.parent.compute()
        # Set computed_value
        for plug in input_plugs:
            if plug.input:
                plug.computed_value = plug.input.computed_value
            else:
                plug.computed_value = plug.value

    def _compute_outputs(self, output_plugs: typing.List['Plug']):
        # Compute dependencies
        for plug in output_plugs:
            if not plug.input:
                continue
            if not plug.input.parent:
                continue
            if not plug.input.is_invalid:
                continue
            plug.input.parent.compute()
        # Set computed_value
        for plug in output_plugs:
            if plug.input:
                plug.computed_value = plug.input.computed_value
            else:
                plug.computed_value = plug.value

    def add_input(self) -> 'Plug':
        """Subclasses that support dynamic inputs must override this."""
        raise NotImplementedError()

    def can_remove_plug(self, plug: 'Plug') -> bool:
        """Returns True when *plug* may be removed from this node."""
        return plug.is_removable

    def remove_plug(self, plug: 'Plug') -> None:
        """Removes *plug* from inputs/outputs, detaches its destinations, and notifies."""
        if not plug.is_removable:
            raise Exception('Plug is not removable')
        notifications = []
        if plug in self._inputs:
            old_value = self._inputs[:]
            self._unsubscribe(notifying=plug)
            self._inputs.remove(plug)
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='inputs',
                    old_value=old_value,
                    new_value=self._inputs[:]
                )
            )
        if plug in self._outputs:
            old_value = self._outputs[:]
            self._unsubscribe(notifying=plug)
            self._outputs.remove(plug)
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='outputs',
                    old_value=old_value,
                    new_value=self._outputs[:]
                )
            )
        destination: Plug
        for destination in plug.outputs:
            destination.input = None
        for notification in notifications:
            self._notify(notification=notification)

    @property
    def can_add_input(self) -> bool:
        """Whether this node supports adding inputs dynamically; base returns False."""
        return False

    @property
    def inputs(self) -> typing.List['Plug']:
        """Input plugs of this node."""
        return self._inputs

    @property
    def outputs(self) -> typing.List['Plug']:
        """Output plugs of this node."""
        return self._outputs
class GraphEntity(DagNode):
    """Base class providing omni.kit.widget.graph properties for a data item."""

    OPEN = 0
    MINIMIZED = 1
    CLOSED = 2

    def __init__(self):
        super(GraphEntity, self).__init__()
        self._display_name: str = ''
        self._position: typing.Union[typing.Tuple[float, float], None] = None
        self._expansion_state: int = GraphEntity.OPEN
        self._show_inputs: bool = True
        self._show_outputs: bool = True
        self._show_peripheral: bool = False

    def serialize(self) -> dict:
        """Extends base serialization with graph-widget UI state."""
        data = super(GraphEntity, self).serialize()
        data['_display_name'] = self._display_name
        data['_position'] = self._position
        data['_expansion_state'] = self._expansion_state
        data['_show_inputs'] = self._show_inputs
        data['_show_outputs'] = self._show_outputs
        data['_show_peripheral'] = self._show_peripheral
        return data

    def deserialize(self, data: dict) -> None:
        """Restores UI state, falling back to defaults for missing keys."""
        super(GraphEntity, self).deserialize(data=data)
        self._display_name = data.get('_display_name', '')
        self._position = data.get('_position', None)
        self._expansion_state = data.get('_expansion_state', GraphEntity.OPEN)
        self._show_inputs = data.get('_show_inputs', True)
        self._show_outputs = data.get('_show_outputs', True)
        self._show_peripheral = data.get('_show_peripheral', False)

    def _set_ui_property(self, property_name: str, value) -> None:
        # Shared setter: skip identical assignments, otherwise capture old/new
        # values in a ChangeNotification, assign, then broadcast.
        attribute = '_{0}'.format(property_name)
        current = getattr(self, attribute)
        if current is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name=property_name,
            old_value=current,
            new_value=value
        )
        setattr(self, attribute, value)
        self._notify(notification=notification)

    @property
    def display_name(self) -> str:
        """Label shown in the graph widget."""
        return self._display_name

    @display_name.setter
    def display_name(self, value: str) -> None:
        """Sets the label and notifies listeners."""
        self._set_ui_property('display_name', value)

    @property
    def position(self) -> typing.Union[typing.Tuple[float, float], None]:
        """(x, y) position in the graph widget, or None when unplaced."""
        return self._position

    @position.setter
    def position(self, value: typing.Union[typing.Tuple[float, float], None]) -> None:
        """Sets the position and notifies listeners."""
        self._set_ui_property('position', value)

    @property
    def expansion_state(self) -> int:
        """One of GraphEntity.OPEN, MINIMIZED, CLOSED."""
        return self._expansion_state

    @expansion_state.setter
    def expansion_state(self, value: int) -> None:
        """Sets the expansion state and notifies listeners."""
        self._set_ui_property('expansion_state', value)

    @property
    def show_inputs(self) -> bool:
        """Whether input plugs are shown in the widget."""
        return self._show_inputs

    @show_inputs.setter
    def show_inputs(self, value: bool) -> None:
        """Sets input visibility and notifies listeners."""
        self._set_ui_property('show_inputs', value)

    @property
    def show_outputs(self) -> bool:
        """Whether output plugs are shown in the widget."""
        return self._show_outputs

    @show_outputs.setter
    def show_outputs(self, value: bool) -> None:
        """Sets output visibility and notifies listeners."""
        self._set_ui_property('show_outputs', value)

    @property
    def show_peripheral(self) -> bool:
        """Whether peripheral plugs are shown in the widget."""
        return self._show_peripheral

    @show_peripheral.setter
    def show_peripheral(self, value: bool) -> None:
        """Sets peripheral visibility and notifies listeners."""
        self._set_ui_property('show_peripheral', value)
class Connection(Serializable):
    """Serializable link recording the ids of a source plug and a destination plug."""

    def __init__(self):
        super(Connection, self).__init__()
        self._source_id = ''
        self._destination_id = ''

    def serialize(self) -> dict:
        """Adds both endpoint ids to the serialized state."""
        data = super(Connection, self).serialize()
        data['_source_id'] = self._source_id
        data['_destination_id'] = self._destination_id
        return data

    def deserialize(self, data: dict) -> None:
        """Restores endpoint ids, defaulting each to '' when absent."""
        super(Connection, self).deserialize(data=data)
        self._source_id = data.get('_source_id', '')
        self._destination_id = data.get('_destination_id', '')

    @property
    def source_id(self):
        """Id of the plug providing the value."""
        return self._source_id

    @property
    def destination_id(self):
        """Id of the plug receiving the value."""
        return self._destination_id
class Plug(Base):
    """
    A Plug can be:
        a source
        an output
        both a source and an output
        a container for a static value - most likely as an output
        a container for an editable value - most likely as an output

    plug.default_value      Starting point and for resetting.
    plug.value              Apply as computed_value if there is no input or dependency providing a value.
    plug.computed_value     Final value. Could be thought of as plug.output_value.

    Plug is_dirty on
        input connect
        input disconnect
        value change if not connected

    A Plug is_dirty if
        it is_dirty
        its input is_dirty
        any dependency is_dirty
    """

    # Supported value-type identifiers used by value_type / internal_value_type.
    VALUE_TYPE_ANY = 'any'
    VALUE_TYPE_FLOAT = 'float'
    VALUE_TYPE_INTEGER = 'int'
    VALUE_TYPE_STRING = 'str'
    VALUE_TYPE_BOOLEAN = 'bool'
    VALUE_TYPE_NODE_ID = 'node_id'
    VALUE_TYPE_VECTOR2 = 'vector2'
    VALUE_TYPE_VECTOR3 = 'vector3'
    VALUE_TYPE_VECTOR4 = 'vector4'
    VALUE_TYPE_ENUM = 'enum'
    VALUE_TYPE_LIST = 'list'

    VALUE_TYPES = [
        VALUE_TYPE_ANY,
        VALUE_TYPE_FLOAT,
        VALUE_TYPE_INTEGER,
        VALUE_TYPE_STRING,
        VALUE_TYPE_BOOLEAN,
        VALUE_TYPE_NODE_ID,
        VALUE_TYPE_VECTOR2,
        VALUE_TYPE_VECTOR3,
        VALUE_TYPE_VECTOR4,
        VALUE_TYPE_ENUM,
        VALUE_TYPE_LIST,
    ]

    @classmethod
    def Create(
            cls,
            parent: DagNode,
            name: str,
            display_name: str,
            value_type: str = 'any',
            editable: bool = False,
            is_removable: bool = False,
    ) -> 'Plug':
        """Factory: builds a plug on *parent* with the given name, label and flags."""
        instance = cls(parent=parent)
        instance._name = name
        instance._display_name = display_name
        instance._value_type = value_type
        instance._is_editable = editable
        instance._is_removable = is_removable
        return instance

    def __init__(self, parent: DagNode):
        super(Plug, self).__init__()
        self._parent: DagNode = parent
        self._name: str = ''
        self._display_name: str = ''
        self._value_type: str = Plug.VALUE_TYPE_ANY
        self._internal_value_type: str = Plug.VALUE_TYPE_ANY
        self._is_peripheral: bool = False
        self._is_editable: bool = False
        self._is_removable: bool = False
        self._default_value: typing.Any = None
        self._computed_value: typing.Any = None
        self._value: typing.Any = None
        self._is_invalid: bool = False
        # Upstream plug driving this one (None when the plug's own value is used).
        self._input: typing.Union[Plug, typing.NoReturn] = None
        # Downstream plugs driven by this one.
        self._outputs: typing.List[Plug] = []
        self._enum_values: typing.List = []

    def serialize(self) -> dict:
        """Serializes static configuration and value; computed state is not persisted."""
        output = super(Plug, self).serialize()
        output['_name'] = self._name
        output['_display_name'] = self._display_name
        output['_value_type'] = self._value_type
        output['_internal_value_type'] = self._internal_value_type
        output['_is_peripheral'] = self._is_peripheral
        output['_is_editable'] = self._is_editable
        output['_is_removable'] = self._is_removable
        output['_default_value'] = self._default_value
        output['_value'] = self._value
        output['_enum_values'] = self._enum_values
        return output

    def deserialize(self, data: dict) -> None:
        """Restores configuration and value; the input connection is reset to None
        and is expected to be re-established by the owning container."""
        super(Plug, self).deserialize(data=data)
        self._input = None
        self._name = data['_name'] if '_name' in data.keys() else ''
        self._display_name = data['_display_name'] if '_display_name' in data.keys() else ''
        self._value_type = data['_value_type'] if '_value_type' in data.keys() else Plug.VALUE_TYPE_ANY
        self._internal_value_type = data['_internal_value_type'] if '_internal_value_type' in data.keys() else None
        self._is_peripheral = data['_is_peripheral'] if '_is_peripheral' in data.keys() else False
        self._is_editable = data['_is_editable'] if '_is_editable' in data.keys() else False
        self._is_removable = data['_is_removable'] if '_is_removable' in data.keys() else False
        self._default_value = data['_default_value'] if '_default_value' in data.keys() else None
        self._value = data['_value'] if '_value' in data.keys() else self._default_value
        self._enum_values = data['_enum_values'] if '_enum_values' in data.keys() else []

    def invalidate(self) -> None:
        """Marks this plug dirty (idempotent) and propagates to the parent node."""
        if self._is_invalid:
            return
        self._is_invalid = True
        if self.parent:
            self.parent.invalidate(self)

    @property
    def parent(self) -> DagNode:
        """The node that owns this plug."""
        return self._parent

    @property
    def name(self) -> str:
        """Internal name of the plug."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        if self._name is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='name',
            old_value=self._name,
            new_value=value
        )
        self._name = value
        self._notify(notification=notification)

    @property
    def display_name(self) -> str:
        """Human-readable label of the plug."""
        return self._display_name

    @display_name.setter
    def display_name(self, value: str) -> None:
        if self._display_name is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='display_name',
            old_value=self._display_name,
            new_value=value
        )
        self._display_name = value
        self._notify(notification=notification)

    @property
    def value_type(self) -> str:
        """One of Plug.VALUE_TYPES describing the public value type."""
        return self._value_type

    @value_type.setter
    def value_type(self, value: str) -> None:
        if self._value_type is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='value_type',
            old_value=self._value_type,
            new_value=value
        )
        self._value_type = value
        self._notify(notification=notification)

    @property
    def internal_value_type(self) -> str:
        """Value type used internally; may differ from the public value_type."""
        return self._internal_value_type

    @internal_value_type.setter
    def internal_value_type(self, value: str) -> None:
        if self._internal_value_type is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='internal_value_type',
            old_value=self._internal_value_type,
            new_value=value
        )
        self._internal_value_type = value
        self._notify(notification=notification)

    @property
    def is_removable(self) -> bool:
        """Whether the owning node may remove this plug."""
        return self._is_removable

    @property
    def is_peripheral(self) -> bool:
        """Whether this plug is displayed as peripheral in the UI."""
        return self._is_peripheral

    @is_peripheral.setter
    def is_peripheral(self, value: bool) -> None:
        if self._is_peripheral is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='is_peripheral',
            old_value=self._is_peripheral,
            new_value=value
        )
        self._is_peripheral = value
        self._notify(notification=notification)

    @property
    def computed_value(self) -> typing.Any:
        """Final value produced by the last compute pass."""
        return self._computed_value

    @computed_value.setter
    def computed_value(self, value: typing.Any) -> None:
        # Identity-equal assignment still clears the invalid flag and syncs
        # self._value, but emits no notification.
        if self._computed_value is value:
            self._is_invalid = False
            self._value = self._computed_value
            return
        notification = ChangeNotification(
            item=self,
            property_name='computed_value',
            old_value=self._computed_value,
            new_value=value
        )
        # Refuse to accept a computed value while the driving input is still
        # invalid: that indicates compute ran in the wrong order.
        if self._input and self._input.is_invalid:
            print('WARNING: Universal Material Map: Compute encountered an unexpected state: input invalid after compute. Results may be incorrect.')
            print('\tplug: "{0}"'.format(self.name))
            if self._parent:
                print('\tplug.parent: "{0}"'.format(self._parent.__class__.__name__))
            print('\tplug.input: "{0}"'.format(self._input.name))
            if self._input.parent:
                print('\tplug.input.parent: "{0}"'.format(self._input.parent.__class__.__name__))
            return
        self._is_invalid = False
        self._computed_value = value
        self._value = self._computed_value
        self._notify(notification=notification)

    @property
    def value(self) -> typing.Any:
        """User-authored value; used as computed_value when no input drives the plug."""
        return self._value

    @value.setter
    def value(self, value: typing.Any) -> None:
        if self._value is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='value',
            old_value=self._value,
            new_value=value
        )
        self._value = value
        self._notify(notification=notification)
        # Only a disconnected plug becomes dirty on a value edit; a connected
        # plug's result comes from its input instead.
        if self._input is None:
            self.invalidate()

    @property
    def is_invalid(self) -> typing.Any:
        """True when this plug - or its driving input - needs recomputing."""
        if self._input and self._input._is_invalid:
            return True
        return self._is_invalid

    @property
    def input(self) -> typing.Union['Plug', typing.NoReturn]:
        """The upstream plug driving this one, or None."""
        return self._input

    @input.setter
    def input(self, value: typing.Union['Plug', typing.NoReturn]) -> None:
        if self._input is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='input',
            old_value=self._input,
            new_value=value
        )
        self._input = value
        self._notify(notification=notification)
        # Connecting or disconnecting always dirties the plug.
        self.invalidate()

    @property
    def outputs(self) -> typing.List['Plug']:
        """Downstream plugs driven by this plug."""
        return self._outputs

    @property
    def is_editable(self) -> bool:
        """Whether the value may be edited in the UI."""
        return self._is_editable

    @is_editable.setter
    def is_editable(self, value: bool) -> None:
        if self._is_editable is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='is_editable',
            old_value=self._is_editable,
            new_value=value
        )
        self._is_editable = value
        self._notify(notification=notification)

    @property
    def default_value(self) -> typing.Any:
        """Initial value; also used when resetting the plug."""
        return self._default_value

    @default_value.setter
    def default_value(self, value: typing.Any) -> None:
        if self._default_value is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='default_value',
            old_value=self._default_value,
            new_value=value
        )
        self._default_value = value
        self._notify(notification=notification)

    @property
    def enum_values(self) -> typing.List:
        """Allowed choices when value_type is VALUE_TYPE_ENUM."""
        return self._enum_values

    @enum_values.setter
    def enum_values(self, value: typing.List) -> None:
        if self._enum_values is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='enum_values',
            old_value=self._enum_values,
            new_value=value
        )
        self._enum_values = value
        self._notify(notification=notification)
class Node(DagNode):
    """DagNode that carries the class name identifying what kind of node it represents."""

    @classmethod
    def Create(cls, class_name: str) -> 'Node':
        """Factory: builds a Node tagged with *class_name*."""
        instance = typing.cast(Node, super(Node, cls).Create())
        instance._class_name = class_name
        return instance

    def __init__(self):
        super(Node, self).__init__()
        self._class_name: str = ''

    def serialize(self) -> dict:
        """Adds the class name to the serialized state."""
        data = super(Node, self).serialize()
        data['_class_name'] = self._class_name
        return data

    def deserialize(self, data: dict) -> None:
        """Restores the class name, defaulting to '' when absent."""
        super(Node, self).deserialize(data=data)
        self._class_name = data.get('_class_name', '')

    @property
    def class_name(self):
        """Identifier of the node class this data represents."""
        return self._class_name
class Client(Serializable):
    """Identifies a DCC application by name and version for compatibility checks."""

    ANY_VERSION = 'any'
    NO_VERSION = 'none'

    DCC_OMNIVERSE_CREATE = 'Omniverse Create'
    DCC_3DS_MAX = '3ds MAX'
    DCC_MAYA = 'Maya'
    DCC_HOUDINI = 'Houdini'
    DCC_SUBSTANCE_DESIGNER = 'Substance Designer'
    DCC_SUBSTANCE_PAINTER = 'Substance Painter'
    DCC_BLENDER = 'Blender'

    @classmethod
    def _make(cls, name: str, version: str) -> 'Client':
        # Shared factory body for the named-application constructors below.
        instance = Client()
        instance._name = name
        instance._version = version
        return instance

    @classmethod
    def Autodesk_3dsMax(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a 3ds MAX client identity."""
        return cls._make(name=Client.DCC_3DS_MAX, version=version)

    @classmethod
    def Autodesk_Maya(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a Maya client identity."""
        return cls._make(name=Client.DCC_MAYA, version=version)

    @classmethod
    def OmniverseCreate(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for an Omniverse Create client identity."""
        return cls._make(name=Client.DCC_OMNIVERSE_CREATE, version=version)

    @classmethod
    def Blender(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a Blender client identity."""
        return cls._make(name=Client.DCC_BLENDER, version=version)

    def __init__(self):
        super(Client, self).__init__()
        self._name: str = ''
        self._version: str = ''

    def __eq__(self, other: 'Client') -> bool:
        if not isinstance(other, Client):
            return False
        return other.name == self._name and other.version == self._version

    def is_compatible(self, other: 'Client') -> bool:
        """True when *other* matches exactly, or when either side allows any version."""
        if not isinstance(other, Client):
            return False
        if other == self:
            return True
        return Client.ANY_VERSION in (other._version, self._version)

    def serialize(self) -> dict:
        """Adds name and version to the serialized state."""
        data = super(Client, self).serialize()
        data['_name'] = self._name
        data['_version'] = self._version
        return data

    def deserialize(self, data: dict) -> None:
        """Restores name and version, defaulting each to '' when absent."""
        super(Client, self).deserialize(data=data)
        self._name = data.get('_name', '')
        self._version = data.get('_version', '')

    @property
    def name(self) -> str:
        """Application name, e.g. Client.DCC_BLENDER."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def version(self) -> str:
        """Application version string, or Client.ANY_VERSION."""
        return self._version

    @version.setter
    def version(self, value: str) -> None:
        self._version = value
class AssemblyMetadata(Serializable):
    """Descriptive metadata for an assembly: category, name, keywords and supported clients."""

    CATEGORY_BASE = 'Base Materials'
    CATEGORY_CONNECTOR = 'Connector Materials'

    CATEGORIES = [
        CATEGORY_BASE,
        CATEGORY_CONNECTOR,
    ]

    def __init__(self):
        super(AssemblyMetadata, self).__init__()
        self._category = ''
        self._name = ''
        self._keywords: typing.List[str] = []
        self._supported_clients: typing.List[Client] = []

    def serialize(self) -> dict:
        """Serializes category, name, keywords and the supported client list."""
        output = super(AssemblyMetadata, self).serialize()
        output['_category'] = self._category
        output['_name'] = self._name
        output['_keywords'] = self._keywords
        output['_supported_clients'] = [o.serialize() for o in self._supported_clients]
        return output

    def deserialize(self, data: dict) -> None:
        """Restores metadata from *data*, rebuilding Client objects."""
        super(AssemblyMetadata, self).deserialize(data=data)
        self._category = data['_category'] if '_category' in data.keys() else ''
        self._name = data['_name'] if '_name' in data.keys() else ''
        # FIX: a missing '_keywords' key previously defaulted to '' (a str),
        # violating the typing.List[str] contract established in __init__.
        self._keywords = data['_keywords'] if '_keywords' in data.keys() else []
        items = []
        if '_supported_clients' in data.keys():
            for o in data['_supported_clients']:
                item = Client()
                item.deserialize(data=o)
                items.append(item)
        self._supported_clients = items

    @property
    def category(self) -> str:
        """One of AssemblyMetadata.CATEGORIES."""
        return self._category

    @category.setter
    def category(self, value: str) -> None:
        self._category = value

    @property
    def name(self) -> str:
        """Display name of the assembly."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def keywords(self) -> typing.List[str]:
        """Search keywords associated with the assembly."""
        return self._keywords

    @keywords.setter
    def keywords(self, value: typing.List[str]) -> None:
        self._keywords = value

    @property
    def supported_clients(self) -> typing.List[Client]:
        """Client applications this assembly supports."""
        return self._supported_clients
class Target(GraphEntity):
    """Graph entity representing an assembly: a set of nodes, their connections and metadata."""

    def __init__(self):
        super(Target, self).__init__()
        self._nodes: typing.List[Node] = []
        self._metadata: AssemblyMetadata = AssemblyMetadata()
        self._root_node_id: str = ''
        self._root_node: Node = None
        self._revision: int = 0
        # NOTE: _store_id is intentionally not serialized below; presumably it
        # is assigned by the hosting store at runtime - confirm with callers.
        self._store_id: str = ''
        self._connections: typing.List[Connection] = []

    def serialize(self) -> dict:
        """Serializes nodes, metadata, root node id, revision and connections."""
        output = super(Target, self).serialize()
        output['_nodes'] = [node.serialize() for node in self.nodes]
        output['_metadata'] = self._metadata.serialize()
        output['_root_node_id'] = self._root_node_id
        output['_revision'] = self._revision
        output['_connections'] = [o.serialize() for o in self._connections]
        return output

    def deserialize(self, data: dict) -> None:
        """Restores nodes, metadata and connections, then re-wires plug links."""
        super(Target, self).deserialize(data=data)
        self._root_node_id = data['_root_node_id'] if '_root_node_id' in data.keys() else ''
        nodes = []
        if '_nodes' in data.keys():
            for o in data['_nodes']:
                node = Node()
                node.deserialize(data=o)
                nodes.append(node)
        self._nodes = nodes
        # Resolve the root node reference by id now that nodes exist.
        root_node = None
        if self._root_node_id:
            for node in self._nodes:
                if node.id == self._root_node_id:
                    root_node = node
                    break
        self._root_node = root_node
        metadata = AssemblyMetadata()
        if '_metadata' in data.keys():
            metadata.deserialize(data=data['_metadata'])
        self._metadata = metadata
        self._revision = data['_revision'] if '_revision' in data.keys() else 0
        items = []
        if '_connections' in data.keys():
            for o in data['_connections']:
                item = Connection()
                item.deserialize(data=o)
                items.append(item)
        self._connections = items
        # Re-establish plug wiring from the persisted connections.
        # NOTE(review): a plug matching EITHER endpoint id is accepted - a plug
        # found among node.inputs becomes input_plug and one found among
        # node.outputs becomes output_plug, regardless of which id matched.
        # The input_plug then acts as the driving side (see connect()).
        for connection in self._connections:
            input_plug: Plug = None
            output_plug: Plug = None
            for node in self._nodes:
                for plug in node.inputs:
                    if connection.source_id == plug.id:
                        input_plug = plug
                    elif connection.destination_id == plug.id:
                        input_plug = plug
                for plug in node.outputs:
                    if connection.source_id == plug.id:
                        output_plug = plug
                    elif connection.destination_id == plug.id:
                        output_plug = plug
                if input_plug is not None and output_plug is not None:
                    break
            if input_plug is None or output_plug is None:
                continue
            if output_plug not in input_plug.outputs:
                input_plug.outputs.append(output_plug)
            output_plug.input = input_plug

    def connect(self, source: Plug, destination: Plug) -> None:
        """Creates (or reuses) a Connection from *source* to *destination* and wires the plugs."""
        for connection in self._connections:
            if connection.source_id == source.id and connection.destination_id == destination.id:
                return
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source

    @property
    def nodes(self) -> typing.List[Node]:
        """Nodes belonging to this assembly."""
        return self._nodes

    @property
    def metadata(self) -> AssemblyMetadata:
        """Descriptive metadata for this assembly."""
        return self._metadata

    @property
    def root_node(self) -> Node:
        """The designated root node, or None."""
        return self._root_node

    @root_node.setter
    def root_node(self, value: Node) -> None:
        self._root_node = value
        # Keep the persisted id in sync with the object reference.
        self._root_node_id = self._root_node.id if self._root_node else ''

    @property
    def revision(self) -> int:
        """Revision counter of this assembly."""
        return self._revision

    @revision.setter
    def revision(self, value: int) -> None:
        self._revision = value

    @property
    def store_id(self) -> str:
        """Identifier of the store this assembly lives in (runtime-only)."""
        return self._store_id

    @store_id.setter
    def store_id(self, value: int) -> None:
        if self._store_id is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='store_id',
            old_value=self._store_id,
            new_value=value
        )
        self._store_id = value
        self._notify(notification=notification)
class TargetInstance(GraphEntity):
    """Graph entity referencing a Target assembly; its plugs mirror the assembly's plugs."""

    @classmethod
    def FromAssembly(cls, assembly: Target) -> 'TargetInstance':
        """Factory: builds an instance bound to *assembly*."""
        instance = cls()
        instance._target_id = assembly.id
        instance.target = assembly
        instance.display_name = assembly.display_name
        return instance

    def __init__(self):
        super(TargetInstance, self).__init__()
        self._target_id: str = ''
        self._target: typing.Union[Target, typing.NoReturn] = None
        self._is_setting_target = False

    def serialize(self) -> dict:
        """Serializes graph state and the target id; plugs are not persisted
        because they are derived from the referenced assembly on target set."""
        # FIX: the original called super().serialize(), discarded the result,
        # then serialized again via GraphEntity.serialize(self) - one call is
        # sufficient and they are the same method in this class's MRO.
        output = super(TargetInstance, self).serialize()
        output['_target_id'] = self._target_id
        output['_inputs'] = []
        output['_outputs'] = []
        return output

    def deserialize(self, data: dict) -> None:
        """
        Does not invoke super on DagNode base class because inputs and outputs are derived from assembly instance.
        """
        # Clearing these keys makes DagNode.deserialize rebuild empty plug lists.
        data['_inputs'] = []
        data['_outputs'] = []
        GraphEntity.deserialize(self, data=data)
        self._target_id = data['_target_id'] if '_target_id' in data.keys() else ''

    def invalidate(self, plug: 'Plug' = None):
        """
        Invalidate any plug that is a destination of an output plug named plug.name.
        """
        # If a destination is invalidated it is assumed compute will be invoked once a destination endpoint has been found
        do_compute = True
        output: Plug
        destination: Plug
        for output in self.outputs:
            if not plug or output.name == plug.name:
                for destination in output.outputs:
                    destination.invalidate()
                    do_compute = False
        if do_compute:
            self.compute()

    @property
    def target_id(self) -> str:
        """Id of the referenced Target assembly."""
        return self._target_id

    @property
    def target(self) -> typing.Union[Target, typing.NoReturn]:
        """The referenced Target assembly, or None."""
        return self._target

    @target.setter
    def target(self, value: typing.Union[Target, typing.NoReturn]) -> None:
        """Binds the assembly (id must match _target_id) and rebuilds all plugs from it."""
        if self._target is value:
            return
        # The assembly must agree with the persisted target id in all cases.
        if not self._target_id and value:
            raise Exception('Target ID "" does not match assembly instance "{0}".'.format(value.id))
        if self._target_id and not value:
            raise Exception('Target ID "{0}" does not match assembly instance "None".'.format(self._target_id))
        if self._target_id and value and not self._target_id == value.id:
            raise Exception('Target ID "{0}" does not match assembly instance "{1}".'.format(self._target_id, value.id))
        self._is_setting_target = True
        notification = ChangeNotification(
            item=self,
            property_name='target',
            old_value=self._target,
            new_value=value
        )
        self._target = value
        self._inputs = []
        self._outputs = []
        if self._target:
            # Synthesized output exposing the target's id to downstream consumers.
            node_id_plug = Plug.Create(
                parent=self,
                name='node_id_output',
                display_name='Node Id',
                value_type=Plug.VALUE_TYPE_STRING
            )
            node_id_plug._id = self._target.id
            node_id_plug.value = self._target.id
            self._outputs.append(node_id_plug)
            # Mirror every plug of every node in the assembly via serialize/deserialize copies.
            for node in self._target.nodes:
                for o in node.inputs:
                    plug = Plug(parent=self)
                    plug.deserialize(data=o.serialize())
                    self._inputs.append(plug)
                for o in node.outputs:
                    plug = Plug(parent=self)
                    plug.deserialize(data=o.serialize())
                    self._outputs.append(plug)
        self._is_setting_target = False
        self._notify(notification=notification)
        self.invalidate()
class Operator(Base):
    def __init__(
            self,
            id: str,
            name: str,
            required_inputs: int,
            min_inputs: int,
            max_inputs: int,
            num_outputs: int,
    ):
        """Configures the operator's identity and its input/output arity.

        :param id: fixed identifier; overrides the UUID generated by Base.
        :param name: display name of the operator.
        :param required_inputs: minimum number of input plugs compute() accepts.
        :param min_inputs: lower bound on the input count.
        :param max_inputs: upper bound on the input count.
        :param num_outputs: number of output plugs this operator produces.
        """
        super(Operator, self).__init__()
        self._id = id
        self._name: str = name
        self._required_inputs: int = required_inputs
        self._min_inputs: int = min_inputs
        self._max_inputs: int = max_inputs
        self._num_outputs: int = num_outputs
        # Re-entrancy guard for compute().
        self._computing: bool = False
def compute(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
"""
Base class only computes input_plugs. It is assumed that extending class computes output plugs.
"""
if self._computing:
return
self._computing = True
if len(input_plugs) < self._required_inputs:
raise Exception('Array of inputs not of required length "{0}". Actual length "{1}". Operator: "{2}"'.format(self._required_inputs, len(input_plugs), self.__class__.__name__))
for plug in input_plugs:
if plug.input:
if plug.input in input_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another input on the same node.'.format(plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
if plug.input in output_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another output on the same node.'.format(
plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
for plug in output_plugs:
if plug.input:
if plug.input in output_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another output on the same node.'.format(
plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
self._compute_inputs(input_plugs=input_plugs)
self._compute_outputs(input_plugs=input_plugs, output_plugs=output_plugs)
self._computing = False
def _compute_inputs(self, input_plugs: typing.List[Plug]):
# Compute dependencies
for plug in input_plugs:
if not plug.input:
continue
if not plug.input.parent:
continue
if not plug.input.is_invalid:
continue
plug.input.parent.compute()
# Set computed_value
for plug in input_plugs:
if plug.input:
plug.computed_value = plug.input.computed_value
else:
plug.computed_value = plug.value
def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
raise NotImplementedError(self.__class__)
def generate_input(self, parent: 'DagNode', index: int) -> Plug:
"""
Base class provides method template but does nothing.
"""
pass
def generate_output(self, parent: 'DagNode', index: int) -> Plug:
"""
Base class provides method template but does nothing.
"""
pass
def test(self) -> None:
parent = OperatorInstance()
inputs = []
while len(inputs) < self.min_inputs:
inputs.append(
self.generate_input(parent=parent, index=len(inputs))
)
outputs = []
while len(outputs) < self.num_outputs:
outputs.append(
self.generate_output(parent=parent, index=len(outputs))
)
self._prepare_plugs_for_test(input_plugs=inputs, output_plugs=outputs)
self._perform_test(input_plugs=inputs, output_plugs=outputs)
self._assert_test(input_plugs=inputs, output_plugs=outputs)
def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
pass
def _perform_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
self.compute(input_plugs=input_plugs, output_plugs=output_plugs)
def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
raise NotImplementedError()
def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
if not plug.is_removable:
raise Exception('Plug is not removable')
notifications = []
if plug in operator_instance._inputs:
old_value = operator_instance._inputs[:]
operator_instance._inputs.remove(plug)
operator_instance._unsubscribe(notifying=plug)
notifications.append(
ChangeNotification(
item=operator_instance,
property_name='inputs',
old_value=old_value,
new_value=operator_instance._inputs[:]
)
)
if plug in operator_instance._outputs:
old_value = operator_instance._outputs[:]
operator_instance._outputs.remove(plug)
operator_instance._unsubscribe(notifying=plug)
notifications.append(
ChangeNotification(
item=operator_instance,
property_name='outputs',
old_value=old_value,
new_value=operator_instance._outputs[:]
)
)
destination: Plug
for destination in plug.outputs:
destination.input = None
for notification in notifications:
for callback in operator_instance._changed_callbacks.values():
callback(notification)
@property
def name(self) -> str:
return self._name
@property
def min_inputs(self) -> int:
return self._min_inputs
@property
def max_inputs(self) -> int:
return self._max_inputs
@property
def required_inputs(self) -> int:
return self._required_inputs
@property
def num_outputs(self) -> int:
return self._num_outputs
class GraphOutput(Operator):
    """
    Output resolves to a node id.
    """

    def __init__(self):
        super(GraphOutput, self).__init__(
            id='5f39ab48-5bee-46fe-9a22-0f678013568e',
            name='Graph Output',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single node-id input plug; only index 0 is valid."""
        if not index == 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='input_node_id', display_name='Node Id', value_type=Plug.VALUE_TYPE_NODE_ID)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single node-id output plug; only index 0 is valid."""
        if not index == 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output_node_id', display_name='Node Id', value_type=Plug.VALUE_TYPE_NODE_ID)

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Pass the computed node id straight through from input to output."""
        resolved = input_plugs[0].computed_value
        output_plugs[0].computed_value = resolved

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Seed the input with this operator's own id for the round-trip self-test."""
        input_plugs[0].computed_value = self.id

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Verify every output received the id that was fed into the input."""
        for candidate in output_plugs:
            if not candidate.computed_value == self.id:
                raise Exception('Test failed.')
class OperatorInstance(GraphEntity):
    """A node in the conversion graph wrapping an Operator and its input/output plugs."""
    @classmethod
    def FromOperator(cls, operator: Operator) -> 'OperatorInstance':
        """Build an instance for *operator*, generating its default plugs."""
        instance = OperatorInstance()
        # Suppress invalidation/notification while plugs are generated.
        instance._is_deserializing = True
        instance._operator = operator
        instance._display_name = operator.name
        while len(instance._inputs) < operator.min_inputs:
            instance._inputs.append(
                operator.generate_input(parent=instance, index=len(instance._inputs))
            )
        while len(instance._outputs) < operator.num_outputs:
            instance._outputs.append(
                operator.generate_output(parent=instance, index=len(instance._outputs))
            )
        # Record module/class so deserialize() can re-instantiate the operator.
        instance._operator_module = operator.__class__.__module__
        instance._operator_class_name = operator.__class__.__name__
        instance._is_deserializing = False
        instance.invalidate()
        return instance
    def __init__(self):
        super(OperatorInstance, self).__init__()
        self._description: str = ''
        self._operator_module: str = ''
        self._operator_class_name: str = ''
        self._operator: Operator = None
        self._is_deserializing = False
    def serialize(self) -> dict:
        """Serialize, adding description and the operator's module/class names."""
        output = super(OperatorInstance, self).serialize()
        output['_description'] = self._description
        output['_operator_module'] = self._operator_module
        output['_operator_class_name'] = self._operator_class_name
        return output
    def deserialize(self, data: dict) -> None:
        """Restore state and re-instantiate the named Operator, generating any missing plugs.

        :raises Exception: if the operator module or class name is missing from *data*.
        """
        self._is_deserializing = True
        super(OperatorInstance, self).deserialize(data=data)
        self._description = data['_description'] if '_description' in data.keys() else ''
        self._operator_module = data['_operator_module'] if '_operator_module' in data.keys() else ''
        self._operator_class_name = data['_operator_class_name'] if '_operator_class_name' in data.keys() else ''
        if not self._operator_module:
            raise Exception('Unexpected data: no valid "operator module" defined')
        if not self._operator_class_name:
            raise Exception('Unexpected data: no valid "operator class name" defined')
        # Import the operator's module on demand, then instantiate its class by name.
        if self._operator_module not in sys.modules.keys():
            importlib.import_module(self._operator_module)
        module_pointer = sys.modules[self._operator_module]
        class_pointer = module_pointer.__dict__[self._operator_class_name]
        self._operator = typing.cast(Operator, class_pointer())
        # Top up plugs to the operator's minimums; subscribe only after deserialization ends.
        notifying = []
        while len(self._inputs) < self._operator.min_inputs:
            plug = self._operator.generate_input(parent=self, index=len(self._inputs))
            self._inputs.append(plug)
            notifying.append(plug)
        while len(self._outputs) < self._operator.num_outputs:
            plug = self._operator.generate_output(parent=self, index=len(self._outputs))
            self._outputs.append(plug)
            notifying.append(plug)
        self._is_deserializing = False
        for o in notifying:
            self._subscribe(notifying=o)
        self.invalidate()
    def invalidate(self, plug: 'Plug' = None):
        """
        Because one plug changed we assume any connected plug to any output needs to be invalidated.
        """
        if self._is_deserializing:
            return
        # Set all outputs to invalid
        output: Plug
        for output in self.outputs:
            output._is_invalid = True
        # If a destination is invalidated it is assumed compute will be invoked once a destination endpoint has been found
        do_compute = True
        destination: Plug
        for output in self.outputs:
            for destination in output.outputs:
                destination.invalidate()
                do_compute = False
        if do_compute:
            self.compute()
    def compute(self) -> None:
        """Delegate computation of this node's plugs to its operator."""
        if self._operator:
            self._operator.compute(input_plugs=self._inputs, output_plugs=self._outputs)
    def add_input(self) -> Plug:
        """Append a new input plug (if allowed), notify observers, and invalidate outputs.

        :raises Exception: if can_add_input is False.
        """
        if not self.can_add_input:
            raise Exception('Cannot add another input.')
        old_value = self._inputs[:]
        plug = self._operator.generate_input(parent=self, index=len(self._inputs))
        self._inputs.append(plug)
        self._subscribe(notifying=plug)
        notification = ChangeNotification(
            item=self,
            property_name='inputs',
            old_value=old_value,
            new_value=self._inputs[:]
        )
        self._notify(notification=notification)
        for o in self.outputs:
            o.invalidate()
        return plug
    def remove_plug(self, plug: 'Plug') -> None:
        """Remove *plug* via the operator, which handles detaching and notification."""
        self._operator.remove_plug(operator_instance=self, plug=plug)
    @property
    def operator(self) -> Operator:
        """The Operator this instance wraps."""
        return self._operator
    @property
    def description(self) -> str:
        """User-facing description of this node."""
        return self._description
    @description.setter
    def description(self, value: str) -> None:
        if self._description is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='description',
            old_value=self._description,
            new_value=value
        )
        self._description = value
        self._notify(notification=notification)
    @DagNode.can_add_input.getter
    def can_add_input(self) -> bool:
        # max_inputs == -1 means no limit.
        # NOTE(review): the "- 1" below caps inputs at max_inputs - 1 rather
        # than max_inputs; confirm whether that off-by-one is intentional.
        if self._operator.max_inputs == -1:
            return True
        return len(self._inputs) < self._operator.max_inputs - 1
class StyleInfo(object):
    """Read-only bundle of display-style attributes used when rendering graph nodes."""

    def __init__(
            self,
            name: str,
            background_color: int,
            border_color: int,
            connection_color: int,
            node_background_color: int,
            footer_icon_filename: str,
    ):
        super(StyleInfo, self).__init__()
        # Stash every constructor argument on a private attribute; each is
        # exposed read-only through the matching property below.
        for attribute, value in (
                ('_name', name),
                ('_background_color', background_color),
                ('_border_color', border_color),
                ('_connection_color', connection_color),
                ('_node_background_color', node_background_color),
                ('_footer_icon_filename', footer_icon_filename),
        ):
            setattr(self, attribute, value)

    @property
    def name(self) -> str:
        """Identifier of this style."""
        return self._name

    @property
    def background_color(self) -> int:
        """Header/background color as 0xAARRGGBB."""
        return self._background_color

    @property
    def border_color(self) -> int:
        """Border color as 0xAARRGGBB."""
        return self._border_color

    @property
    def connection_color(self) -> int:
        """Connection line color as 0xAARRGGBB."""
        return self._connection_color

    @property
    def node_background_color(self) -> int:
        """Node body color as 0xAARRGGBB."""
        return self._node_background_color

    @property
    def footer_icon_filename(self) -> str:
        """Filename of the icon displayed in the node footer."""
        return self._footer_icon_filename
class ConversionGraph(Base):
    """A material conversion graph: target instances and operator instances wired
    together by plug-to-plug connections, with one source node and one GraphOutput."""
    # STYLE_OUTPUT: StyleInfo = StyleInfo(
    #     name='output',
    #     background_color=0xFF2E2E2E,
    #     border_color=0xFFB97E9C,
    #     connection_color=0xFF80C26F,
    #     node_background_color=0xFF444444,
    #     footer_icon_filename='Material.svg'
    # )
    STYLE_SOURCE_NODE: StyleInfo = StyleInfo(
        name='source_node',
        background_color=0xFF2E2E2E,
        border_color=0xFFE5AAC8,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_ASSEMBLY_REFERENCE: StyleInfo = StyleInfo(
        name='assembly_reference',
        background_color=0xFF2E2E2E,
        border_color=0xFFB97E9C,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_OPERATOR_INSTANCE: StyleInfo = StyleInfo(
        name='operator_instance',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_VALUE_RESOLVER: StyleInfo = StyleInfo(
        name='value_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='value_resolver.svg'
    )
    STYLE_BOOLEAN_SWITCH: StyleInfo = StyleInfo(
        name='boolean_switch',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='boolean_switch.svg'
    )
    STYLE_CONSTANT_BOOLEAN: StyleInfo = StyleInfo(
        name='constant_boolean',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_boolean.svg'
    )
    STYLE_CONSTANT_COLOR: StyleInfo = StyleInfo(
        name='constant_color',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_CONSTANT_FLOAT: StyleInfo = StyleInfo(
        name='constant_float',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_float.svg'
    )
    STYLE_CONSTANT_INTEGER: StyleInfo = StyleInfo(
        name='constant_integer',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_integer.svg'
    )
    STYLE_CONSTANT_STRING: StyleInfo = StyleInfo(
        name='constant_string',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_string.svg'
    )
    STYLE_EQUAL: StyleInfo = StyleInfo(
        name='equal',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='equal.svg'
    )
    STYLE_GREATER_THAN: StyleInfo = StyleInfo(
        name='greater_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='greater_than.svg'
    )
    STYLE_LESS_THAN: StyleInfo = StyleInfo(
        name='less_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='less_than.svg'
    )
    STYLE_MERGE_RGB: StyleInfo = StyleInfo(
        name='merge_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='merge_rgb.svg'
    )
    STYLE_NOT: StyleInfo = StyleInfo(
        name='not',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='not.svg'
    )
    STYLE_OR: StyleInfo = StyleInfo(
        name='or',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='or.svg'
    )
    STYLE_SPLIT_RGB: StyleInfo = StyleInfo(
        name='split_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='split_rgb.svg'
    )
    STYLE_TRANSPARENCY_RESOLVER: StyleInfo = StyleInfo(
        name='transparency_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='transparency_resolver.svg'
    )
    STYLE_OUTPUT: StyleInfo = StyleInfo(
        name='output',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='output.svg'
    )
    STYLE_INFOS = (
        STYLE_OUTPUT,
        STYLE_SOURCE_NODE,
        STYLE_ASSEMBLY_REFERENCE,
        STYLE_OPERATOR_INSTANCE,
        STYLE_VALUE_RESOLVER,
        STYLE_BOOLEAN_SWITCH,
        STYLE_CONSTANT_BOOLEAN,
        STYLE_CONSTANT_COLOR,
        STYLE_CONSTANT_FLOAT,
        STYLE_CONSTANT_INTEGER,
        STYLE_CONSTANT_STRING,
        STYLE_EQUAL,
        STYLE_GREATER_THAN,
        STYLE_LESS_THAN,
        STYLE_NOT,
        STYLE_OR,
        STYLE_SPLIT_RGB,
        STYLE_TRANSPARENCY_RESOLVER,
        STYLE_MERGE_RGB,
    )

    def __init__(self):
        super(ConversionGraph, self).__init__()
        # Every graph always contains exactly one GraphOutput operator instance.
        self._graph_output: OperatorInstance = OperatorInstance.FromOperator(operator=GraphOutput())
        self._target_instances: typing.List[TargetInstance] = []
        self._operator_instances: typing.List[OperatorInstance] = [self._graph_output]
        self._connections: typing.List[Connection] = []
        self._library: Library = None
        self._source_node_id: str = ''
        self._source_node: TargetInstance = None
        self._filename: str = ''
        self._exists_on_disk: bool = False
        self._revision: int = 0

    def _on_notification(self, notification: ChangeNotification) -> None:
        """Re-broadcast change notifications from contained items to this graph's observers."""
        if notification.item == self:
            return
        # Re-broadcast notification
        self._notify(notification=notification)

    def serialize(self) -> dict:
        """Serialize target instances, operator instances, connections, source node id, and revision."""
        output = super(ConversionGraph, self).serialize()
        output['_target_instances'] = [o.serialize() for o in self._target_instances]
        output['_operator_instances'] = [o.serialize() for o in self._operator_instances]
        output['_connections'] = [o.serialize() for o in self._connections]
        output['_source_node_id'] = self._source_node_id
        output['_revision'] = self._revision
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the graph from *data*, emitting a ChangeNotification for each
        property whose value actually changed (after all state is restored)."""
        super(ConversionGraph, self).deserialize(data=data)
        notifications = []
        # _source_node_id
        old = self._source_node_id
        new = data['_source_node_id'] if '_source_node_id' in data.keys() else ''
        if not old == new:
            self._source_node_id = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='source_node_id',
                    old_value=old,
                    new_value=new
                )
            )
        # _revision
        old = self._revision
        new = data['_revision'] if '_revision' in data.keys() else 0
        if not old == new:
            self._revision = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='revision',
                    old_value=old,
                    new_value=new
                )
            )
        # _target_instances
        old = self._target_instances[:]
        while len(self._target_instances):
            self._unsubscribe(notifying=self._target_instances.pop())
        items = []
        if '_target_instances' in data.keys():
            for o in data['_target_instances']:
                item = TargetInstance()
                item.deserialize(data=o)
                items.append(item)
        self._target_instances = items
        if not self._target_instances == old:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='target_instances',
                    old_value=old,
                    new_value=self._target_instances
                )
            )
        # _source_node: resolve the source node id against the restored target instances.
        old = self._source_node
        source_node = None
        if self._source_node_id:
            items = [o for o in self._target_instances if o.id == self._source_node_id]
            source_node = items[0] if len(items) else None
        self._source_node = source_node
        if not self._source_node == old:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='source_node',
                    old_value=old,
                    new_value=self._source_node
                )
            )
        # _operator_instances
        # _graph_output
        old_operator_instances = self._operator_instances
        old_graph_output = self._graph_output
        items = []
        self._graph_output = None
        if '_operator_instances' in data.keys():
            for o in data['_operator_instances']:
                item = OperatorInstance()
                item.deserialize(data=o)
                items.append(item)
                if isinstance(item.operator, GraphOutput):
                    self._graph_output = item
        # Guarantee the graph always has a GraphOutput instance, first in the list.
        if not self._graph_output:
            self._graph_output = OperatorInstance.FromOperator(operator=GraphOutput())
            items.insert(0, self._graph_output)
        self._operator_instances = items
        if not self._operator_instances == old_operator_instances:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='operator_instances',
                    old_value=old_operator_instances,
                    new_value=self._operator_instances
                )
            )
        if not self._graph_output == old_graph_output:
            notifications.append(
                ChangeNotification(
                    item=self,
                    # FIX: was property_name='old_graph_output' with
                    # old_value=old_operator_instances — a copy-paste slip from
                    # the operator_instances notification above. The property
                    # is 'graph_output' and the old value is the prior output node.
                    property_name='graph_output',
                    old_value=old_graph_output,
                    new_value=self._graph_output
                )
            )
        # _connections (no change notification is emitted for connections).
        items = []
        if '_connections' in data.keys():
            for o in data['_connections']:
                item = Connection()
                item.deserialize(data=o)
                items.append(item)
        self._connections = items
        for o in self._target_instances:
            self._subscribe(notifying=o)
        for o in self._operator_instances:
            self._subscribe(notifying=o)
        for o in notifications:
            self._notify(notification=o)

    def build_dag(self) -> None:
        """Wire plug objects together according to the stored Connection records."""
        for connection in self._connections:
            source = self._get_plug(plug_id=connection.source_id)
            destination = self._get_plug(plug_id=connection.destination_id)
            if not source or not destination:
                continue
            if destination not in source.outputs:
                source.outputs.append(destination)
            destination.input = source

    def _get_plug(self, plug_id: str) -> typing.Union[Plug, typing.NoReturn]:
        """Find a plug by id across all target and operator instances, or None."""
        for assembly_reference in self._target_instances:
            for plug in assembly_reference.inputs:
                if plug.id == plug_id:
                    return plug
            for plug in assembly_reference.outputs:
                if plug.id == plug_id:
                    return plug
        for operator_instance in self._operator_instances:
            for plug in operator_instance.outputs:
                if plug.id == plug_id:
                    return plug
            for plug in operator_instance.inputs:
                if plug.id == plug_id:
                    return plug
        return None

    def add_node(self, node: OperatorInstance) -> None:
        """Append an operator instance without subscribing to it (see add())."""
        self._operator_instances.append(node)

    def add_connection(self, source: Plug, destination: Plug) -> None:
        """Record and wire a connection from *source* to *destination*."""
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source

    def add(self, entity: GraphEntity) -> None:
        """Add a target or operator instance to the graph and subscribe to its changes."""
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                return
            self._target_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                return
            self._operator_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        raise NotImplementedError()

    def can_be_removed(self, entity: GraphEntity) -> bool:
        """An entity can be removed if it is in the graph and is not the graph output."""
        if not entity:
            return False
        if entity not in self._target_instances and entity not in self._operator_instances:
            return False
        if entity == self._graph_output:
            return False
        return True

    def remove(self, entity: GraphEntity) -> None:
        """Remove *entity* and any connections whose endpoint ids match the entity id.

        NOTE(review): connections store plug ids (see add_connection), so the
        comparisons below only match connections whose plug id equals the
        entity id (e.g. a TargetInstance's node_id plug, whose _id is set to
        the target id). Confirm whether connections to the entity's other
        plugs should also be removed here.
        """
        if not self.can_be_removed(entity=entity):
            raise Exception('Not allowed: entity is not allowed to be deleted.')
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                self._unsubscribe(notifying=entity)
                self._target_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
                    to_remove.append(connection)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                self._unsubscribe(notifying=entity)
                self._operator_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
                    to_remove.append(connection)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        raise NotImplementedError()

    def remove_connection(self, connection: Connection) -> None:
        """Delete a connection record and unwire the plug objects it joined."""
        if connection in self._connections:
            self._connections.remove(connection)
        source = self._get_plug(plug_id=connection.source_id)
        destination = self._get_plug(plug_id=connection.destination_id)
        if source and destination:
            if destination in source.outputs:
                source.outputs.remove(destination)
            if destination.input == source:
                destination.input = None

    def get_entity_by_id(self, identifier: str) -> typing.Union[GraphEntity, typing.NoReturn]:
        """Return the target or operator instance with the given id, or None."""
        entities = [entity for entity in self._target_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        entities = [entity for entity in self._operator_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        return None

    def get_output_entity(self) -> typing.Union[TargetInstance, typing.NoReturn]:
        """
        Computes the dependency graph and returns the resulting Target reference.
        Make sure relevant source node plug values have been set prior to invoking this method.
        """
        if not self._graph_output:
            return None
        self._graph_output.invalidate()
        assembly_id = self._graph_output.outputs[0].computed_value
        for item in self._target_instances:
            if item.target_id == assembly_id:
                return item
        return None

    def get_object_style_name(self, entity: GraphEntity) -> str:
        """Map *entity* to the name of the StyleInfo used to draw it."""
        if not entity:
            return ''
        # TODO: Style computed output entity
        # if entity == self.get_output_entity():
        #     return ConversionGraph.STYLE_OUTPUT.name
        if entity == self.source_node:
            return ConversionGraph.STYLE_SOURCE_NODE.name
        if isinstance(entity, TargetInstance):
            return ConversionGraph.STYLE_ASSEMBLY_REFERENCE.name
        if isinstance(entity, OperatorInstance):
            if entity.operator:
                # Matched by class name (not isinstance) so operator modules
                # need not be imported here.
                if entity.operator.__class__.__name__ == 'ConstantBoolean':
                    return ConversionGraph.STYLE_CONSTANT_BOOLEAN.name
                if entity.operator.__class__.__name__ == 'ConstantColor':
                    return ConversionGraph.STYLE_CONSTANT_COLOR.name
                if entity.operator.__class__.__name__ == 'ConstantFloat':
                    return ConversionGraph.STYLE_CONSTANT_FLOAT.name
                if entity.operator.__class__.__name__ == 'ConstantInteger':
                    return ConversionGraph.STYLE_CONSTANT_INTEGER.name
                if entity.operator.__class__.__name__ == 'ConstantString':
                    return ConversionGraph.STYLE_CONSTANT_STRING.name
                if entity.operator.__class__.__name__ == 'BooleanSwitch':
                    return ConversionGraph.STYLE_BOOLEAN_SWITCH.name
                if entity.operator.__class__.__name__ == 'ValueResolver':
                    return ConversionGraph.STYLE_VALUE_RESOLVER.name
                if entity.operator.__class__.__name__ == 'SplitRGB':
                    return ConversionGraph.STYLE_SPLIT_RGB.name
                if entity.operator.__class__.__name__ == 'MergeRGB':
                    return ConversionGraph.STYLE_MERGE_RGB.name
                if entity.operator.__class__.__name__ == 'LessThan':
                    return ConversionGraph.STYLE_LESS_THAN.name
                if entity.operator.__class__.__name__ == 'GreaterThan':
                    return ConversionGraph.STYLE_GREATER_THAN.name
                if entity.operator.__class__.__name__ == 'Or':
                    return ConversionGraph.STYLE_OR.name
                if entity.operator.__class__.__name__ == 'Equal':
                    return ConversionGraph.STYLE_EQUAL.name
                if entity.operator.__class__.__name__ == 'Not':
                    return ConversionGraph.STYLE_NOT.name
                if entity.operator.__class__.__name__ == 'MayaTransparencyResolver':
                    return ConversionGraph.STYLE_TRANSPARENCY_RESOLVER.name
                if entity.operator.__class__.__name__ == 'GraphOutput':
                    return ConversionGraph.STYLE_OUTPUT.name
            return ConversionGraph.STYLE_OPERATOR_INSTANCE.name
        return ''

    def get_output_targets(self) -> typing.List[TargetInstance]:
        """All target instances except the source node."""
        return [o for o in self._target_instances if not o == self._source_node]

    @property
    def target_instances(self) -> typing.List[TargetInstance]:
        """Copy of the graph's target instances."""
        return self._target_instances[:]

    @property
    def operator_instances(self) -> typing.List[OperatorInstance]:
        """Copy of the graph's operator instances."""
        return self._operator_instances[:]

    @property
    def connections(self) -> typing.List[Connection]:
        """Copy of the graph's connection records."""
        return self._connections[:]

    @property
    def filename(self) -> str:
        """Filename this graph is stored under."""
        return self._filename

    @filename.setter
    def filename(self, value: str) -> None:
        if self._filename is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='filename',
            old_value=self._filename,
            new_value=value
        )
        self._filename = value
        self._notify(notification=notification)

    @property
    def library(self) -> 'Library':
        """The Library this graph belongs to, if any."""
        return self._library

    @property
    def graph_output(self) -> OperatorInstance:
        """The graph's single GraphOutput operator instance."""
        return self._graph_output

    @property
    def source_node(self) -> TargetInstance:
        """The target instance acting as the conversion source."""
        return self._source_node

    @source_node.setter
    def source_node(self, value: TargetInstance) -> None:
        if self._source_node is value:
            return
        node_notification = ChangeNotification(
            item=self,
            property_name='source_node',
            old_value=self._source_node,
            new_value=value
        )
        node_id_notification = ChangeNotification(
            item=self,
            property_name='source_node_id',
            old_value=self._source_node_id,
            new_value=value.id if value else ''
        )
        self._source_node = value
        self._source_node_id = self._source_node.id if self._source_node else ''
        self._notify(notification=node_notification)
        self._notify(notification=node_id_notification)

    @property
    def exists_on_disk(self) -> bool:
        """True once the graph has been persisted to disk."""
        return self._exists_on_disk

    @property
    def revision(self) -> int:
        """Monotonic revision number of this graph."""
        return self._revision

    @revision.setter
    def revision(self, value: int) -> None:
        if self._revision is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='revision',
            old_value=self._revision,
            new_value=value
        )
        self._revision = value
        self._notify(notification=notification)
class FileHeader(Serializable):
    """Serialization header recording the module and class name of a stored object."""

    @classmethod
    def FromInstance(cls, instance: Serializable) -> 'FileHeader':
        """Build a header describing *instance*'s class."""
        header = cls()
        header._module = type(instance).__module__
        header._class_name = type(instance).__name__
        return header

    @classmethod
    def FromData(cls, data: dict) -> 'FileHeader':
        """Rebuild a header from serialized data; both keys must be present."""
        for required_key in ('_module', '_class_name'):
            if required_key not in data.keys():
                raise Exception('Unexpected data: key "{0}" not in dictionary'.format(required_key))
        header = cls()
        header._module = data['_module']
        header._class_name = data['_class_name']
        return header

    def __init__(self):
        super(FileHeader, self).__init__()
        self._module = ''
        self._class_name = ''

    def serialize(self) -> dict:
        """Serialize module and class name into a dictionary."""
        return {
            '_module': self._module,
            '_class_name': self._class_name,
        }

    @property
    def module(self) -> str:
        """Dotted module path of the described class."""
        return self._module

    @property
    def class_name(self) -> str:
        """Name of the described class."""
        return self._class_name
class FileUtility(Serializable):
    """Pairs a FileHeader with a serializable payload so files can be round-tripped by class name."""
    @classmethod
    def FromInstance(cls, instance: Serializable) -> 'FileUtility':
        """Wrap *instance* together with a header describing its class."""
        utility = cls()
        utility._header = FileHeader.FromInstance(instance=instance)
        utility._content = instance
        return utility
    @classmethod
    def FromData(cls, data: dict) -> 'FileUtility':
        """Rebuild a wrapped instance from serialized data.

        The header names the module and class to instantiate; the module is
        imported on demand and the content deserialized into the new instance.

        :raises Exception: if '_header' or '_content' is missing from *data*.
        """
        if '_header' not in data.keys():
            raise Exception('Unexpected data: key "_header" not in dictionary')
        if '_content' not in data.keys():
            raise Exception('Unexpected data: key "_content" not in dictionary')
        utility = cls()
        utility._header = FileHeader.FromData(data=data['_header'])
        # Import the content's module on demand, then instantiate the named class.
        if utility._header.module not in sys.modules.keys():
            importlib.import_module(utility._header.module)
        module_pointer = sys.modules[utility._header.module]
        class_pointer = module_pointer.__dict__[utility._header.class_name]
        utility._content = class_pointer()
        if isinstance(utility._content, Serializable):
            utility._content.deserialize(data=data['_content'])
        return utility
    def __init__(self):
        super(FileUtility, self).__init__()
        self._header: FileHeader = None
        self._content: Serializable = None
    def serialize(self) -> dict:
        """Serialize header and content into a single dictionary."""
        output = dict()
        output['_header'] = self._header.serialize()
        output['_content'] = self._content.serialize()
        return output
    def assert_content_serializable(self):
        """Debug helper: serialize the content and print every leaf value."""
        data = self.content.serialize()
        self._assert(data=data)
    def _assert(self, data: dict):
        # Recursively walk the serialized structure, printing leaf values.
        for key, value in data.items():
            if isinstance(value, dict):
                self._assert(data=value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        self._assert(data=item)
                    else:
                        print(item)
            else:
                print(key, value)
    @property
    def header(self) -> FileHeader:
        """The header describing the wrapped content's class."""
        return self._header
    @property
    def content(self) -> Serializable:
        """The wrapped serializable payload."""
        return self._content
class Library(Base):
    """
    A Library represents a UMM data set. It can contain any of the following types of files:
    - Settings
    - Conversion Graph
    - Target
    - Conversion Manifest
    A Library is divided into a "core" and a "user" data set.
    "core":
    - Files provided by NVIDIA.
    - Installed and updated by UMM.
    - Adding, editing, and deleting files require running in "Developer Mode".
    - Types:
    - Conversion Graph
    - Target
    - Conversion Manifest
    "user"
    - Files created and updated by user.
    - Types:
    - Conversion Graph
    - Target
    - Conversion Manifest
    Overrides ./core/Conversion Manifest
    ...or...
    each file header has an attribute: source = core, source = user
    if source == core then it is read-only to users.
    TARGET: problem with that is what if user needs to update an existing target?
    ...why would they?
    ...because they may want to edit property states in the Target... would want their own.
    CONVERSION GRAPH
    ...they could just Save As and make a different one. no problem here. do need to change the 'source' attribute to 'user' though.
    CONVERSION MANIFEST
    2 files
    ConversionManifest.json
    ConversionManifest_user.json (overrides ConversionManifest.json)
    Limitation: User cannot all together remove a manifest item
    """

    @classmethod
    def Create(
            cls,
            library_id: str,
            name: str,
            manifest: IDelegate = None,
            conversion_graph: IDelegate = None,
            target: IDelegate = None,
            settings: IDelegate = None
    ) -> 'Library':
        """Create a Library with the given id, name, and storage delegates.

        Each delegate handles persistence for one file type; a None delegate
        leaves that file type unavailable (see is_read_only).
        """
        instance = typing.cast(Library, super(Library, cls).Create())
        instance._id = library_id
        instance._name = name
        instance._manifest = manifest
        instance._conversion_graph = conversion_graph
        instance._target = target
        instance._settings = settings
        return instance

    def __init__(self):
        super(Library, self).__init__()
        self._name: str = ''
        # Storage delegates per file type; None means unsupported by this library.
        self._manifest: typing.Union[IDelegate, typing.NoReturn] = None
        self._conversion_graph: typing.Union[IDelegate, typing.NoReturn] = None
        self._target: typing.Union[IDelegate, typing.NoReturn] = None
        self._settings: typing.Union[IDelegate, typing.NoReturn] = None

    def serialize(self) -> dict:
        """Serialize the library; only the name is persisted (delegates are runtime wiring)."""
        output = super(Library, self).serialize()
        output['_name'] = self._name
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the library name from *data*."""
        super(Library, self).deserialize(data=data)
        self._name = data['_name'] if '_name' in data.keys() else ''

    @property
    def name(self) -> str:
        """Display name of the library."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def manifest(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Storage delegate for conversion manifests, or None."""
        return self._manifest

    @property
    def conversion_graph(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Storage delegate for conversion graphs, or None."""
        return self._conversion_graph

    @property
    def target(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Storage delegate for targets, or None."""
        return self._target

    @property
    def settings(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Storage delegate for settings, or None."""
        return self._settings

    @property
    def is_read_only(self) -> bool:
        """True when the conversion-graph or target delegate is missing.

        NOTE(review): the original expression tested ``_conversion_graph``
        twice ('... or not self._target or not self._conversion_graph');
        the duplicate term has been removed, which does not change behavior.
        Confirm whether ``_manifest`` or ``_settings`` was intended as a
        third term.
        """
        return not self._conversion_graph or not self._target
class Settings(Serializable):
    """Top-level UMM settings document: registered libraries and render contexts.

    Persisted under the fixed store id ``Settings.json``.
    """
    def __init__(self):
        super(Settings, self).__init__()
        self._libraries: typing.List[Library] = []
        self._store_id = 'Settings.json'
        self._render_contexts: typing.List[str] = []

    def serialize(self) -> dict:
        """Extend the base serialization with libraries and render contexts."""
        output = super(Settings, self).serialize()
        output['_libraries'] = [o.serialize() for o in self._libraries]
        output['_render_contexts'] = self._render_contexts
        return output

    def deserialize(self, data: dict) -> None:
        """Restore state from *data*; missing keys yield empty collections."""
        super(Settings, self).deserialize(data=data)
        libraries = []
        for blob in data.get('_libraries', []):
            library = Library()
            library.deserialize(data=blob)
            libraries.append(library)
        self._libraries = libraries
        self._render_contexts = data.get('_render_contexts', [])

    @property
    def libraries(self) -> typing.List[Library]:
        return self._libraries

    @property
    def store_id(self) -> str:
        return self._store_id

    @property
    def render_contexts(self) -> typing.List[str]:
        return self._render_contexts
class ClassInfo(object):
    """Immutable pairing of a human-readable shader label and its class identifier."""

    def __init__(self, display_name: str, class_name: str):
        super(ClassInfo, self).__init__()
        # Stored privately; exposed read-only through the properties below.
        self._display_name, self._class_name = display_name, class_name

    @property
    def display_name(self) -> str:
        """Human-readable label, e.g. ``Omni PBR``."""
        return self._display_name

    @property
    def class_name(self) -> str:
        """Implementation identifier, e.g. ``OmniPBR.mdl|OmniPBR``."""
        return self._class_name
class OmniMDL(object):
    """Catalog of Omniverse MDL shader classes as ClassInfo constants."""
    OMNI_GLASS: ClassInfo = ClassInfo(display_name='Omni Glass', class_name='OmniGlass.mdl|OmniGlass')
    OMNI_GLASS_OPACITY: ClassInfo = ClassInfo(display_name='Omni Glass Opacity',
                                              class_name='OmniGlass_Opacity.mdl|OmniGlass_Opacity')
    OMNI_PBR: ClassInfo = ClassInfo(display_name='Omni PBR', class_name='OmniPBR.mdl|OmniPBR')
    OMNI_PBR_CLEAR_COAT: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat',
                                               class_name='OmniPBR_ClearCoat.mdl|OmniPBR_ClearCoat')
    OMNI_PBR_CLEAR_COAT_OPACITY: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat Opacity',
                                                       class_name='OmniPBR_ClearCoat_Opacity.mdl|OmniPBR_ClearCoat_Opacity')
    # Annotation added for consistency with the sibling constants.
    OMNI_PBR_OPACITY: ClassInfo = ClassInfo(display_name='Omni PBR Opacity', class_name='OmniPBR_Opacity.mdl|OmniPBR_Opacity')
    OMNI_SURFACE: ClassInfo = ClassInfo(display_name='OmniSurface', class_name='OmniSurface.mdl|OmniSurface')
    OMNI_SURFACE_LITE: ClassInfo = ClassInfo(display_name='OmniSurfaceLite',
                                             class_name='OmniSurfaceLite.mdl|OmniSurfaceLite')
    OMNI_SURFACE_UBER: ClassInfo = ClassInfo(display_name='OmniSurfaceUber',
                                             class_name='OmniSurfaceUber.mdl|OmniSurfaceUber')
class MayaShader(object):
    """Catalog of Maya shader classes as ClassInfo constants."""
    LAMBERT: ClassInfo = ClassInfo(display_name='Lambert', class_name='lambert')
class ConversionMap(Serializable):
    """Associates a render context and application with a ConversionGraph document.

    Only the graph's id is persisted; the live graph reference must be
    re-resolved after deserialization.
    """
    @classmethod
    def Create(
            cls,
            render_context: str,
            application: str,
            document: ConversionGraph,
    ) -> 'ConversionMap':
        """Build a mapping for *document*; raises if it is not a ConversionGraph."""
        if not isinstance(document, ConversionGraph):
            raise Exception('Argument "document" unexpected class: "{0}"'.format(type(document)))
        instance = cls()
        instance._render_context = render_context
        instance._application = application
        instance._conversion_graph_id = document.id
        instance._conversion_graph = document
        return instance

    def __init__(self):
        super(ConversionMap, self).__init__()
        self._render_context: str = ''
        self._application: str = ''
        self._conversion_graph_id: str = ''
        self._conversion_graph: ConversionGraph = None

    def __eq__(self, other: 'ConversionMap') -> bool:
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; preserved as-is since callers may rely on that.
        if not isinstance(other, ConversionMap):
            return False
        return (
            self.render_context == other.render_context
            and self.application == other.application
            and self.conversion_graph_id == other.conversion_graph_id
        )

    def serialize(self) -> dict:
        """Persist context, application and graph id (not the live graph)."""
        output = super(ConversionMap, self).serialize()
        output['_render_context'] = self._render_context
        output['_application'] = self._application
        output['_conversion_graph_id'] = self._conversion_graph_id
        return output

    def deserialize(self, data: dict) -> None:
        """Restore state from *data*; the live graph reference is reset to None."""
        super(ConversionMap, self).deserialize(data=data)
        self._render_context = data.get('_render_context', '')
        self._application = data.get('_application', '')
        self._conversion_graph_id = data.get('_conversion_graph_id', '')
        self._conversion_graph = None

    @property
    def render_context(self) -> str:
        return self._render_context

    @property
    def application(self) -> str:
        return self._application

    @property
    def conversion_graph_id(self) -> str:
        return self._conversion_graph_id

    @property
    def conversion_graph(self) -> ConversionGraph:
        return self._conversion_graph
class ConversionManifest(Serializable):
    """Versioned collection of ConversionMap entries.

    Persisted under the fixed store id ``ConversionManifest.json``.
    """
    def __init__(self):
        super(ConversionManifest, self).__init__()
        self._version_major: int = 100
        self._version_minor: int = 0
        self._conversion_maps: typing.List[ConversionMap] = []
        self._store_id = 'ConversionManifest.json'

    def serialize(self) -> dict:
        """Extend the base serialization with version and map entries."""
        output = super(ConversionManifest, self).serialize()
        output['_version_major'] = self._version_major
        output['_version_minor'] = self._version_minor
        output['_conversion_maps'] = [o.serialize() for o in self._conversion_maps]
        return output

    def deserialize(self, data: dict) -> None:
        """Restore state from *data*; missing keys fall back to defaults."""
        super(ConversionManifest, self).deserialize(data=data)
        self._version_major = data.get('_version_major', 100)
        self._version_minor = data.get('_version_minor', 0)
        maps = []
        for blob in data.get('_conversion_maps', []):
            item = ConversionMap()
            item.deserialize(data=blob)
            maps.append(item)
        self._conversion_maps = maps

    def set_version(self, major: int = 100, minor: int = 0) -> None:
        """Set the manifest version (defaults match the initial state)."""
        self._version_major = major
        self._version_minor = minor

    def add(
            self,
            render_context: str,
            application: str,
            document: ConversionGraph,
    ) -> ConversionMap:
        """Create, register and return a new ConversionMap entry."""
        item = ConversionMap.Create(
            render_context=render_context,
            application=application,
            document=document,
        )
        self._conversion_maps.append(item)
        return item

    def remove(self, item: ConversionMap) -> None:
        """Remove *item* if present; silently ignores unknown entries."""
        if item in self._conversion_maps:
            self._conversion_maps.remove(item)

    @property
    def conversion_maps(self) -> typing.List[ConversionMap]:
        # Return a shallow copy so callers cannot mutate the internal list.
        return self._conversion_maps[:]

    @property
    def version(self) -> str:
        return '{0}.{1}'.format(self._version_major, self._version_minor)

    @property
    def version_major(self) -> int:
        return self._version_major

    @property
    def version_minor(self) -> int:
        return self._version_minor

    @property
    def store_id(self) -> str:
        return self._store_id
| 100,965 | Python | 32.949563 | 187 | 0.58241 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/operator.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import sys
import typing
from .data import Operator, Plug, DagNode, OperatorInstance
from . import util
class ConstantFloat(Operator):
    """Source operator exposing a single editable float output (no inputs)."""

    def __init__(self):
        super(ConstantFloat, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb6',
            name='Constant Float',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to 0.0.
        output_plugs[0].computed_value = output_plugs[0].value or 0.0

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Float',
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True
        )
        plug.value = 0.0
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = len(self.id) * 0.3

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == len(self.id) * 0.3:
                raise Exception('Test failed.')
class ConstantInteger(Operator):
    """Source operator exposing a single editable integer output (no inputs)."""

    def __init__(self):
        super(ConstantInteger, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb7',
            name='Constant Integer',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to 0.
        output_plugs[0].computed_value = output_plugs[0].value or 0

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Integer',
            value_type=Plug.VALUE_TYPE_INTEGER,
            editable=True
        )
        plug.value = 0
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = len(self.id)

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == len(self.id):
                raise Exception('Test failed.')
class ConstantBoolean(Operator):
    """Source operator exposing a single editable boolean output (no inputs)."""

    def __init__(self):
        super(ConstantBoolean, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb8',
            name='Constant Boolean',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to False.
        output_plugs[0].computed_value = output_plugs[0].value or False

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Boolean',
            value_type=Plug.VALUE_TYPE_BOOLEAN,
            editable=True
        )
        plug.value = True
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = False

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if output.computed_value:
                raise Exception('Test failed.')
class ConstantString(Operator):
    """Source operator exposing a single editable string output (no inputs)."""

    def __init__(self):
        super(ConstantString, self).__init__(
            id='cb169ec0-5ddb-45eb-98d1-5d09f1ca759g',
            name='Constant String',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to the empty string.
        output_plugs[0].computed_value = output_plugs[0].value or ''

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='String',
            value_type=Plug.VALUE_TYPE_STRING,
            editable=True
        )
        plug.value = ''
        plug.default_value = ''
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = self.id

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == self.id:
                raise Exception('Test failed.')
class ConstantRGB(Operator):
    """Source operator exposing a single editable RGB color output (no inputs)."""

    def __init__(self):
        super(ConstantRGB, self).__init__(
            id='60f21797-dd62-4b06-9721-53882aa42e81',
            name='Constant RGB',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to black.
        output_plugs[0].computed_value = output_plugs[0].value or (0, 0, 0)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Color',
            value_type=Plug.VALUE_TYPE_VECTOR3,
            editable=True
        )
        plug.value = (0, 0, 0)
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = (0.1, 0.2, 0.3)

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == (0.1, 0.2, 0.3):
                raise Exception('Test failed.')
class ConstantRGBA(Operator):
    """Source operator exposing a single editable RGBA color output (no inputs)."""

    def __init__(self):
        super(ConstantRGBA, self).__init__(
            id='0ab39d82-5862-4332-af7a-329200ae1d14',
            name='Constant RGBA',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unauthored (falsy) values fall back to transparent black.
        output_plugs[0].computed_value = output_plugs[0].value or (0, 0, 0, 0)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Constants expose no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Color',
            value_type=Plug.VALUE_TYPE_VECTOR4,
            editable=True
        )
        # Default is opaque black (alpha = 1).
        plug.value = (0, 0, 0, 1)
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = (0.1, 0.2, 0.3, 0.4)

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == (0.1, 0.2, 0.3, 0.4):
                raise Exception('Test failed.')
class BooleanSwitch(Operator):
    """
    Outputs the value of input 2 if input 1 is TRUE. Otherwise input 3 will be output.
    Input 1 must be a boolean.
    Input 2 and 3 can be of any value type.
    """
    def __init__(self):
        super(BooleanSwitch, self).__init__(
            id='a628ab13-f19f-45b3-81cf-6824dd6e7b5d',
            name='Boolean Switch',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Selects on_true/on_false based on the boolean input. An unconnected
        # selector leaves value None, so the output defaults to False.
        # Flip `debug` to trace evaluation in the console.
        debug = False
        value = None
        if debug:
            print('BooleanSwitch')
            print('\tinput_plugs[0].input:', input_plugs[0].input)
        if input_plugs[0].input is not None:
            if debug:
                print('\tinput_plugs[0].input.computed_value:', input_plugs[0].input.computed_value)
                print('\tinput_plugs[1].input:', input_plugs[1].input)
                if input_plugs[1].input is not None:
                    print('\tinput_plugs[1].input.computed_value:', input_plugs[1].input.computed_value)
                print('\tinput_plugs[2].input:', input_plugs[2].input)
                if input_plugs[2].input is not None:
                    print('\tinput_plugs[2].input.computed_value:', input_plugs[2].input.computed_value)
            if input_plugs[0].input.computed_value:
                # Unconnected branch evaluates to False rather than erroring.
                value = input_plugs[1].input.computed_value if input_plugs[1].input is not None else False
            else:
                value = input_plugs[2].input.computed_value if input_plugs[2].input is not None else False
        elif debug:
            print('\tskipping evaluating inputs')
        if debug:
            print('\tvalue:', value)
            print('\toutput_plugs[0].computed_value is value', output_plugs[0].computed_value is value)
        output_plugs[0].computed_value = value if value is not None else False
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # 0 = boolean selector, 1 = value when True, 2 = value when False.
        if index == 0:
            plug = Plug.Create(parent=parent, name='input_boolean', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)
            plug.value = False
            return plug
        if index == 1:
            return Plug.Create(parent=parent, name='on_true', display_name='True Output', value_type=Plug.VALUE_TYPE_ANY)
        if index == 2:
            return Plug.Create(parent=parent, name='on_false', display_name='False Output', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)
            plug.value = False
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Selector True -> the on_true branch ('Input 1 value') should win.
        fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
        fake.outputs[0].value = True
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'Input 1 value'
        input_plugs[1].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'Input 2 value'
        input_plugs[2].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == 'Input 1 value':
                raise Exception('Test failed.')
class SplitRGB(Operator):
    """Splits an RGB triple into three float outputs: red, green, and blue."""

    def __init__(self):
        super(SplitRGB, self).__init__(
            id='1cbcf8c6-328c-49b6-b4fc-d16fd78d4868',
            name='Split RGB',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=3
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if input_plugs[0].input is None:
            # Nothing connected: all channels are zero.
            for channel in range(3):
                output_plugs[channel].computed_value = 0
            return
        value = input_plugs[0].input.computed_value
        try:
            iter(value)
            is_iterable = True
        except TypeError:
            is_iterable = False
        if is_iterable and len(value) == 3:
            for channel in range(3):
                output_plugs[channel].computed_value = value[channel]
        else:
            # Not a 3-component value: fall back to each plug's default.
            for channel in range(3):
                output_plugs[channel].computed_value = output_plugs[channel].default_value

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input_rgb', display_name='RGB', value_type=Plug.VALUE_TYPE_VECTOR3)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        channels = {0: ('red', 'Red'), 1: ('green', 'Green'), 2: ('blue', 'Blue')}
        if index not in channels:
            raise Exception('Output index "{0}" not supported.'.format(index))
        name, display_name = channels[index]
        plug = Plug.Create(
            parent=parent,
            name=name,
            display_name=display_name,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=False
        )
        plug.value = 0
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantRGB())
        fake.outputs[0].value = (0.1, 0.2, 0.3)
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 0.1:
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 0.2:
            raise Exception('Test failed.')
        if not output_plugs[2].computed_value == 0.3:
            raise Exception('Test failed.')
class MergeRGB(Operator):
    """Combines three float inputs into a single RGB triple output."""

    def __init__(self):
        super(MergeRGB, self).__init__(
            id='1cbcf8c6-328d-49b6-b4fc-d16fd78d4868',
            name='Merge RGB',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unconnected or non-float channels remain 0.0.
        channels = [0.0, 0.0, 0.0]
        for index in range(3):
            source = input_plugs[index].input
            if source is None:
                continue
            if util.to_plug_value_type(value=source.computed_value, assumed_value_type=source.value_type) == Plug.VALUE_TYPE_FLOAT:
                channels[index] = source.computed_value
        output_plugs[0].computed_value = tuple(channels)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # NOTE(review): 'input_B' casing differs from 'input_r'/'input_g'; the
        # name is persisted plug data, so it is preserved exactly as authored.
        names = {0: ('input_r', 'R'), 1: ('input_g', 'G'), 2: ('input_B', 'B')}
        if index not in names:
            raise Exception('Input index "{0}" not supported.'.format(index))
        name, display_name = names[index]
        return Plug.Create(parent=parent, name=name, display_name=display_name, value_type=Plug.VALUE_TYPE_FLOAT)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='rgb',
            display_name='RGB',
            value_type=Plug.VALUE_TYPE_VECTOR3,
            editable=False
        )
        plug.value = (0, 0, 0)
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for index, channel_value in enumerate((0.1, 0.2, 0.3)):
            fake = OperatorInstance.FromOperator(operator=ConstantFloat())
            fake.outputs[0].value = channel_value
            input_plugs[index].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == (0.1, 0.2, 0.3):
            raise Exception('Test failed.')
class SplitRGBA(Operator):
    """Splits an RGBA quadruple into four float outputs: red, green, blue, alpha."""

    def __init__(self):
        super(SplitRGBA, self).__init__(
            id='2c48e13c-2b58-48b9-a3b6-5f977c402b2e',
            name='Split RGBA',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=4
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if input_plugs[0].input is None:
            # Nothing connected: all channels are zero.
            output_plugs[0].computed_value = 0
            output_plugs[1].computed_value = 0
            output_plugs[2].computed_value = 0
            output_plugs[3].computed_value = 0
            return
        value = input_plugs[0].input.computed_value
        try:
            test = iter(value)
            is_iterable = True
        except TypeError:
            is_iterable = False
        if is_iterable and len(value) == 4:
            output_plugs[0].computed_value = value[0]
            output_plugs[1].computed_value = value[1]
            output_plugs[2].computed_value = value[2]
            output_plugs[3].computed_value = value[3]
        else:
            # Not a 4-component value: fall back to each plug's default.
            output_plugs[0].computed_value = output_plugs[0].default_value
            output_plugs[1].computed_value = output_plugs[1].default_value
            output_plugs[2].computed_value = output_plugs[2].default_value
            output_plugs[3].computed_value = output_plugs[3].default_value

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input_rgba', display_name='RGBA', value_type=Plug.VALUE_TYPE_VECTOR4)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        channels = {0: ('red', 'Red'), 1: ('green', 'Green'), 2: ('blue', 'Blue'), 3: ('alpha', 'Alpha')}
        if index not in channels:
            raise Exception('Output index "{0}" not supported.'.format(index))
        name, display_name = channels[index]
        plug = Plug.Create(
            parent=parent,
            name=name,
            display_name=display_name,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=False
        )
        plug.value = 0
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Fixed: the original instantiated ConstantRGB (a 3-component operator)
        # while assigning a 4-component value; ConstantRGBA is the correct source.
        fake = OperatorInstance.FromOperator(operator=ConstantRGBA())
        fake.outputs[0].value = (0.1, 0.2, 0.3, 0.4)
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 0.1:
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 0.2:
            raise Exception('Test failed.')
        if not output_plugs[2].computed_value == 0.3:
            raise Exception('Test failed.')
        if not output_plugs[3].computed_value == 0.4:
            raise Exception('Test failed.')
class MergeRGBA(Operator):
    """Combines four float inputs into a single RGBA quadruple output."""

    def __init__(self):
        super(MergeRGBA, self).__init__(
            id='92e57f3d-8514-4786-a4ed-2767139a15eb',
            name='Merge RGBA',
            required_inputs=4,
            min_inputs=4,
            max_inputs=4,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Unconnected or non-float channels remain 0.0.
        rgba = [0.0, 0.0, 0.0, 0.0]
        for i in range(4):
            if input_plugs[i].input is not None:
                assumed_value_type = input_plugs[i].input.value_type
                if util.to_plug_value_type(value=input_plugs[i].input.computed_value, assumed_value_type=assumed_value_type) == Plug.VALUE_TYPE_FLOAT:
                    rgba[i] = input_plugs[i].input.computed_value
        output_plugs[0].computed_value = tuple(rgba)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input_r', display_name='R', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='input_g', display_name='G', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 2:
            return Plug.Create(parent=parent, name='input_b', display_name='B', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 3:
            return Plug.Create(parent=parent, name='input_a', display_name='A', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='rgba',
                display_name='RGBA',
                # Fixed: the plug carries a 4-tuple, so it must be typed as a
                # 4-vector; the original declared VALUE_TYPE_VECTOR3.
                value_type=Plug.VALUE_TYPE_VECTOR4,
                editable=False
            )
            plug.value = (0, 0, 0, 0)
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.3
        input_plugs[2].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.4
        input_plugs[3].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == (0.1, 0.2, 0.3, 0.4):
            raise Exception('Test failed.')
class LessThan(Operator):
    """Boolean operator: outputs True when input 1 is strictly less than input 2."""

    def __init__(self):
        super(LessThan, self).__init__(
            id='996df9bd-08d5-451b-a67c-80d0de7fba32',
            name='Less Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Either side unconnected: every output is False.
        if input_plugs[0].input is None or input_plugs[1].input is None:
            for output in output_plugs:
                output.computed_value = False
            return
        lhs = input_plugs[0].input.computed_value
        rhs = input_plugs[1].input.computed_value
        outcome = False
        try:
            outcome = lhs < rhs
        except Exception:
            # Incomparable values: warn and keep the False default.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is less than "{1}". '
                  'Setting output to "{2}".'.format(
                      lhs,
                      rhs,
                      outcome
                  ))
        output_plugs[0].computed_value = outcome

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='comparison', display_name='Comparison', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Is Less Than', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class GreaterThan(Operator):
    """Boolean operator: outputs True when input 1 is strictly greater than input 2."""

    def __init__(self):
        super(GreaterThan, self).__init__(
            id='1e751c3a-f6cd-43a2-aa72-22cb9d82ad19',
            name='Greater Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Either side unconnected: the output is False.
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        lhs = input_plugs[0].input.computed_value
        rhs = input_plugs[1].input.computed_value
        outcome = False
        try:
            outcome = lhs > rhs
        except Exception:
            # Incomparable values: warn and keep the False default.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is greater than "{1}". '
                  'Setting output to "{2}".'.format(
                      lhs,
                      rhs,
                      outcome
                  ))
        output_plugs[0].computed_value = outcome

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='comparison', display_name='Comparison', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Is Greater Than', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Or(Operator):
    """Logical OR of two inputs; unconnected inputs are treated as False."""
    def __init__(self):
        super(Or, self).__init__(
            id='d0288faf-cb2e-4765-8923-1a368b45f62c',
            name='Or',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # With neither input connected there is nothing to evaluate.
        if input_plugs[0].input is None and input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        first = input_plugs[0].input.computed_value if input_plugs[0].input else False
        second = input_plugs[1].input.computed_value if input_plugs[1].input else False
        if first is None and second is None:
            output_plugs[0].computed_value = False
        elif first is None:
            # Only the second value is usable; coerce to a strict boolean.
            output_plugs[0].computed_value = bool(second)
        elif second is None:
            output_plugs[0].computed_value = bool(first)
        else:
            # NOTE: mirrors Python's `or`, which yields one of the operands.
            output_plugs[0].computed_value = first or second
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value_1', display_name='Value 1', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            return Plug.Create(parent=parent, name='value_2', display_name='Value 2', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # True OR False is expected to be True.
        for plug, constant in zip(input_plugs, (True, False)):
            fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
            fake.outputs[0].value = constant
            plug.input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class And(Operator):
    """Logical AND of two inputs; unconnected inputs are treated as False."""
    def __init__(self):
        super(And, self).__init__(
            id='9c5e4fb9-9948-4075-a7d6-ae9bc04e25b5',
            name='And',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # With neither input connected there is nothing to evaluate.
        if input_plugs[0].input is None and input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        first = input_plugs[0].input.computed_value if input_plugs[0].input else False
        second = input_plugs[1].input.computed_value if input_plugs[1].input else False
        if first is None and second is None:
            output_plugs[0].computed_value = False
        elif first is None:
            # Only the second value is usable; coerce to a strict boolean.
            output_plugs[0].computed_value = bool(second)
        elif second is None:
            output_plugs[0].computed_value = bool(first)
        else:
            # NOTE: mirrors Python's `and`, which yields one of the operands.
            output_plugs[0].computed_value = first and second
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value_1', display_name='Value 1', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            return Plug.Create(parent=parent, name='value_2', display_name='Value 2', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # True AND True is expected to be True.
        for plug, constant in zip(input_plugs, (True, True)):
            fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
            fake.outputs[0].value = constant
            plug.input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Equal(Operator):
    """Outputs True when the two connected inputs compare equal.

    Two unconnected inputs (or two None values) are considered equal; exactly
    one unconnected input (or one None value) is considered not equal.
    """
    def __init__(self):
        super(Equal, self).__init__(
            id='fb353972-aebd-4d32-8231-f644f75d322c',
            name='Equal',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Both unconnected: trivially equal.
        if input_plugs[0].input is None and input_plugs[1].input is None:
            output_plugs[0].computed_value = True
            return
        # Exactly one unconnected: cannot be equal.
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        value_1 = input_plugs[0].input.computed_value
        value_2 = input_plugs[1].input.computed_value
        if value_1 is None and value_2 is None:
            output_plugs[0].computed_value = True
            return
        if value_1 is None or value_2 is None:
            output_plugs[0].computed_value = False
            return
        result = False
        try:
            result = value_1 == value_2
        except Exception as error:
            # Incomparable values: warn and keep the default result.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is equal to "{1}". '
                  'Setting output to "{2}".'.format(
                value_1,
                value_2,
                result
            ))
        output_plugs[0].computed_value = result
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value_1', display_name='Value 1', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            # Bug fix: this plug was also named 'value_1', colliding with input 0
            # whenever plugs are resolved by name. Named 'value_2' to match the
            # display name and the sibling Or/And operators.
            return Plug.Create(parent=parent, name='value_2', display_name='Value 2', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Are Equal', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Feed the same string into both inputs so the output is expected True.
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[1].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Not(Operator):
    """Boolean negation of a single connected input; unconnected or None yields False."""
    def __init__(self):
        super(Not, self).__init__(
            id='7b8b67df-ce2e-445c-98b7-36ea695c77e3',
            name='Not',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        if source is None:
            output_plugs[0].computed_value = False
            return
        incoming = source.computed_value
        # An undefined value cannot be meaningfully negated; default to False.
        output_plugs[0].computed_value = False if incoming is None else not incoming
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Feed False so that the negated output is expected to be True.
        fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
        fake.outputs[0].value = False
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class ValueTest(Operator):
    """Passes the connected input value straight through to the output."""
    def __init__(self):
        super(ValueTest, self).__init__(
            id='2899f66b-2e8d-467b-98d1-5f590cf98e7a',
            name='Value Test',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        # No upstream connection means no value to forward.
        output_plugs[0].computed_value = None if source is None else source.computed_value
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # The constant 10 should arrive unchanged at the output.
        fake = OperatorInstance.FromOperator(operator=ConstantInteger())
        fake.outputs[0].value = 10
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 10:
            raise Exception('Test failed.')
class ValueResolver(Operator):
    """Routes one (possibly untyped) input to the matching typed output.

    The runtime type of input 0 is resolved via util.to_plug_value_type(); the
    output whose type matches receives the computed value, while every other
    output falls back to its corresponding editable default input (inputs 1-7
    act as per-type fallback values for outputs 0-6).
    """
    def __init__(self):
        super(ValueResolver, self).__init__(
            id='74306cd0-b668-4a92-9e15-7b23486bd89a',
            name='Value Resolver',
            required_inputs=8,
            min_inputs=8,
            max_inputs=8,
            num_outputs=7
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Prefer the connected upstream plug's declared type and value;
        # otherwise fall back to this plug's own type with a False placeholder.
        assumed_value_type = input_plugs[0].input.value_type if input_plugs[0].input else input_plugs[0].value_type
        computed_value = input_plugs[0].input.computed_value if input_plugs[0].input else False
        value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type)
        # For each typed output: forward the input value when the resolved type
        # matches, else use the matching default input (inputs 1-7).
        if value_type == Plug.VALUE_TYPE_BOOLEAN:
            output_plugs[0].computed_value = computed_value
        else:
            output_plugs[0].computed_value = input_plugs[1].computed_value
        if value_type == Plug.VALUE_TYPE_VECTOR3:
            output_plugs[1].computed_value = computed_value
        else:
            output_plugs[1].computed_value = input_plugs[2].computed_value
        if value_type == Plug.VALUE_TYPE_FLOAT:
            output_plugs[2].computed_value = computed_value
        else:
            output_plugs[2].computed_value = input_plugs[3].computed_value
        if value_type == Plug.VALUE_TYPE_INTEGER:
            output_plugs[3].computed_value = computed_value
        else:
            output_plugs[3].computed_value = input_plugs[4].computed_value
        if value_type == Plug.VALUE_TYPE_STRING:
            output_plugs[4].computed_value = computed_value
        else:
            output_plugs[4].computed_value = input_plugs[5].computed_value
        if value_type == Plug.VALUE_TYPE_VECTOR4:
            output_plugs[5].computed_value = computed_value
        else:
            output_plugs[5].computed_value = input_plugs[6].computed_value
        if value_type == Plug.VALUE_TYPE_LIST:
            output_plugs[6].computed_value = computed_value
        else:
            output_plugs[6].computed_value = input_plugs[7].computed_value
        # Default inputs stay hand-editable only while unconnected; input 0 is
        # skipped because it is never editable by hand here. Outputs are
        # display-only.
        for index, input_plug in enumerate(input_plugs):
            if index == 0:
                continue
            input_plug.is_editable = not input_plug.input
        for output_plug in output_plugs:
            output_plug.is_editable = False
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Index 0 is the value being resolved; indices 1-7 are per-type
        # editable fallback values (boolean, color, float, integer, string,
        # rgba, list).
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='boolean',
                display_name='Boolean',
                value_type=Plug.VALUE_TYPE_BOOLEAN,
                editable=True,
            )
            plug.value = False
            return plug
        if index == 2:
            plug = Plug.Create(
                parent=parent,
                name='color',
                display_name='Color',
                value_type=Plug.VALUE_TYPE_VECTOR3,
                editable=True,
            )
            plug.value = (0, 0, 0)
            return plug
        if index == 3:
            plug = Plug.Create(
                parent=parent,
                name='float',
                display_name='Float',
                value_type=Plug.VALUE_TYPE_FLOAT,
                editable=True,
            )
            plug.value = 0
            return plug
        if index == 4:
            plug = Plug.Create(
                parent=parent,
                name='integer',
                display_name='Integer',
                value_type=Plug.VALUE_TYPE_INTEGER,
                editable=True,
            )
            plug.value = 0
            return plug
        if index == 5:
            plug = Plug.Create(
                parent=parent,
                name='string',
                display_name='String',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=True,
            )
            plug.value = ''
            return plug
        if index == 6:
            plug = Plug.Create(
                parent=parent,
                name='rgba',
                display_name='RGBA',
                value_type=Plug.VALUE_TYPE_VECTOR4,
                editable=True,
            )
            plug.value = (0, 0, 0, 1)
            return plug
        if index == 7:
            # NOTE: unlike inputs 1-6 this fallback is not editable.
            plug = Plug.Create(
                parent=parent,
                name='list',
                display_name='List',
                value_type=Plug.VALUE_TYPE_LIST,
                editable=False,
            )
            plug.value = []
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        # One read-only output per supported value type, mirroring inputs 1-7.
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='boolean',
                display_name='Boolean',
                value_type=Plug.VALUE_TYPE_BOOLEAN,
                editable=False,
            )
            plug.value = False
            return plug
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='color',
                display_name='Color',
                value_type=Plug.VALUE_TYPE_VECTOR3,
                editable=False,
            )
            plug.value = (0, 0, 0)
            return plug
        if index == 2:
            plug = Plug.Create(
                parent=parent,
                name='float',
                display_name='Float',
                value_type=Plug.VALUE_TYPE_FLOAT,
                editable=False,
            )
            plug.value = 0
            return plug
        if index == 3:
            plug = Plug.Create(
                parent=parent,
                name='integer',
                display_name='Integer',
                value_type=Plug.VALUE_TYPE_INTEGER,
                editable=False,
            )
            plug.value = 0
            return plug
        if index == 4:
            plug = Plug.Create(
                parent=parent,
                name='string',
                display_name='String',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=False,
            )
            plug.value = ''
            return plug
        if index == 5:
            plug = Plug.Create(
                parent=parent,
                name='rgba',
                display_name='RGBA',
                value_type=Plug.VALUE_TYPE_VECTOR4,
                editable=False,
            )
            plug.value = (0, 0, 0, 1)
            return plug
        if index == 6:
            plug = Plug.Create(
                parent=parent,
                name='list',
                display_name='List',
                value_type=Plug.VALUE_TYPE_LIST,
                editable=False,
            )
            plug.value = []
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # An integer input should route to the integer output (index 3).
        fake = OperatorInstance.FromOperator(operator=ConstantInteger())
        fake.outputs[0].value = 10
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[3].computed_value == 10:
            raise Exception('Test failed.')
class MayaTransparencyResolver(Operator):
    """
    Specialty operator based on Maya transparency attribute.
    If the input is of type string - and is not an empty string - then the output will be TRUE.
    If the input is a tripple float - and any value is greater than zero - then the output will also be TRUE.
    In all other cases the output will be FALSE.
    """
    def __init__(self):
        super(MayaTransparencyResolver, self).__init__(
            id='2b523832-ac84-4051-9064-6046121dcd48',
            name='Maya Transparency Resolver',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        # Resolve the effective value and type from the upstream plug when
        # connected; otherwise use this plug's own declared type.
        assumed_value_type = source.value_type if source else input_plugs[0].value_type
        computed_value = source.computed_value if source else False
        value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type)
        if value_type == Plug.VALUE_TYPE_STRING:
            # A non-empty texture path implies transparency.
            is_transparent = computed_value != ''
        elif value_type == Plug.VALUE_TYPE_VECTOR3:
            # Any positive channel implies transparency.
            is_transparent = any(channel > 0 for channel in computed_value)
        elif value_type == Plug.VALUE_TYPE_FLOAT:
            is_transparent = computed_value > 0
        else:
            is_transparent = False
        output_plugs[0].computed_value = is_transparent
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='is_transparent',
                display_name='Is Transparent',
                value_type=Plug.VALUE_TYPE_BOOLEAN,
            )
            plug.value = False
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # A mid-grey RGB has positive channels, so the output should be True.
        fake = OperatorInstance.FromOperator(operator=ConstantRGB())
        fake.outputs[0].value = (0.5, 0.5, 0.5)
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class ListGenerator(Operator):
    """Collects the values of all attached inputs into a single list output."""
    def __init__(self):
        super(ListGenerator, self).__init__(
            id='a410f7a0-280a-451f-a26c-faf9a8e302b4',
            name='List Generator',
            required_inputs=0,
            min_inputs=0,
            max_inputs=-1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].computed_value = [plug.computed_value for plug in input_plugs]
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Inputs are unnamed slots labelled by their position.
        label = '[{0}]'.format(index)
        return Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_ANY,
            editable=False,
            is_removable=True,
        )
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='list', display_name='list', value_type=Plug.VALUE_TYPE_LIST)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        super(ListGenerator, self).remove_plug(operator_instance=operator_instance, plug=plug)
        # Re-label the surviving inputs so names stay index-ordered.
        for position, remaining in enumerate(operator_instance.inputs):
            remaining.name = '[{0}]'.format(position)
            remaining.display_name = '[{0}]'.format(position)
        # Force downstream recomputation.
        for downstream in operator_instance.outputs:
            downstream.invalidate()
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
class ListIndex(Operator):
    """Outputs the element at a given index of a list input, or None when out of range."""
    def __init__(self):
        super(ListIndex, self).__init__(
            id='e4a81506-fb6b-4729-8273-f68e97f5bc6b',
            name='List Index',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        try:
            items = input_plugs[0].computed_value
            iter(items)  # raises TypeError when the input is not iterable
            position = input_plugs[1].computed_value
            if 0 <= position < len(items):
                output_plugs[0].computed_value = items[position]
            else:
                # Out-of-range indices resolve to None rather than raising.
                output_plugs[0].computed_value = None
        except TypeError:
            output_plugs[0].computed_value = None
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='index',
                display_name='Index',
                value_type=Plug.VALUE_TYPE_INTEGER,
                editable=True
            )
            plug.computed_value = 0
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Index 1 of ['hello', 'world'] should be 'world'.
        input_plugs[0].value = ['hello', 'world']
        input_plugs[1].value = 1
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 'world':
            raise Exception('Test failed.')
class MDLColorSpace(Operator):
    """Exposes an editable MDL color-space enum ('auto', 'raw', 'sRGB') as its single output."""
    def __init__(self):
        super(MDLColorSpace, self).__init__(
            id='cf0b97c8-fb55-4cf3-8afc-23ebd4a0a6c7',
            name='MDL Color Space',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Fall back to 'auto' when no explicit value has been chosen.
        chosen = output_plugs[0].value
        output_plugs[0].computed_value = chosen if chosen else 'auto'
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # This operator has no inputs.
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['auto', 'raw', 'sRGB']
            plug.default_value = 'auto'
            plug.value = 'auto'
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Select the last enum entry ('sRGB') and expect it to pass through.
        output_plugs[0].value = output_plugs[0].enum_values[2]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == output_plugs[0].enum_values[2]:
            raise Exception('Test failed.')
class MDLTextureResolver(Operator):
    """Combines a texture file path and a color space into a [path, color_space] list.

    A non-string (or unconnected) path input resolves to an empty string.
    """
    def __init__(self):
        super(MDLTextureResolver, self).__init__(
            id='af766adb-cf54-4a8b-a598-44b04fbcf630',
            name='MDL Texture Resolver',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # The path comes from the connected upstream plug; anything that does
        # not resolve to a string is replaced with ''.
        filepath = input_plugs[0].input.computed_value if input_plugs[0].input else ''
        value_type = util.to_plug_value_type(value=filepath, assumed_value_type=Plug.VALUE_TYPE_STRING)
        filepath = filepath if value_type == Plug.VALUE_TYPE_STRING else ''
        colorspace = input_plugs[1].computed_value
        output_plugs[0].computed_value = [filepath, colorspace]
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_STRING)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['auto', 'raw', 'sRGB']
            plug.default_value = 'auto'
            plug.value = 'auto'
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='list',
                display_name='List',
                value_type=Plug.VALUE_TYPE_LIST,
                editable=False,
            )
            plug.default_value = ['', 'auto']
            plug.value = ['', 'auto']
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Bug fix: _compute_outputs reads the path from input_plugs[0].input,
        # so the test must wire a constant upstream plug rather than setting
        # the plug value directly (which compute ignored, yielding '').
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'c:/folder/color.png'
        input_plugs[0].input = fake.outputs[0]
        input_plugs[1].computed_value = 'raw'
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Bug fix: this operator has a single output; the previous index [3]
        # raised IndexError before the comparison could even run.
        if not output_plugs[0].computed_value == ['c:/folder/color.png', 'raw']:
            raise Exception('Test failed.')
class SplitTextureData(Operator):
    """Splits a [texture_path, color_space] list into two string outputs.

    Input that is not a two-element sequence of strings falls back to the
    defaults '' and 'auto'.
    """
    def __init__(self):
        super(SplitTextureData, self).__init__(
            id='6a411798-434c-4ad4-b464-0bd2e78cdcec',
            name='Split Texture Data',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=2
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Validate the input: must be an iterable of exactly two strings.
        is_valid_input = False
        try:
            value = input_plugs[0].computed_value
            test = iter(value)
            if len(value) == 2:
                # Python 2 strings are `basestring`; Python 3 uses `str`.
                if sys.version_info.major < 3:
                    if isinstance(value[0], basestring) and isinstance(value[1], basestring):
                        is_valid_input = True
                else:
                    if isinstance(value[0], str) and isinstance(value[1], str):
                        is_valid_input = True
        except TypeError:
            # Non-iterable input: fall through to the defaults below.
            pass
        if is_valid_input:
            output_plugs[0].computed_value = value[0]
            output_plugs[1].computed_value = value[1]
        else:
            output_plugs[0].computed_value = ''
            output_plugs[1].computed_value = 'auto'
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
            plug.default_value = ['', 'auto']
            plug.computed_value = ['', 'auto']
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        # Output 0 is the texture path; output 1 is the color space.
        if index == 0:
            plug = Plug.Create(parent=parent, name='texture_path', display_name='Texture Path', value_type=Plug.VALUE_TYPE_STRING)
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        if index == 1:
            plug = Plug.Create(parent=parent, name='color_space', display_name='Color Space', value_type=Plug.VALUE_TYPE_STRING)
            plug.default_value = 'auto'
            plug.computed_value = 'auto'
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # A valid pair should be split across the two outputs.
        input_plugs[0].computed_value = ['hello.png', 'world']
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 'hello.png':
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 'world':
            raise Exception('Test failed.')
class Multiply(Operator):
    """Multiplies all numeric inputs together; outputs 0 with fewer than two numbers."""
    def __init__(self):
        super(Multiply, self).__init__(
            id='0f5c9828-f582-48aa-b055-c12b91e692a7',
            name='Multiply',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Only int/float inputs participate; anything else is ignored.
        factors = [plug.computed_value for plug in input_plugs if isinstance(plug.computed_value, (int, float))]
        if len(factors) < 2:
            output_plugs[0].computed_value = 0
        else:
            total = 1.0
            for factor in factors:
                total *= factor
            output_plugs[0].computed_value = total
        # Plugs driven by a connection are not hand-editable.
        for plug in input_plugs:
            plug.is_editable = not plug.input
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # The first two inputs are mandatory; later slots can be removed.
        label = '[{0}]'.format(index)
        plug = Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=index > 1,
        )
        plug.default_value = 1.0
        plug.value = 1.0
        plug.computed_value = 1.0
        return plug
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='product', display_name='product', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        super(Multiply, self).remove_plug(operator_instance=operator_instance, plug=plug)
        # Re-label the surviving inputs so names stay index-ordered.
        for position, remaining in enumerate(operator_instance.inputs):
            remaining.name = '[{0}]'.format(position)
            remaining.display_name = '[{0}]'.format(position)
        # Force downstream recomputation.
        for downstream in operator_instance.outputs:
            downstream.invalidate()
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 2 * 2 should produce 4.
        input_plugs[0].computed_value = 2
        input_plugs[1].computed_value = 2
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 4:
            raise Exception('Test failed.')
class ColorSpaceResolver(Operator):
    """Translates a color-space name between applications (MDL <-> Blender).

    Lookups use a 'from|color_space|to' key against MAPPING; unmapped keys fall
    back to the target application's DEFAULT color space.
    """
    # Known translations, keyed 'from_app|color_space(lowercased)|to_app'.
    MAPPING = {
        'MDL|auto|Blender': 'sRGB',
        'MDL|srgb|Blender': 'sRGB',
        'MDL|raw|Blender': 'Raw',
        'Blender|filmic log|MDL': 'raw',
        'Blender|linear|MDL': 'raw',
        'Blender|linear aces|MDL': 'raw',
        'Blender|non-color|MDL': 'raw',
        'Blender|raw|MDL': 'raw',
        'Blender|srgb|MDL': 'sRGB',
        'Blender|xyz|MDL': 'raw',
    }
    # Fallback per target application when no mapping entry exists.
    DEFAULT = {
        'Blender': 'Linear',
        'MDL': 'auto',
    }
    def __init__(self):
        super(ColorSpaceResolver, self).__init__(
            id='c159df8f-a0a2-4300-b897-e8eaa689a901',
            name='Color Space Resolver',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Color-space names are matched case-insensitively (keys are lowercase).
        color_space = input_plugs[0].computed_value.lower()
        from_color_space = input_plugs[1].computed_value
        to_color_space = input_plugs[2].computed_value
        key = '{0}|{1}|{2}'.format(
            from_color_space,
            color_space,
            to_color_space
        )
        if key in ColorSpaceResolver.MAPPING:
            output_plugs[0].computed_value = ColorSpaceResolver.MAPPING[key]
        else:
            output_plugs[0].computed_value = ColorSpaceResolver.DEFAULT[to_color_space]
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=False,
                is_removable=False,
            )
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='from_color_space',
                display_name='From',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['MDL', 'Blender']
            plug.default_value = 'MDL'
            plug.computed_value = 'MDL'
            return plug
        if index == 2:
            plug = Plug.Create(
                parent=parent,
                name='to_color_space',
                display_name='To',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['Blender', 'MDL']
            plug.default_value = 'Blender'
            plug.computed_value = 'Blender'
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=False
            )
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Bug fix: this previously raised NotImplementedError, making the
        # operator untestable. Exercise a known MAPPING entry instead.
        input_plugs[0].computed_value = 'raw'
        input_plugs[1].computed_value = 'Blender'
        input_plugs[2].computed_value = 'MDL'
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Bug fix: the output is a plain string plug with no enum_values, so the
        # old comparison against output_plugs[0].enum_values[2] could never
        # work. 'Blender|raw|MDL' maps to 'raw'.
        if not output_plugs[0].computed_value == 'raw':
            raise Exception('Test failed.')
class Add(Operator):
    """Sums all input values that support addition; incompatible inputs are skipped."""
    def __init__(self):
        super(Add, self).__init__(
            id='f2818669-5454-4599-8792-2cb09f055bf9',
            name='Add',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output = 0
        for input_plug in input_plugs:
            try:
                output += input_plug.computed_value
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Values that cannot be added
                # are deliberately skipped (best-effort sum).
                pass
        output_plugs[0].computed_value = output
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Inputs are unnamed slots labelled by their position.
        plug = Plug.Create(
            parent=parent,
            name='[{0}]'.format(index),
            display_name='[{0}]'.format(index),
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=True,
        )
        plug.default_value = 0.0
        plug.computed_value = 0.0
        return plug
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='sum', display_name='sum', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        super(Add, self).remove_plug(operator_instance=operator_instance, plug=plug)
        # Re-label the surviving inputs so names stay index-ordered, then
        # force downstream recomputation.
        for index, plug in enumerate(operator_instance.inputs):
            plug.name = '[{0}]'.format(index)
            plug.display_name = '[{0}]'.format(index)
        for plug in operator_instance.outputs:
            plug.invalidate()
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
class Subtract(Operator):
    """Subtracts each subsequent input from the first usable value; incompatible inputs are skipped."""
    def __init__(self):
        super(Subtract, self).__init__(
            id='15f523f3-4e94-43a5-8306-92d07cbfa48c',
            name='Subtract',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # The first value seeds the accumulator; later values are subtracted.
        # If every input fails, the output is None.
        output = None
        for input_plug in input_plugs:
            try:
                if output is None:
                    output = input_plug.computed_value
                else:
                    output -= input_plug.computed_value
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Values that cannot be
                # subtracted are deliberately skipped (best-effort).
                pass
        output_plugs[0].computed_value = output
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Inputs are unnamed slots labelled by their position.
        plug = Plug.Create(
            parent=parent,
            name='[{0}]'.format(index),
            display_name='[{0}]'.format(index),
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=True,
        )
        plug.default_value = 0.0
        plug.computed_value = 0.0
        return plug
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='difference', display_name='difference', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Output index "{0}" not supported.'.format(index))
    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        super(Subtract, self).remove_plug(operator_instance=operator_instance, plug=plug)
        # Re-label the surviving inputs so names stay index-ordered, then
        # force downstream recomputation.
        for index, plug in enumerate(operator_instance.inputs):
            plug.name = '[{0}]'.format(index)
            plug.display_name = '[{0}]'.format(index)
        for plug in operator_instance.outputs:
            plug.invalidate()
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        pass
class Remap(Operator):
    """Linearly remaps a scalar (or element-wise, an iterable of scalars) from
    the range [old_min, old_max] onto [new_min, new_max]."""

    def __init__(self):
        super(Remap, self).__init__(
            id='2405c02a-facc-47a6-80ef-d35d959b0cd4',
            name='Remap',
            required_inputs=5,
            min_inputs=5,
            max_inputs=5,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Remap input 0 using the range plugs 1-4; iterables are remapped element-wise."""
        result = 0.0
        old_value = input_plugs[0].computed_value
        # iter() is the iterability probe. NOTE(review): strings are iterable too,
        # so a string value takes the element-wise branch (each character then
        # fails the arithmetic and is skipped) — confirm that is intended.
        try:
            iter(old_value)
            is_iterable = True
        except TypeError:
            is_iterable = False
        if not is_iterable:
            try:
                old_min = input_plugs[1].computed_value
                old_max = input_plugs[2].computed_value
                new_min = input_plugs[3].computed_value
                new_max = input_plugs[4].computed_value
                result = ((old_value - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min
            except Exception:
                # Was a bare `except:`; e.g. non-numeric value or old_max == old_min.
                # result stays 0.0, mirroring the original fallback.
                pass
        else:
            result = []
            try:
                # Range endpoints are loop-invariant: the original re-read all four
                # plugs for every element; read them once instead.
                old_min = input_plugs[1].computed_value
                old_max = input_plugs[2].computed_value
                new_min = input_plugs[3].computed_value
                new_max = input_plugs[4].computed_value
                span = new_max - new_min
                for o in old_value:
                    try:
                        result.append(((o - old_min) / (old_max - old_min)) * span + new_min)
                    except Exception:
                        pass  # skip elements that cannot be remapped
            except Exception:
                # Unreadable endpoints: emit an empty list, same as the original
                # (which skipped every element when the plug reads failed).
                pass
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create one of the five fixed inputs: value, old_min, old_max, new_min, new_max."""
        if index == 0:
            plug = Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_ANY)
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 1:
            plug = Plug.Create(parent=parent, name='old_min', display_name='Old Min', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 2:
            plug = Plug.Create(parent=parent, name='old_max', display_name='Old Max', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 1
            plug.computed_value = 1
            return plug
        if index == 3:
            plug = Plug.Create(parent=parent, name='new_min', display_name='New Min', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 4:
            plug = Plug.Create(parent=parent, name='new_max', display_name='New Max', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 10
            plug.computed_value = 10
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single 'remapped_value' float output; only index 0 is valid."""
        if index == 0:
            return Plug.Create(parent=parent, name='remapped_value', display_name='Remapped Value', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Exercise an inverted mapping: 0.5 in [0, 1] onto [1, 0]."""
        input_plugs[0].computed_value = 0.5
        input_plugs[1].computed_value = 0
        input_plugs[2].computed_value = 1
        input_plugs[3].computed_value = 1
        input_plugs[4].computed_value = 0

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """0.5 remapped from [0, 1] onto [1, 0] must still be 0.5 (midpoint)."""
        if not output_plugs[0].computed_value == 0.5:
            raise Exception('Test failed.')
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/generator/util.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import sys
import typing
from ..data import Library, Target
from .core import IGenerator
__generators: typing.List['IGenerator'] = []
def register(generator: IGenerator) -> typing.NoReturn:
    """Register *generator* ahead of all others so it wins future queries; no-op if already registered."""
    registry = getattr(sys.modules[__name__], '__generators')
    if generator in registry:
        return
    registry.insert(0, generator)
def un_register(generator: IGenerator) -> typing.NoReturn:
    """Forget *generator*; it is ignored by all future queries and processes."""
    registry = getattr(sys.modules[__name__], '__generators')
    try:
        registry.remove(generator)
    except ValueError:
        # Not registered to begin with — nothing to do.
        pass
def can_generate_target(class_name: str) -> bool:
    """Return True if any registered generator can build a target for *class_name*."""
    registry = getattr(sys.modules[__name__], '__generators')
    return any(g.can_generate_target(class_name=class_name) for g in registry)
def generate_target(class_name: str) -> typing.Tuple[Library, Target]:
    """Build (library, target) for *class_name* via the first capable generator; raises if none can."""
    registry = getattr(sys.modules[__name__], '__generators')
    for candidate in registry:
        if not candidate.can_generate_target(class_name=class_name):
            continue
        print('UMM using generator "{0}" for class_name "{1}".'.format(candidate, class_name))
        return candidate.generate_target(class_name=class_name)
    raise Exception('Registered generators does not support action.')
def generate_targets() -> typing.List[typing.Tuple[Library, Target]]:
    """Collect (library, target) tuples from every registered generator able to produce them."""
    results = []
    registry = getattr(sys.modules[__name__], '__generators')
    for candidate in registry:
        if not candidate.can_generate_targets():
            continue
        print('UMM using generator "{0}" for generating targets.'.format(candidate))
        results.extend(candidate.generate_targets())
    return results
def can_generate_target_from_instance(instance: object) -> bool:
    """Return True if any registered generator can derive a target from *instance*."""
    registry = getattr(sys.modules[__name__], '__generators')
    return any(g.can_generate_target_from_instance(instance=instance) for g in registry)
def generate_target_from_instance(instance: object) -> typing.List[typing.Tuple[Library, Target]]:
    """Derive targets from *instance* via the first capable generator."""
    registry = getattr(sys.modules[__name__], '__generators')
    for candidate in registry:
        if not candidate.can_generate_target_from_instance(instance=instance):
            continue
        print('UMM using generator "{0}" for instance "{1}".'.format(candidate, instance))
        return candidate.generate_target_from_instance(instance=instance)
    # NOTE(review): unlike generate_target(), this falls through and implicitly
    # returns None when no generator matches — confirm callers handle that.
| 3,695 | Python | 40.066666 | 149 | 0.696076 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/converter/util.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""
Convert Queries & Actions
#########################
DCC Connectors and other conversion solutions will want to use this module.
There are three different conversion strategies available:
1. Source *class* and *data*.
The framework finds a suitable conversion template and returns data indicating a *target class* and data for setting its attributes.
For example:
.. code::
from omni.universalmaterialmap.core.converter import util
if util.can_convert_data_to_data(
class_name='lambert',
render_context='MDL',
source_data=[
('color', 'color_texture.png'),
('normalCamera', 'normal_texture.png')
]):
data = util.convert_data_to_data(
class_name='lambert',
render_context='MDL',
source_data=[
('color', 'color_texture.png'),
('normalCamera', 'normal_texture.png')
]
)
...could return:
.. code::
[
('umm_target_class', 'omnipbr'),
('diffuse_texture', 'color_texture.png'),
('normalmap_texture', 'normal_texture.png'),
]
Note that the first value pair :code:`('umm_target_class', 'omnipbr')` indicates the object class that should be used for conversion. All other value pairs indicate attribute names and attribute values.
Using this strategy puts very little responsibility on the conversion workers to understand assets. They merely have to apply the arguments to a conversion template, compute the internal graph, and spit out the results.
It also means that the solution invoking the converter will have to gather the necessary arguments from some object or data source.
2. Source *instance* into conversion data.
Here we use an object instance in order to get the same data as in strategy #1 above.
For example:
.. code::
from omni.universalmaterialmap.core.converter import util
if util.can_convert_instance(
instance=MyLambertPyNode,
render_context='MDL'):
data = util.convert_instance_to_data(
instance=MyLambertPyNode,
render_context='MDL'
)
...could return:
.. code::
[
('umm_target_class', 'omnipbr'),
('diffuse_texture', 'color_texture.png'),
('normalmap_texture', 'normal_texture.png'),
]
Note that the first value pair :code:`('umm_target_class', 'omnipbr')` indicates the object class that should be used for conversion. All other value pairs indicate attribute names and attribute values.
The advantage here is that the user of the framework can rely on a converter's understanding of objects and attributes.
The downside is that there has to be an actual asset or dependency graph loaded.
3. Source *instance* into converted object.
In this approach the converter will create a new object and set its properties/attributes based on a conversion template.
For example:
.. code::
from omni.universalmaterialmap.core.converter import util
if util.can_convert_instance(
instance=MyLambertPyNode,
render_context='MDL'):
node = util.convert_instance_to_instance(
instance=MyLambertPyNode,
render_context='MDL'
)
...could create and return an MDL material in the current Maya scene.
Manifest Query
##############
Module has methods for querying its conversion capabilities as indicated by library manifests.
This could be useful when wanting to expose commands for converting assets within a DCC application scene.
Note that this API does not require any data or object instance argument. It's a more *general* query.
.. code::
from omni.universalmaterialmap.core.converter import util
manifest = util.get_conversion_manifest()
# Returns data indicating what source class can be converted to a render context.
#
# Example:
# [
# ('lambert', 'MDL'),
# ('blinn', 'MDL'),
# ]
if (my_class_name, 'MDL') in manifest:
# Do something
"""
import sys
import typing
import traceback
from .. import data
from .core import ICoreConverter, IDataConverter, IObjectConverter
# When True, the register/convert helpers below emit verbose diagnostics.
_debug_mode = False
# Registry of converters, most-recently-registered first (queried in order).
__converters: typing.List['ICoreConverter'] = []
# Key of the first pair in conversion data; its value names the target class.
TARGET_CLASS_IDENTIFIER = 'umm_target_class'
def register(converter: ICoreConverter) -> typing.NoReturn:
    """Register *converter* ahead of all others so it wins future queries; no-op if already registered."""
    registry = getattr(sys.modules[__name__], '__converters')
    if converter in registry:
        if _debug_mode:
            print('UMM: core.converter.util: Not registering converter because it is already registered: "{0}"'.format(converter))
        return
    if _debug_mode:
        print('UMM: core.converter.util: Registering converter: "{0}"'.format(converter))
    registry.insert(0, converter)
def un_register(converter: ICoreConverter) -> typing.NoReturn:
    """Forget *converter*; it is ignored by all future queries and processes."""
    registry = getattr(sys.modules[__name__], '__converters')
    if converter not in registry:
        if _debug_mode:
            print('UMM: core.converter.util: Not un-registering converter because it not registered to begin with: "{0}"'.format(converter))
        return
    if _debug_mode:
        print('UMM: core.converter.util: un-registering converter: "{0}"'.format(converter))
    registry.remove(converter)
def can_create_instance(class_name: str) -> bool:
    """Return True if some registered object converter can create a *class_name* node."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_create_instance(class_name=class_name):
            if _debug_mode:
                print('UMM: core.converter.util: converter can create instance: "{0}"'.format(candidate))
            return True
    if _debug_mode:
        print('UMM: core.converter.util: no converter can create instance.')
    return False
def create_instance(class_name: str) -> object:
    """Create an asset via the first registered object converter supporting *class_name*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_create_instance(class_name=class_name):
            if _debug_mode:
                print('UMM: core.converter.util: converter creating instance: "{0}"'.format(candidate))
            return candidate.create_instance(class_name=class_name)
    raise Exception('Registered converters does not support class "{0}".'.format(class_name))
def can_set_plug_value(instance: object, plug: data.Plug) -> bool:
    """Return True if some registered object converter can set *plug*'s value from *instance*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if _debug_mode:
            # NOTE(review): this logs for every IObjectConverter candidate, before
            # the capability check — mirrors the original control flow exactly.
            print('UMM: core.converter.util: converter can set plug value: "{0}"'.format(candidate))
        if candidate.can_set_plug_value(instance=instance, plug=plug):
            return True
    if _debug_mode:
        print('UMM: core.converter.util: converter cannot set plug value given instance "{0}" and plug "{1}"'.format(instance, plug))
    return False
def set_plug_value(instance: object, plug: data.Plug) -> typing.NoReturn:
    """Set *plug*'s value from the like-named attribute of *instance*; raises if unsupported."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_set_plug_value(instance=instance, plug=plug):
            if _debug_mode:
                print('UMM: core.converter.util: converter setting plug value: "{0}"'.format(candidate))
            return candidate.set_plug_value(instance=instance, plug=plug)
    raise Exception('Registered converters does not support action.')
def can_set_instance_attribute(instance: object, name: str) -> bool:
    """Return True if some registered object converter can set attribute *name* on *instance*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if _debug_mode:
            # NOTE(review): logs before the capability check, for every candidate —
            # mirrors the original control flow exactly.
            print('UMM: core.converter.util: converter can set instance attribute: "{0}", "{1}", "{2}"'.format(candidate, instance, name))
        if candidate.can_set_instance_attribute(instance=instance, name=name):
            return True
    if _debug_mode:
        print('UMM: core.converter.util: cannot set instance attribute: "{0}", "{1}"'.format(instance, name))
    return False
def set_instance_attribute(instance: object, name: str, value: typing.Any) -> typing.NoReturn:
    """Set attribute *name* on *instance* to *value* via the first capable converter; raises if none."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_set_instance_attribute(instance=instance, name=name):
            if _debug_mode:
                print('UMM: core.converter.util: converter setting instance attribute: "{0}", "{1}", "{2}", "{3}"'.format(candidate, instance, name, value))
            return candidate.set_instance_attribute(instance=instance, name=name, value=value)
    raise Exception('Registered converters does not support action.')
def can_convert_instance(instance: object, render_context: str) -> bool:
    """Return True if some registered object converter can convert *instance* for *render_context*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if _debug_mode:
            # NOTE(review): logs before the capability check, for every candidate —
            # mirrors the original control flow exactly.
            print('UMM: core.converter.util: converter can convert instance: "{0}", "{1}", "{2}"'.format(candidate, instance, render_context))
        if candidate.can_convert_instance(instance=instance, render_context=render_context):
            return True
    return False
def convert_instance_to_instance(instance: object, render_context: str) -> typing.Any:
    """Convert *instance* to a new object for *render_context* via the first capable converter."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_convert_instance(instance=instance, render_context=render_context):
            if _debug_mode:
                print('UMM: core.converter.util: converter converting instance: "{0}", "{1}", "{2}"'.format(candidate, instance, render_context))
            return candidate.convert_instance_to_instance(instance=instance, render_context=render_context)
    raise Exception('Registered converters does not support action.')
def can_convert_instance_to_data(instance: object, render_context: str) -> bool:
    """Return True if some registered object converter can serialize *instance* for *render_context*.
    Any unexpected error is logged (with traceback) and reported as False."""
    try:
        for candidate in getattr(sys.modules[__name__], '__converters'):
            if not isinstance(candidate, IObjectConverter):
                continue
            if candidate.can_convert_instance_to_data(instance=instance, render_context=render_context):
                return True
    except Exception as error:
        print('Warning: Universal Material Map: function "can_convert_instance_to_data": Unexpected error:')
        print('\targument "instance" = "{0}"'.format(instance))
        print('\targument "render_context" = "{0}"'.format(render_context))
        print('\terror: {0}'.format(error))
        print('\tcallstack: {0}'.format(traceback.format_exc()))
    return False
def convert_instance_to_data(instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
    """Serialize *instance* for *render_context* via the first capable converter.

    Returns a list of (name, value) tuples whose first pair is
    ("umm_target_class", <class name>). Raises if no converter matches.
    """
    try:
        for candidate in getattr(sys.modules[__name__], '__converters'):
            if not isinstance(candidate, IObjectConverter):
                continue
            if not candidate.can_convert_instance_to_data(instance=instance, render_context=render_context):
                continue
            result = candidate.convert_instance_to_data(instance=instance, render_context=render_context)
            print('Universal Material Map: convert_instance_to_data({0}, "{1}") generated data:'.format(instance, render_context))
            print('\t(')
            for o in result:
                print('\t\t{0}'.format(o))
            print('\t)')
            return result
    except Exception as error:
        print('Warning: Universal Material Map: function "convert_instance_to_data": Unexpected error:')
        print('\targument "instance" = "{0}"'.format(instance))
        print('\targument "render_context" = "{0}"'.format(render_context))
        print('\terror: {0}'.format(error))
        print('\tcallstack: {0}'.format(traceback.format_exc()))
        # NOTE(review): on unexpected errors this returns a notification dict even
        # though the annotation promises a list of tuples — mirrors the original.
        result = dict()
        result['umm_notification'] = 'unexpected_error'
        result['message'] = 'Not able to convert "{0}" for render context "{1}" because there was an unexpected error. Details: {2}'.format(instance, render_context, error)
        return result
    raise Exception('Registered converters does not support action.')
def can_convert_attribute_values(instance: object, render_context: str, destination: object) -> bool:
    """Return True if some converter can transfer *instance*'s attribute values onto *destination*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_convert_attribute_values(instance=instance, render_context=render_context, destination=destination):
            return True
    return False
def convert_attribute_values(instance: object, render_context: str, destination: object) -> typing.NoReturn:
    """Transfer converted attribute values from *instance* onto *destination*; raises if unsupported."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_convert_attribute_values(instance=instance, render_context=render_context, destination=destination):
            return candidate.convert_attribute_values(instance=instance, render_context=render_context, destination=destination)
    raise Exception('Registered converters does not support action.')
def can_convert_data_to_data(class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool:
    """Return True if some registered data converter can convert *class_name* + *source_data*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IDataConverter):
            continue
        if candidate.can_convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data):
            return True
    return False
def convert_data_to_data(class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]:
    """Convert *source_data* for *class_name* via the first capable data converter.

    Returns a list of (name, value) tuples whose first pair is
    ("umm_target_class", <class name>). Raises if no converter matches.
    """
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IDataConverter):
            continue
        if not candidate.can_convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data):
            continue
        result = candidate.convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data)
        print('Universal Material Map: convert_data_to_data("{0}", "{1}") generated data:'.format(class_name, render_context))
        print('\t(')
        for o in result:
            print('\t\t{0}'.format(o))
        print('\t)')
        return result
    raise Exception('Registered converters does not support action.')
def can_apply_data_to_instance(source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool:
    """Return True if some registered object converter can apply *source_data* onto *instance*."""
    for candidate in getattr(sys.modules[__name__], '__converters'):
        if not isinstance(candidate, IObjectConverter):
            continue
        if candidate.can_apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance):
            return True
    return False
def apply_data_to_instance(source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> dict:
    """Apply conversion *source_data* to *instance* via the first capable converter.

    Returns a notification dict with keys 'umm_notification'
    ('success' | 'incomplete_process' | 'unexpected_error') and 'message'.
    """
    try:
        for candidate in getattr(sys.modules[__name__], '__converters'):
            if not isinstance(candidate, IObjectConverter):
                continue
            if not candidate.can_apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance):
                continue
            candidate.apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance)
            print('Universal Material Map: apply_data_to_instance("{0}", "{1}") completed.'.format(instance, render_context))
            outcome = dict()
            outcome['umm_notification'] = 'success'
            outcome['message'] = 'Material conversion data applied to "{0}".'.format(instance)
            return outcome
        outcome = dict()
        outcome['umm_notification'] = 'incomplete_process'
        outcome['message'] = 'Not able to convert type "{0}" for render context "{1}" because there is no Conversion Graph for that scenario. No changes were applied to "{2}".'.format(source_class_name, render_context, instance)
        return outcome
    except Exception as error:
        print('UMM: Unexpected error: {0}'.format(traceback.format_exc()))
        outcome = dict()
        outcome['umm_notification'] = 'unexpected_error'
        outcome['message'] = 'Not able to convert type "{0}" for render context "{1}" because there was an unexpected error. Some changes may have been applied to "{2}". Details: {3}'.format(source_class_name, render_context, instance, error)
        return outcome
def get_conversion_manifest() -> typing.List[typing.Tuple[str, str]]:
    """Aggregate every registered converter's manifest of (source class, render context) pairs.

    Example: [('lambert', 'MDL'), ('blinn', 'MDL'),]
    """
    manifest: typing.List[typing.Tuple[str, str]] = []
    for candidate in getattr(sys.modules[__name__], '__converters'):
        manifest.extend(candidate.get_conversion_manifest())
    return manifest
| 20,886 | Python | 47.687646 | 241 | 0.655559 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/converter/core.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from abc import ABCMeta, abstractmethod
import typing
from ..data import Plug
class ICoreConverter(metaclass=ABCMeta):
    """Abstract base for all UMM converters; declares the manifest query shared by
    object converters and data converters."""
    @abstractmethod
    def __init__(self):
        super(ICoreConverter, self).__init__()
    @abstractmethod
    def get_conversion_manifest(self) -> typing.List[typing.Tuple[str, str]]:
        """
        Returns data indicating what source class can be converted to a render context.
        Example: [('lambert', 'MDL'), ('blinn', 'MDL'),]
        """
        raise NotImplementedError()
class IObjectConverter(ICoreConverter):
    """Converter interface operating on live DCC object instances: create nodes,
    read/write their attributes, and convert them to other objects or to data."""
    @abstractmethod
    def can_create_instance(self, class_name: str) -> bool:
        """ Returns true if worker can generate an object of the given class name. """
        raise NotImplementedError()
    @abstractmethod
    def create_instance(self, class_name: str) -> object:
        """ Creates an object of the given class name. """
        raise NotImplementedError()
    @abstractmethod
    def can_set_plug_value(self, instance: object, plug: Plug) -> bool:
        """ Returns true if worker can set the plug's value given the instance and its attributes. """
        raise NotImplementedError()
    @abstractmethod
    def set_plug_value(self, instance: object, plug: Plug) -> typing.NoReturn:
        """ Sets the plug's value given the value of the instance's attribute named the same as the plug. """
        raise NotImplementedError()
    @abstractmethod
    def can_set_instance_attribute(self, instance: object, name: str):
        """ Resolves if worker can set an attribute by the given name on the instance. """
        # NOTE(review): unlike sibling declarations this default returns False
        # instead of raising — a subclass calling super() gets False.
        return False
    @abstractmethod
    def set_instance_attribute(self, instance: object, name: str, value: typing.Any) -> typing.NoReturn:
        """ Sets the named attribute on the instance to the value. """
        raise NotImplementedError()
    @abstractmethod
    def can_convert_instance(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        # NOTE(review): default is False rather than NotImplementedError — see above.
        return False
    @abstractmethod
    def convert_instance_to_instance(self, instance: object, render_context: str) -> typing.Any:
        """ Converts the instance to another object given the render_context. """
        raise NotImplementedError()
    @abstractmethod
    def can_convert_instance_to_data(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        # NOTE(review): default is False rather than NotImplementedError — see above.
        return False
    @abstractmethod
    def convert_instance_to_data(self, instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        raise NotImplementedError()
    @abstractmethod
    def can_convert_attribute_values(self, instance: object, render_context: str, destination: object) -> bool:
        """ Resolves if the instance's attribute values can be converted and set on the destination object's attributes. """
        raise NotImplementedError()
    @abstractmethod
    def convert_attribute_values(self, instance: object, render_context: str, destination: object) -> typing.NoReturn:
        """ Attribute values are converted and set on the destination object's attributes. """
        raise NotImplementedError()
    @abstractmethod
    def can_apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool:
        """ Resolves if worker can apply the source_data to the instance given the render_context. """
        # NOTE(review): default is False rather than NotImplementedError — see above.
        return False
    @abstractmethod
    def apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> dict:
        """
        Returns a notification object
        Examples:
            {
                'umm_notification': "success",
                'message': "Material \"Material_A\" was successfully converted from \"OmniPBR\" data."
            }
            {
                'umm_notification': "incomplete_process",
                'message': "Not able to convert \"Material_B\" using \"CustomMDL\" since there is no Conversion Graph supporting that scenario."
            }
            {
                'umm_notification': "unexpected_error",
                'message': "Not able to convert \"Material_C\" using \"OmniGlass\" due to an unexpected error. Details: \"cannot set property to None\"."
            }
        """
        raise NotImplementedError()
class IDataConverter(ICoreConverter):
    """Converter interface operating purely on (attribute name, value) data —
    no live object instance is required."""
    @abstractmethod
    def can_convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool:
        """ Resolves if worker can convert the given class and source_data to another class and target data. """
        # NOTE(review): default is False rather than NotImplementedError, unlike
        # convert_data_to_data below — a subclass calling super() gets False.
        return False
    @abstractmethod
    def convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        raise NotImplementedError()
| 6,404 | Python | 40.590909 | 176 | 0.665209 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/store.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import os
import uuid
import traceback
from .. import data
from .. import operator
from ..feature import POLLING
from ..singleton import Singleton
from .core import ChangeEvent, IDelegate
from .delegate import Filesystem, FilesystemManifest, FilesystemSettings
from .resources import install
# Fixed identifier of the built-in "Common" library shared by all connectors.
COMMON_LIBRARY_ID = '327ef29b-8358-441b-b2f0-4a16a9afd349'
# Resolve "<home>/Documents/Omniverse" as the root directory for UMM libraries.
libraries_directory = os.path.expanduser('~').replace('\\', '/')
if not libraries_directory.endswith('/Documents'):
    # os.path.expanduser() has different behaviour between 2.7 and 3
    libraries_directory = '{0}/Documents'.format(libraries_directory)
libraries_directory = '{0}/Omniverse'.format(libraries_directory)
# The common library lives under ConnectorCommon/UMMLibrary; per-library caches under Cache/.
common_library_directory = '{0}/ConnectorCommon/UMMLibrary'.format(libraries_directory)
cache_directory = '{0}/Cache'.format(common_library_directory)
# Filesystem-backed delegates for the common library's manifest, graphs, targets and settings.
COMMON_LIBRARY = data.Library.Create(
    library_id=COMMON_LIBRARY_ID,
    name='Common',
    manifest=FilesystemManifest(root_directory='{0}'.format(common_library_directory)),
    conversion_graph=Filesystem(root_directory='{0}/ConversionGraph'.format(common_library_directory)),
    target=Filesystem(root_directory='{0}/Target'.format(common_library_directory)),
    settings=FilesystemSettings(root_directory='{0}'.format(common_library_directory)),
)
# Libraries that are always (re-)registered on startup.
DEFAULT_LIBRARIES = [COMMON_LIBRARY]
class _ItemProvider(object):
    """ Class provides IO interface for a single UMM Library item.

    Reads/writes one serialized item through an optional library delegate and
    an optional cache delegate, keeping a snapshot of the last persisted
    content so edits can be reverted or detected.
    """

    def __init__(self, identifier: str, library_delegate: IDelegate = None, cache_delegate: IDelegate = None):
        super(_ItemProvider, self).__init__()
        self._library_delegate: typing.Union[IDelegate, typing.NoReturn] = library_delegate
        self._cache_delegate: typing.Union[IDelegate, typing.NoReturn] = cache_delegate
        self._identifier: str = identifier
        self._file_util: typing.Union[data.FileUtility, typing.NoReturn] = None
        # Snapshot of the last read/written serialized content; baseline for
        # revert() and has_unsaved_changes().
        self._content_cache: dict = dict()

    def revert(self) -> None:
        """Restore the in-memory content to the last persisted snapshot."""
        if self._file_util:
            self._file_util.content.deserialize(data=self._content_cache)

    def has_unsaved_changes(self) -> bool:
        """Return True if the in-memory content differs from the last snapshot."""
        if not self._file_util:
            return False
        return not self._file_util.content.serialize() == self._content_cache

    def read(self, update: bool = False) -> None:
        """Load the item, preferring the library delegate over the cache.

        On first read the file utility is created and the snapshot taken;
        subsequent calls only re-read when *update* is True.

        TODO: Check if path has changed since last read from disk.
        """
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to read().')
        # update_cache() assumes that read() prioritizes reading with library delegate!
        delegate = self._library_delegate if self._library_delegate else self._cache_delegate
        if not self._file_util:
            contents = delegate.read(identifier=self._identifier)
            if contents is not None:
                self._file_util = data.FileUtility.FromData(data=contents)
                self._update_content_cache()
        elif update:
            contents = delegate.read(identifier=self._identifier)
            self._file_util.content.deserialize(data=contents)

    def create(self, instance: data.Serializable) -> None:
        """Wrap *instance* in a new file utility and persist it immediately."""
        self._file_util = data.FileUtility.FromInstance(instance=instance)
        self.write()

    def write(self, content: data.Serializable = None) -> None:
        """Serialize the current (or given) content to every available delegate.

        Raises if no delegate exists, or if called with no content before the
        provider was initialized via read()/create().
        """
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to write().')
        if content:
            if not self._file_util:
                self._file_util = data.FileUtility.FromInstance(instance=content)
            else:
                self._file_util._content = content
        elif not self._file_util:
            raise Exception('Not supported: _ItemProvider not initialized properly prior to "write()"')
        contents = self._file_util.serialize()
        if self._library_delegate:
            self._library_delegate.write(identifier=self._identifier, contents=contents)
        if self._cache_delegate:
            self._cache_delegate.write(identifier=self._identifier, contents=contents)
        self._update_content_cache()

    def delete(self) -> None:
        """Delete the item from every available delegate and drop local state."""
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to delete().')
        if self._library_delegate:
            self._library_delegate.delete(identifier=self._identifier)
        if self._cache_delegate:
            self._cache_delegate.delete(identifier=self._identifier)
        self._file_util = None
        self._content_cache = None

    def _update_content_cache(self) -> None:
        # Refresh the snapshot from the current in-memory content.
        if not self._file_util:
            self._content_cache = dict()
        else:
            self._content_cache = self._file_util.content.serialize()

    def update_cache(self) -> bool:
        """Read from the library store and mirror the item into the cache.

        Returns True on success, False when a delegate is missing or the
        read failed.
        """
        if not self._library_delegate or not self._cache_delegate:
            return False
        # Assumes that read() prioritizes reading with library delegate!
        try:
            self.read()
        except Exception as error:
            print('Warning: Universal Material Map error reading data with identifier "{0}". Cache will not be updated due to the read error.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(self._identifier, error, traceback.format_exc()))
            return False
        self._cache_delegate.write(identifier=self._identifier, contents=self._file_util.serialize())
        # FIX: previously fell off the end and returned None on success even
        # though the method is annotated -> bool; return True explicitly.
        return True

    def on_shutdown(self):
        """Release all references so the provider can be garbage collected."""
        self._cache_delegate = None
        self._library_delegate = None
        self._identifier = None
        self._file_util = None
        self._content_cache = None

    @property
    def content(self) -> data.Serializable:
        # Deserialized payload; only valid after read()/create()/write().
        return self._file_util.content
class _LibraryProvider(object):
    """ Class provides IO interface for a single UMM Library.

    Maintains per-item _ItemProvider tables for the library's manifests,
    conversion graphs, targets and settings, and mirrors writable libraries
    into a local filesystem cache.
    """

    @staticmethod
    def _transfer_data(source: IDelegate, target: IDelegate) -> bool:
        """ Returns True if transfer was made. """
        if not source or not target:
            return False
        for identifier in source.get_ids():
            target.write(identifier=identifier, contents=source.read(identifier=identifier))
        return True

    def __init__(self, library: data.Library):
        super(_LibraryProvider, self).__init__()
        self._library: data.Library = library
        if POLLING:
            # Subscription ids for store change notifications (polling builds only).
            self._manifest_subscription: uuid.uuid4 = None
            self._conversion_graph_subscription: uuid.uuid4 = None
            self._target_subscription: uuid.uuid4 = None
        # Cache delegates mirroring the library on the local filesystem.
        self._manifest_cache: typing.Union[IDelegate, typing.NoReturn] = None
        self._conversion_graph_cache: typing.Union[IDelegate, typing.NoReturn] = None
        self._target_cache: typing.Union[IDelegate, typing.NoReturn] = None
        self._settings_cache: typing.Union[IDelegate, typing.NoReturn] = None
        # Item-provider tables keyed by store identifier.
        self._manifest_providers: typing.Dict[str, _ItemProvider] = dict()
        self._conversion_graph_providers: typing.Dict[str, _ItemProvider] = dict()
        self._target_providers: typing.Dict[str, _ItemProvider] = dict()
        self._settings_providers: typing.Dict[str, _ItemProvider] = dict()
        self._initialize()

    def _initialize(self) -> None:
        """Tear down existing providers and rebuild cache delegates for the library."""
        cache: _ItemProvider
        for cache in self._manifest_providers.values():
            cache.on_shutdown()
        for cache in self._conversion_graph_providers.values():
            cache.on_shutdown()
        for cache in self._target_providers.values():
            cache.on_shutdown()
        for cache in self._settings_providers.values():
            cache.on_shutdown()
        self._manifest_providers = dict()
        self._conversion_graph_providers = dict()
        self._target_providers = dict()
        self._settings_providers = dict()
        if not self._library:
            return
        # The common library is its own filesystem store; only remote/other
        # libraries get a local cache mirror.
        if not self._library.id == COMMON_LIBRARY_ID:
            self._manifest_cache = FilesystemManifest(
                root_directory='{0}/{1}'.format(cache_directory, self._library.id)
            )
            self._conversion_graph_cache = Filesystem(
                root_directory='{0}/{1}/ConversionGraph'.format(cache_directory, self._library.id)
            )
            self._target_cache = Filesystem(
                root_directory='{0}/{1}/Target'.format(cache_directory, self._library.id)
            )
            self._settings_cache = FilesystemSettings(
                root_directory='{0}/{1}'.format(cache_directory, self._library.id)
            )
        if not self._library.id == COMMON_LIBRARY_ID and not self._library.is_read_only:
            self._update_cache()

    def _update_cache(self) -> None:
        """Mirror every store table of a writable library into the local cache."""
        if self._library.is_read_only:
            return
        self._update_cache_table(
            source=self._library.manifest,
            target=self._manifest_cache,
            providers=self._manifest_providers,
        )
        self._update_cache_table(
            source=self._library.conversion_graph,
            target=self._conversion_graph_cache,
            providers=self._conversion_graph_providers,
        )
        self._update_cache_table(
            source=self._library.target,
            target=self._target_cache,
            providers=self._target_providers,
        )
        self._update_cache_table(
            source=self._library.settings,
            target=self._settings_cache,
            providers=self._settings_providers,
        )

    def _update_cache_table(self, source: IDelegate, target: IDelegate, providers: dict) -> None:
        """Create/refresh an _ItemProvider per store id and push it into the cache."""
        if self._library.is_read_only:
            return
        if not source or not target:
            return
        for identifier in source.get_ids():
            if identifier not in providers.keys():
                provider = _ItemProvider(
                    identifier=identifier,
                    library_delegate=source,
                    cache_delegate=target
                )
                providers[identifier] = provider
            else:
                provider = providers[identifier]
            provider.update_cache()

    def get_settings(self) -> typing.List[data.Settings]:
        """Read and return all Settings items in the library store."""
        if not self._library.settings:
            return []
        settings: typing.List[data.Settings] = []
        for identifier in self._library.settings.get_ids():
            if identifier not in self._settings_providers.keys():
                cache = _ItemProvider(
                    identifier=identifier,
                    library_delegate=self._library.settings,
                    cache_delegate=self._settings_cache
                )
                self._settings_providers[identifier] = cache
            else:
                cache = self._settings_providers[identifier]
            cache.read()
            setting = typing.cast(data.Settings, cache.content)
            settings.append(setting)
        return settings

    def get_manifests(self) -> typing.List[data.ConversionManifest]:
        """Read all conversion manifests, resolving each map's conversion graph."""
        delegate = self._library.manifest if self._library.manifest else self._manifest_cache
        if not delegate:
            return []
        manifests: typing.List[data.ConversionManifest] = []
        conversion_graphs: typing.List[data.ConversionGraph] = None
        for identifier in delegate.get_ids():
            if identifier not in self._manifest_providers.keys():
                cache = _ItemProvider(
                    identifier=identifier,
                    library_delegate=self._library.manifest,
                    cache_delegate=self._manifest_cache
                )
                self._manifest_providers[identifier] = cache
            else:
                cache = self._manifest_providers[identifier]
            cache.read()
            manifest = typing.cast(data.ConversionManifest, cache.content)
            # Lazily load graphs only when a manifest actually needs resolving.
            if not conversion_graphs:
                conversion_graphs = self.get_conversion_graphs()
            for item in manifest.conversion_maps:
                if not item._conversion_graph:
                    for conversion_graph in conversion_graphs:
                        if conversion_graph.id == item.conversion_graph_id:
                            item._conversion_graph = conversion_graph
                            break
            manifests.append(manifest)
        if POLLING:
            if self._library.manifest and not self._manifest_subscription:
                self._manifest_subscription = self._library.manifest.add_change_subscription(callback=self._on_store_manifest_changes)
        return manifests

    def get_conversion_graphs(self) -> typing.List[data.ConversionGraph]:
        """Read all conversion graphs; unreadable items are warned about and skipped."""
        delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache
        if not delegate:
            return []
        conversion_graphs: typing.List[data.ConversionGraph] = []
        for identifier in delegate.get_ids():
            if identifier not in self._conversion_graph_providers.keys():
                cache = _ItemProvider(
                    identifier=identifier,
                    library_delegate=self._library.conversion_graph,
                    cache_delegate=self._conversion_graph_cache
                )
                try:
                    cache.read()
                except Exception as error:
                    print('Warning: Universal Material Map error reading Conversion Graph data with identifier "{0}". Graph will not be available for use inside UMM.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(identifier, error, traceback.format_exc()))
                    continue
                self._conversion_graph_providers[identifier] = cache
            else:
                cache = self._conversion_graph_providers[identifier]
                try:
                    cache.read()
                except Exception as error:
                    print('Warning: Universal Material Map error reading Conversion Graph data with identifier "{0}". Graph will not be available for use inside UMM.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(identifier, error, traceback.format_exc()))
                    continue
            conversion_graph = typing.cast(data.ConversionGraph, cache.content)
            conversion_graph._library = self._library
            conversion_graph.filename = identifier
            conversion_graph._exists_on_disk = True
            conversion_graphs.append(conversion_graph)
        if POLLING:
            if self._library.conversion_graph and not self._conversion_graph_subscription:
                self._conversion_graph_subscription = self._library.conversion_graph.add_change_subscription(callback=self._on_store_conversion_graph_changes)
        return conversion_graphs

    def get_targets(self) -> typing.List[data.Target]:
        """Read and return all Target items in the library store."""
        delegate = self._library.target if self._library.target else self._target_cache
        if not delegate:
            return []
        targets: typing.List[data.Target] = []
        for identifier in delegate.get_ids():
            if identifier not in self._target_providers.keys():
                cache = _ItemProvider(
                    identifier=identifier,
                    library_delegate=self._library.target,
                    cache_delegate=self._target_cache
                )
                self._target_providers[identifier] = cache
            else:
                cache = self._target_providers[identifier]
            cache.read()
            target = typing.cast(data.Target, cache.content)
            target.store_id = identifier
            targets.append(target)
        if POLLING:
            if self._library.target and not self._target_subscription:
                self._target_subscription = self._library.target.add_change_subscription(callback=self._on_store_target_changes)
        return targets

    def _on_store_manifest_changes(self, event: ChangeEvent) -> None:
        # Polling-only callback; currently just logs the event.
        if not POLLING:
            raise NotImplementedError()
        print('_on_store_manifest_changes', event)

    def _on_store_conversion_graph_changes(self, event: ChangeEvent) -> None:
        # Polling-only callback; currently just logs the event.
        if not POLLING:
            raise NotImplementedError()
        print('_on_store_conversion_graph_changes', event)

    def _on_store_target_changes(self, event: ChangeEvent) -> None:
        # Polling-only callback; currently just logs the event.
        if not POLLING:
            raise NotImplementedError()
        print('_on_store_target_changes...', event, self)

    def revert(self, item: data.Serializable) -> bool:
        """
        Returns True if the item existed in a data store and was successfully reverted.
        """
        if isinstance(item, data.ConversionGraph):
            if item.filename not in self._conversion_graph_providers.keys():
                return False
            # Preserve identity fields that deserialization would clobber.
            filename = item.filename
            library = item.library
            cache = self._conversion_graph_providers[item.filename]
            cache.revert()
            item.filename = filename
            item._library = library
            item._exists_on_disk = True
            return True
        if isinstance(item, data.Target):
            if item.store_id not in self._target_providers.keys():
                return False
            cache = self._target_providers[item.store_id]
            cache.revert()
            return True
        if isinstance(item, data.ConversionManifest):
            if item.store_id not in self._manifest_providers.keys():
                return False
            cache = self._manifest_providers[item.store_id]
            cache.revert()
            return True
        if isinstance(item, data.Settings):
            if item.store_id not in self._settings_providers.keys():
                return False
            cache = self._settings_providers[item.store_id]
            cache.revert()
            return True
        # FIX: previously fell off the end (returned None) for unsupported
        # types; return an explicit False to honour the bool contract.
        return False

    def write(self, item: data.Serializable, identifier: str = None, overwrite: bool = False) -> None:
        """Persist *item* to its store table; existing items are only replaced when *overwrite* is True."""
        if isinstance(item, data.Settings):
            if not item.store_id:
                raise Exception('Not supported: Settings must have a valid store id in order to write the item.')
            if not self._library.settings:
                raise Exception('Library "{0}" with id="{1}" does not support a Settings store.'.format(self._library.name, self._library.id))
            if item.store_id not in self._settings_providers.keys():
                cache = _ItemProvider(
                    identifier=item.store_id,
                    library_delegate=self._library.settings,
                    cache_delegate=self._settings_cache
                )
                self._settings_providers[item.store_id] = cache
            else:
                if not overwrite:
                    return
                cache = self._settings_providers[item.store_id]
            cache.write(content=item)
            return
        if isinstance(item, data.ConversionManifest):
            if not item.store_id:
                raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            if item.store_id not in self._manifest_providers.keys():
                cache = _ItemProvider(
                    identifier=item.store_id,
                    library_delegate=self._library.manifest,
                    cache_delegate=self._manifest_cache
                )
                self._manifest_providers[item.store_id] = cache
            else:
                if not overwrite:
                    return
                cache = self._manifest_providers[item.store_id]
            cache.write(content=item)
            return
        if isinstance(item, data.ConversionGraph):
            if not item.filename and not identifier:
                raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            key = identifier if identifier else item.filename
            if key not in self._conversion_graph_providers.keys():
                cache = _ItemProvider(
                    identifier=key,
                    library_delegate=self._library.conversion_graph,
                    cache_delegate=self._conversion_graph_cache
                )
                self._conversion_graph_providers[key] = cache
            else:
                if not overwrite:
                    return
                cache = self._conversion_graph_providers[key]
            item.revision += 1
            cache.write(content=item)
            if identifier:
                item.filename = identifier
            item._exists_on_disk = True
            item._library = self._library
            return
        if isinstance(item, data.Target):
            if not item.store_id:
                raise Exception(
                    'Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            if item.store_id not in self._target_providers.keys():
                cache = _ItemProvider(
                    identifier=item.store_id,
                    library_delegate=self._library.target,
                    cache_delegate=self._target_cache
                )
                self._target_providers[item.store_id] = cache
            else:
                if not overwrite:
                    return
                cache = self._target_providers[item.store_id]
            cache.write(content=item)
            return
        raise NotImplementedError()

    def delete(self, item: data.Serializable) -> None:
        """Delete *item* from its store table and drop its provider."""
        if isinstance(item, data.Settings):
            if not item.store_id:
                raise Exception('Not supported: Settings must have a valid store id in order to write the item.')
            if not self._library.settings:
                raise Exception('Library "{0}" with id="{1}" does not support a Settings store.'.format(self._library.name, self._library.id))
            if item.store_id not in self._settings_providers.keys():
                return
            cache = self._settings_providers[item.store_id]
            cache.delete()
            cache.on_shutdown()
            del self._settings_providers[item.store_id]
            return
        if isinstance(item, data.ConversionManifest):
            if not item.store_id:
                raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            if item.store_id not in self._manifest_providers.keys():
                return
            cache = self._manifest_providers[item.store_id]
            cache.delete()
            cache.on_shutdown()
            del self._manifest_providers[item.store_id]
            return
        if isinstance(item, data.ConversionGraph):
            if not item.filename:
                raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            if item.filename not in self._conversion_graph_providers.keys():
                return
            cache = self._conversion_graph_providers[item.filename]
            cache.delete()
            cache.on_shutdown()
            del self._conversion_graph_providers[item.filename]
            return
        if isinstance(item, data.Target):
            if not item.store_id:
                raise Exception(
                    'Not supported: Conversion Manifest must have a valid store id in order to write the item.')
            if item.store_id not in self._target_providers.keys():
                return
            cache = self._target_providers[item.store_id]
            # FIX: this branch previously called cache.write(content=item),
            # re-saving the target instead of deleting it (copy-paste error;
            # every sibling branch calls cache.delete()).
            cache.delete()
            cache.on_shutdown()
            del self._target_providers[item.store_id]
            return
        raise NotImplementedError()

    def can_show_in_store(self, item: data.Serializable) -> bool:
        """Return True if the item's backing store can reveal it (e.g. in a file browser)."""
        if isinstance(item, data.ConversionGraph):
            delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache
            if not delegate:
                return False
            return delegate.can_show_in_store(identifier=item.filename)
        if isinstance(item, data.Target):
            delegate = self._library.target if self._library.target else self._target_cache
            if not delegate:
                return False
            return delegate.can_show_in_store(identifier=item.store_id)
        return False

    def show_in_store(self, item: data.Serializable) -> None:
        """Ask the item's backing store to reveal it (no-op when unsupported)."""
        if isinstance(item, data.ConversionGraph):
            delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache
            if not delegate:
                return
            return delegate.show_in_store(identifier=item.filename)
        if isinstance(item, data.Target):
            delegate = self._library.target if self._library.target else self._target_cache
            if not delegate:
                return
            return delegate.show_in_store(identifier=item.store_id)

    @property
    def library(self) -> data.Library:
        return self._library

    @library.setter
    def library(self, value: data.Library) -> None:
        # Swap the backing library: unsubscribe from the old store (polling
        # builds), then rebuild all providers and caches.
        if self._library == value:
            return
        if POLLING:
            if self._library:
                if self._manifest_subscription and self._library.manifest:
                    self._library.manifest.remove_change_subscription(subscription_id=self._manifest_subscription)
                if self._conversion_graph_subscription and self._library.conversion_graph:
                    self._library.conversion_graph.remove_change_subscription(subscription_id=self._conversion_graph_subscription)
                if self._target_subscription and self._library.target:
                    self._library.target.remove_change_subscription(subscription_id=self._target_subscription)
        self._library = value
        self._initialize()
@Singleton
class __Manager:
    """Singleton facade over all registered UMM libraries.

    Owns a _LibraryProvider per library, the built-in operator set, and the
    persisted Settings object; the module-level functions below delegate here.
    """

    def __init__(self):
        install()
        self._library_caches: typing.Dict[str, _LibraryProvider] = dict()
        # Built-in operator set available to all conversion graphs.
        self._operators: typing.List[data.Operator] = [
            operator.And(),
            operator.Add(),
            operator.BooleanSwitch(),
            operator.ColorSpaceResolver(),
            operator.ConstantBoolean(),
            operator.ConstantFloat(),
            operator.ConstantInteger(),
            operator.ConstantRGB(),
            operator.ConstantRGBA(),
            operator.ConstantString(),
            operator.Equal(),
            operator.GreaterThan(),
            operator.LessThan(),
            operator.ListGenerator(),
            operator.ListIndex(),
            operator.MayaTransparencyResolver(),
            operator.MergeRGB(),
            operator.MergeRGBA(),
            operator.MDLColorSpace(),
            operator.MDLTextureResolver(),
            operator.Multiply(),
            operator.Not(),
            operator.Or(),
            operator.Remap(),
            operator.SplitRGB(),
            operator.SplitRGBA(),
            operator.SplitTextureData(),
            operator.Subtract(),
            operator.ValueResolver(),
            operator.ValueTest(),
        ]
        # Guard against duplicate operator ids in the built-in set.
        for o in self._operators:
            if len([item for item in self._operators if item.id == o.id]) == 1:
                continue
            raise Exception('Operator id "{0}" is not unique.'.format(o.id))
        provider = _LibraryProvider(library=COMMON_LIBRARY)
        self._library_caches[COMMON_LIBRARY_ID] = provider
        render_contexts = [
            'MDL',
            'USDPreview',
            'Blender',
        ]
        settings = provider.get_settings()
        if len(settings) == 0:
            # First run: create settings with the default render contexts.
            self._settings: data.Settings = data.Settings()
            for render_context in render_contexts:
                # FIX: the append was previously duplicated, registering every
                # default render context twice in fresh settings.
                self._settings.render_contexts.append(render_context)
            self._save_settings()
        else:
            self._settings: data.Settings = settings[0]
            added_render_context = False
            for render_context in render_contexts:
                if render_context not in self._settings.render_contexts:
                    self._settings.render_contexts.append(render_context)
                    added_render_context = True
            if added_render_context:
                self._save_settings()
        # Replace persisted copies of the default libraries with the built-in
        # definitions, and append any defaults not yet persisted.
        for i in range(len(self._settings.libraries)):
            for library in DEFAULT_LIBRARIES:
                if self._settings.libraries[i].id == library.id:
                    self._settings.libraries[i] = library
                    break
        for library in DEFAULT_LIBRARIES:
            if len([o for o in self._settings.libraries if o.id == library.id]) == 0:
                self._settings.libraries.append(library)
        for library in self._settings.libraries:
            self.register_library(library=library)

    def _save_settings(self) -> None:
        """Persist the Settings object through the common library provider."""
        if COMMON_LIBRARY_ID not in self._library_caches.keys():
            raise Exception('Not supported: Common library not in cache. Unable to save settings.')
        cache = self._library_caches[COMMON_LIBRARY_ID]
        cache.write(item=self._settings, identifier=None, overwrite=True)

    def register_library(self, library: data.Library) -> None:
        """Add or update a library in settings and (re)bind its provider."""
        preferences_changed = False
        to_remove = []
        for item in self._settings.libraries:
            if item.id == library.id:
                if not item == library:
                    to_remove.append(item)
        for item in to_remove:
            self._settings.libraries.remove(item)
            preferences_changed = True
        if library not in self._settings.libraries:
            self._settings.libraries.append(library)
            preferences_changed = True
        if preferences_changed:
            self._save_settings()
        if library.id not in self._library_caches.keys():
            self._library_caches[library.id] = _LibraryProvider(library=library)
        else:
            cache = self._library_caches[library.id]
            cache.library = library

    def register_render_contexts(self, context: str) -> None:
        """Register a render context such as MDL or USD Preview."""
        if context not in self._settings.render_contexts:
            self._settings.render_contexts.append(context)
            self._save_settings()

    def get_assembly(self, reference: data.TargetInstance) -> typing.Union[data.Target, None]:
        """Find the Target matching *reference* across all libraries, or None."""
        cache: _LibraryProvider
        for cache in self._library_caches.values():
            for target in cache.get_targets():
                if target.id == reference.target_id:
                    return target
        return None

    def get_assemblies(self, library: data.Library = None) -> typing.List[data.Target]:
        """Return all Targets, optionally limited to one library."""
        if library:
            if library.id not in self._library_caches.keys():
                return []
            cache = self._library_caches[library.id]
            return cache.get_targets()
        targets: typing.List[data.Target] = []
        cache: _LibraryProvider
        for cache in self._library_caches.values():
            targets.extend(cache.get_targets())
        return targets

    def get_documents(self, library: data.Library = None) -> typing.List[data.ConversionGraph]:
        """Return all conversion graphs (optionally per library), fully resolved."""
        conversion_graphs: typing.List[data.ConversionGraph] = []
        if library:
            if library.id not in self._library_caches.keys():
                return []
            cache = self._library_caches[library.id]
            conversion_graphs = cache.get_conversion_graphs()
        else:
            cache: _LibraryProvider
            for cache in self._library_caches.values():
                conversion_graphs.extend(cache.get_conversion_graphs())
        for conversion_graph in conversion_graphs:
            self._completed_document_serialization(conversion_graph=conversion_graph)
        return conversion_graphs

    def get_document(self, library: data.Library, document_filename: str) -> typing.Union[data.ConversionGraph, typing.NoReturn]:
        """Return the conversion graph stored under *document_filename*, or None."""
        if library.id not in self._library_caches.keys():
            return None
        cache = self._library_caches[library.id]
        for conversion_graph in cache.get_conversion_graphs():
            if conversion_graph.filename == document_filename:
                self._completed_document_serialization(conversion_graph=conversion_graph)
                return conversion_graph
        return None

    def can_show_in_filesystem(self, document: data.ConversionGraph) -> bool:
        """Return True if the document's store can reveal it in a file browser."""
        if not document.library:
            return False
        if document.library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[document.library.id]
        return cache.can_show_in_store(item=document)

    def show_in_filesystem(self, document: data.ConversionGraph) -> None:
        """Reveal the document in its backing store, if possible."""
        if not document.library:
            return
        if document.library.id not in self._library_caches.keys():
            return
        cache = self._library_caches[document.library.id]
        cache.show_in_store(item=document)

    def get_document_by_id(self, library: data.Library, document_id: str) -> typing.Union[data.ConversionGraph, typing.NoReturn]:
        """Return the conversion graph with the given id, or None."""
        for conversion_graph in self.get_documents(library=library):
            if conversion_graph.id == document_id:
                return conversion_graph
        return None

    def create_new_document(self, library: data.Library) -> data.ConversionGraph:
        """Create an empty, unsaved conversion graph bound to *library*."""
        conversion_graph = data.ConversionGraph()
        conversion_graph._library = library
        conversion_graph.filename = ''
        self._completed_document_serialization(conversion_graph=conversion_graph)
        return conversion_graph

    def _completed_document_serialization(self, conversion_graph: data.ConversionGraph) -> None:
        # Resolve target references and (re)build the DAG when anything changed.
        build_dag = len(conversion_graph.target_instances) == 0
        for reference in conversion_graph.target_instances:
            if reference.target and reference.target.id == reference.target_id:
                continue
            reference.target = self.get_assembly(reference=reference)
            build_dag = True
        if build_dag:
            conversion_graph.build_dag()

    def create_from_source(self, source: data.ConversionGraph) -> data.ConversionGraph:
        """Clone *source* into a new graph with a fresh id."""
        new_conversion_graph = data.ConversionGraph()
        new_id = new_conversion_graph.id
        new_conversion_graph.deserialize(data=source.serialize())
        new_conversion_graph._id = new_id
        new_conversion_graph._library = source.library
        new_conversion_graph.filename = source.filename
        self._completed_document_serialization(conversion_graph=new_conversion_graph)
        return new_conversion_graph

    def revert(self, library: data.Library, instance: data.Serializable) -> bool:
        """
        Returns True if the file existed on disk and was successfully reverted.
        """
        if not library:
            return False
        if library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[library.id]
        if cache.revert(item=instance):
            if isinstance(instance, data.ConversionGraph):
                self._completed_document_serialization(conversion_graph=instance)
            return True
        return False

    def find_documents(self, source_class: str, library: data.Library = None) -> typing.List[data.ConversionGraph]:
        """Return graphs whose source node's target contains a node of *source_class*."""
        conversion_graphs = []
        for conversion_graph in self.get_documents(library=library):
            if not conversion_graph.source_node:
                continue
            for node in conversion_graph.source_node.target.nodes:
                if node.class_name == source_class:
                    conversion_graphs.append(conversion_graph)
        return conversion_graphs

    def find_assembly(self, assembly_class: str, library: data.Library = None) -> typing.List[data.Target]:
        """Return targets containing a node of *assembly_class*."""
        targets = []
        for target in self.get_assemblies(library=library):
            for node in target.nodes:
                if node.class_name == assembly_class:
                    targets.append(target)
                    break
        return targets

    def _get_manifest_filepath(self, library: data.Library) -> str:
        return '{0}/ConversionManifest.json'.format(library.path)

    def get_conversion_manifest(self, library: data.Library) -> data.ConversionManifest:
        """Return the library's first manifest (resolved), or an empty one."""
        if library.id not in self._library_caches.keys():
            return data.ConversionManifest()
        cache = self._library_caches[library.id]
        manifests = cache.get_manifests()
        if len(manifests):
            manifest = manifests[0]
            for conversion_map in manifest.conversion_maps:
                if conversion_map.conversion_graph is None:
                    continue
                self._completed_document_serialization(conversion_graph=conversion_map.conversion_graph)
            return manifest
        return data.ConversionManifest()

    def save_conversion_manifest(self, library: data.Library, manifest: data.ConversionManifest) -> None:
        """Persist *manifest* into the library's store, if registered."""
        if library.id not in self._library_caches.keys():
            return
        cache = self._library_caches[library.id]
        cache.write(item=manifest)

    def write(self, filename: str, instance: data.Serializable, library: data.Library, overwrite: bool = False) -> None:
        """Write *instance* to *library* under *filename* (".json" enforced)."""
        if not filename.strip():
            raise Exception('Invalid filename: empty string.')
        if library.id not in self._library_caches.keys():
            raise Exception('Cannot write to a library that is not registered')
        if not filename.lower().endswith('.json'):
            filename = '{0}.json'.format(filename)
        cache = self._library_caches[library.id]
        cache.write(item=instance, identifier=filename, overwrite=overwrite)

    def delete_document(self, document: data.ConversionGraph) -> bool:
        """Delete *document* from its library; True when a registered library handled it."""
        if not document.library:
            return False
        if document.library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[document.library.id]
        cache.delete(item=document)
        return True

    def is_graph_entity_id(self, identifier: str) -> bool:
        """Return True if *identifier* names a known Target."""
        for item in self.get_assemblies():
            if item.id == identifier:
                return True
        return False

    def get_graph_entity(self, identifier: str) -> data.GraphEntity:
        """Instantiate a graph entity for a Target or Operator id; raises when unknown."""
        for item in self.get_assemblies():
            if item.id == identifier:
                return data.TargetInstance.FromAssembly(assembly=item)
        for item in self.get_operators():
            if item.id == identifier:
                return data.OperatorInstance.FromOperator(operator=item)
        raise Exception('Graph Entity with id "{0}" cannot be found'.format(identifier))

    def register_operator(self, operator: data.Operator):
        """Add an operator to the registry if not already present."""
        if operator not in self._operators:
            self._operators.append(operator)

    def get_operators(self) -> typing.List[data.Operator]:
        return self._operators

    def is_operator_id(self, identifier: str) -> bool:
        """Return True if *identifier* names a registered operator."""
        for item in self.get_operators():
            if item.id == identifier:
                return True
        return False

    def on_shutdown(self):
        """Release every library provider and clear the cache table."""
        if len(self._library_caches.keys()):
            provider: _LibraryProvider
            for provider in self._library_caches.values():
                provider.library = None
            self._library_caches = dict()

    @property
    def libraries(self) -> typing.List[data.Library]:
        return self._settings.libraries
def register_library(library: data.Library) -> None:
    """Register *library* with the store manager singleton."""
    __Manager().register_library(library=library)
def get_libraries() -> typing.List[data.Library]:
    """Return all libraries currently registered in settings."""
    return __Manager().libraries
def get_library(library_id: str) -> data.Library:
    """Return the registered library with id *library_id*; raises when not found."""
    for library in __Manager().libraries:
        if library.id == library_id:
            return library
    raise Exception('Library with id "{0}" not found.'.format(library_id))
def get_assembly(reference: data.TargetInstance) -> data.Target:
    """Resolve the Target referenced by *reference* across all libraries."""
    # TODO: Is this still needed?
    return __Manager().get_assembly(reference=reference)
def write(filename: str, instance: data.Serializable, library: data.Library, overwrite: bool = False) -> None:
    """Write *instance* to *library* under *filename* via the manager."""
    __Manager().write(filename=filename, instance=instance, library=library, overwrite=overwrite)
def get_assemblies(library: data.Library = None) -> typing.List[data.Target]:
    """Return all Targets, optionally limited to one library."""
    return __Manager().get_assemblies(library=library)
def is_graph_entity_id(identifier: str) -> bool:
""" """
return __Manager().is_graph_entity_id(identifier=identifier)
def get_graph_entity(identifier: str) -> data.GraphEntity:
""" """
return __Manager().get_graph_entity(identifier=identifier)
def get_documents(library: data.Library = None) -> typing.List[data.ConversionGraph]:
    """Return the ConversionGraph documents in *library*, or in all libraries when None."""
    return __Manager().get_documents(library=library)

def get_document(library: data.Library, document_filename: str) -> typing.Union[data.ConversionGraph, typing.NoReturn]:
    """Return the named ConversionGraph from *library*."""
    # TODO: Is this still needed?
    return __Manager().get_document(library=library, document_filename=document_filename)

def create_new_document(library: data.Library) -> data.ConversionGraph:
    """Create a new, unsaved ConversionGraph bound to *library*."""
    return __Manager().create_new_document(library=library)

def create_from_source(source: data.ConversionGraph) -> data.ConversionGraph:
    """Create a new ConversionGraph copied from *source*."""
    return __Manager().create_from_source(source=source)

def revert(library: data.Library, instance: data.Serializable) -> bool:
    """
    Returns True if the file existed on disk and was successfully reverted.
    """
    return __Manager().revert(library, instance)

def find_documents(source_class: str, library: data.Library = None) -> typing.List[data.ConversionGraph]:
    """Return ConversionGraphs whose source node class matches *source_class*."""
    # TODO: Is this still needed?
    return __Manager().find_documents(source_class=source_class, library=library)

def find_assembly(assembly_class: str, library: data.Library = None) -> typing.List[data.Target]:
    """Return Target assemblies whose class matches *assembly_class*."""
    # TODO: Is this still needed?
    return __Manager().find_assembly(assembly_class=assembly_class, library=library)
def register_operator(operator: data.Operator):
    """Register *operator* with the manager."""
    __Manager().register_operator(operator=operator)

def get_operators() -> typing.List[data.Operator]:
    """Return all registered operators."""
    return __Manager().get_operators()

def is_operator_id(identifier: str) -> bool:
    """True when *identifier* refers to a registered operator."""
    return __Manager().is_operator_id(identifier=identifier)

def delete_document(document: data.ConversionGraph) -> bool:
    """Delete *document* from its library; returns True on success."""
    return __Manager().delete_document(document=document)

def get_conversion_manifest(library: data.Library) -> data.ConversionManifest:
    """Return the ConversionManifest stored in *library*."""
    return __Manager().get_conversion_manifest(library=library)

def get_render_contexts() -> typing.List[str]:
    """Returns list of registered render contexts."""
    return __Manager()._settings.render_contexts[:]

def register_render_contexts(context: str) -> None:
    """Register a render context such as MDL or USD Preview."""
    __Manager().register_render_contexts(context=context)

def can_show_in_filesystem(document: data.ConversionGraph) -> bool:
    """Checks if the operating system can display where a document is saved on disk."""
    return __Manager().can_show_in_filesystem(document=document)

def show_in_filesystem(document: data.ConversionGraph) -> None:
    """Makes the operating system display where a document is saved on disk."""
    return __Manager().show_in_filesystem(document=document)

def on_shutdown() -> None:
    """Release cached resources held by the manager at shutdown."""
    return __Manager().on_shutdown()
| 44,912 | Python | 38.60582 | 254 | 0.614557 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/delegate.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import os
import json
import subprocess
import threading
import platform
import uuid
from ..feature import POLLING
from .core import ChangeEvent, IDelegate
class Filesystem(IDelegate):
    """IDelegate implementation storing each item as a JSON file in a root directory.

    When the POLLING feature flag is enabled the delegate can also watch the
    directory for external changes and notify subscribers with ChangeEvent
    instances. Writes and deletes performed through this instance are tracked
    in pending-id lists so that they do not trigger notifications; only
    external modifications should notify.
    """

    def __init__(self, root_directory: str):
        super(Filesystem, self).__init__()
        if POLLING:
            self.__is_polling: bool = False
            self.__poll_timer: threading.Timer = None
            self.__poll_data: typing.Dict[str, float] = dict()
            self.__poll_subscriptions: typing.Dict[uuid.uuid4, typing.Callable[[ChangeEvent], typing.NoReturn]] = dict()
            self.__pending_write_ids: typing.List[str] = []
            self.__pending_delete_ids: typing.List[str] = []
        self._root_directory: str = root_directory

    def __start_polling(self) -> None:
        if not POLLING:
            return
        if self.__is_polling:
            return
        self.__is_polling = True
        # Store current state in self.__poll_data so that __on_timer only
        # notifies of changes made after polling started.
        self.__poll_data = dict()
        identifiers = self.get_ids()
        for identifier in identifiers:
            filepath = '{0}/{1}'.format(self._root_directory, identifier)
            modified_time = os.path.getmtime(filepath) if platform.system() == 'Windows' else os.stat(filepath).st_mtime
            self.__poll_data[identifier] = modified_time
        self.__poll_timer = threading.Timer(5, self.__on_timer)
        self.__poll_timer.start()

    def __on_timer(self):
        if not POLLING:
            return
        if not self.__is_polling:
            return
        try:
            identifiers = self.get_ids()
            # Ids with writes/deletes in flight from this process are skipped:
            # only external modifications should produce notifications.
            added = [o for o in identifiers if o not in self.__poll_data.keys() and o not in self.__pending_write_ids]
            removed = [o for o in self.__poll_data.keys() if o not in identifiers and o not in self.__pending_delete_ids]
            modified_maybe = [o for o in identifiers if o not in added and o not in removed and o not in self.__pending_write_ids]
            modified = []
            for identifier in modified_maybe:
                filepath = '{0}/{1}'.format(self._root_directory, identifier)
                modified_time = os.path.getmtime(filepath) if platform.system() == 'Windows' else os.stat(filepath).st_mtime
                if self.__poll_data[identifier] == modified_time:
                    continue
                modified.append(identifier)
                self.__poll_data[identifier] = modified_time
            for identifier in added:
                filepath = '{0}/{1}'.format(self._root_directory, identifier)
                self.__poll_data[identifier] = os.path.getmtime(filepath) if platform.system() == 'Windows' else os.stat(filepath).st_mtime
            for identifier in removed:
                del self.__poll_data[identifier]
            if len(added) + len(modified) + len(removed) > 0:
                event = ChangeEvent(added=tuple(added), modified=tuple(modified), removed=tuple(removed))
                for callbacks in self.__poll_subscriptions.values():
                    callbacks(event)
        except Exception as error:
            print('WARNING: Universal Material Map failed to poll {0} for file changes.\nDetail: {1}'.format(self._root_directory, error))
        # NOTE(review): Timer.run() executes synchronously on this timer thread
        # (sleeps the interval, then invokes __on_timer again), so polling loops
        # on a single thread until __stop_polling() flips __is_polling off.
        self.__poll_timer.run()

    def __stop_polling(self) -> None:
        if not POLLING:
            return
        self.__is_polling = False
        try:
            self.__poll_timer.cancel()
        except Exception:
            # Timer may never have been started; nothing to cancel.
            pass
        self.__poll_data = dict()

    def can_poll(self) -> bool:
        """States whether this delegate supports change polling (feature-flagged)."""
        if not POLLING:
            return False
        return True

    def start_polling(self):
        """Begin watching the root directory for external changes."""
        if not POLLING:
            return
        self.__start_polling()

    def stop_polling(self):
        """Stop watching the root directory for external changes."""
        if not POLLING:
            return
        self.__stop_polling()

    def add_change_subscription(self, callback: typing.Callable[[ChangeEvent], typing.NoReturn]) -> uuid.uuid4:
        """Subscribe *callback* to change events; returns a subscription id.

        Subscribing the same callback twice returns the existing id. The first
        subscription starts polling.
        """
        if not POLLING:
            raise NotImplementedError('Polling feature not enabled.')
        for key, value in self.__poll_subscriptions.items():
            if value == callback:
                return key
        key = uuid.uuid4()
        self.__poll_subscriptions[key] = callback
        self.start_polling()
        return key

    def remove_change_subscription(self, subscription_id: uuid.uuid4) -> None:
        """Remove a subscription; polling stops when the last one is removed."""
        if not POLLING:
            raise NotImplementedError('Polling feature not enabled.')
        if subscription_id in self.__poll_subscriptions.keys():
            del self.__poll_subscriptions[subscription_id]
        if len(self.__poll_subscriptions.keys()) == 0:
            self.stop_polling()

    def get_ids(self) -> typing.List[str]:
        """Return the ".json" filenames at the top level of the root directory."""
        identifiers: typing.List[str] = []
        for directory, sub_directories, filenames in os.walk(self._root_directory):
            for filename in filenames:
                if not filename.lower().endswith('.json'):
                    continue
                identifiers.append(filename)
            # Only the first os.walk() triple (the root itself) is considered.
            break
        return identifiers

    def read(self, identifier: str) -> typing.Union[typing.Dict, typing.NoReturn]:
        """Load and return the JSON dictionary for *identifier*, or None when absent."""
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        if os.path.exists(filepath):
            try:
                with open(filepath, 'r') as pointer:
                    contents = json.load(pointer)
                    if not isinstance(contents, dict):
                        raise Exception('Not supported: Load of file "{0}" did not resolve to a dictionary. Could be due to reading same file twice too fast.'.format(filepath))
                    return contents
            except Exception as error:
                print('Failed to open file "{0}"'.format(filepath))
                raise error
        return None

    def write(self, identifier: str, contents: typing.Dict) -> None:
        """Serialize *contents* to the identifier's JSON file, creating directories as needed."""
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        if not isinstance(contents, dict):
            raise Exception('Not supported: Argument "contents" is not an instance of dict.')
        if not os.path.exists(self._root_directory):
            os.makedirs(self._root_directory)
        if POLLING:
            if identifier not in self.__pending_write_ids:
                self.__pending_write_ids.append(identifier)
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        try:
            with open(filepath, 'w') as pointer:
                json.dump(contents, pointer, indent=4)
            if POLLING:
                # Store the modified time so this write does not trigger a
                # notification; only external modifications should notify.
                self.__poll_data[identifier] = os.path.getmtime(filepath) if platform.system() == 'Windows' else os.stat(filepath).st_mtime
        finally:
            # Always clear the pending flag, even when the write raised;
            # otherwise external changes to this id would be suppressed forever.
            if POLLING and identifier in self.__pending_write_ids:
                self.__pending_write_ids.remove(identifier)

    def delete(self, identifier: str) -> None:
        """Delete the identifier's JSON file if it exists."""
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        if POLLING:
            if identifier not in self.__pending_delete_ids:
                self.__pending_delete_ids.append(identifier)
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        try:
            if os.path.exists(filepath):
                os.remove(filepath)
            if POLLING:
                # Drop the item from the poll snapshot so this delete does not
                # trigger a notification.
                if identifier in self.__poll_data.keys():
                    del self.__poll_data[identifier]
        finally:
            # Always clear the pending flag, even when the delete raised.
            if POLLING and identifier in self.__pending_delete_ids:
                self.__pending_delete_ids.remove(identifier)

    def can_show_in_store(self, identifier: str) -> bool:
        """True when the identifier's file exists and can be revealed on disk."""
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        return os.path.exists(filepath)

    def show_in_store(self, identifier: str) -> None:
        """Reveal the identifier's file in Windows Explorer (Windows-only command)."""
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        if os.path.exists(filepath):
            subprocess.Popen(r'explorer /select,"{0}"'.format(filepath.replace('/', '\\')))
class FilesystemManifest(Filesystem):
    """Filesystem delegate restricted to the single "ConversionManifest.json" file."""

    def __init__(self, root_directory: str):
        super(FilesystemManifest, self).__init__(root_directory=root_directory)

    def get_ids(self) -> typing.List[str]:
        """Return ["ConversionManifest.json"] when present at the top level, else []."""
        # Only the first os.walk() triple (the root directory itself) is used;
        # a missing root yields no iterations and therefore an empty list.
        for _directory, _sub_directories, filenames in os.walk(self._root_directory):
            return [filename for filename in filenames if filename.lower() == 'conversionmanifest.json']
        return []
class FilesystemSettings(Filesystem):
    """Filesystem delegate restricted to the single "settings.json" file."""

    def __init__(self, root_directory: str):
        super(FilesystemSettings, self).__init__(root_directory=root_directory)

    def get_ids(self) -> typing.List[str]:
        """Return ["settings.json"] when present at the top level, else []."""
        # Only the first os.walk() triple (the root directory itself) is used;
        # a missing root yields no iterations and therefore an empty list.
        for _directory, _sub_directories, filenames in os.walk(self._root_directory):
            return [filename for filename in filenames if filename.lower() == 'settings.json']
        return []
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/core.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import abc
import typing
import uuid
class ChangeEvent(object):
    """Immutable value object describing file changes detected by polling."""

    def __init__(self, added: typing.Tuple[str], modified: typing.Tuple[str], removed: typing.Tuple[str]):
        super(ChangeEvent, self).__init__()
        self.__added: typing.Tuple[str] = added
        self.__modified: typing.Tuple[str] = modified
        self.__removed: typing.Tuple[str] = removed

    def __str__(self):
        segments = (
            'omni.universalmaterialmap.core.service.core.ChangeEvent(',
            '\n\tadded: ',
            ', '.join(self.__added),
            '\n\tmodified: ',
            ', '.join(self.__modified),
            '\n\tremoved: ',
            ', '.join(self.__removed),
            '\n)',
        )
        return ''.join(segments)

    @property
    def added(self) -> typing.Tuple[str]:
        """Identifiers of items created since the last poll."""
        return self.__added

    @property
    def modified(self) -> typing.Tuple[str]:
        """Identifiers of items whose contents changed since the last poll."""
        return self.__modified

    @property
    def removed(self) -> typing.Tuple[str]:
        """Identifiers of items deleted since the last poll."""
        return self.__removed
class IDelegate(metaclass=abc.ABCMeta):
    """ Interface for an online library database table. """

    @abc.abstractmethod
    def get_ids(self) -> typing.List[str]:
        """ Returns a list of identifiers. """
        raise NotImplementedError

    @abc.abstractmethod
    def read(self, identifier: str) -> typing.Dict:
        """ Returns a JSON dictionary if an item by the given identifier exists - otherwise None """
        raise NotImplementedError

    @abc.abstractmethod
    def write(self, identifier: str, contents: typing.Dict) -> str:
        """ Creates or updates an item by using the JSON contents data. """
        raise NotImplementedError

    @abc.abstractmethod
    def delete(self, identifier: str) -> None:
        """ Deletes an item by the given identifier if it exists. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_show_in_store(self, identifier: str) -> bool:
        """ Returns True if the item by the given identifier can be revealed in the platform's file browser. """
        raise NotImplementedError

    @abc.abstractmethod
    def show_in_store(self, identifier: str) -> None:
        """ Reveals the item by the given identifier in the platform's file browser. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_poll(self) -> bool:
        """ States if delegate is able to poll file changes and provide subscription to those changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def start_polling(self) -> None:
        """ Starts monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def stop_polling(self) -> None:
        """ Stops monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def add_change_subscription(self, callback: typing.Callable[[ChangeEvent], typing.NoReturn]) -> uuid.uuid4:
        """ Creates a subscription for file changes in location managed by delegate. """
        raise NotImplementedError

    @abc.abstractmethod
    def remove_change_subscription(self, subscription_id: uuid.uuid4) -> None:
        """ Removes the subscription for file changes in location managed by delegate. """
        raise NotImplementedError
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/resources/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import os
import shutil
import json
import inspect
from ...data import FileUtility, Target, ConversionGraph, ConversionManifest
def __copy(source_path: str, destination_path: str) -> None:
    """Copy a file from *source_path* to *destination_path*, logging and re-raising on failure."""
    try:
        shutil.copy(source_path, destination_path)
    except Exception as error:
        message = 'Error installing UMM data. Unable to copy source "{0}" to destination "{1}".\n Details: {2}'.format(source_path, destination_path, error)
        print(message)
        raise error
def __install_library(source_root: str, destination_root: str) -> None:
    """Copy the JSON library files under *source_root* into *destination_root*.

    Files missing from the destination are copied outright. Existing files are
    only replaced when the source is more recent: Target/ConversionGraph files
    compare by ``revision``; ConversionManifest files compare by
    ``(version_major, version_minor)``. Unreadable destination files are left
    untouched on the assumption they are newer.
    """
    source_root = source_root.replace('\\', '/')
    destination_root = destination_root.replace('\\', '/')
    for directory, sub_directories, filenames in os.walk(source_root):
        directory = directory.replace('\\', '/')
        destination_directory = directory.replace(source_root, destination_root)
        destination_directory_created = os.path.exists(destination_directory)
        for filename in filenames:
            if not filename.lower().endswith('.json'):
                continue
            source_path = '{0}/{1}'.format(directory, filename)
            destination_path = '{0}/{1}'.format(destination_directory, filename)
            if not destination_directory_created:
                try:
                    os.makedirs(destination_directory)
                    destination_directory_created = True
                except Exception as error:
                    print('Universal Material Map error installing data. Unable to create directory "{0}".\n Details: {1}'.format(destination_directory, error))
                    raise error
            if not os.path.exists(destination_path):
                # Fresh install: nothing to compare against.
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed "{0}".'.format(destination_path))
                continue
            try:
                with open(source_path, 'r') as fp:
                    source = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                print('Universal Material Map error installing data. Unable to read source "{0}". \n Details: {1}'.format(source_path, error))
                raise error
            try:
                with open(destination_path, 'r') as fp:
                    destination = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                print('Warning: Universal Material Map error installing data. Unable to read destination "{0}". It is assumed that the installed version is more recent than the one attempted to be installed.\n Details: {1}'.format(destination_path, error))
                continue
            if isinstance(source, Target) and isinstance(destination, Target):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionGraph) and isinstance(destination, ConversionGraph):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionManifest) and isinstance(destination, ConversionManifest):
                # Compare (major, minor) lexicographically. The previous logic
                # compared the minor number in isolation, so a newer major with a
                # lower minor (e.g. 2.0 over 1.5) was incorrectly skipped.
                if (source.version_major, source.version_minor) <= (destination.version_major, destination.version_minor):
                    continue
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed the more recent revision #{0}.{1} of "{2}".'.format(source.version_major, source.version_minor, destination_path))
                continue
def install() -> None:
    """Install or refresh the bundled UMM libraries into ~/Documents/Omniverse."""
    # Directory containing this module; each sub-directory (except __pycache__)
    # is treated as a library bundle.
    current_path = inspect.getfile(inspect.currentframe()).replace('\\', '/')
    current_path = current_path[:current_path.rfind('/')]
    library_names = [
        entry for entry in os.listdir(current_path)
        if os.path.isdir('{0}/{1}'.format(current_path, entry)) and not entry == '__pycache__'
    ]
    libraries_directory = os.path.expanduser('~').replace('\\', '/')
    if not libraries_directory.endswith('/Documents'):
        # os.path.expanduser() has different behaviour between 2.7 and 3
        libraries_directory = '{0}/Documents'.format(libraries_directory)
    libraries_directory = '{0}/Omniverse'.format(libraries_directory)
    for library_name in library_names:
        source_root = '{0}/{1}/UMMLibrary'.format(current_path, library_name)
        destination_root = '{0}/{1}/UMMLibrary'.format(libraries_directory, library_name)
        __install_library(source_root=source_root, destination_root=destination_root)
| 5,935 | Python | 49.735042 | 256 | 0.643134 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/converter.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import sys
import traceback
import os
import re
import json
import math
import bpy
import bpy_types
from . import get_library, get_value, CORE_MATERIAL_PROPERTIES, create_template, developer_mode, get_template_data_by_shader_node, get_template_data_by_class_name, create_from_template
from ..core.converter.core import ICoreConverter, IObjectConverter, IDataConverter
from ..core.converter import util
from ..core.service import store
from ..core.data import Plug, ConversionManifest, DagNode, ConversionGraph, TargetInstance
from ..core.util import get_extension_from_image_file_format
__initialized: bool = False
__manifest: ConversionManifest = None
def _get_manifest() -> ConversionManifest:
    """Return the ConversionManifest, loading and caching it at module level on first use."""
    # The manifest is cached as the module attribute '__manifest' and only
    # fetched from the store once.
    if not getattr(sys.modules[__name__], '__manifest'):
        setattr(sys.modules[__name__], '__manifest', store.get_conversion_manifest(library=get_library()))
        if developer_mode:
            manifest: ConversionManifest = getattr(sys.modules[__name__], '__manifest')
            print('UMM DEBUG: blender.converter._get_manifest(): num entries = "{0}"'.format(len(manifest.conversion_maps)))
            for conversion_map in manifest.conversion_maps:
                print('UMM DEBUG: blender.converter._get_manifest(): Entry: graph_id = "{0}", render_context = "{1}"'.format(conversion_map.conversion_graph_id, conversion_map.render_context))
    return getattr(sys.modules[__name__], '__manifest')

def _get_conversion_graph_impl(source_class: str, render_context: str) -> typing.Union[ConversionGraph, typing.NoReturn]:
    """Find the manifest's ConversionGraph matching *source_class* and *render_context*, or None."""
    if developer_mode:
        print('UMM DEBUG: blender.converter._get_conversion_graph_impl(source_class="{0}", render_context="{1}")'.format(source_class, render_context))
    for conversion_map in _get_manifest().conversion_maps:
        # Each candidate must match the render context, have a graph with a
        # source node, and that source node's root class must match.
        if not conversion_map.render_context == render_context:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.render_context "{0}" != "{1}")'.format(conversion_map.render_context, render_context))
            continue
        if not conversion_map.conversion_graph:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph "{0}")'.format(conversion_map.conversion_graph))
            continue
        if not conversion_map.conversion_graph.source_node:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.source_node "{0}")'.format(conversion_map.conversion_graph.source_node))
            continue
        if not conversion_map.conversion_graph.source_node.target.root_node.class_name == source_class:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph.source_node.target.root_node.class_name "{0}" != "{1}")'.format(conversion_map.conversion_graph.source_node.target.root_node.class_name, source_class))
            continue
        if developer_mode:
            print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found match "{0}")'.format(conversion_map.conversion_graph.filename))
        return conversion_map.conversion_graph
    if developer_mode:
        print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found no match!)')
    return None
def _instance_to_output_entity(graph: ConversionGraph, instance: object) -> TargetInstance:
    """Populate the source-node outputs of *graph* from *instance* and return the output entity.

    Outputs that cannot be read from the instance fall back to their defaults.
    """
    if developer_mode:
        print('_instance_to_output_entity')
    for output in graph.source_node.outputs:
        if output.name == 'node_id_output':
            continue
        if not util.can_set_plug_value(instance=instance, plug=output):
            print('UMM Warning: Unable to set output plug "{0}"... using default value of "{1}"'.format(output.name, output.default_value))
            output.value = output.default_value
            continue
        util.set_plug_value(instance=instance, plug=output)
    return graph.get_output_entity()
def _data_to_output_entity(graph: ConversionGraph, data: typing.List[typing.Tuple[str, typing.Any]]) -> TargetInstance:
    """Populate the source-node outputs of *graph* from (name, value) pairs and return the output entity.

    Names without a matching pair fall back to the output's default value.
    """
    for output in graph.source_node.outputs:
        if output.name == 'node_id_output':
            continue
        matches = [value for name, value in data if name == output.name]
        output.value = matches[0] if matches else output.default_value
    return graph.get_output_entity()
def _instance_to_data(instance: object, graph: ConversionGraph) -> typing.List[typing.Tuple[str, typing.Any]]:
    """Convert *instance* through *graph* into (attribute_name, value) pairs.

    The first pair is always (util.TARGET_CLASS_IDENTIFIER, target_class_name).
    """
    target_instance = _instance_to_output_entity(graph=graph, instance=instance)
    if developer_mode:
        print('_instance_to_data')
        print('\ttarget_instance.target.store_id', target_instance.target.store_id)
    # Compute target attribute values
    attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)]
    for plug in target_instance.inputs:
        if not plug.input:
            continue
        if developer_mode:
            print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid))
        # Invalid plugs are recomputed by their parent node before reading.
        if plug.is_invalid and isinstance(plug.parent, DagNode):
            plug.parent.compute()
        if developer_mode:
            print('\t{} computed value = {}'.format(plug.name, plug.computed_value))
        attribute_data.append((plug.name, plug.computed_value))
    return attribute_data

def _to_convertible_instance(instance: object, material: bpy.types.Material = None) -> object:
    """Resolve *instance* to the shader node that should drive conversion.

    For a material (or a node belonging to one), walks the node links to the
    surface shader feeding the Material Output. Returns the original instance
    when no material context applies, or None when the resolved shader type is
    not convertible. The commented-out branches below list shader types that
    are deliberately not supported yet.
    """
    if developer_mode:
        print('_to_convertible_instance', type(instance))
    if material is None:
        # Find the material owning *instance* (or use it directly).
        if isinstance(instance, bpy.types.Material):
            material = instance
        else:
            for m in bpy.data.materials:
                if not m.use_nodes:
                    continue
                if not len([o for o in m.node_tree.nodes if o == instance]):
                    continue
                material = m
                break
    if material is None:
        return instance
    if not material.use_nodes:
        return material
    if instance == material:
        # Find the Surface Shader.
        for link in material.node_tree.links:
            if not isinstance(link, bpy.types.NodeLink):
                continue
            if not isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial):
                continue
            if not link.to_socket.name == 'Surface':
                continue
            result = _to_convertible_instance(instance=link.from_node, material=material)
            if result is not None:
                return result
        # No surface shader found - return instance
        return instance
    if isinstance(instance, bpy.types.ShaderNodeAddShader):
        # Follow any input link of the Add Shader to a convertible upstream node.
        for link in material.node_tree.links:
            if not isinstance(link, bpy.types.NodeLink):
                continue
            if not link.to_node == instance:
                continue
            # if not link.to_socket.name == 'Shader':
            #     continue
            result = _to_convertible_instance(instance=link.from_node, material=material)
            if result is not None:
                return result
    # if isinstance(instance, bpy.types.ShaderNodeBsdfGlass):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfGlossy):
    #     return instance
    if isinstance(instance, bpy.types.ShaderNodeBsdfPrincipled):
        return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfRefraction):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfTranslucent):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfTransparent):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeEeveeSpecular):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeEmission):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeSubsurfaceScattering):
    #     return instance
    return None
class CoreConverter(ICoreConverter):
    """Base converter exposing the conversion manifest shared by all converters."""

    def __init__(self):
        super(CoreConverter, self).__init__()

    def get_conversion_manifest(self) -> typing.List[typing.Tuple[str, str]]:
        """
        Returns data indicating what source class can be converted to a render context.

        Example: [('lambert', 'MDL'), ('blinn', 'MDL'),]
        """
        return [
            (conversion_map.conversion_graph.source_node.target.root_node.class_name, conversion_map.render_context)
            for conversion_map in _get_manifest().conversion_maps
            if conversion_map.render_context
            and conversion_map.conversion_graph
            and conversion_map.conversion_graph.source_node
        ]
class ObjectConverter(CoreConverter, IObjectConverter):
    """ IObjectConverter implementation for Blender materials and shader nodes. """

    # Class name string used for a plain Blender material instance.
    MATERIAL_CLASS = 'bpy.types.Material'

    # Shader node class names this converter recognizes as conversion subjects.
    SHADER_NODES = [
        'bpy.types.ShaderNodeBsdfGlass',
        'bpy.types.ShaderNodeBsdfGlossy',
        'bpy.types.ShaderNodeBsdfPrincipled',
        'bpy.types.ShaderNodeBsdfRefraction',
        'bpy.types.ShaderNodeBsdfTranslucent',
        'bpy.types.ShaderNodeBsdfTransparent',
        'bpy.types.ShaderNodeEeveeSpecular',
        'bpy.types.ShaderNodeEmission',
        'bpy.types.ShaderNodeSubsurfaceScattering',
    ]

    def can_create_instance(self, class_name: str) -> bool:
        """ Returns true if worker can generate an object of the given class name. """
        if class_name == ObjectConverter.MATERIAL_CLASS:
            return True
        return class_name in ObjectConverter.SHADER_NODES

    def create_instance(self, class_name: str, name: str = 'material') -> object:
        """ Creates an object of the given class name. """
        # NOTE(review): a bpy Material is always returned; for shader-node class
        # names the material is created node-based (use_nodes=True) rather than
        # returning an instance of the named node class — confirm intended.
        material = bpy.data.materials.new(name=name)
        if class_name in ObjectConverter.SHADER_NODES:
            material.use_nodes = True
        return material
    def can_set_plug_value(self, instance: object, plug: Plug) -> bool:
        """ Returns true if worker can set the plug's value given the instance and its attributes. """
        if plug.input:
            return False
        if isinstance(instance, bpy.types.Material):
            # Materials expose values as direct attributes listed in CORE_MATERIAL_PROPERTIES.
            for o in CORE_MATERIAL_PROPERTIES:
                if o[0] == plug.name:
                    return hasattr(instance, plug.name)
            return False
        if isinstance(instance, bpy_types.ShaderNode):
            # Shader nodes expose values through a uniquely named input socket.
            return len([o for o in instance.inputs if o.name == plug.name]) == 1
        return False

    def set_plug_value(self, instance: object, plug: Plug) -> typing.NoReturn:
        """ Sets the plug's value given the value of the instance's attribute named the same as the plug. """
        if isinstance(instance, bpy.types.Material):
            plug.value = getattr(instance, plug.name)
            if developer_mode:
                print('set_plug_value')
                print('\tinstance', type(instance))
                print('\tname', plug.name)
                print('\tvalue', plug.value)
            return
        # Shader node: read the value off the matching input socket.
        inputs = [o for o in instance.inputs if o.name == plug.name]
        if not len(inputs) == 1:
            return
        plug.value = get_value(socket=inputs[0])
        if developer_mode:
            print('\tset_plug_value: {} = {}'.format(plug.name, plug.value))

    def can_set_instance_attribute(self, instance: object, name: str):
        """ Resolves if worker can set an attribute by the given name on the instance. """
        # Attribute writes are not supported by this converter.
        return False

    def set_instance_attribute(self, instance: object, name: str, value: typing.Any) -> typing.NoReturn:
        """ Sets the named attribute on the instance to the value. """
        raise NotImplementedError()

    def can_convert_instance(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        # Instance-to-instance conversion is not supported by this converter.
        return False

    def convert_instance_to_instance(self, instance: object, render_context: str) -> typing.Any:
        """ Converts the instance to another object given the render_context. """
        raise NotImplementedError()
def can_convert_instance_to_data(self, instance: object, render_context: str) -> bool:
    """Return True when the instance (or its convertible graph node) can be exported as data for render_context."""
    # Prefer a convertible shader-graph node over the material wrapper when one exists.
    node = _to_convertible_instance(instance=instance)
    if node is not None and not node == instance:
        if developer_mode:
            print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node)))
        instance = node
    template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance)
    if template is not None:
        # Template-based materials convert whenever a graph exists for the template's shader class.
        conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context)
        return conversion_graph is not None
    # Non-template path: resolve the conversion graph by the fully qualified class name.
    class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__)
    conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
    if not conversion_graph:
        return False
    try:
        destination_target_instance = _instance_to_output_entity(graph=conversion_graph, instance=instance)
    except Exception as error:
        print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error))
        return False
    return destination_target_instance is not None
def convert_instance_to_data(self, instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
    """
    Returns a list of key value pairs in tuples.
    The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.

    Two paths exist: non-template shaders delegate to `_instance_to_data`;
    template-based materials first copy values from the Blender node graph onto the
    conversion graph's source outputs, then compute the target plugs.
    """
    # Prefer a convertible shader-graph node over the material wrapper when one exists.
    node = _to_convertible_instance(instance=instance)
    if node is not None and not node == instance:
        if developer_mode:
            print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node)))
        instance = node
    template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance)
    if template is None:
        # No template: resolve the graph by the instance's fully qualified class name.
        class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__)
        conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
        return _instance_to_data(instance=instance, graph=conversion_graph)
    else:
        conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context)
        if developer_mode:
            print('conversion_graph', conversion_graph.filename)
        # set plug values on conversion_graph.source_node.outputs
        for output in conversion_graph.source_node.outputs:
            if output.name == 'node_id_output':
                continue
            if developer_mode:
                print('output', output.name)
            # Locate the internal graph node that owns this output socket.
            internal_node = None
            for a in conversion_graph.source_node.target.nodes:
                for b in a.outputs:
                    if output.id == b.id:
                        internal_node = a
                        break
                if internal_node is not None:
                    break
            if internal_node is None:
                raise NotImplementedError(f"No internal node found for {output.name}")
            # Map the internal node/socket pair onto a Blender node via the template map.
            map_definition = None
            for o in template_map['maps']:
                if o['blender_node'] == internal_node.id and o['blender_socket'] == output.name:
                    map_definition = o
                    break
            if map_definition is None:
                raise NotImplementedError(f"No map definition found for {output.name}")
            if developer_mode:
                print('map_definition', map_definition['blender_node'])
            if map_definition['blender_node'] == '':
                # Unmapped outputs fall back to their declared default value.
                output.value = output.default_value
                if developer_mode:
                    print('output.value', output.value)
                continue
            for shader_node in material.node_tree.nodes:
                if not shader_node.name == map_definition['blender_node']:
                    continue
                if isinstance(shader_node, bpy.types.ShaderNodeTexImage):
                    if map_definition['blender_socket'] == 'image':
                        if shader_node.image and (shader_node.image.source == 'FILE' or shader_node.image.source == 'TILED'):
                            print(f'UMM: image.filepath: "{shader_node.image.filepath}"')
                            print(f'UMM: image.source: "{shader_node.image.source}"')
                            print(f'UMM: image.file_format: "{shader_node.image.file_format}"')
                            value = shader_node.image.filepath
                            if (shader_node.image.source == 'TILED'):
                                # Find all numbers in the path.
                                numbers = re.findall('[0-9]+', value)
                                if (len(numbers) > 0):
                                    # Get the string representation of the last number.
                                    num_str = str(numbers[-1])
                                    # Replace the number substring with '<UDIM>'.
                                    split_items = value.rsplit(num_str, 1)
                                    if (len(split_items) == 2):
                                        value = split_items[0] + '<UDIM>' + split_items[1]
                            try:
                                if value is None or value == '':
                                    # Packed/unsaved image: synthesize a file name with the proper extension.
                                    file_format = shader_node.image.file_format
                                    file_format = get_extension_from_image_file_format(file_format, shader_node.image.name)
                                    if not shader_node.image.name.endswith(file_format):
                                        value = f'{shader_node.image.name}.{file_format}'
                                    else:
                                        value = shader_node.image.name
                                    output.value = [value, shader_node.image.colorspace_settings.name]
                                else:
                                    # Export as an absolute path plus the colorspace name.
                                    output.value = [os.path.abspath(bpy.path.abspath(value)), shader_node.image.colorspace_settings.name]
                            except Exception as error:
                                print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". Detail: {1}'.format(shader_node.image.filepath, error))
                                output.value = ['', 'raw']
                            print(f'UMM: output.value: "{output.value}"')
                        else:
                            if developer_mode:
                                print('setting default value for output.value')
                                if not shader_node.image:
                                    print('\tshader_node.image == None')
                                else:
                                    print('\tshader_node.image.source == {}'.format(shader_node.image.source))
                            output.value = ['', 'raw']
                        if developer_mode:
                            print('output.value', output.value)
                        break
                    raise NotImplementedError(f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}")
                if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled):
                    socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                    output.value = socket.default_value
                    if developer_mode:
                        print('output.value', output.value)
                    break
                if isinstance(shader_node, bpy.types.ShaderNodeGroup):
                    if map_definition['blender_socket'] not in shader_node.inputs.keys():
                        if developer_mode:
                            print(f'{map_definition["blender_socket"]} not in shader_node.inputs.keys()')
                        break
                    socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                    output.value = socket.default_value
                    if developer_mode:
                        print('output.value', output.value)
                    break
                if isinstance(shader_node, bpy.types.ShaderNodeMapping):
                    socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                    value = socket.default_value
                    if output.name == 'Rotation':
                        # Blender stores rotation in radians; the graph expects degrees.
                        value = [
                            math.degrees(value[0]),
                            math.degrees(value[1]),
                            math.degrees(value[2])
                        ]
                    output.value = value
                    if developer_mode:
                        print('output.value', output.value)
                    break
        # compute to target_instance for output
        target_instance = conversion_graph.get_output_entity()
        if developer_mode:
            print('_instance_to_data')
            print('\ttarget_instance.target.store_id', target_instance.target.store_id)
        # Compute target attribute values
        attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)]
        for plug in target_instance.inputs:
            if not plug.input:
                continue
            if developer_mode:
                print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid))
            if plug.is_invalid and isinstance(plug.parent, DagNode):
                # Recompute stale plugs before reading their value.
                plug.parent.compute()
            if developer_mode:
                print('\t{} computed value = {}'.format(plug.name, plug.computed_value))
            value = plug.computed_value
            if plug.internal_value_type == 'bool':
                # Normalize truthy values to real booleans.
                value = True if value else False
            attribute_data.append((plug.name, value))
        return attribute_data
def can_convert_attribute_values(self, instance: object, render_context: str, destination: object) -> bool:
    """Unsupported operation; always raises NotImplementedError."""
    raise NotImplementedError()
def convert_attribute_values(self, instance: object, render_context: str, destination: object) -> typing.NoReturn:
    """Unsupported operation; always raises NotImplementedError."""
    raise NotImplementedError()
def can_apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool:
    """
    Resolves if worker can convert the instance to another object given the render_context.

    Only bpy.types.Material instances and the 'Blender' render context are supported;
    the source data must resolve to a conversion-graph output entity with a target,
    and multi-node targets additionally require a known template.
    """
    if developer_mode:
        print('can_apply_data_to_instance()')
    if not isinstance(instance, bpy.types.Material):
        if developer_mode:
            print('can_apply_data_to_instance: FALSE - instance not bpy.types.Material')
        return False
    if not render_context == 'Blender':
        if developer_mode:
            print('can_apply_data_to_instance: FALSE - render_context not "Blender"')
        return False
    conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context)
    if not conversion_graph:
        if developer_mode:
            print('can_apply_data_to_instance: FALSE - conversion_graph is None')
        return False
    if developer_mode:
        print(f'conversion_graph {conversion_graph.filename}')
    try:
        destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
    except Exception as error:
        print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error))
        return False
    if developer_mode:
        if destination_target_instance is None:
            print('destination_target_instance is None')
        # BUGFIX: this branch previously re-tested `destination_target_instance is None`,
        # making it unreachable; it must inspect the entity's target instead.
        elif destination_target_instance.target is None:
            print('destination_target_instance.target is None')
        else:
            print('destination_target_instance.target is not None')
    if destination_target_instance is None or destination_target_instance.target is None:
        return False
    if developer_mode:
        print(f'num destination_target_instance.target.nodes: {len(destination_target_instance.target.nodes)}')
    if len(destination_target_instance.target.nodes) < 2:
        # Single-node targets map onto a plain Principled BSDF graph; no template needed.
        return True
    # Multi-node targets require a matching template definition.
    template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name)
    if developer_mode:
        print(f'return {template is not None}')
    return template is not None
def apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> None:
    """
    Implementation requires that `instance` is type `bpy.types.Material`.

    Rebuilds the material's node tree from scratch based on the converted source_data:
    either a plain Principled BSDF graph (single-node target) or a template-driven
    graph (multi-node target). Raises Exception for unsupported instance types or
    render contexts other than 'Blender'.
    """
    if developer_mode:
        print('apply_data_to_instance()')
    if not isinstance(instance, bpy.types.Material):
        raise Exception('instance type not supported', type(instance))
    if not render_context == 'Blender':
        raise Exception('render_context not supported', render_context)
    conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context)
    # This only works for Blender import of MDL/USDPreview. Blender export would need to use convert_instance_to_data().
    destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
    material: bpy.types.Material = instance
    # Make sure we're using nodes
    material.use_nodes = True
    # Remove existing nodes - we're starting from scratch - assuming Blender import
    to_delete = [o for o in material.node_tree.nodes]
    while len(to_delete):
        material.node_tree.nodes.remove(to_delete.pop())
    if len(destination_target_instance.target.nodes) < 2:
        # Single-node target: build a plain Principled BSDF graph and wire textures into it.
        # Create base graph
        output_node = material.node_tree.nodes.new('ShaderNodeOutputMaterial')
        output_node.location = [300.0, 300.0]
        bsdf_node = material.node_tree.nodes.new('ShaderNodeBsdfPrincipled')
        bsdf_node.location = [0.0, 300.0]
        material.node_tree.links.new(bsdf_node.outputs[0], output_node.inputs[0])
        node_cache = dict()  # keyed by "path|colorspace" so a texture is loaded only once
        node_location = [-500, 300]
        # Create graph if texture value
        for plug in destination_target_instance.inputs:
            if not plug.input:
                continue
            if isinstance(plug.computed_value, list) or isinstance(plug.computed_value, tuple):
                # A (path, colorspace) string pair denotes a texture assignment.
                if len(plug.computed_value) == 2 and isinstance(plug.computed_value[0], str) and isinstance(plug.computed_value[1], str):
                    key = '{0}|{1}'.format(plug.computed_value[0], plug.computed_value[1])
                    if key in node_cache.keys():
                        node = node_cache[key]
                    else:
                        try:
                            path = plug.computed_value[0]
                            if not path == '':
                                node = material.node_tree.nodes.new('ShaderNodeTexImage')
                                path = plug.computed_value[0]
                                if '<UDIM>' in path:
                                    # Expand the <UDIM> token into a tile-number pattern and load every tile.
                                    pattern = path.replace('\\', '/')
                                    pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]')
                                    directory = pattern[:pattern.rfind('/') + 1]
                                    pattern = pattern.replace(directory, '')
                                    image_set = False
                                    for item in os.listdir(directory):
                                        if re.match(pattern, item):
                                            tile_path = '{}{}'.format(directory, item)
                                            if not os.path.isfile(tile_path):
                                                continue
                                            if not image_set:
                                                # First tile establishes the image datablock.
                                                node.image = bpy.data.images.load(tile_path)
                                                node.image.source = 'TILED'
                                                image_set = True
                                                continue
                                            tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item)
                                            node.image.tiles.new(int(tile_indexes[-1]))
                                else:
                                    node.image = bpy.data.images.load(path)
                                node.image.colorspace_settings.name = plug.computed_value[1]
                            else:
                                continue
                        except Exception as error:
                            print('Warning: UMM failed to properly setup a ShaderNodeTexImage. Details: {0}\n{1}'.format(error, traceback.format_exc()))
                            continue
                        node_cache[key] = node
                        node.location = node_location
                        node_location[1] -= 300
                    bsdf_input = [o for o in bsdf_node.inputs if o.name == plug.name][0]
                    if plug.name == 'Metallic':
                        # Metallic reads the blue channel through a Separate RGB node.
                        separate_node = None
                        for link in material.node_tree.links:
                            if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB':
                                separate_node = link.to_node
                                break
                        if separate_node is None:
                            separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB')
                            separate_node.location = [node.location[0] + 250, node.location[1]]
                            material.node_tree.links.new(node.outputs[0], separate_node.inputs[0])
                        material.node_tree.links.new(separate_node.outputs[2], bsdf_input)
                    elif plug.name == 'Roughness':
                        # Roughness reads the green channel through a Separate RGB node.
                        separate_node = None
                        for link in material.node_tree.links:
                            if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB':
                                separate_node = link.to_node
                                break
                        if separate_node is None:
                            separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB')
                            separate_node.location = [node.location[0] + 250, node.location[1]]
                            material.node_tree.links.new(node.outputs[0], separate_node.inputs[0])
                        material.node_tree.links.new(separate_node.outputs[1], bsdf_input)
                    elif plug.name == 'Normal':
                        # Normal textures pass through a Normal Map node.
                        normal_node = None
                        for link in material.node_tree.links:
                            if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeNormalMap':
                                normal_node = link.to_node
                                break
                        if normal_node is None:
                            normal_node = material.node_tree.nodes.new('ShaderNodeNormalMap')
                            normal_node.location = [node.location[0] + 250, node.location[1]]
                            material.node_tree.links.new(node.outputs[0], normal_node.inputs[1])
                        material.node_tree.links.new(normal_node.outputs[0], bsdf_input)
                    else:
                        material.node_tree.links.new(node.outputs[0], bsdf_input)
                    continue
            # Set Value
            blender_inputs = [o for o in bsdf_node.inputs if o.name == plug.name]
            if len(blender_inputs) == 0:
                # No matching socket: try a writable RNA property of the same name.
                for property_name, property_object in bsdf_node.rna_type.properties.items():
                    if not property_name == plug.name:
                        continue
                    if property_object.is_readonly:
                        break
                    try:
                        setattr(bsdf_node, property_name, plug.computed_value)
                    except Exception as error:
                        print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, plug.computed_value, error))
            else:
                if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                    continue
                try:
                    blender_inputs[0].default_value = plug.computed_value
                except Exception as error:
                    print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(plug.name, plug.computed_value, error))
        return
    if developer_mode:
        print(f'TEMPLATE CREATION BASED ON {destination_target_instance.target.root_node.class_name}')
    # find template to use
    template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name)
    if developer_mode:
        print(f"TEMPLATE NAME {template['name']}")
    # create graph
    create_from_template(material=material, template=template)
    # set attributes
    use_albedo_map = False
    use_normal_map = False
    use_detail_normal_map = False
    use_emission_map = False
    for input_plug in destination_target_instance.inputs:
        # if developer_mode:
        #     print('input_plug', input_plug.name)
        # Locate the internal target node that owns this input socket.
        internal_node = None
        for a in destination_target_instance.target.nodes:
            for b in a.inputs:
                if input_plug.id == b.id:
                    internal_node = a
                    break
            if internal_node is not None:
                break
        if internal_node is None:
            raise NotImplementedError(f"No internal node found for {input_plug.name}")
        # Map the internal node/socket pair onto a Blender node via the template map.
        map_definition = None
        for o in template_map['maps']:
            if o['blender_node'] == internal_node.id and o['blender_socket'] == input_plug.name:
                map_definition = o
                break
        if map_definition is None:
            raise NotImplementedError(f"No map definition found for {internal_node.id} {input_plug.name}")
        for shader_node in material.node_tree.nodes:
            if not shader_node.name == map_definition['blender_node']:
                continue
            # if developer_mode:
            #     print(f'node: {shader_node.name}')
            if isinstance(shader_node, bpy.types.ShaderNodeTexImage):
                if map_definition['blender_socket'] == 'image':
                    # if developer_mode:
                    #     print(f'\tbpy.types.ShaderNodeTexImage: path: {input_plug.computed_value[0]}')
                    #     print(f'\tbpy.types.ShaderNodeTexImage: colorspace: {input_plug.computed_value[1]}')
                    path = input_plug.computed_value[0]
                    if not path == '':
                        if '<UDIM>' in path:
                            # Expand the <UDIM> token into a tile-number pattern and load every tile.
                            pattern = path.replace('\\', '/')
                            pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]')
                            directory = pattern[:pattern.rfind('/') + 1]
                            pattern = pattern.replace(directory, '')
                            image_set = False
                            for item in os.listdir(directory):
                                if re.match(pattern, item):
                                    tile_path = '{}{}'.format(directory, item)
                                    if not os.path.isfile(tile_path):
                                        continue
                                    if not image_set:
                                        shader_node.image = bpy.data.images.load(tile_path)
                                        shader_node.image.source = 'TILED'
                                        image_set = True
                                        continue
                                    tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item)
                                    shader_node.image.tiles.new(int(tile_indexes[-1]))
                        else:
                            shader_node.image = bpy.data.images.load(path)
                        # Track which template toggles must be enabled afterwards.
                        if map_definition['blender_node'] == 'Albedo Map':
                            use_albedo_map = True
                        if map_definition['blender_node'] == 'Normal Map':
                            use_normal_map = True
                        if map_definition['blender_node'] == 'Detail Normal Map':
                            use_detail_normal_map = True
                        if map_definition['blender_node'] == 'Emissive Map':
                            use_emission_map = True
                        shader_node.image.colorspace_settings.name = input_plug.computed_value[1]
                    continue
                raise NotImplementedError(
                    f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}")
            if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled):
                blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                if len(blender_inputs) == 0:
                    # No matching socket: try a writable RNA property of the same name.
                    for property_name, property_object in shader_node.rna_type.properties.items():
                        if not property_name == input_plug.name:
                            continue
                        if property_object.is_readonly:
                            break
                        try:
                            setattr(shader_node, property_name, input_plug.computed_value)
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                else:
                    if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                        continue
                    try:
                        blender_inputs[0].default_value = input_plug.computed_value
                    except Exception as error:
                        print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                continue
            if isinstance(shader_node, bpy.types.ShaderNodeGroup):
                blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                if len(blender_inputs) == 0:
                    # No matching socket: try a writable RNA property of the same name.
                    for property_name, property_object in shader_node.rna_type.properties.items():
                        if not property_name == input_plug.name:
                            continue
                        if property_object.is_readonly:
                            break
                        try:
                            setattr(shader_node, property_name, input_plug.computed_value)
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                else:
                    if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                        continue
                    try:
                        blender_inputs[0].default_value = input_plug.computed_value
                    except Exception as error:
                        print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                continue
            if isinstance(shader_node, bpy.types.ShaderNodeMapping):
                blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                value = input_plug.computed_value
                if input_plug.name == 'Rotation':
                    # The graph supplies degrees; Blender's Mapping node expects radians.
                    value[0] = math.radians(value[0])
                    value[1] = math.radians(value[1])
                    value[2] = math.radians(value[2])
                if len(blender_inputs) == 0:
                    for property_name, property_object in shader_node.rna_type.properties.items():
                        if not property_name == input_plug.name:
                            continue
                        if property_object.is_readonly:
                            break
                        try:
                            setattr(shader_node, property_name, value)
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                else:
                    if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                        continue
                    try:
                        blender_inputs[0].default_value = value
                    except Exception as error:
                        print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                continue
    # UX assist with special attributes
    for shader_node in material.node_tree.nodes:
        if shader_node.name == 'OmniPBR Compute' and isinstance(shader_node, bpy.types.ShaderNodeGroup):
            shader_node.inputs['Use Albedo Map'].default_value = 1 if use_albedo_map else 0
            shader_node.inputs['Use Normal Map'].default_value = 1 if use_normal_map else 0
            shader_node.inputs['Use Detail Normal Map'].default_value = 1 if use_detail_normal_map else 0
            shader_node.inputs['Use Emission Map'].default_value = 1 if use_emission_map else 0
            break
class DataConverter(CoreConverter, IDataConverter):
    """Converts attribute data between shader classes using conversion-graph documents."""

    def can_convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool:
        """ Resolves if worker can convert the given class and source_data to another class and target data. """
        graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
        if not graph:
            return False
        try:
            output_entity = _data_to_output_entity(graph=graph, data=source_data)
        except Exception as error:
            print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(graph.filename, error))
            return False
        return output_entity is not None

    def convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        if developer_mode:
            print('UMM DEBUG: DataConverter.convert_data_to_data()')
            print('\tclass_name="{0}"'.format(class_name))
            print('\trender_context="{0}"'.format(render_context))
            print('\tsource_data=[')
            for entry in source_data:
                if entry[1] == '':
                    print('\t\t("{0}", ""),'.format(entry[0]))
                else:
                    print('\t\t("{0}", {1}),'.format(entry[0], entry[1]))
            print('\t]')
        graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
        output_entity = _data_to_output_entity(graph=graph, data=source_data)
        # First tuple names the destination shader class; the rest are its attribute values.
        attribute_data = [(util.TARGET_CLASS_IDENTIFIER, output_entity.target.root_node.class_name)]
        for plug in output_entity.inputs:
            if not plug.input:
                continue
            if plug.is_invalid and isinstance(plug.parent, DagNode):
                # Recompute stale plugs before reading their value.
                plug.parent.compute()
            attribute_data.append((plug.name, plug.computed_value))
        return attribute_data
class OT_InstanceToDataConverter(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.instance_to_data_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Convert every material on the active object to MDL and USDPreview data."""
        print('Conversion Operator: execute')
        print('selected_node', bpy.context.active_object, type(bpy.context.active_object))

        def _convert(instance):
            # Attempt the conversion once per supported render context.
            for render_context in ['MDL', 'USDPreview']:
                if util.can_convert_instance_to_data(instance=instance, render_context=render_context):
                    util.convert_instance_to_data(instance=instance, render_context=render_context)
                else:
                    print('Information: Universal Material Map: Not able to convert instance "{0}" to data with render context "{1}"'.format(instance, render_context))

        material_slot: bpy.types.MaterialSlot  # https://docs.blender.org/api/current/bpy.types.MaterialSlot.html
        for material_slot in bpy.context.active_object.material_slots:
            material: bpy.types.Material = material_slot.material
            if not material.node_tree:
                # No shader graph: convert the material datablock itself.
                _convert(material)
                continue
            # Graph-based material: convert each shader feeding a Material Output node.
            for node in material.node_tree.nodes:
                if not isinstance(node, bpy.types.ShaderNodeOutputMaterial):
                    continue
                for input in node.inputs:
                    if not input.type == 'SHADER':
                        continue
                    if not input.is_linked:
                        continue
                    for link in input.links:
                        if not isinstance(link, bpy.types.NodeLink):
                            continue
                        if not link.is_valid:
                            continue
                        _convert(link.from_node)
        return {'FINISHED'}
class OT_DataToInstanceConverter(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.data_to_instance_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Demo: convert a hard-coded OmniPBR payload and apply it to a new instance."""
        render_context = 'Blender'
        source_class = 'OmniPBR.mdl|OmniPBR'
        sample_data = [
            ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)),
            ('diffuse_texture', ''),
            ('reflection_roughness_constant', 0.4000000059604645),
            ('reflectionroughness_texture', ''),
            ('metallic_constant', 0.0),
            ('metallic_texture', ''),
            ('specular_level', 0.5),
            ('enable_emission', True),
            ('emissive_color', (0.0, 0.0, 0.0)),
            ('emissive_color_texture', ''),
            ('emissive_intensity', 1.0),
            ('normalmap_texture', ''),
            ('enable_opacity', True),
            ('opacity_constant', 1.0),
        ]
        if not util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data):
            return {'FINISHED'}
        converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data)
        # First tuple names the destination shader class.
        destination_class = converted_data[0][1]
        if not util.can_create_instance(class_name=destination_class):
            print('Cannot create instance from "{0}".'.format(source_class))
            return {'FINISHED'}
        instance = util.create_instance(class_name=destination_class)
        print('instance "{0}".'.format(instance))
        for property_name, property_value in converted_data:
            if util.can_set_instance_attribute(instance=instance, name=property_name):
                util.set_instance_attribute(instance=instance, name=property_name, value=property_value)
        return {'FINISHED'}
class OT_DataToDataConverter(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.data_to_data_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Demo: run a hard-coded OmniPBR payload through the data-to-data converter."""
        render_context = 'Blender'
        source_class = 'OmniPBR.mdl|OmniPBR'
        sample_data = [
            ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)),
            ('diffuse_texture', ''),
            ('reflection_roughness_constant', 0.4000000059604645),
            ('reflectionroughness_texture', ''),
            ('metallic_constant', 0.0),
            ('metallic_texture', ''),
            ('specular_level', 0.5),
            ('enable_emission', True),
            ('emissive_color', (0.0, 0.0, 0.0)),
            ('emissive_color_texture', ''),
            ('emissive_intensity', 1.0),
            ('normalmap_texture', ''),
            ('enable_opacity', True),
            ('opacity_constant', 1.0),
        ]
        if not util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data):
            print('UMM Failed to convert data. util.can_convert_data_to_data() returned False')
            return {'FINISHED'}
        converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data)
        print('converted_data:', converted_data)
        return {'FINISHED'}
class OT_ApplyDataToInstance(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.apply_data_to_instance'
    bl_label = 'Universal Material Map Apply Data To Instance Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Demo: apply a hard-coded OmniPBR payload to the active object's active material."""
        # Bail out quietly when there is no active material to write to.
        if not (bpy.context and bpy.context.active_object and bpy.context.active_object.active_material):
            return {'FINISHED'}
        instance = bpy.context.active_object.active_material
        render_context = 'Blender'
        source_class = 'OmniPBR.mdl|OmniPBR'
        sample_data = [
            ('albedo_add', 0.02),  # Adds a constant value to the diffuse color
            ('albedo_desaturation', 0.19999999),  # Desaturates the diffuse color
            ('ao_texture', ('', 'raw')),
            ('ao_to_diffuse', 1),  # Controls the amount of ambient occlusion multiplied into the diffuse color channel
            ('bump_factor', 10),  # Strength of normal map
            ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)),
            ('diffuse_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_albedo.png', 'sRGB')),
            ('diffuse_tint', (0.96202534, 0.8118357, 0.8118357)),  # When enabled, this color value is multiplied over the final albedo color
            ('enable_emission', 0),
            ('enable_ORM_texture', 1),
            ('metallic_constant', 1),
            ('metallic_texture', ('', 'raw')),
            ('metallic_texture_influence', 1),
            ('normalmap_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_normal.png', 'raw')),
            ('ORM_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_orm.png', 'raw')),
            ('reflection_roughness_constant', 1),  # Higher roughness values lead to more blurry reflections
            ('reflection_roughness_texture_influence', 1),  # Blends between the constant value and the lookup of the roughness texture
            ('reflectionroughness_texture', ('', 'raw')),
            ('texture_rotate', 45),
            ('texture_scale', (2, 2)),
            ('texture_translate', (0.1, 0.9)),
        ]
        if not util.can_apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance):
            print('UMM Failed to convert data. util.can_convert_data_to_data() returned False')
            return {'FINISHED'}
        util.apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance)
        return {'FINISHED'}
class OT_CreateTemplateOmniPBR(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.create_template_omnipbr'
    bl_label = 'Convert to OmniPBR Graph'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Rebuild the active material as the OmniPBR node-graph template."""
        active_object = bpy.context.active_object if bpy.context else None
        material = active_object.active_material if active_object else None
        if material:
            create_template(source_class='OmniPBR', material=material)
        return {'FINISHED'}
class OT_CreateTemplateOmniGlass(bpy.types.Operator):
    bl_idname = 'universalmaterialmap.create_template_omniglass'
    bl_label = 'Convert to OmniGlass Graph'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        """Rebuild the active material as the OmniGlass node-graph template."""
        active_object = bpy.context.active_object if bpy.context else None
        material = active_object.active_material if active_object else None
        if material:
            create_template(source_class='OmniGlass', material=material)
        return {'FINISHED'}
class OT_DescribeShaderGraph(bpy.types.Operator):
    """Developer helper: print the active material's node graph as JSON.

    The printed structure uses the same shape as the template JSON consumed
    by the template-instantiation code, so this operator supports authoring
    new shader-graph templates.
    """
    bl_idname = 'universalmaterialmap.describe_shader_graph'
    bl_label = 'Universal Material Map Describe Shader Graph Operator'
    bl_description = 'Universal Material Map'

    @staticmethod
    def describe_node(node) -> dict:
        """Return a JSON-serializable dict describing one shader node.

        Only the node classes that appear in the bundled templates are
        handled; any other class raises NotImplementedError so unsupported
        nodes are noticed immediately during template authoring.
        """
        # Attributes captured for every node type.
        node_definition = dict()
        node_definition['name'] = node.name
        node_definition['label'] = node.label
        node_definition['location'] = [node.location[0], node.location[1]]
        node_definition['width'] = node.width
        node_definition['height'] = node.height
        node_definition['parent'] = node.parent.name if node.parent else None
        node_definition['class'] = type(node).__name__
        node_definition['inputs'] = []
        node_definition['outputs'] = []
        node_definition['nodes'] = []
        node_definition['links'] = []
        node_definition['properties'] = []
        node_definition['texts'] = []
        if node_definition['class'] == 'NodeFrame':
            # Frames carry display properties plus an optional text block.
            node_definition['properties'].append(
                {
                    'name': 'use_custom_color',
                    'value': node.use_custom_color,
                }
            )
            node_definition['properties'].append(
                {
                    'name': 'color',
                    'value': [node.color[0], node.color[1], node.color[2]],
                }
            )
            node_definition['properties'].append(
                {
                    'name': 'shrink',
                    'value': node.shrink,
                }
            )
            if node.text is not None:
                text_definition = dict()
                text_definition['name'] = node.text.name
                text_definition['contents'] = node.text.as_string()
                node_definition['texts'].append(text_definition)
        elif node_definition['class'] == 'ShaderNodeRGB':
            # Only color outputs (with their default RGBA values) are expected.
            for index, output in enumerate(node.outputs):
                definition = dict()
                definition['index'] = index
                definition['name'] = output.name
                definition['class'] = type(output).__name__
                if definition['class'] == 'NodeSocketColor':
                    default_value = output.default_value
                    definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]]
                else:
                    raise NotImplementedError()
                node_definition['outputs'].append(definition)
        elif node_definition['class'] == 'ShaderNodeMixRGB':
            node_definition['properties'].append(
                {
                    'name': 'blend_type',
                    'value': node.blend_type,
                }
            )
            node_definition['properties'].append(
                {
                    'name': 'use_clamp',
                    'value': node.use_clamp,
                }
            )
            for index, input in enumerate(node.inputs):
                definition = dict()
                definition['index'] = index
                definition['name'] = input.name
                definition['class'] = type(input).__name__
                if definition['class'] == 'NodeSocketFloatFactor':
                    definition['default_value'] = node.inputs[input.name].default_value
                elif definition['class'] == 'NodeSocketColor':
                    default_value = node.inputs[input.name].default_value
                    definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]]
                else:
                    raise NotImplementedError()
                node_definition['inputs'].append(definition)
        elif node_definition['class'] == 'ShaderNodeGroup':
            # Groups are described recursively: sockets, child nodes, links.
            for index, input in enumerate(node.inputs):
                definition = dict()
                definition['index'] = index
                definition['name'] = input.name
                definition['class'] = type(input).__name__
                if definition['class'] == 'NodeSocketFloatFactor':
                    definition['min_value'] = node.node_tree.inputs[input.name].min_value
                    definition['max_value'] = node.node_tree.inputs[input.name].max_value
                    definition['default_value'] = node.inputs[input.name].default_value
                elif definition['class'] == 'NodeSocketIntFactor':
                    definition['min_value'] = node.node_tree.inputs[input.name].min_value
                    definition['max_value'] = node.node_tree.inputs[input.name].max_value
                    definition['default_value'] = node.inputs[input.name].default_value
                elif definition['class'] == 'NodeSocketColor':
                    default_value = node.inputs[input.name].default_value
                    definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]]
                else:
                    raise NotImplementedError()
                node_definition['inputs'].append(definition)
            for index, output in enumerate(node.outputs):
                definition = dict()
                definition['index'] = index
                definition['name'] = output.name
                definition['class'] = type(output).__name__
                node_definition['outputs'].append(definition)
            for child in node.node_tree.nodes:
                node_definition['nodes'].append(OT_DescribeShaderGraph.describe_node(child))
            for link in node.node_tree.links:
                # Skip anything that is not a live, valid node link.
                if not isinstance(link, bpy.types.NodeLink):
                    continue
                if not link.is_valid:
                    continue
                link_definition = dict()
                link_definition['from_node'] = link.from_node.name
                link_definition['from_socket'] = link.from_socket.name
                link_definition['to_node'] = link.to_node.name
                link_definition['to_socket'] = link.to_socket.name
                node_definition['links'].append(link_definition)
        elif node_definition['class'] == 'ShaderNodeUVMap':
            pass
        elif node_definition['class'] == 'ShaderNodeTexImage':
            pass
        elif node_definition['class'] == 'ShaderNodeOutputMaterial':
            pass
        elif node_definition['class'] == 'ShaderNodeBsdfPrincipled':
            pass
        elif node_definition['class'] == 'ShaderNodeMapping':
            pass
        elif node_definition['class'] == 'ShaderNodeNormalMap':
            pass
        elif node_definition['class'] == 'ShaderNodeHueSaturation':
            pass
        elif node_definition['class'] == 'ShaderNodeSeparateRGB':
            pass
        elif node_definition['class'] == 'NodeGroupInput':
            pass
        elif node_definition['class'] == 'NodeGroupOutput':
            pass
        elif node_definition['class'] == 'ShaderNodeMath':
            node_definition['properties'].append(
                {
                    'name': 'operation',
                    'value': node.operation,
                }
            )
            node_definition['properties'].append(
                {
                    'name': 'use_clamp',
                    'value': node.use_clamp,
                }
            )
        elif node_definition['class'] == 'ShaderNodeVectorMath':
            node_definition['properties'].append(
                {
                    'name': 'operation',
                    'value': node.operation,
                }
            )
        else:
            # Unsupported node class: fail loudly rather than emit a partial
            # template.
            raise NotImplementedError(node_definition['class'])
        return node_definition

    def execute(self, context):
        """Serialize the active material's node tree and print it as JSON."""
        material = bpy.context.active_object.active_material
        output = dict()
        # NOTE(review): the template name is hard-coded here; presumably it is
        # edited by hand before the printed JSON is saved as a template file —
        # confirm with the template-authoring workflow.
        output['name'] = 'Principled Omni Glass'
        output['nodes'] = []
        output['links'] = []
        for node in material.node_tree.nodes:
            output['nodes'].append(OT_DescribeShaderGraph.describe_node(node))
        for link in material.node_tree.links:
            if not isinstance(link, bpy.types.NodeLink):
                continue
            if not link.is_valid:
                continue
            link_definition = dict()
            link_definition['from_node'] = link.from_node.name
            link_definition['from_socket'] = link.from_socket.name
            link_definition['to_node'] = link.to_node.name
            link_definition['to_socket'] = link.to_socket.name
            output['links'].append(link_definition)
        print(json.dumps(output, indent=4))
        return {'FINISHED'}
def initialize():
    """Register the UMM converter classes exactly once per session."""
    module = sys.modules[__name__]
    # A module-level flag guards against double registration on re-import.
    if getattr(module, '__initialized'):
        return
    setattr(module, '__initialized', True)
    # Registration order preserved: data converter first, then object.
    for converter in (DataConverter(), ObjectConverter()):
        util.register(converter=converter)
    print('Universal Material Map: Registered Converter classes.')


initialize()
| 67,817 | Python | 49.724009 | 263 | 0.552177 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/material.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import traceback
import bpy
from ..core.converter import util
def apply_data_to_instance(instance_name: str, source_class: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> dict:
    """Apply converted material data onto the Blender material named *instance_name*.

    Returns the dict produced by util.apply_data_to_instance on success, or a
    notification dict ('incomplete_process' / 'unexpected_error') otherwise.
    Never raises: all failures are reported through the returned dict.
    """
    ## bugfix: Extract class correctly from exporters that name the class like a Python function call.
    real_source_class = source_class.partition("(")[0]
    try:
        for material in bpy.data.materials:
            # Only genuine materials with the requested name are considered.
            if not isinstance(material, bpy.types.Material) or material.name != instance_name:
                continue
            if util.can_apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material):
                return util.apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material)
        print(f'Omniverse UMM: Unable to apply data at import for material "{instance_name}". This is not an error - just means that conversion data does not support the material.')
        return {
            'umm_notification': 'incomplete_process',
            'message': 'Not able to convert type "{0}" for render context "{1}" because there is no Conversion Graph for that scenario. No changes were applied to "{2}".'.format(real_source_class, render_context, instance_name),
        }
    except Exception as error:
        print('Warning: Universal Material Map: function "apply_data_to_instance": Unexpected error:')
        print('\targument "instance_name" = "{0}"'.format(instance_name))
        print('\targument "source_class" = "{0}"'.format(real_source_class))
        print('\targument "render_context" = "{0}"'.format(render_context))
        print('\targument "source_data" = "{0}"'.format(source_data))
        print('\terror: {0}'.format(error))
        print('\tcallstack: {0}'.format(traceback.format_exc()))
        return {
            'umm_notification': 'unexpected_error',
            'message': 'Not able to convert type "{0}" for render context "{1}" because there was an unexpected error. Some changes may have been applied to "{2}". Details: {3}'.format(real_source_class, render_context, instance_name, error),
        }
def convert_instance_to_data(instance_name: str, render_context: str) -> typing.Union[typing.List[typing.Tuple[str, typing.Any]], dict]:
    """Convert the Blender material named *instance_name* to UMM data pairs.

    Returns the attribute/value pair list on success; on failure returns a
    notification dict with keys 'umm_notification' and 'message'
    ('incomplete_process' when no conversion graph applies,
    'unexpected_error' when an exception occurred). Never raises.

    Fix: an unreachable duplicate of the 'incomplete_process' result used to
    trail the try/except (both paths already return); it has been removed.
    The return annotation now reflects the dict failure results as well.
    """
    try:
        for material in bpy.data.materials:
            if not isinstance(material, bpy.types.Material):
                continue
            if material.name == instance_name:
                if util.can_convert_instance_to_data(instance=material, render_context=render_context):
                    return util.convert_instance_to_data(instance=material, render_context=render_context)
        # No matching material was convertible for this render context.
        result = dict()
        result['umm_notification'] = 'incomplete_process'
        result['message'] = 'Not able to convert material "{0}" for render context "{1}" because there is no Conversion Graph for that scenario.'.format(instance_name, render_context)
        return result
    except Exception as error:
        print('Warning: Universal Material Map: function "convert_instance_to_data": Unexpected error:')
        print('\targument "instance_name" = "{0}"'.format(instance_name))
        print('\targument "render_context" = "{0}"'.format(render_context))
        print('\terror: {0}'.format(error))
        print('\tcallstack: {0}'.format(traceback.format_exc()))
        result = dict()
        result['umm_notification'] = 'unexpected_error'
        result['message'] = 'Not able to convert material "{0}" for render context "{1}" there was an unexpected error. Details: {2}'.format(instance_name, render_context, error)
        return result
| 5,004 | Python | 57.197674 | 246 | 0.670464 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import os
import re
import sys
import json
import bpy
from ..core.data import Library
from ..core.feature import POLLING
from ..core.service import store
from ..core.service import delegate
from ..core.util import get_extension_from_image_file_format
# Stable identifier for the Blender UMM library registered with the store.
LIBRARY_ID = '195c69e1-7765-4a16-bb3a-ecaa222876d9'

# One-shot guard consumed by initialize() below.
__initialized = False

# When True, extra developer entries appear in the UMM menu (see menu module).
developer_mode: bool = False

# (property name, UMM value-type tag) pairs read directly off
# bpy.types.Material instances.
# NOTE(review): 'specular_color' is tagged 'STRING' while other colors use
# 'RGBA' — looks inconsistent; confirm against the consumer of this table.
CORE_MATERIAL_PROPERTIES = [
    ('diffuse_color', 'RGBA'),
    ('metallic', 'VALUE'),
    ('specular_color', 'STRING'),
    ('roughness', 'VALUE'),
    ('use_backface_culling', 'BOOLEAN'),
    ('blend_method', 'STRING'),
    ('shadow_method', 'STRING'),
    ('alpha_threshold', 'VALUE'),
    ('use_screen_refraction', 'BOOLEAN'),
    ('refraction_depth', 'VALUE'),
    ('use_sss_translucency', 'BOOLEAN'),
    ('pass_index', 'INT'),
]
def show_message(message: str = '', title: str = 'Message Box', icon: str = 'INFO'):
    """Show *message* in a Blender popup; fall back to stdout when that fails.

    Never raises for ordinary failures: if the popup cannot be displayed
    (e.g. headless Blender, no window manager) the title and message are
    printed instead.
    """
    try:
        def draw(self, context):
            self.layout.label(text=message)
        bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit still
        # propagate while the best-effort stdout fallback is preserved.
        print('{0}\n{1}'.format(title, message))
def initialize():
    """Create and register the Blender UMM library on first call (idempotent)."""
    module = sys.modules[__name__]
    if getattr(module, '__initialized'):
        return
    setattr(module, '__initialized', True)

    # Library data lives under <home>/Documents/Omniverse/Blender/UMMLibrary.
    home = os.path.expanduser('~').replace('\\', '/')
    if not home.endswith('/Documents'):
        home = f'{home}/Documents'
    directory = f'{home}/Omniverse/Blender/UMMLibrary'

    store.register_library(
        library=Library.Create(
            library_id=LIBRARY_ID,
            name='Blender',
            manifest=delegate.FilesystemManifest(root_directory=f'{directory}'),
            conversion_graph=delegate.Filesystem(root_directory=f'{directory}/ConversionGraph'),
            target=delegate.Filesystem(root_directory=f'{directory}/Target'),
        )
    )

    # Imported lazily here rather than at module top — presumably to avoid an
    # import cycle at load time; confirm before moving these to the header.
    from ..blender import converter
    converter.initialize()
    from ..blender import generator
    generator.initialize()

    if POLLING:
        # TODO: On application exit > un_initialize()
        pass
def un_initialize():
    """Shut the store down; only meaningful when the POLLING feature is on."""
    if not POLLING:
        return
    store.on_shutdown()
def get_library():
    """Return the registered Blender UMM library, initializing on demand.

    :return: omni.universalmaterialmap.core.data.Library
    """
    initialize()
    library = store.get_library(library_id=LIBRARY_ID)
    return library
def __get_value_impl(socket: bpy.types.NodeSocketStandard, depth=0, max_depth=100) -> typing.Any:
    """Follow *socket*'s links upstream and resolve a concrete value.

    For image-texture nodes the result is ``[filepath, colorspace_name]``
    (with ``<UDIM>`` substituted for tiled images and special handling for
    packed files); normal-map nodes prefer their 'Color' input; otherwise
    every input of the upstream node is searched recursively. Returns None
    when nothing resolvable is found or *max_depth* is exceeded.

    Fix: the ``socket.is_linked`` debug print was missing its ``{0}``
    placeholder, so the linked state was never actually printed.
    """
    debug = False
    if debug:
        print('__get_value_impl: depth={0}'.format(depth))
    if depth > max_depth:
        if debug:
            print('\t reached max_depth ({0}). terminating recursion'.format(max_depth))
        return None
    if debug:
        print('\tsocket.is_linked: {0}'.format(socket.is_linked))
    if socket.is_linked:
        for link in socket.links:
            if not isinstance(link, bpy.types.NodeLink):
                if debug:
                    print('\t\tlink is not bpy.types.NodeLink: {0}'.format(type(link)))
                continue
            if not link.is_valid:
                if debug:
                    print('\t\tlink is not valid')
                continue
            instance = link.from_node
            if debug:
                print('\t\tlink.from_node: {0}'.format(type(instance)))
            if isinstance(instance, bpy.types.ShaderNodeTexImage):
                print(f'UMM: image.filepath: "{instance.image.filepath}"')
                print(f'UMM: image.source: "{instance.image.source}"')
                print(f'UMM: image.file_format: "{instance.image.file_format}"')
                if debug:
                    print('\t\tinstance.image: {0}'.format(instance.image))
                    if instance.image:
                        print('\t\tinstance.image.source: {0}'.format(instance.image.source))
                if instance.image and (instance.image.source == 'FILE' or instance.image.source == 'TILED'):
                    value = instance.image.filepath
                    if (instance.image.source == 'TILED'):
                        # Find all numbers in the path.
                        numbers = re.findall('[0-9]+', value)
                        if (len(numbers) > 0):
                            # Get the string representation of the last number.
                            num_str = str(numbers[-1])
                            # Replace the number substring with '<UDIM>'.
                            split_items = value.rsplit(num_str, 1)
                            if (len(split_items)==2):
                                value = split_items[0] + '<UDIM>' + split_items[1]
                    if debug:
                        print('\t\tinstance.image.filepath: {0}'.format(value))
                    try:
                        if value and instance.image.packed_file:
                            # The image is packed, so ignore the filepath, which is likely
                            # invalid, and return just the base name.
                            value = bpy.path.basename(value)
                            # Make sure the file has a valid extension for the
                            # expected format. get_extension_from_image_file_format
                            # mimics the logic of the Blender USD IO C++ code.
                            file_format = instance.image.file_format
                            file_format = get_extension_from_image_file_format(file_format, base_name=value)
                            value = bpy.path.ensure_ext(value, '.' + file_format)
                            print(f'UMM: packed image data: "{[value, instance.image.colorspace_settings.name]}"')
                            return [value, instance.image.colorspace_settings.name]
                        if value is None or value == '':
                            # No filepath at all: synthesize one from the image
                            # datablock name and its format's extension.
                            file_format = instance.image.file_format
                            file_format = get_extension_from_image_file_format(file_format)
                            value = f'{instance.image.name}.{file_format}'
                            if debug:
                                print(f'\t\tvalue: {value}')
                            print(f'UMM: image data: "{[value, instance.image.colorspace_settings.name]}"')
                            return [value, instance.image.colorspace_settings.name]
                        return [os.path.abspath(bpy.path.abspath(value)), instance.image.colorspace_settings.name]
                    except Exception as error:
                        print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". Detail: {1}'.format(instance.image.filepath, error))
                        return None
            if isinstance(instance, bpy.types.ShaderNodeNormalMap):
                # Prefer the normal map's 'Color' input before the generic scan.
                for o in instance.inputs:
                    if o.name == 'Color':
                        value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth)
                        if value:
                            return value
            # Generic fallback: recurse into every input of the upstream node.
            for o in instance.inputs:
                value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth)
                if debug:
                    print('\t\tre-entrant: input="{0}", value="{1}"'.format(o.name, value))
                if value:
                    return value
    return None
def get_value(socket: bpy.types.NodeSocketStandard) -> typing.Any:
    """Resolve a socket's effective value.

    Prefers a value resolved from the socket's upstream links; any falsy
    resolution (None, '', empty list) falls back to the socket's own
    default_value.
    """
    debug = False
    resolved = __get_value_impl(socket=socket)
    if debug:
        print('get_value', resolved, socket.default_value)
    return resolved if resolved else socket.default_value
def _create_node_from_template(node_tree: bpy.types.NodeTree, node_definition: dict, parent: object = None) -> object:
    """Create one node in *node_tree* from a template dict and return it.

    *node_definition* follows the template JSON format (see the describe
    operator): common attributes plus per-class extras. Frames restore their
    text blocks; groups are rebuilt recursively (sockets, children, links);
    MixRGB/RGB nodes restore socket default values. *parent* is an already
    created node (typically a NodeFrame) to attach this node to.
    """
    node = node_tree.nodes.new(node_definition['class'])
    if parent:
        node.parent = parent
    node.name = node_definition['name']
    node.label = node_definition['label']
    node.location = node_definition['location']
    if node_definition['class'] == 'NodeFrame':
        node.width = node_definition['width']
        node.height = node_definition['height']
    # Generic class-specific properties captured by the describe operator.
    for o in node_definition['properties']:
        setattr(node, o['name'], o['value'])
    if node_definition['class'] == 'NodeFrame':
        # Reuse an existing text datablock of the same name when present;
        # otherwise create it and fill in the stored contents.
        for text_definition in node_definition['texts']:
            existing = None
            for o in bpy.data.texts:
                if o.name == text_definition['name']:
                    existing = o
                    break
            if existing is None:
                existing = bpy.data.texts.new(text_definition['name'])
                existing.write(text_definition['contents'])
            node.text = existing
            node.location = node_definition['location']
    elif node_definition['class'] == 'ShaderNodeGroup':
        # Rebuild the group's internal tree: children first, then sockets,
        # then internal links.
        node.node_tree = bpy.data.node_groups.new('node tree', 'ShaderNodeTree')
        child_cache = dict()
        for child_definition in node_definition['nodes']:
            child_cache[child_definition['name']] = _create_node_from_template(node_tree=node.node_tree, node_definition=child_definition)
        for input_definition in node_definition['inputs']:
            node.node_tree.inputs.new(input_definition['class'], input_definition['name'])
            if input_definition['class'] == 'NodeSocketFloatFactor':
                node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value']
                node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value']
                node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value']
                node.inputs[input_definition['name']].default_value = input_definition['default_value']
            if input_definition['class'] == 'NodeSocketIntFactor':
                node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value']
                node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value']
                node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value']
                node.inputs[input_definition['name']].default_value = input_definition['default_value']
            if input_definition['class'] == 'NodeSocketColor':
                node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value']
                node.inputs[input_definition['name']].default_value = input_definition['default_value']
        for output_definition in node_definition['outputs']:
            node.node_tree.outputs.new(output_definition['class'], output_definition['name'])
        for link_definition in node_definition['links']:
            from_node = child_cache[link_definition['from_node']]
            from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0]
            to_node = child_cache[link_definition['to_node']]
            to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0]
            node.node_tree.links.new(from_socket, to_socket)
        node.width = node_definition['width']
        node.height = node_definition['height']
        node.location = node_definition['location']
    elif node_definition['class'] == 'ShaderNodeMixRGB':
        # Restore input socket defaults (factor and colors).
        for input_definition in node_definition['inputs']:
            if input_definition['class'] == 'NodeSocketFloatFactor':
                node.inputs[input_definition['name']].default_value = input_definition['default_value']
            if input_definition['class'] == 'NodeSocketColor':
                node.inputs[input_definition['name']].default_value = input_definition['default_value']
    elif node_definition['class'] == 'ShaderNodeRGB':
        # Restore output color defaults.
        for output_definition in node_definition['outputs']:
            if output_definition['class'] == 'NodeSocketColor':
                node.outputs[output_definition['name']].default_value = output_definition['default_value']
    return node
def create_template(source_class: str, material: bpy.types.Material) -> None:
    """Replace *material*'s node graph with the bundled *source_class* template.

    Looks for ``template/<source_class>.json`` next to this module and does
    nothing when no such file exists. All existing nodes are removed before
    the template graph is instantiated.

    Fix: the node/link instantiation half of this function duplicated
    create_from_template() verbatim; it now delegates instead.
    """
    template_filepath = '{}'.format(__file__).replace('\\', '/')
    template_filepath = template_filepath[:template_filepath.rfind('/')]
    template_filepath = '{}/template/{}.json'.format(template_filepath, source_class.lower())
    if not os.path.exists(template_filepath):
        return
    with open(template_filepath, 'r') as template_file:
        template = json.load(template_file)
    # Make sure we're using nodes.
    material.use_nodes = True
    # Remove existing nodes - we're starting from scratch.
    to_delete = [o for o in material.node_tree.nodes]
    while len(to_delete):
        material.node_tree.nodes.remove(to_delete.pop())
    # Instantiate nodes and links from the template (shared logic).
    create_from_template(material=material, template=template)
def create_from_template(material: bpy.types.Material, template: dict) -> None:
    """Instantiate the nodes and links described by *template* into *material*.

    Parentless nodes (e.g. frames) are created first so child nodes can be
    attached to them in a second pass; finally all socket links are wired.
    """
    material.use_nodes = True
    created = dict()
    # Pass 1: nodes without a parent.
    for definition in template['nodes']:
        if definition['parent'] is not None:
            continue
        created[definition['name']] = _create_node_from_template(
            node_tree=material.node_tree, node_definition=definition)
    # Pass 2: nodes parented to a node created in pass 1.
    for definition in template['nodes']:
        if definition['parent'] is None:
            continue
        created[definition['name']] = _create_node_from_template(
            node_tree=material.node_tree,
            node_definition=definition,
            parent=created[definition['parent']],
        )
    # Pass 3: wire the sockets by name.
    for link in template['links']:
        source = [o for o in created[link['from_node']].outputs if o.name == link['from_socket']][0]
        target = [o for o in created[link['to_node']].inputs if o.name == link['to_socket']][0]
        material.node_tree.links.new(source, target)
def get_parent_material(shader_node: object) -> bpy.types.Material:
    """Return the material that owns *shader_node*.

    When *shader_node* is itself a material it is returned directly; when it
    is a node, the material whose node tree contains it is returned; None
    when nothing references it.
    """
    for material in bpy.data.materials:
        if shader_node == material:
            return material
        searchable = material.use_nodes and material.node_tree and material.node_tree.nodes
        if not searchable:
            continue
        if any(shader_node == node for node in material.node_tree.nodes):
            return material
    return None
def get_template_data_by_shader_node(shader_node: object) -> typing.Tuple[typing.Dict, typing.Dict, str, bpy.types.Material]:
    """Find the bundled template whose node set matches *shader_node*'s material.

    A template matches when its set of node names equals the material's set
    of node names (both directions of containment, as before). Returns
    (template, template_map, template_shader_name, material), or four Nones
    when no template matches.
    """
    material: bpy.types.Material = get_parent_material(shader_node=shader_node)
    if not (material and material.use_nodes and material.node_tree and material.node_tree.nodes):
        return None, None, None, None
    template_directory = '{}'.format(__file__).replace('\\', '/')
    template_directory = template_directory[:template_directory.rfind('/')]
    template_directory = f'{template_directory}/template'
    material_node_names = {node.name for node in material.node_tree.nodes}
    for item in os.listdir(template_directory):
        lowered = item.lower()
        # Skip the *_map.json companion files and anything that isn't JSON.
        if lowered.endswith('_map.json') or not lowered.endswith('.json'):
            continue
        with open(f'{template_directory}/{item}', 'r') as template_file:
            template = json.load(template_file)
        template_node_names = {definition['name'] for definition in template['nodes']}
        # Mutual containment of node names == set equality.
        if template_node_names != material_node_names:
            continue
        template_shader_name = template['name']
        map_filename = '{}_map.json'.format(item[:item.rfind('.')])
        with open(f'{template_directory}/{map_filename}', 'r') as template_map_file:
            template_map = json.load(template_map_file)
        return template, template_map, template_shader_name, material
    return None, None, None, None
def get_template_data_by_class_name(class_name: str) -> typing.Tuple[typing.Dict, typing.Dict]:
    """Load the bundled template (and its map) whose 'name' equals *class_name*.

    Returns (template, template_map), or (None, None) when no bundled
    template declares that name.
    """
    base = '{}'.format(__file__).replace('\\', '/')
    base = base[:base.rfind('/')]
    template_directory = f'{base}/template'
    for item in os.listdir(template_directory):
        lowered = item.lower()
        # Skip the *_map.json companion files and anything that isn't JSON.
        if lowered.endswith('_map.json') or not lowered.endswith('.json'):
            continue
        with open(f'{template_directory}/{item}', 'r') as template_file:
            template = json.load(template_file)
        if template['name'] != class_name:
            continue
        map_filename = '{}_map.json'.format(item[:item.rfind('.')])
        with open(f'{template_directory}/{map_filename}', 'r') as template_map_file:
            template_map = json.load(template_map_file)
        return template, template_map
    return None, None
| 19,919 | Python | 43.663677 | 172 | 0.599377 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/menu.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from . import developer_mode
class UniversalMaterialMapMenu(bpy.types.Menu):
    """Omniverse menu exposing the UMM template and developer operators."""

    bl_label = "Omniverse"
    bl_idname = "OBJECT_MT_umm_node_menu"

    def draw(self, context):
        layout = self.layout
        layout.operator('universalmaterialmap.create_template_omnipbr', text='Replace with OmniPBR graph template')
        layout.operator('universalmaterialmap.create_template_omniglass', text='Replace with OmniGlass graph template')
        if not developer_mode:
            return
        # Developer-only entries, gated by the module-level flag.
        dev_entries = (
            ('universalmaterialmap.generator', 'DEV: Generate Targets'),
            ('universalmaterialmap.instance_to_data_converter', 'DEV: Convert Instance to Data'),
            ('universalmaterialmap.data_to_instance_converter', 'DEV: Convert Data to Instance'),
            ('universalmaterialmap.data_to_data_converter', 'DEV: Convert Data to Data'),
            ('universalmaterialmap.apply_data_to_instance', 'DEV: Apply Data to Instance'),
            ('universalmaterialmap.describe_shader_graph', 'DEV: Describe Shader Graph'),
        )
        for idname, text in dev_entries:
            layout.operator(idname, text=text)
NVIDIA-Omniverse/kit-app-template/repo.toml | ########################################################################################################################
# Repo tool base settings
########################################################################################################################
[repo]
# Use the Kit Template repo configuration as a base. Only override things specific to the repo.
import_configs = [
"${root}/_repo/deps/repo_kit_tools/kit-template/repo.toml",
"${root}/_repo/deps/repo_kit_tools/kit-template/repo-external-app.toml",
]
# Repository Name
name = "kit-app-template"
########################################################################################################################
# Extensions precacher
########################################################################################################################
[repo_precache_exts]
# Apps to run and precache
apps = [
"${root}/source/apps/omni.usd_explorer.kit",
"${root}/source/apps/my_name.my_app.kit",
]
registries = [
{ name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/shared" },
{ name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" },
]
| 1,265 | TOML | 37.363635 | 136 | 0.422925 |
NVIDIA-Omniverse/kit-app-template/README.md | # Omniverse Kit App Template
[Omniverse Kit App Template](https://github.com/NVIDIA-Omniverse/kit-app-template) - is the place to start learning about developing Omniverse Apps.
This project contains everything necessary to develop and package an Omniverse App.
## Links
* Recommended: [Tutorial](https://docs.omniverse.nvidia.com/kit/docs/kit-app-template) for
getting started with application development.
* [Developer Guide](https://docs.omniverse.nvidia.com/dev-guide/latest/index.html).
## Build
1. Clone [this repo](https://github.com/NVIDIA-Omniverse/kit-app-template) to your local machine.
2. Open a command prompt and navigate to the root of your cloned repo.
3. Run `build.bat` to bootstrap your dev environment and build an example app.
4. Run `_build\windows-x86_64\release\my_name.my_app.bat` (or other apps) to open an example kit application.
You should have now launched your simple kit-based application!
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
| 1,048 | Markdown | 44.608694 | 148 | 0.781489 |
NVIDIA-Omniverse/kit-app-template/tools/deps/repo-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="repo_man" linkPath="../../_repo/deps/repo_man">
<package name="repo_man" version="1.50.6"/>
</dependency>
<dependency name="repo_build" linkPath="../../_repo/deps/repo_build">
<package name="repo_build" version="0.60.1"/>
</dependency>
<dependency name="repo_ci" linkPath="../../_repo/deps/repo_ci">
<package name="repo_ci" version="0.6.0" />
</dependency>
<dependency name="repo_changelog" linkPath="../../_repo/deps/repo_changelog">
<package name="repo_changelog" version="0.3.13"/>
</dependency>
<dependency name="repo_docs" linkPath="../../_repo/deps/repo_docs">
<package name="repo_docs" version="0.39.2"/>
</dependency>
<dependency name="repo_kit_tools" linkPath="../../_repo/deps/repo_kit_tools">
<package name="repo_kit_tools" version="0.14.17"/>
</dependency>
<dependency name="repo_test" linkPath="../_repo/deps/repo_test">
<package name="repo_test" version="2.16.1" />
</dependency>
<dependency name="repo_source" linkPath="../../_repo/deps/repo_source">
<package name="repo_source" version="0.4.3" />
</dependency>
<dependency name="repo_package" linkPath="../../_repo/deps/repo_package">
<package name="repo_package" version="5.9.3" />
</dependency>
<dependency name="repo_format" linkPath="../../_repo/deps/repo_format">
<package name="repo_format" version="2.8.0" />
</dependency>
<dependency name="repo_kit_template" linkPath="../../_repo/deps/repo_kit_template">
<package name="repo_kit_template" version="0.1.9" />
</dependency>
</project>
| 1,593 | XML | 43.277777 | 85 | 0.648462 |
NVIDIA-Omniverse/kit-app-template/tools/deps/kit-sdk.packman.xml | <project toolsVersion="5.0">
<dependency name="kit_sdk_${config}" linkPath="../../_build/${platform}/${config}/kit" tags="${config} non-redist">
<package name="kit-kernel" version="105.1.2+release.134727.de96b556.tc.${platform}.${config}"/>
</dependency>
</project>
| 274 | XML | 44.833326 | 117 | 0.664234 |
NVIDIA-Omniverse/kit-app-template/tools/deps/user.toml | [exts."omni.kit.registry.nucleus"]
registries = [
{ name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/shared" },
{ name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" },
]
| 296 | TOML | 48.499992 | 136 | 0.675676 |
NVIDIA-Omniverse/kit-app-template/tools/deps/kit-sdk-deps.packman.xml | <project toolsVersion="5.0">
<!-- Only edit this file to pull kit depedencies. -->
<!-- Put all extension-specific dependencies in `ext-deps.packman.xml`. -->
<!-- This file contains shared Kit SDK dependencies used by most kit extensions. -->
<!-- Import Kit SDK all-deps xml file to steal some deps from it: -->
<import path="../../_build/${platform}/${config}/kit/dev/all-deps.packman.xml">
<filter include="pybind11" />
<filter include="fmt" />
<filter include="python" />
<filter include="carb_sdk_plugins" />
<filter include="winsdk" />
</import>
  <!-- Pull those deps of the same version as in Kit SDK. Override linkPath to point correctly; other properties can also be overridden, including version. -->
<dependency name="carb_sdk_plugins" linkPath="../../_build/target-deps/carb_sdk_plugins" tags="non-redist" />
<dependency name="pybind11" linkPath="../../_build/target-deps/pybind11" />
<dependency name="fmt" linkPath="../../_build/target-deps/fmt" />
<dependency name="python" linkPath="../../_build/target-deps/python" />
<!-- Import host deps from Kit SDK to keep in sync -->
<import path="../../_build/${platform}/${config}/kit/dev/deps/host-deps.packman.xml">
<filter include="premake" />
<filter include="msvc" />
<filter include="linbuild" />
</import>
<dependency name="premake" linkPath="../../_build/host-deps/premake" />
<dependency name="msvc" linkPath="../../_build/host-deps/msvc" />
<dependency name="winsdk" linkPath="../../_build/host-deps/winsdk" />
<dependency name="linbuild" linkPath="../../_build/host-deps/linbuild" tags="non-redist"/>
</project>
| 1,651 | XML | 49.060605 | 157 | 0.660206 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/config/extension.toml | [package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.32"
# The title and description fields are primarily for displaying extension info in UI
title = "Setup Extension for USD Explorer"
description = "An extension that sets up my App"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://gitlab-master.nvidia.com/omniverse/usd_explorer"
# One of categories for UI.
category = "setup"
# Keywords for the extension
keywords = ["kit", "app", "setup"]
# Icon to show in the extension manager
icon = "data/icon.png"
# Preview to show in the extension manager
preview_image = "data/preview.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.quicklayout" = {}
"omni.kit.window.title" = {}
"omni.kit.browser.asset" = {}
"omni.kit.window.console" = {}
"omni.kit.window.content_browser" = {}
"omni.kit.window.material" = {}
"omni.kit.window.toolbar" = {version = "1.5.4", exact = true}
"omni.kit.property.bundle" = {}
"omni.kit.property.layer" = {}
"omni.kit.viewport.navigation.usd_explorer.bundle" = {}
"omni.kit.window.preferences" = {}
# from omni.view.app.setup
"omni.kit.viewport.menubar.camera" = { optional=true }
"omni.kit.widget.layers" = { optional=true }
"omni.kit.widgets.custom" = {}
"omni.kit.window.file" = {}
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "omni.usd_explorer.setup"
[settings]
app.layout.name = "viewport_only"
app.application_mode = "review"
exts."omni.kit.viewport.menubar.camera".expand = true # Expand the extra-camera settings by default
exts."omni.kit.window.file".useNewFilePicker = true
exts."omni.kit.tool.asset_importer".useNewFilePicker = true
exts."omni.kit.tool.collect".useNewFilePicker = true
exts."omni.kit.widget.layers".useNewFilePicker = true
exts."omni.kit.renderer.core".imgui.enableMips = true
exts."omni.kit.browser.material".enabled = false
exts."omni.kit.window.material".load_after_startup = true
exts."omni.kit.widget.cloud_share".require_access_code = false
exts."omni.kit.mesh.raycast".bvhBuildOnFirstRequired = true # Avoids mesh raycast to initialize during stage open
app.content.emptyStageOnStart = true
app.viewport.createCameraModelRep = false # Disable creation of camera meshes in USD
# USDRT
app.usdrt.scene_delegate.enableProxyCubes = false
app.usdrt.scene_delegate.geometryStreaming.enabled = true
app.usdrt.scene_delegate.numFramesBetweenLoadBatches = 2
app.usdrt.scene_delegate.geometryStreaming.numberOfVerticesToLoadPerChunk = 600000
exts."omni.kit.viewport.navigation.camera_manipulator".defaultOperation = ""
[[test]]
dependencies = [
"omni.kit.core.tests",
"omni.kit.ui_test",
"omni.kit.mainwindow",
"omni.kit.viewport.window",
"omni.kit.viewport.utility",
]
args = [
"--/app/file/ignoreUnsavedOnExit=true",
# "--/renderer/enabled=pxr",
# "--/renderer/active=pxr",
"--/app/window/width=1280",
"--/app/window/height=720",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/exts/omni.kit.viewport.window/startup/windowName=Viewport",
"--reset-user",
"--no-window",
"--/app/fastShutdown=1"
]
| 3,294 | TOML | 33.322916 | 113 | 0.728597 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menubar_helper.py | from pathlib import Path
import carb
import carb.settings
import carb.tokens
import omni.ui as ui
from omni.ui import color as cl
ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons")
VIEW_MENUBAR_STYLE = {
"MenuBar.Window": {"background_color": 0xA0000000},
"MenuBar.Item.Background": { "background_color": 0, },
"Menu.Item.Background": { "background_color": 0, }
}
VIEWPORT_CAMERA_STYLE = {
"Menu.Item.Icon::Expand": {"image_url": f"{ICON_PATH}/caret_s2_right_dark.svg", "color": cl.viewport_menubar_light},
"Menu.Item.Icon::Expand:checked": {"image_url": f"{ICON_PATH}/caret_s2_left_dark.svg"},
}
class MenubarHelper:
    """Styles the viewport menubar and customizes the camera menu.

    On construction this: applies the translucent dark style to the default
    viewport menubar, swaps the camera menu's expand/collapse icons, adds a
    "Speed" slider entry to the camera menu, removes its "create camera from
    view" entry, and hides the default render/settings menubars. The menubar
    extensions are optional; import failures are logged and skipped.
    """
    def __init__(self) -> None:
        self._settings = carb.settings.get_settings()
        # Set menubar background and style
        try:
            from omni.kit.viewport.menubar.core import DEFAULT_MENUBAR_NAME
            from omni.kit.viewport.menubar.core import get_instance as get_menubar_instance
            instance = get_menubar_instance()
            if not instance: # pragma: no cover
                return
            default_menubar = instance.get_menubar(DEFAULT_MENUBAR_NAME)
            default_menubar.background_visible = True
            default_menubar.style.update(VIEW_MENUBAR_STYLE)
            default_menubar.show_separator = True
        except ImportError: # pragma: no cover
            carb.log_warn("Viewport menubar not found!")
        try:
            import omni.kit.viewport.menubar.camera
            self._camera_menubar_instance = omni.kit.viewport.menubar.camera.get_instance()
            if not self._camera_menubar_instance: # pragma: no cover
                return
            # Change expand button icon
            # NOTE(review): reaches into private attributes of the camera menu
            # extension; brittle against upstream refactors.
            self._camera_menubar_instance._camera_menu._style.update(VIEWPORT_CAMERA_STYLE)
            # New menu item for camera speed
            self._camera_menubar_instance.register_menu_item(self._create_camera_speed, order=100)
            # OM-76591 - Removing "Create from view" item - Bob
            self._camera_menubar_instance.deregister_menu_item(self._camera_menubar_instance._camera_menu._build_create_camera)
        except ImportError:
            carb.log_warn("Viewport menubar not found!")
            self._camera_menubar_instance = None
        except AttributeError: # pragma: no cover
            self._camera_menubar_instance = None
        # Hide default render and settings menubar
        self._settings.set("/persistent/exts/omni.kit.viewport.menubar.render/visible", False)
        self._settings.set("/persistent/exts/omni.kit.viewport.menubar.settings/visible", False)
    def destroy(self) -> None:
        """Remove the camera-speed menu item registered in ``__init__``."""
        if self._camera_menubar_instance:
            self._camera_menubar_instance.deregister_menu_item(self._create_camera_speed)
    def _create_camera_speed(self, _vc, _r: ui.Menu) -> None:
        """Build the "Speed" slider menu item bound to the camera move-velocity setting."""
        from omni.kit.viewport.menubar.core import SettingModel, SliderMenuDelegate
        ui.MenuItem(
            "Speed",
            hide_on_click=False,
            delegate=SliderMenuDelegate(
                model=SettingModel("/persistent/app/viewport/camMoveVelocity", draggable=True),
                # Floor the minimum at 0.01 when the setting is unset/zero.
                min=self._settings.get_as_float("/persistent/app/viewport/camVelocityMin") or 0.01,
                max=self._settings.get_as_float("/persistent/app/viewport/camVelocityMax"),
                tooltip="Set the Fly Mode navigation speed",
                width=0,
                reserve_status=True,
            ),
        )
| 3,517 | Python | 42.974999 | 127 | 0.642593 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/__init__.py | from .setup import *
| 21 | Python | 9.999995 | 20 | 0.714286 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/setup.py | import asyncio
import weakref
from functools import partial
import os
from pathlib import Path
from typing import cast, Optional
import omni.client
import omni.ext
import omni.kit.menu.utils
import omni.kit.app
import omni.kit.context_menu
import omni.kit.ui
import omni.usd
from omni.kit.quicklayout import QuickLayout
from omni.kit.menu.utils import MenuLayout
from omni.kit.window.title import get_main_window_title
from omni.kit.usd.layers import LayerUtils
from omni.kit.viewport.menubar.core import get_instance as get_mb_inst, DEFAULT_MENUBAR_NAME
from omni.kit.viewport.menubar.core.viewport_menu_model import ViewportMenuModel
from omni.kit.viewport.utility import get_active_viewport, get_active_viewport_window, disable_selection
import carb
import carb.settings
import carb.dictionary
import carb.events
import carb.tokens
import carb.input
import omni.kit.imgui as _imgui
from pxr import Sdf, Usd
from .navigation import Navigation
from .menu_helper import MenuHelper
from .menubar_helper import MenubarHelper
from .stage_template import SunnySkyStage
from .ui_state_manager import UIStateManager
SETTINGS_PATH_FOCUSED = "/app/workspace/currentFocused"
APPLICATION_MODE_PATH = "/app/application_mode"
MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active"
CURRENT_TOOL_PATH = "/app/viewport/currentTool"
ROOT_WINDOW_NAME = "DockSpace"
ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons")
SETTINGS_STARTUP_EXPAND_VIEWPORT = "/app/startup/expandViewport"
VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu"
TELEPORT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/visible"
async def _load_layout_startup(layout_file: str, keep_windows_open: bool=False) -> None:
    """Apply *layout_file* once the app has settled after startup.

    Waits a few frames so the load does not collide with the layout applied
    by omni.kit.mainwindow, then applies the layout twice — some layouts do
    not apply correctly on the first pass.
    """
    try:
        # Let a few frames elapse before touching the workspace layout.
        for _ in range(3):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)
        # Re-apply one frame later as a workaround for layouts that need a retry.
        await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)
    except Exception as exc:  # pragma: no cover (a missing layout file makes QuickLayout log an error rather than raise)
        carb.log_warn(f"Failed to load layout {layout_file}: {exc}")
async def _load_layout(layout_file: str, keep_windows_open:bool=False) -> None:
    """Apply *layout_file* after a short frame delay.

    The delay avoids conflicting with the layout set up by omni.kit.mainwindow.
    """
    try:
        # Let a few frames elapse before touching the workspace layout.
        for _ in range(3):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)
    except Exception as exc:  # pragma: no cover (a missing layout file makes QuickLayout log an error rather than raise)
        carb.log_warn(f"Failed to load layout {layout_file}: {exc}")
async def _clear_startup_scene_edits() -> None:
    """Clear the stage's pending-edit flag once startup has settled."""
    try:
        # Wait ~50 frames so this runs after RTX startup has finished; the
        # count could likely be smaller, but must outlast renderer init.
        for _ in range(50):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        omni.usd.get_context().set_pending_edit(False)
    except Exception as exc:  # pragma: no cover
        carb.log_warn(f"Failed to clear stage edits on startup: {exc}")
# This extension is mostly loading the Layout updating menu
class SetupExtension(omni.ext.IExt):
    """Application setup extension for USD Explorer.

    On startup it styles the UI via imgui, loads the startup workspace
    layout, builds the File/Window/Layout/Help menus, starts navigation,
    and subscribes to application-mode and stage-open events so the UI can
    switch between "layout" and "review" modes.
    """
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    @property
    def _app(self):
        """The running Kit application instance."""
        return omni.kit.app.get_app()
    @property
    def _settings(self):
        """The carb settings interface."""
        return carb.settings.get_settings()
    def on_startup(self, ext_id: str) -> None:
        """Extension entry point: style the UI, load layouts, build menus.

        Args:
            ext_id: Identifier of this extension instance.
        """
        self._ext_id = ext_id
        self._menubar_helper = MenubarHelper()
        self._menu_helper = MenuHelper()
        # using imgui directly to adjust some color and Variable
        imgui = _imgui.acquire_imgui()
        # match Create overrides
        imgui.push_style_color(_imgui.StyleColor.ScrollbarGrab, carb.Float4(0.4, 0.4, 0.4, 1))
        imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabHovered, carb.Float4(0.6, 0.6, 0.6, 1))
        imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabActive, carb.Float4(0.8, 0.8, 0.8, 1))
        # DockSplitterSize is the variable that drives the size of the Dock Split connection
        imgui.push_style_var_float(_imgui.StyleVar.DockSplitterSize, 2)
        # setup the Layout for your app
        self._layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts")
        layout_file = Path(self._layouts_path).joinpath(f"{self._settings.get('/app/layout/name')}.json")
        self.__setup_window_task = asyncio.ensure_future(_load_layout_startup(f"{layout_file}", True))
        self.review_layout_path = str(Path(self._layouts_path) / "comment_layout.json")
        self.default_layout_path = str(Path(self._layouts_path) / "default.json")
        self.layout_user_path = str(Path(self._layouts_path) / "layout_user.json")
        # remove the user defined layout so that we always load the default layout when startup
        if os.path.exists(self.layout_user_path):
            os.remove(self.layout_user_path)
        # setup the menu and their layout
        self._current_layout_priority = 0
        self._layout_menu_items = []
        self._layout_file_menu()
        self._menu_layout = []
        if self._settings.get_as_bool('/app/view/debug/menus'):
            self._layout_menu()
        # setup the Application Title
        window_title = get_main_window_title()
        if window_title:
            window_title.set_app_version(self._settings.get_as_string("/app/titleVersion"))
        # self._context_menu()
        self._register_my_menu()
        self._navigation = Navigation()
        self._navigation.on_startup(ext_id)
        # weakref.proxy avoids the subscription keeping this extension alive
        self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events(
            APPLICATION_MODE_PATH, weakref.proxy(self)._on_application_mode_changed
        )
        self._set_viewport_menubar_visibility(False)
        self._test = asyncio.ensure_future(_clear_startup_scene_edits())
        # OM-95865: Ensure teleport on by default.
        self._usd_context = omni.usd.get_context()
        self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_open_event, name="TeleportDefaultOn"
        )
        if self._settings.get_as_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT):
            self._set_viewport_fill_on()
        self._stage_templates = [SunnySkyStage()]
        disable_selection(get_active_viewport())
        self._ui_state_manager = UIStateManager()
        self._setup_ui_state_changes()
        omni.kit.menu.utils.add_layout([
            MenuLayout.Menu("Window", [
                MenuLayout.Item("Viewport", source="Window/Viewport/Viewport 1"),
                MenuLayout.Item("Playlist", remove=True),
                MenuLayout.Item("Layout", remove=True),
                MenuLayout.Item("" if any(v in self._app.get_app_version() for v in ("alpha", "beta")) else "Extensions", remove=True),
                MenuLayout.Sort(exclude_items=["Extensions"], sort_submenus=True),
            ])
        ])
        def show_documentation(*x):
            # Opens the USD Explorer documentation in the system browser.
            import webbrowser
            webbrowser.open("http://docs.omniverse.nvidia.com/explorer")
        self._help_menu_items = [
            omni.kit.menu.utils.MenuItemDescription(name="Documentation",
                                                    onclick_fn=show_documentation,
                                                    appear_after=[omni.kit.menu.utils.MenuItemOrder.FIRST])
        ]
        omni.kit.menu.utils.add_menu_items(self._help_menu_items, name="Help")
    def _on_stage_open_event(self, event: carb.events.IEvent) -> None:
        """Stage-open handler: exit tools, enable Teleport in Review mode,
        and toggle the viewport right-click context menu by application mode."""
        if event.type == int(omni.usd.StageEventType.OPENED):
            app_mode = self._settings.get_as_string(APPLICATION_MODE_PATH).lower()
            # exit all tools
            self._settings.set(CURRENT_TOOL_PATH, "none")
            # OM-95865, OMFP-1993: Activate Teleport upon scene load ...
            # OMFP-2743: ... but only when in Review mode.
            if app_mode == "review":
                asyncio.ensure_future(self._stage_post_open_teleport_toggle())
            # toggle RMB viewport context menu based on application mode
            value = False if app_mode == "review" else True
            self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, value)
    # teleport is activated after loading a stage and app is in Review mode
    async def _stage_post_open_teleport_toggle(self) -> None:
        """Set the active navigation operation to Teleport (skipped for new, empty stages)."""
        await self._app.next_update_async()
        if hasattr(self, "_usd_context") and self._usd_context is not None and not self._usd_context.is_new_stage():
            self._settings.set("/exts/omni.kit.viewport.navigation.core/activeOperation", "teleport")
    def _set_viewport_fill_on(self) -> None:
        """Expand the active viewport widget to fill its window."""
        vp_window = get_active_viewport_window()
        vp_widget = vp_window.viewport_widget if vp_window else None
        if vp_widget:
            vp_widget.expand_viewport = True
    def _set_viewport_menubar_visibility(self, show: bool) -> None:
        """Show or hide the default viewport menubar and refresh its model."""
        mb_inst = get_mb_inst()
        if mb_inst and hasattr(mb_inst, "get_menubar"):
            main_menubar = mb_inst.get_menubar(DEFAULT_MENUBAR_NAME)
            if main_menubar.visible_model.as_bool != show:
                main_menubar.visible_model.set_value(show)
        ViewportMenuModel()._item_changed(None)  # type: ignore
    def _on_application_mode_changed(self, item: carb.dictionary.Item, _typ: carb.settings.ChangeEventType) -> None:
        """Settings callback: clear selection in Review mode, then defer the layout switch."""
        if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "review":
            omni.usd.get_context().get_selection().clear_selected_prim_paths()
            disable_selection(get_active_viewport())
        current_mode: str = cast(str, item.get_dict())
        asyncio.ensure_future(self.defer_load_layout(current_mode))
    async def defer_load_layout(self, current_mode: str) -> None:
        """Switch the workspace layout for *current_mode* ("review" or "layout")."""
        keep_windows = True
        # Focus Mode Toolbar
        self._settings.set_bool(SETTINGS_PATH_FOCUSED, True) # current_mode not in ("review", "layout"))
        # Turn off all tools and modal
        self._settings.set_string(CURRENT_TOOL_PATH, "none")
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)
        if current_mode == "review":
            # save the current layout for restoring later if switch back
            QuickLayout.save_file(self.layout_user_path)
            # we don't want to keep any windows except the ones which are visible in self.review_layout_path
            await _load_layout(self.review_layout_path, False)
        else: # current_mode == "layout":
            # check if there is any user modified layout, if yes use that one
            layout_filename = self.layout_user_path if os.path.exists(self.layout_user_path) else self.default_layout_path
            await _load_layout(layout_filename, keep_windows)
        self._set_viewport_menubar_visibility(current_mode == "layout")
    def _setup_ui_state_changes(self) -> None:
        """Wire window-visibility settings and hide-on-modal rules into the UI state manager."""
        windows_to_hide_on_modal = ["Measure", "Section", "Waypoints"]
        self._ui_state_manager.add_hide_on_modal(window_names=windows_to_hide_on_modal, restore=True)
        window_titles = ["Markups", "Waypoints"]
        for window in window_titles:
            setting_name = f'/exts/omni.usd_explorer.setup/{window}/visible'
            self._ui_state_manager.add_window_visibility_setting(window, setting_name)
        # toggle icon visibilities based on window visibility
        self._ui_state_manager.add_settings_copy_dependency(
            source_path="/exts/omni.usd_explorer.setup/Markups/visible",
            target_path="/exts/omni.kit.markup.core/show_icons",
        )
        self._ui_state_manager.add_settings_copy_dependency(
            source_path="/exts/omni.usd_explorer.setup/Waypoints/visible",
            target_path="/exts/omni.kit.waypoint.core/show_icons",
        )
    def _custom_quicklayout_menu(self) -> None:
        """Add Layout menu entries that load the bundled quick layouts via Ctrl+<n>."""
        # we setup a simple ways to Load custom layout from the exts
        def add_layout_menu_entry(name, parameter, key):
            # Registers one Layout menu entry; *parameter* is either a layout
            # file basename or an async callable to invoke on click.
            import inspect
            editor_menu = omni.kit.ui.get_editor_menu()
            layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts")
            menu_path = f"Layout/{name}"
            menu = editor_menu.add_item(menu_path, None, False, self._current_layout_priority)  # type: ignore
            self._current_layout_priority = self._current_layout_priority + 1
            if inspect.isfunction(parameter):  # pragma: no cover (Never used, see commented out section below regarding quick save/load)
                menu_action = omni.kit.menu.utils.add_action_to_menu(
                    menu_path,
                    lambda *_: asyncio.ensure_future(parameter()),
                    name,
                    (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key),
                )
            else:
                menu_action = omni.kit.menu.utils.add_action_to_menu(
                    menu_path,
                    lambda *_: asyncio.ensure_future(_load_layout(f"{layouts_path}/{parameter}.json")),
                    name,
                    (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key),
                )
            self._layout_menu_items.append((menu, menu_action))
        add_layout_menu_entry("Reset Layout", "default", carb.input.KeyboardInput.KEY_1)
        add_layout_menu_entry("Viewport Only", "viewport_only", carb.input.KeyboardInput.KEY_2)
        add_layout_menu_entry("Markup Editor", "markup_editor", carb.input.KeyboardInput.KEY_3)
        # add_layout_menu_entry("Waypoint Viewer", "waypoint_viewer", carb.input.KeyboardInput.KEY_4)
        # # you can enable Quick Save and Quick Load here
        # if False:
        #     # create Quick Load & Quick Save
        #     from omni.kit.quicklayout import QuickLayout
        #     async def quick_save():
        #         QuickLayout.quick_save(None, None)
        #     async def quick_load():
        #         QuickLayout.quick_load(None, None)
        #     add_layout_menu_entry("Quick Save", quick_save, carb.input.KeyboardInput.KEY_7)
        #     add_layout_menu_entry("Quick Load", quick_load, carb.input.KeyboardInput.KEY_8)
    def _register_my_menu(self) -> None:
        """Fetch the context-menu extension (placeholder; registers nothing yet)."""
        context_menu: Optional[omni.kit.context_menu.ContextMenuExtension] = omni.kit.context_menu.get_instance()
        if not context_menu:  # pragma: no cover
            return
    def _layout_file_menu(self) -> None:
        """Arrange the File menu entries for USD Explorer."""
        self._menu_file_layout = [
            MenuLayout.Menu(
                "File",
                [
                    MenuLayout.Item("New"),
                    MenuLayout.Item("New From Stage Template"),
                    MenuLayout.Item("Open"),
                    MenuLayout.Item("Open Recent"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Re-open with New Edit Layer"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Share"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Save"),
                    MenuLayout.Item("Save As..."),
                    MenuLayout.Item("Save With Options"),
                    MenuLayout.Item("Save Selected"),
                    MenuLayout.Item("Save Flattened As...", remove=True),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Collect As..."),
                    MenuLayout.Item("Export"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Import"),
                    MenuLayout.Item("Add Reference"),
                    MenuLayout.Item("Add Payload"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Exit"),
                ]
            )
        ]
        omni.kit.menu.utils.add_layout(self._menu_file_layout)
    def _layout_menu(self) -> None:
        """Arrange the Window/Layout/Tools menus (only used when debug menus are enabled)."""
        self._menu_layout = [
            MenuLayout.Menu(
                "Window",
                [
                    MenuLayout.SubMenu(
                        "Animation",
                        [
                            MenuLayout.Item("Timeline"),
                            MenuLayout.Item("Sequencer"),
                            MenuLayout.Item("Curve Editor"),
                            MenuLayout.Item("Retargeting"),
                            MenuLayout.Item("Animation Graph"),
                            MenuLayout.Item("Animation Graph Samples"),
                        ],
                    ),
                    MenuLayout.SubMenu(
                        "Layout",
                        [
                            MenuLayout.Item("Quick Save", remove=True),
                            MenuLayout.Item("Quick Load", remove=True),
                        ],
                    ),
                    MenuLayout.SubMenu(
                        "Browsers",
                        [
                            MenuLayout.Item("Content", source="Window/Content"),
                            MenuLayout.Item("Materials"),
                            MenuLayout.Item("Skies"),
                        ],
                    ),
                    MenuLayout.SubMenu(
                        "Rendering",
                        [
                            MenuLayout.Item("Render Settings"),
                            MenuLayout.Item("Movie Capture"),
                            MenuLayout.Item("MDL Material Graph"),
                            MenuLayout.Item("Tablet XR"),
                        ],
                    ),
                    MenuLayout.SubMenu(
                        "Simulation",
                        [
                            MenuLayout.Group(
                                "Flow",
                                [
                                    MenuLayout.Item("Presets", source="Window/Flow/Presets"),
                                    MenuLayout.Item("Monitor", source="Window/Flow/Monitor"),
                                ],
                            ),
                            MenuLayout.Group(
                                "Blast",
                                [
                                    MenuLayout.Item("Settings", source="Window/Blast/Settings"),
                                    MenuLayout.SubMenu(
                                        "Documentation",
                                        [
                                            MenuLayout.Item("Kit UI", source="Window/Blast/Documentation/Kit UI"),
                                            MenuLayout.Item(
                                                "Programming", source="Window/Blast/Documentation/Programming"
                                            ),
                                            MenuLayout.Item(
                                                "USD Schemas", source="Window/Blast/Documentation/USD Schemas"
                                            ),
                                        ],
                                    ),
                                ],
                            ),
                            MenuLayout.Item("Debug"),
                            # MenuLayout.Item("Performance"),
                            MenuLayout.Group(
                                "Physics",
                                [
                                    MenuLayout.Item("Demo Scenes"),
                                    MenuLayout.Item("Settings", source="Window/Physics/Settings"),
                                    MenuLayout.Item("Debug"),
                                    MenuLayout.Item("Test Runner"),
                                    MenuLayout.Item("Character Controller"),
                                    MenuLayout.Item("OmniPVD"),
                                    MenuLayout.Item("Physics Helpers"),
                                ],
                            ),
                        ],
                    ),
                    MenuLayout.SubMenu(
                        "Utilities",
                        [
                            MenuLayout.Item("Console"),
                            MenuLayout.Item("Profiler"),
                            MenuLayout.Item("USD Paths"),
                            MenuLayout.Item("Statistics"),
                            MenuLayout.Item("Activity Monitor"),
                        ],
                    ),
                    # Remove 'Viewport 2' entry
                    MenuLayout.SubMenu(
                        "Viewport",
                        [
                            MenuLayout.Item("Viewport 2", remove=True),
                        ],
                    ),
                    MenuLayout.Sort(exclude_items=["Extensions"]),
                    MenuLayout.Item("New Viewport Window", remove=True),
                ],
            ),
            # that is you enable the Quick Layout Menu
            MenuLayout.Menu(
                "Layout",
                [
                    MenuLayout.Item("Default", source="Reset Layout"),
                    MenuLayout.Item("Viewport Only"),
                    MenuLayout.Item("Markup Editor"),
                    MenuLayout.Item("Waypoint Viewer"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("UI Toggle Visibility", source="Window/UI Toggle Visibility"),
                    MenuLayout.Item("Fullscreen Mode", source="Window/Fullscreen Mode"),
                    MenuLayout.Seperator(),
                    MenuLayout.Item("Save Layout", source="Window/Layout/Save Layout..."),
                    MenuLayout.Item("Load Layout", source="Window/Layout/Load Layout..."),
                    # MenuLayout.Seperator(),
                    # MenuLayout.Item("Quick Save", source="Window/Layout/Quick Save"),
                    # MenuLayout.Item("Quick Load", source="Window/Layout/Quick Load"),
                ],
            ),
            MenuLayout.Menu("Tools", [MenuLayout.SubMenu("Animation", remove=True)]),
        ]
        omni.kit.menu.utils.add_layout(self._menu_layout)  # type: ignore
        # if you want to support the Quick Layout Menu
        self._custom_quicklayout_menu()
    def on_shutdown(self):
        """Tear down menus, navigation, and subscriptions created in ``on_startup``."""
        if self._menu_layout:
            omni.kit.menu.utils.remove_layout(self._menu_layout)  # type: ignore
            self._menu_layout.clear()
        self._layout_menu_items.clear()
        self._navigation.on_shutdown()
        del self._navigation
        self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub)
        del self._application_mode_changed_sub
        self._stage_event_sub = None
        # From View setup
        self._menubar_helper.destroy()
        if self._menu_helper and hasattr(self._menu_helper, "destroy"):
            self._menu_helper.destroy()
        self._menu_helper = None
        self._stage_templates = []
| 23,462 | Python | 45.005882 | 167 | 0.557753 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/navigation.py | import asyncio
import carb
import carb.settings
import carb.tokens
import carb.dictionary
import omni.kit.app
import omni.ext
import omni.ui as ui
import omni.kit.actions.core
from omni.kit.viewport.navigation.core import (
NAVIGATION_TOOL_OPERATION_ACTIVE,
ViewportNavigationTooltip,
get_navigation_bar,
)
__all__ = ["Navigation"]

# Carb settings paths used by the Navigation helper below.
CURRENT_TOOL_PATH = "/app/viewport/currentTool"
SETTING_NAVIGATION_ROOT = "/exts/omni.kit.tool.navigation/"
NAVIGATION_BAR_VISIBLE_PATH = "/exts/omni.kit.viewport.navigation.core/isVisible"
APPLICATION_MODE_PATH = "/app/application_mode"
# Persistent per-button visibility flags of the viewport navigation bar.
WALK_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.walk/visible"
CAPTURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.capture/visible"
MARKUP_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.markup/visible"
MEASURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.measure/visible"
SECTION_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.section/visible"
TELEPORT_SEPARATOR_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/spvisible"
WAYPOINT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.waypoint/visible"
# Settings owned by other extensions that this module reads/writes.
VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu"
MENUBAR_APP_MODES_PATH = "/exts/omni.kit.usd_presenter.main.menubar/include_modify_mode"
WELCOME_WINDOW_VISIBLE_PATH = "/exts/omni.kit.usd_presenter.window.welcome/visible"
ACTIVE_OPERATION_PATH = "/exts/omni.kit.viewport.navigation.core/activeOperation"
class Navigation:
    """Wires the viewport navigation bar to application state.

    Shows the bar on startup, keeps its visibility consistent with the
    Welcome window and the application mode, and manages the navigation
    tooltip. All state is exchanged through the carb settings paths
    defined as module constants above.
    """

    # Not referenced in this module — presumably kept for external users; TODO confirm.
    NAVIGATION_BAR_NAME = None

    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id: str) -> None:
        """Show the navigation bar, apply fixed button-visibility defaults,
        and subscribe to the settings that drive bar visibility."""
        sections = ext_id.split("-")
        self._ext_name = sections[0]
        self._settings = carb.settings.get_settings()
        self._navigation_bar = get_navigation_bar()
        self._tool_bar_button = None
        self._dict = carb.dictionary.get_dictionary()
        self._panel_visible = True
        self._navigation_bar.show()
        # Start in navigation tool with the teleport operation active.
        self._settings.set(CURRENT_TOOL_PATH, "navigation")
        self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport")
        self._viewport_welcome_window_visibility_changed_sub = self._settings.subscribe_to_node_change_events(
            WELCOME_WINDOW_VISIBLE_PATH, self._on_welcome_window_visibility_change
        )
        # OMFP-1799 Set nav bar visibility defaults. These should remain fixed now.
        self._settings.set(WALK_VISIBLE_PATH, False)
        self._settings.set(MARKUP_VISIBLE_PATH, True)
        self._settings.set(WAYPOINT_VISIBLE_PATH, True)
        self._settings.set(TELEPORT_SEPARATOR_VISIBLE_PATH, True)
        self._settings.set(CAPTURE_VISIBLE_PATH, True)
        self._settings.set(MEASURE_VISIBLE_PATH, True)
        self._settings.set(SECTION_VISIBLE_PATH, True)
        self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events(
            APPLICATION_MODE_PATH, self._on_application_mode_changed
        )
        self._show_tooltips = False
        self._nav_bar_visibility_sub = self._settings.subscribe_to_node_change_events(
            NAVIGATION_BAR_VISIBLE_PATH, self._delay_reset_tooltip)

    # Snapshot of navbar/tool state taken while the Welcome window is open
    # in Layout mode, restored when it closes (see handler below).
    _prev_navbar_vis = None
    _prev_tool = None
    _prev_operation = None

    def _on_welcome_window_visibility_change(self, item: carb.dictionary.Item, *_) -> None:
        """Hide the navbar while the Welcome window is open; restore afterwards."""
        # Defensive: the dictionary interface can be cleared during shutdown.
        if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)):
            return
        welcome_window_vis = self._dict.get(item)
        # preserve the state of the navbar upon closing the Welcome window if the app is in Layout mode
        if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "layout":
            # preserve the state of the navbar visibility
            if welcome_window_vis:
                self._prev_navbar_vis = self._settings.get_as_bool(NAVIGATION_BAR_VISIBLE_PATH)
                self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis))
                self._prev_tool = self._settings.get(CURRENT_TOOL_PATH)
                self._prev_operation = self._settings.get(ACTIVE_OPERATION_PATH)
            else:  # restore the state of the navbar visibility
                if self._prev_navbar_vis is not None:
                    self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, self._prev_navbar_vis)
                    self._prev_navbar_vis = None
                if self._prev_tool is not None:
                    self._settings.set(CURRENT_TOOL_PATH, self._prev_tool)
                if self._prev_operation is not None:
                    self._settings.set(ACTIVE_OPERATION_PATH, self._prev_operation)
            return
        else:
            # Non-layout modes: suspend the active operation while the
            # Welcome window is up, re-arm teleport when it closes.
            if welcome_window_vis:
                self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "none")
            else:
                self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport")
            self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis))

    def _on_application_mode_changed(self, item: carb.dictionary.Item, *_) -> None:
        """Settings callback: kick off the async mode switch."""
        if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)):
            return
        current_mode = self._dict.get(item)
        # Task handle is kept so it isn't garbage-collected mid-flight.
        self._test = asyncio.ensure_future(self._switch_by_mode(current_mode))

    async def _switch_by_mode(self, current_mode: str) -> None:
        """Show the navbar only in 'review' mode; toggle the RMB context menu inversely."""
        await omni.kit.app.get_app().next_update_async()
        state = True if current_mode == "review" else False
        self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, state)
        self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, not(state))  # toggle RMB viewport context menu
        self._delay_reset_tooltip(None)

    # OM-92161: Need to reset the tooltip when change the mode
    def _delay_reset_tooltip(self, *_) -> None:
        """Re-apply tooltip visibility a few frames later, after UI rebuild settles."""
        async def delay_set_tooltip() -> None:
            for _i in range(4):
                await omni.kit.app.get_app().next_update_async()  # type: ignore
            ViewportNavigationTooltip.set_visible(self._show_tooltips)
        asyncio.ensure_future(delay_set_tooltip())

    def _on_showtips_click(self, *_) -> None:
        """Toggle tooltip visibility (UI click handler)."""
        self._show_tooltips = not self._show_tooltips
        ViewportNavigationTooltip.set_visible(self._show_tooltips)

    def on_shutdown(self) -> None:
        """Drop references and release the application-mode subscription."""
        self._navigation_bar = None
        self._viewport_welcome_window_visibility_changed_sub = None
        self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub)  # type:ignore
        self._application_mode_changed_sub = None
        self._dict = None
| 6,679 | Python | 45.713286 | 119 | 0.676898 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/ui_state_manager.py | import carb.dictionary
import carb.settings
import omni.ui as ui
from functools import partial
from typing import Any, Dict, List, Tuple, Union
MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active"
class UIStateManager:
    """Keeps omni.ui window state and carb settings in sync.

    Provides three mechanisms:

    * *hide-on-modal*: registered windows are hidden while the modal-tool
      setting is active and, optionally, restored afterwards;
    * *window visibility settings*: a window's visibility is mirrored into
      one or more settings paths;
    * *settings dependencies*: one-way propagation between two settings
      paths, either verbatim (copy) or through a value map.
    """

    def __init__(self) -> None:
        self._settings = carb.settings.acquire_settings_interface()
        # Fires whenever the modal-tool flag flips; drives hide/restore below.
        self._modal_changed_sub = self._settings.subscribe_to_node_change_events(
            MODAL_TOOL_ACTIVE_PATH,
            self._on_modal_setting_changed
        )
        # (window_name, restore_after_modal) pairs.
        self._hide_on_modal: List[Tuple[str, bool]] = []
        # window_name -> visibility to restore when the modal tool ends.
        self._modal_restore_window_states: Dict[str, bool] = {}
        # BUGFIX: was `Dict[Tuple(str, str), ...]`. Attribute annotations are
        # evaluated at runtime, and *calling* typing.Tuple raises
        # "TypeError: Type Tuple cannot be instantiated" — use subscription.
        # (source_path, target_path) -> value map (None = verbatim copy).
        self._settings_dependencies: Dict[Tuple[str, str], Dict[Any, Any]] = {}
        # (source_path, target_path) -> settings-change subscription handle.
        self._settings_changed_subs: Dict[Tuple[str, str], Any] = {}
        # window_name -> list of settings paths mirroring its visibility.
        self._window_settings: Dict[str, List[str]] = {}
        self._window_vis_changed_id = ui.Workspace.set_window_visibility_changed_callback(self._on_window_vis_changed)

    def destroy(self) -> None:
        """Release all subscriptions and callbacks. Safe to call more than once."""
        if self._settings:
            if self._modal_changed_sub:
                self._settings.unsubscribe_to_change_events(self._modal_changed_sub)
                self._modal_changed_sub = None
            # Also release per-dependency subscriptions; previously these were
            # only dropped from the dict, leaving the callbacks registered.
            for sub in self._settings_changed_subs.values():
                self._settings.unsubscribe_to_change_events(sub)
            self._settings_changed_subs = {}
            self._settings = None
        self._hide_on_modal = []
        self._modal_restore_window_states = {}
        self._settings_dependencies = {}
        self._window_settings = {}
        if self._window_vis_changed_id:
            ui.Workspace.remove_window_visibility_changed_callback(self._window_vis_changed_id)
            self._window_vis_changed_id = None

    def __del__(self) -> None:
        self.destroy()

    def add_hide_on_modal(self, window_names: Union[str, List[str]], restore: bool) -> None:
        """Register window(s) to hide while a modal tool is active.

        :param window_names: single window title or list of titles.
        :param restore: restore the previous visibility when the modal tool ends.
        """
        if isinstance(window_names, str):
            window_names = [window_names]
        for window_name in window_names:
            # BUGFIX: the original checked `window_name not in self._hide_on_modal`,
            # but the list holds (name, restore) tuples, so the membership test
            # never matched and duplicate registrations could accumulate.
            if all(existing_name != window_name for existing_name, _restore in self._hide_on_modal):
                self._hide_on_modal.append((window_name, restore))

    def remove_hide_on_modal(self, window_names: Union[str, List[str]]) -> None:
        """Unregister window(s) from the hide-on-modal list."""
        if isinstance(window_names, str):
            window_names = [window_names]
        self._hide_on_modal = [item for item in self._hide_on_modal if item[0] not in window_names]

    def add_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        """Mirror *window_name*'s visibility into *setting_path* (initializes the setting now)."""
        window = ui.Workspace.get_window(window_name)
        if window is not None:
            self._settings.set(setting_path, window.visible)
        else:
            # handle the case when the window is created later
            self._settings.set(setting_path, False)
        self._window_settings.setdefault(window_name, []).append(setting_path)

    def remove_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        """Stop mirroring *window_name* into *setting_path*; drops the window entry when empty."""
        if window_name in self._window_settings:
            setting_list = self._window_settings[window_name]
            if setting_path in setting_list:
                setting_list.remove(setting_path)
            if len(setting_list) == 0:
                del self._window_settings[window_name]

    def remove_all_window_visibility_settings(self, window_name: str) -> None:
        """Remove every visibility-mirror setting registered for *window_name*."""
        self._window_settings.pop(window_name, None)

    def add_settings_dependency(self, source_path: str, target_path: str, value_map: Dict[Any, Any]) -> None:
        """Propagate changes of *source_path* to *target_path*.

        :param value_map: mapping of source values to target values; ``None``
            copies the value verbatim. Unmapped values are ignored.
        """
        key = (source_path, target_path)
        if key in self._settings_dependencies:
            carb.log_error(f'Settings dependency {source_path} -> {target_path} already exists. Ignoring.')
            return
        self._settings_dependencies[key] = value_map
        self._settings_changed_subs[key] = self._settings.subscribe_to_node_change_events(
            source_path,
            partial(self._on_settings_dependency_changed, source_path)
        )

    def add_settings_copy_dependency(self, source_path: str, target_path: str) -> None:
        """Propagate changes of *source_path* to *target_path* verbatim."""
        self.add_settings_dependency(source_path, target_path, None)

    def remove_settings_dependency(self, source_path: str, target_path: str) -> None:
        """Remove a dependency and unsubscribe its change listener."""
        key = (source_path, target_path)
        if key in self._settings_dependencies:
            del self._settings_dependencies[key]
        if key in self._settings_changed_subs:
            sub = self._settings_changed_subs.pop(key)
            self._settings.unsubscribe_to_change_events(sub)

    def _on_settings_dependency_changed(self, path: str, item, event_type) -> None:
        """Forward a source-setting change to every dependent target."""
        value = self._settings.get(path)
        # setting does not exist
        if value is None:
            return
        target_settings = [source_target[1] for source_target in self._settings_dependencies if source_target[0] == path]
        for target_setting in target_settings:
            value_map = self._settings_dependencies[(path, target_setting)]
            # None means copy everything
            if value_map is None:
                self._settings.set(target_setting, value)
            elif value in value_map:
                self._settings.set(target_setting, value_map[value])

    def _on_modal_setting_changed(self, item, event_type) -> None:
        """Hide registered windows when a modal tool starts; restore when it ends."""
        if self._settings.get_as_bool(MODAL_TOOL_ACTIVE_PATH):
            self._hide_windows()
        else:
            self._restore_windows()

    def _hide_windows(self) -> None:
        """Hide every registered window, snapshotting visibility for restorable ones."""
        for window_name, restore_later in self._hide_on_modal:
            window = ui.Workspace.get_window(window_name)
            if window is not None:
                if restore_later:
                    self._modal_restore_window_states[window_name] = window.visible
                window.visible = False

    def _restore_windows(self) -> None:
        """Restore visibility snapshots taken in _hide_windows (restorable windows only)."""
        for window_name, restore_later in self._hide_on_modal:
            if restore_later:
                old_visibility = self._modal_restore_window_states.get(window_name)
                if old_visibility is not None:
                    window = ui.Workspace.get_window(window_name)
                    if window is not None:
                        window.visible = old_visibility
                    # Clear the snapshot so a stale value is never re-applied.
                    self._modal_restore_window_states[window_name] = None

    def _on_window_vis_changed(self, title: str, state: bool) -> None:
        """Workspace callback: push a window's new visibility into its mirror settings."""
        for setting in self._window_settings.get(title, ()):
            self._settings.set_bool(setting, state)
| 6,634 | Python | 44.136054 | 128 | 0.611999 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/stage_template.py | import carb
import omni.ext
import omni.kit.commands
from omni.kit.stage_templates import register_template, unregister_template
from pxr import Gf, Sdf, Usd, UsdGeom, UsdLux
class SunnySkyStage:
    """Stage template that builds an /Environment with an HDR dome light ("Sky")
    and a key DistantLight, registered under the name "SunnySky"."""

    def __init__(self):
        # Registration makes `new_stage` the factory for the "SunnySky" template.
        register_template("SunnySky", self.new_stage)

    def __del__(self):
        unregister_template("SunnySky")

    def new_stage(self, rootname, usd_context_name):
        """Populate a freshly created stage with the SunnySky environment.

        :param rootname: template callback argument (unused here).
        :param usd_context_name: USD context to create the prims in.
        """
        # Create basic DistantLight
        usd_context = omni.usd.get_context(usd_context_name)
        stage = usd_context.get_stage()
        # get up axis
        up_axis = UsdGeom.GetStageUpAxis(stage)
        # Author everything on the root layer regardless of the current edit target.
        with Usd.EditContext(stage, stage.GetRootLayer()):
            # create Environment
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment",
                prim_type="Xform",
                select_new_prim=False,
                create_default_xform=True,
                context_name=usd_context_name
            )
            texture_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/light_rigs/HDR/partly_cloudy.hdr")
            # create Sky
            # Newer UsdLux uses "inputs:"-prefixed attribute tokens; fall back to
            # the legacy names when inputsIntensity is absent.
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment/Sky",
                prim_type="DomeLight",
                select_new_prim=False,
                attributes={
                    UsdLux.Tokens.inputsIntensity: 1000,
                    UsdLux.Tokens.inputsTextureFile: texture_path,
                    UsdLux.Tokens.inputsTextureFormat: UsdLux.Tokens.latlong,
                    UsdLux.Tokens.inputsSpecular: 1,
                    UsdGeom.Tokens.visibility: "inherited",
                } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \
                {
                    UsdLux.Tokens.intensity: 1000,
                    UsdLux.Tokens.textureFile: texture_path,
                    UsdLux.Tokens.textureFormat: UsdLux.Tokens.latlong,
                    UsdGeom.Tokens.visibility: "inherited",
                },
                create_default_xform=True,
                context_name=usd_context_name
            )
            prim = stage.GetPrimAtPath("/Environment/Sky")
            prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1))
            prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0))
            # Orient the dome for the stage's up axis.
            if up_axis == "Y":
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(270, 0, 0))
            else:
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 90))
            # NOTE(review): USD normally declares xformOpOrder as token[] (TokenArray);
            # here it is authored as String — confirm downstream consumers accept this.
            prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"])
            # create DistantLight
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment/DistantLight",
                prim_type="DistantLight",
                select_new_prim=False,
                attributes={
                    UsdLux.Tokens.inputsAngle: 4.3,
                    UsdLux.Tokens.inputsIntensity: 3000,
                    UsdGeom.Tokens.visibility: "inherited",
                } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \
                {
                    UsdLux.Tokens.angle: 4.3,
                    UsdLux.Tokens.intensity: 3000,
                    UsdGeom.Tokens.visibility: "inherited",
                },
                create_default_xform=True,
                context_name=usd_context_name
            )
            prim = stage.GetPrimAtPath("/Environment/DistantLight")
            prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1))
            prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0))
            # Axis-specific sun orientation (values authored by the original artist).
            if up_axis == "Y":
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(310.6366313590111, -125.93251524567805, 0.8821359067542289))
            else:
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(41.35092544555664, 0.517652153968811, -35.92928695678711))
            prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"])
| 4,590 | Python | 48.902173 | 166 | 0.56732 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menu_helper.py | import asyncio
import carb.settings
import omni.kit.app
import omni.kit.commands
import omni.kit.menu.utils
import omni.renderer_capture
from omni.kit.menu.utils import MenuLayout
SETTINGS_APPLICATION_MODE_PATH = "/app/application_mode"
class MenuHelper:
    """Swaps the main-menu layout based on the application mode.

    Builds two layouts from allowlists — an "empty" layout (present/review
    modes) and a "modify" layout — and applies the right one asynchronously
    whenever the menu definitions or the application mode change.
    """

    def __init__(self) -> None:
        self._settings = carb.settings.get_settings()
        self._current_layout = None
        self._pending_layout = None
        self._changing_layout_task: asyncio.Task = None
        self._menu_layout_empty = []
        self._menu_layout_modify = []
        # Hook re-runs _menu_hook whenever menu definitions change.
        omni.kit.menu.utils.add_hook(self._menu_hook)
        self._app_mode_sub = self._settings.subscribe_to_node_change_events(
            SETTINGS_APPLICATION_MODE_PATH, self._on_application_mode_changed
        )
        # Apply the layout once for the current state.
        self._menu_hook()

    def destroy(self) -> None:
        """Remove the hook, cancel any in-flight layout swap and release subscriptions."""
        omni.kit.menu.utils.remove_hook(self._menu_hook)
        if self._changing_layout_task and not self._changing_layout_task.done():
            self._changing_layout_task.cancel()
        self._changing_layout_task = None
        if self._app_mode_sub:
            self._settings.unsubscribe_to_change_events(self._app_mode_sub)
            self._app_mode_sub = None
        self._app_ready_sub = None
        if self._current_layout:
            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None

    def _menu_hook(self, *args, **kwargs) -> None:
        """Rebuild both candidate layouts from the current menu definitions."""
        # Debug escape hatch: leave menus untouched when the debug flag is set.
        if self._settings.get_as_bool("/app/view/debug/menus"):
            return
        LAYOUT_EMPTY_ALLOWED_MENUS = set()
        LAYOUT_MODIFY_ALLOWED_MENUS = {"File", "Edit", "Window", "Tools", "Help"}
        # make NEW list object instead of clear original
        # the original list may be held by self._current_layout and omni.kit.menu.utils
        self._menu_layout_empty = []
        self._menu_layout_modify = []
        menu_instance = omni.kit.menu.utils.get_instance()
        if not menu_instance:  # pragma: no cover
            return
        # Build new layouts using allowlists
        for key in menu_instance._menu_defs:
            # Widget-style pseudo-menus are left alone.
            if key.lower().endswith("widget"):
                continue
            if key not in LAYOUT_EMPTY_ALLOWED_MENUS:
                self._menu_layout_empty.append(MenuLayout.Menu(key, remove=True))
            if key not in LAYOUT_MODIFY_ALLOWED_MENUS:
                self._menu_layout_modify.append(MenuLayout.Menu(key, remove=True))
            # Remove 'Viewport 2' entry
            if key == "Window":
                for menu_item_1 in menu_instance._menu_defs[key]:
                    for menu_item_2 in menu_item_1:
                        if menu_item_2.name == "Viewport":
                            menu_item_2.sub_menu = [mi for mi in menu_item_2.sub_menu if mi.name != "Viewport 2"]
        # Only schedule a swap if one is not already pending.
        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    def _on_application_mode_changed(self, *args) -> None:
        """Settings callback: schedule a layout swap for the new mode."""
        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    async def _delayed_change_layout(self):
        """Apply the layout matching the current application mode (async, off the callback)."""
        mode = self._settings.get_as_string(SETTINGS_APPLICATION_MODE_PATH)
        if mode in ["present", "review"]:
            pending_layout = self._menu_layout_empty
        else:
            pending_layout = self._menu_layout_modify
        # Don't change layout inside of menu callback _on_application_mode_changed
        # omni.ui throws error
        if self._current_layout:
            # OMFP-2737: Do no rebuild menu (change menu layout) if layout is same
            # Here only check number of layout menu items and name of every of layout menu item
            same_layout = len(self._current_layout) == len(pending_layout)
            if same_layout:
                for index, item in enumerate(self._current_layout):
                    if item.name != pending_layout[index].name:
                        same_layout = False
            if same_layout:
                return
            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None
        omni.kit.menu.utils.add_layout(pending_layout)  # type: ignore
        # Keep a copy so _menu_hook can safely rebuild the source lists.
        self._current_layout = pending_layout.copy()
        self._changing_layout_task = None
| 4,434 | Python | 37.565217 | 113 | 0.608029 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_release_config.py | import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
class TestConfig(omni.kit.test.AsyncTestCase):
    """Release-configuration sanity checks (most historical checks are kept
    commented out for reference)."""

    async def test_l1_public_release_configuration(self):
        """Verify settings that must hold for a public (RC) release build."""
        settings = carb.settings.get_settings()
        app_version = settings.get("/app/version")
        # This test covers a moment in time when we switch version to RC.
        # Following test cases must be satisfied.
        # NOTE: is_rc is currently only consumed by the commented-out checks below.
        is_rc = "-rc." in app_version
        # title_format_string = settings.get("exts/omni.kit.window.modifier.titlebar/titleFormatString")
        # if is_rc:
        # Make sure the title format string doesn't use app version if app version contains rc
        # title_using_app_version = "/app/version" in title_format_string
        # self.assertFalse(is_rc and title_using_app_version, "check failed: title format string contains app version which contains 'rc'")
        # Make sure the title format string has "Beta" in it
        # title_has_beta = "Beta" in title_format_string
        # self.assertTrue(title_has_beta, "check failed: title format string does not have 'Beta ' in it")
        # if is_rc:
        # Make sure the title format string doesn't use app version if app version contains rc
        # title_using_app_version = "/app/version" in title_format_string
        # self.assertFalse(is_rc and title_using_app_version, "check failed: title format string contains app version which contains 'rc'")
        # Make sure the title format string has "Beta" in it
        # title_has_beta = "Beta" in title_format_string
        # self.assertTrue(title_has_beta, "check failed: title format string does not have 'Beta ' in it")
        # Make sure we set build to external when going into RC release mode
        # external = settings.get("/privacy/externalBuild") or False
        # self.assertEqual(
        #     external,
        #     is_rc,
        #     "check failed: is this an RC build? %s Is /privacy/externalBuild set to true? %s" % (is_rc, external),
        # )
        # if is_rc:
        #     # Make sure we remove some extensions from public release
        #     EXTENSIONS = [
        #         # "omni.kit.profiler.tracy",
        #         "omni.kit.window.jira",
        #         "omni.kit.testing.services",
        #         "omni.kit.tests.usd_stress",
        #         "omni.kit.tests.basic_validation",
        #         # "omni.kit.extension.reports",
        #     ]
        #     manager = omni.kit.app.get_app().get_extension_manager()
        #     ext_names = {e["name"] for e in manager.get_extensions()}
        #     for ext in EXTENSIONS:
        #         self.assertEqual(
        #             ext in ext_names,
        #             False,
        #             f"looks like {ext} was not removed from public build",
        #         )

    async def test_l1_usd_explorer_and_usd_explorer_full_have_same_version(self):
        """Both packaging variants of the app must ship with identical versions."""
        manager = omni.kit.app.get_app().get_extension_manager()
        EXTENSIONS = [
            "omni.usd_explorer",
            "omni.usd_explorer.full",
        ]
        # need to find both extensions and they need the same version id
        usd_explorer_exts = [e for e in manager.get_extensions() if e.get("name", "") in EXTENSIONS]
        self.assertEqual(len(usd_explorer_exts), 2)
        self.assertEqual(
            usd_explorer_exts[0]["version"],
            usd_explorer_exts[1]["version"],
            "omni.usd_explorer.kit and omni.usd_explorer.full.kit have different versions",
        )
| 3,572 | Python | 43.662499 | 143 | 0.594905 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_state_manager.py | ## Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import carb.settings
import omni.kit.app
import omni.ui as ui
from omni.kit.test import AsyncTestCase
from ..ui_state_manager import UIStateManager, MODAL_TOOL_ACTIVE_PATH
class TestUIStateManager(AsyncTestCase):
    """Exercises UIStateManager: modal hide/restore, visibility mirroring
    and settings dependencies."""

    async def setUp(self):
        self._sm = UIStateManager()
        self._settings = carb.settings.get_settings()

    async def tearDown(self):
        self._sm = None

    async def test_destroy(self):
        """destroy() must succeed after every registration API has been used."""
        self._sm.add_hide_on_modal('dummy', False)
        self._sm.add_settings_copy_dependency('a', 'b')
        self._sm.add_settings_dependency('c', 'd', {1: 2})
        self._sm.add_window_visibility_setting('my_window', 'my_setting')
        self._sm.destroy()

    async def test_hide_on_modal(self):
        """Windows hide on modal activation; only restore=True windows come back."""
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)
        self._sm.add_hide_on_modal('NO_RESTORE', False)
        self._sm.add_hide_on_modal(['A_RESTORE', 'B_RESTORE'], True)
        window_no_restore = ui.Window('NO_RESTORE')
        window_restore_1 = ui.Window('A_RESTORE')
        window_restore_2 = ui.Window('B_RESTORE')
        window_no_restore.visible = True
        window_restore_1.visible = True
        window_restore_2.visible = False
        await self._wait()
        # Activating the modal tool hides all registered windows.
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True)
        await self._wait()
        self.assertFalse(window_no_restore.visible)
        self.assertFalse(window_restore_1.visible)
        self.assertFalse(window_restore_2.visible)
        # Deactivating restores only the restore=True window that was visible.
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)
        await self._wait()
        self.assertFalse(window_no_restore.visible)
        self.assertTrue(window_restore_1.visible)
        self.assertFalse(window_restore_2.visible)
        # Once removed from the list, the window is no longer hidden on modal.
        self._sm.remove_hide_on_modal(window_restore_1.title)
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True)
        await self._wait()
        self.assertTrue(window_restore_1.visible)
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)

    async def test_window_visibility_setting(self):
        """Window visibility is mirrored into each registered setting path."""
        window_name = 'Dummy'
        setting_path = '/apps/dummy'
        setting_path2 = '/apps/dummy2'
        window = ui.Window(window_name)
        window.visible = True
        await self._wait()
        self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path)
        self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path2)
        self.assertIsNotNone(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path2))
        window.visible = False
        self.assertFalse(self._settings.get(setting_path))
        self.assertFalse(self._settings.get(setting_path2))
        window.visible = True
        self.assertTrue(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path2))
        # After removal the first setting stops tracking; the second keeps tracking.
        self._sm.remove_window_visibility_setting(window_name=window_name, setting_path=setting_path)
        window.visible = False
        self.assertTrue(self._settings.get(setting_path))
        self.assertFalse(self._settings.get(setting_path2))
        self._sm.remove_all_window_visibility_settings(window_name=window_name)
        window.visible = True
        self.assertFalse(self._settings.get(setting_path2))

    async def test_setting_dependency(self):
        """Copy and value-mapped dependencies propagate one way only."""
        setting_path_copy_from = '/app/copy_from'
        setting_path_copy_to = '/ext/copy_to'
        setting_path_map_from = '/ext/map_from'
        setting_path_map_to = '/something/map_to'
        self._sm.add_settings_copy_dependency(setting_path_copy_from, setting_path_copy_to)
        self._settings.set_string(setting_path_copy_from, 'hello_world')
        self.assertEqual(self._settings.get(setting_path_copy_from), self._settings.get(setting_path_copy_to))
        # doesn't work the other way around
        self._settings.set_string(setting_path_copy_to, 'no_copy_back')
        self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world')
        self._sm.add_settings_dependency(setting_path_map_from, setting_path_map_to, {1: 2, 3: 4})
        self._settings.set_int(setting_path_map_from, 1)
        self.assertEqual(self._settings.get(setting_path_map_to), 2)
        self._settings.set_int(setting_path_map_from, 3)
        self.assertEqual(self._settings.get(setting_path_map_to), 4)
        # not in the map
        self._settings.set_int(setting_path_map_from, 42)
        self.assertEqual(self._settings.get(setting_path_map_to), 4)
        self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world')
        self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back')
        # Removing the dependency stops propagation.
        self._sm.remove_settings_dependency(setting_path_copy_from, setting_path_copy_to)
        self._settings.set_string(setting_path_copy_from, 'this_is_not_copied')
        self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back')
async def _wait(self, frames: int = 5):
    """Yield to the Kit update loop for *frames* frames so async callbacks settle."""
    app = omni.kit.app.get_app()
    remaining = frames
    while remaining > 0:
        await app.next_update_async()
        remaining -= 1
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/__init__.py | # run startup tests first
from .test_app_startup import *
# run all other tests after
from .test_extensions import *
from .test_release_config import *
from .test import *
from .test_state_manager import *
| 206 | Python | 24.874997 | 34 | 0.757282 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test.py | import omni.kit.app
from omni.ui.tests.test_base import OmniUiTest
from omni.kit import ui_test
ext_id = 'omni.usd_explorer.setup'
class TestSetupToolExtension(OmniUiTest):
async def test_extension(self):
manager = omni.kit.app.get_app().get_extension_manager()
self.assertTrue(ext_id)
self.assertTrue(manager.is_extension_enabled(ext_id))
app = omni.kit.app.get_app()
for _ in range(500):
await app.next_update_async()
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertTrue(not manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
async def test_menubar_helper_camera_dependency(self):
manager = omni.kit.app.get_app().get_extension_manager()
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled('omni.kit.viewport.menubar.camera', True)
await ui_test.human_delay()
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
async def test_menu_helper(self):
from ..menu_helper import MenuHelper
menu_helper = MenuHelper()
menu_helper.destroy()
async def test_menubar_helper_menu(self):
from ..menubar_helper import MenubarHelper
menubar_helper = MenubarHelper()
menubar_helper._create_camera_speed(None, None)
menubar_helper.destroy()
async def test_menu_helper_debug_setting(self):
SETTINGS_VIEW_DEBUG_MENUS = '/app/view/debug/menus'
import carb.settings
settings = carb.settings.get_settings()
manager = omni.kit.app.get_app().get_extension_manager()
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
orig_value = settings.get(SETTINGS_VIEW_DEBUG_MENUS)
settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, True)
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, orig_value)
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
async def test_menu_helper_application_mode_change(self):
from ..menu_helper import SETTINGS_APPLICATION_MODE_PATH
import carb.settings
settings = carb.settings.get_settings()
settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
await ui_test.human_delay()
settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'welcome')
await ui_test.human_delay()
settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
await ui_test.human_delay()
settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'comment')
await ui_test.human_delay()
settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
await ui_test.human_delay()
async def test_menu_helper_widget_menu(self):
import omni.kit.menu.utils
omni.kit.menu.utils.add_menu_items([], name='test widget')
from ..menu_helper import MenuHelper
menu_helper = MenuHelper()
menu_helper.destroy()
async def test_startup_expand_viewport(self):
from ..setup import SETTINGS_STARTUP_EXPAND_VIEWPORT
import carb.settings
settings = carb.settings.get_settings()
orig_value = settings.get(SETTINGS_STARTUP_EXPAND_VIEWPORT)
settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, True)
manager = omni.kit.app.get_app().get_extension_manager()
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, orig_value)
manager.set_extension_enabled(ext_id, False)
await ui_test.human_delay()
self.assertFalse(manager.is_extension_enabled(ext_id))
manager.set_extension_enabled(ext_id, True)
await ui_test.human_delay()
self.assertTrue(manager.is_extension_enabled(ext_id))
async def test_navigation_invalid_dict(self):
from ..navigation import Navigation
navigation = Navigation()
navigation._show_tooltips = False
navigation._dict = 42
navigation._on_application_mode_changed(None, None)
navigation._on_showtips_click()
async def test_navigation_current_tool_mode_change(self):
    """Drive the current-tool setting through a sequence of values while in 'modify' mode."""
    import carb.settings
    from ..navigation import CURRENT_TOOL_PATH, APPLICATION_MODE_PATH

    settings = carb.settings.get_settings()
    settings.set_string(APPLICATION_MODE_PATH, 'modify')
    await ui_test.human_delay()

    # Same tool sequence as exercised interactively; none of these may raise.
    tool_sequence = (
        'markup', 'navigation', 'markup', 'welcome',
        'navigation', 'markup', 'navigation',
    )
    for tool in tool_sequence:
        settings.set_string(CURRENT_TOOL_PATH, tool)
        await ui_test.human_delay()
async def test_setup_clear_startup_scene_edits(self):
    """_clear_startup_scene_edits must leave the USD context with no pending edits."""
    from ..setup import _clear_startup_scene_edits

    await _clear_startup_scene_edits()

    import omni.usd
    self.assertFalse(omni.usd.get_context().has_pending_edit())
async def test_stage_template(self):
    """Creating a new stage from the 'SunnySky' template must not raise."""
    import omni.kit.stage_templates

    omni.kit.stage_templates.new_stage(template='SunnySky')
| 6,826 | Python | 34.190721 | 79 | 0.665397 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_app_startup.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
from omni.kit.test import AsyncTestCase
class TestAppStartup(AsyncTestCase):
    """Startup telemetry tests: report startup time and warning count to nvdf.

    Both tests are best-effort: the reporting helper may be unavailable, in
    which case the test still passes (the trailing assertTrue keeps the test
    runner happy with an explicit assertion).
    """

    async def _settle(self, frames: int = 60) -> None:
        """Let the app run *frames* update cycles so startup work completes."""
        for _ in range(frames):
            await omni.kit.app.get_app().next_update_async()

    async def test_l1_app_startup_time(self):
        """Get startup time - send to nvdf"""
        await self._settle()
        try:
            from omni.kit.core.tests import app_startup_time
            app_startup_time(self.id())
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; reporting remains best-effort.
        except Exception:  # noqa
            pass
        self.assertTrue(True)

    async def test_l1_app_startup_warning_count(self):
        """Get the count of warnings during startup - send to nvdf"""
        await self._settle()
        try:
            from omni.kit.core.tests import app_startup_warning_count
            app_startup_warning_count(self.id())
        # Narrowed from a bare `except:`; see note above in this class.
        except Exception:  # noqa
            pass
        self.assertTrue(True)
| 1,323 | Python | 32.948717 | 77 | 0.657596 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_extensions.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import sys
import carb.settings
import omni.kit.app
import omni.kit.actions.core
from omni.kit.core.tests import validate_extensions_load, validate_extensions_tests
from omni.kit.test import AsyncTestCase
from pxr import Usd, UsdGeom, Gf
class TestUSDExplorerExtensions(AsyncTestCase):
    """Validation and regression tests for the USD Explorer extension set."""

    async def test_l1_extensions_have_tests(self):
        """Loop all enabled extensions to see if they have at least one (1) unittest"""
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        # This list should be empty or near empty ideally
        EXCLUSION_LIST = [
            # extensions from Kit
            "omni.mdl",
            "omni.ansel.init",
            # extensions from USD Explorer
        ]
        # These extensions only run tests on win32 for now
        if sys.platform != "win32":
            EXCLUSION_LIST.append("omni.hydra.scene_api")
            EXCLUSION_LIST.append("omni.rtx.tests")
        # validate_extensions_tests returns the number of offenders; zero means
        # every non-excluded extension ships at least one test.
        self.assertEqual(validate_extensions_tests(EXCLUSION_LIST), 0)

    async def test_l1_extensions_load(self):
        """Loop all enabled extensions to see if they loaded correctly"""
        # Zero means no extension failed to load.
        self.assertEqual(validate_extensions_load(), 0)

    async def test_regression_omfp_2304(self):
        """Regression test for OMFP-2304"""
        # Assert that omni.kit.collaboration.selection_outline is among the
        # extensions known to the extension manager.
        loaded_omni_kit_collaboration_selection_outline = False
        manager = omni.kit.app.get_app().get_extension_manager()
        for ext in manager.get_extensions():
            if ext["name"] == "omni.kit.collaboration.selection_outline":
                loaded_omni_kit_collaboration_selection_outline = True
                break
        self.assertTrue(loaded_omni_kit_collaboration_selection_outline)

    async def _wait(self, frames: int = 10):
        # Advance the app a fixed number of update cycles.
        for _ in range(frames):
            await omni.kit.app.get_app().next_update_async()

    async def wait_stage_loading(self):
        # Poll the stage-loading status until no files remain in flight,
        # then wait an extra 100 frames for the scene to settle.
        while True:
            _, files_loaded, total_files = omni.usd.get_context().get_stage_loading_status()
            if files_loaded or total_files:
                await self._wait()
                continue
            break
        await self._wait(100)

    async def _get_1_1_1_rotation(self) -> Gf.Vec3d:
        """Loads a stage and returns the transformation of the (1,1,1) vector by the directional light's rotation"""
        await self._wait()
        # Create a fresh stage from the current default template.
        omni.kit.actions.core.execute_action("omni.kit.window.file", "new")
        await self.wait_stage_loading()
        context = omni.usd.get_context()
        self.assertIsNotNone(context)
        stage = context.get_stage()
        self.assertIsNotNone(stage)
        # The stage template is expected to provide this light prim.
        prim_path = '/Environment/DistantLight'
        prim = stage.GetPrimAtPath(prim_path)
        self.assertTrue(prim.IsValid())
        # Extract the prim's transformation matrix in world space
        xformAPI = UsdGeom.XformCache()
        transform_matrix_world = xformAPI.GetLocalToWorldTransform(prim)
        unit_point = Gf.Vec3d(1, 1, 1)
        transformed_point = transform_matrix_world.Transform(unit_point)
        return transformed_point

    async def test_regression_omfp_OMFP_3314(self):
        """Regression test for OMFP-3314"""
        settings = carb.settings.get_settings()
        UP_AXIS_PATH = "/persistent/app/stage/upAxis"
        settings.set("/persistent/app/newStage/defaultTemplate", "SunnySky")
        # Sample the light direction once per stage up-axis.
        settings.set_string(UP_AXIS_PATH, "Z")
        point_z_up = await self._get_1_1_1_rotation()
        settings.set_string(UP_AXIS_PATH, "Y")
        point_y_up = await self._get_1_1_1_rotation()
        # with the default camera position:
        # in y-up: z points bottom left, x points bottom right, y points up
        # in z-up: x points bottom left, y points bottom right, z points up
        # So the transformed components must match under a cyclic axis mapping.
        places = 4
        self.assertAlmostEqual(point_y_up[2], point_z_up[0], places=places)
        self.assertAlmostEqual(point_y_up[0], point_z_up[1], places=places)
        self.assertAlmostEqual(point_y_up[1], point_z_up[2], places=places)
| 4,461 | Python | 40.314814 | 116 | 0.656355 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.32] - 2023-11-02
### Changed
- OMFP-3224: Added regression test
- Added unit tests for state manager
## [1.0.31] - 2023-10-25
### Changed
- OMFP-3094: Restored Window/Viewport menu
## [1.0.30] - 2023-10-26
### Changed
- OMFP-2904: Show "Examples" by default in Layout mode
## [1.0.29] - 2023-10-25
### Changed
- OMFP-3224: Fix stage template light directions.
## [1.0.28] - 2023-10-23
### Changed
- OMFP-2654: Upgraded carb.imgui with omni.kit.imgui
## [1.0.27] - 2023-10-20
### Changed
- OMFP-2649: Missed the Layout item, it is now hidden as requested.
## [1.0.26] - 2023-10-20
### Changed
- Update embedded light rigs and textures
## [1.0.25] - 2023-10-19
### Changed
- Added regression test for OMFP-2304
## [1.0.24] - 2023-10-19
### Changed
- OMFP-1981: always load the default layout when starting up the app
## [1.0.23] - 2023-10-18
### Changed
- OMFP-2649: Hiding menu entries.
## [1.0.22] - 2023-10-18
### Changed
- Updated About dialog PNG to match the new application icon.
## [1.0.21] - 2023-10-18
### Changed
- OMFP-2737: Do not rebuild the menu (change menu layout) if the layout is the same
## [1.0.20] - 2023-10-18
### Changed
- Make windows that should not appear in Review mode invisible: the OMFP-2252 activity progress window and the OMFP-1981 scene optimizer window.
- OMFP-1981: when user switch between modes, make sure the user defined layout in Layout mode is kept.
## [1.0.19] - 2023-10-17
### Changed
- OMFP-2547 - remove markup from modal list, markup window visibility is now handled in omni.kit.markup.core
## [1.0.18] - 2023-10-17
### Changed
- Fixed test
## [1.0.17] - 2023-10-16
### Changed
- Navigation bar visibility fixes
## [1.0.16] - 2023-10-13
### Changed
- Waypoint and markup visibilities are bound to their list windows
## [1.0.15] - 2023-10-12
### Changed
- OMFP-2417 - Rename 'comment' -> 'review' and 'modify' -> 'layout'
## [1.0.14] - 2023-10-12
### Changed
- Added more unit tests.
## [1.0.13] - 2023-10-11
### Changed
- OMFP-2328: Fix "Sunnysky" oriented incorrectly
## [1.0.12] - 2023-10-10
### Changed
- OMFP-2226 - Remove second Viewport menu item from layouts.
## [1.0.11] - 2023-10-11
### Changed
- Added UI state manager.
## [1.0.10] - 2023-10-10
### Changed
- Deactivate tools when app mode is changed.
## [1.0.9] - 2023-10-09
### Changed
- OMFP-2200 - Disabling the viewport expansion, this should keep us locked to a 16:9 aspect ratio.
## [1.0.8] - 2023-10-06
### Changed
- Added a new stage template and made it default
## [1.0.7] - 2023-10-06
### Changed
- Enable UI aware "expand_viewport" mode rather than lower-level fill_viewport mode
## [1.0.6] - 2023-10-05
### Changed
- Used allowlists for building main menu entries to guard against unexpected menus.
## [1.0.5] - 2023-10-05
### Fixed
- Regression in hiding viewport toolbar.
## [1.0.4] - 2023-10-04
### Changed
- Modify mode now shows selected menus on main menubar.
## [1.0.3] - 2023-10-04
- Hide Viewport top toolbar in Comment Mode
## [1.0.2] - 2023-10-03
- Navigation Toolbar hidden by default in Modify Mode
## [1.0.1] - 2023-09-27
- Renamed to omni.usd_explorer.setup
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 3,289 | Markdown | 23.37037 | 141 | 0.672545 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/docs/README.md | # omni.usd_explorer.setup | 25 | Markdown | 24.999975 | 25 | 0.8 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/extension.py | # Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import omni.ext
import omni.ui as ui
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    """Log the call and return *x* raised to the power of itself (x ** x).

    Functions and variables here are importable by other extensions as usual,
    e.g. ``example.python_ext.some_public_function(x)``.
    """
    print(f"[omni.hello.world] some_public_function was called with {x}")
    result = x ** x
    return result
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    """Example extension: a window with a click counter and a reset button.

    Any class derived from `omni.ext.IExt` in the top-level module (declared in
    `python.modules` of `extension.toml`) is instantiated when the extension is
    enabled; `on_startup(ext_id)` is then called, and `on_shutdown()` when the
    extension is disabled.
    """

    def on_startup(self, ext_id):
        # ext_id identifies this extension; it can be passed to the extension
        # manager to query additional information, e.g. its location on disk.
        print("[omni.hello.world] MyExtension startup")

        self._count = 0
        self._window = ui.Window("My Window", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                count_label = ui.Label("")

                def increment():
                    self._count += 1
                    count_label.text = f"count: {self._count}"

                def reset():
                    self._count = 0
                    count_label.text = "empty"

                # Initialize to a clean state so the label is never blank.
                reset()

                with ui.HStack():
                    ui.Button("Add", clicked_fn=increment)
                    ui.Button("Reset", clicked_fn=reset)

    def on_shutdown(self):
        print("[omni.hello.world] MyExtension shutdown")
| 2,141 | Python | 36.578947 | 119 | 0.64269 |
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/tests/test_hello_world.py | # Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.hello.world
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    """UI tests for omni.hello.world: public API result and window button behavior."""

    # Before running each test
    async def setUp(self):
        pass

    # After running each test
    async def tearDown(self):
        pass

    # Actual test, notice it is "async" function, so "await" can be used if needed
    async def test_hello_public_function(self):
        # some_public_function returns x ** x, so 4 -> 256.
        result = omni.hello.world.some_public_function(4)
        self.assertEqual(result, 256)

    async def test_window_button(self):
        # Find a label in our window
        label = ui_test.find("My Window//Frame/**/Label[*]")
        # Find buttons in our window
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
        # Click reset button
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")
        # Each "Add" click increments the counter shown by the label.
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
| 2,253 | Python | 35.950819 | 142 | 0.70395 |
NVIDIA-Omniverse/kit-app-template/source/launcher/description.toml | name = "USD Explorer" # displayed application name
shortName = "USD Explorer" # displayed application name in smaller card and library view
version = "${version}" # version must be semantic
kind = "app" # enum of "app", "connector", and "experience" for now
latest = true # boolean for if this version is the latest version
slug = "my_company.usd_explorer" # unique identifier for component, all lower case, persists between versions
productArea = "My Company" # displayed before application name in launcher
category = "Apps" # category of content
channel = "beta" # 3 filter types [ "alpha", "beta", "release" ]
enterpriseStatus = false # set true if you want this package to show in enterprise launcher
#values for filtering content, not implemented yet
tags = [
"Manufacturing",
"Product Design",
"Scene Composition",
"Visualization",
"Rendering"
]
# string array; each entry is rendered as a new line — keep each line under 256 characters and use at most 4 lines
description = [
"My Company USD Explorer is an Omniverse app for Reviewing and Constructing large facilities such as factories, warehouses and more. It is built using NVIDIA Omniverse™ Kit. The Scene Description and in-memory model is based on Pixar's USD. Omniverse USD Composer takes advantage of the advanced workflows of USD like Layers, Variants, Instancing and much more.",
"When connected to a Omniverse Nucleus server, worlds can be authored LIVE across multiple Omniverse applications, machines and users for advanced collaborative workflows."
]
#array of links for more info on product
[[links]]
title = "Tutorials"
url = "http://omniverse.nvidia.com/tutorials"
[[links]]
title = "Forums"
url = "https://forums.developer.nvidia.com/c/omniverse/300"
[developer]
#name of developer
name = 'My Company'
# hyperlink on developer name (can be left as empty string)
url = 'https://www.my-company.com/'
[publisher]
#name of publisher
name = 'My Company'
# hyperlink on publisher name (can be left as empty string)
url = 'https://www.my-company.com/'
[url]
windows-x86_64 = 'windows-x86_64/package.zip'
linux-x86_64 = 'linux-x86_64/package.zip'
| 2,246 | TOML | 43.939999 | 363 | 0.704809 |
NVIDIA-Omniverse/kit-app-template/source/launcher/requirements.toml | # Optional note that will be shown below system requirements.
# Supports markdown.
note = "Note: Omniverse is built to run on any RTX-powered machine. For ideal performance, we recommend using GeForce RTX™ 2080, Quadro RTX™ 5000, or higher. For latest drivers, visit [NVIDIA Driver Downloads](https://www.nvidia.com/Download/index.aspx). For Quadro, select 'Quadro New Feature Driver (QNF)'."
# System requirements specs.
# Supports line breaks.
[minimum]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "4"
ram = "16 GB"
storage = "512 GB SSD"
vram = "6 GB"
gpu = "Any RTX GPU"
[recommended]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "8"
ram = "32 GB"
storage = "512 GB M.2 SSD"
vram = "8 GB"
gpu = "GeForce RTX 2080\nQuadro RTX 5000"
| 734 | TOML | 33.999998 | 308 | 0.723433 |
NVIDIA-Omniverse/kit-app-template/source/launcher/launcher.toml | ## install and launch instructions by environment
[defaults.windows-x86_64]
url = ""
entrypoint = "${productRoot}/omni.usd_explorer.bat"
args = ["--/app/environment/name='launcher'"]
[defaults.windows-x86_64.open]
command = "${productRoot}/omni.usd_explorer.bat"
args = ['--exec "open_stage.py ${file}"', "--/app/environment/name='launcher'"]
[defaults.windows-x86_64.environment]
[defaults.windows-x86_64.install]
pre-install = ""
pre-install-args = []
install = "${productRoot}/pull_kit_sdk.bat"
install-args = []
post-install = "" # "${productRoot}/omni.usd_explorer.warmup.bat"
post-install-args = ["--/app/environment/name='launcher_warmup'"]
[defaults.windows-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
[defaults.linux-x86_64]
url = ""
entrypoint = "${productRoot}/omni.usd_explorer.sh"
args = ["--/app/environment/name='launcher'"]
[defaults.linux-x86_64.environment]
[defaults.linux-x86_64.install]
pre-install = ""
pre-install-args = []
install = "${productRoot}/pull_kit_sdk.sh"
install-args = []
post-install = "" # "${productRoot}/omni.usd_explorer.warmup.sh"
post-install-args = ["--/app/environment/name='launcher_warmup'"]
[defaults.linux-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
| 1,400 | TOML | 27.019999 | 79 | 0.696429 |
NVIDIA-Omniverse/IsaacGymEnvs/setup.py | """Installation script for the 'isaacgymenvs' python package."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from setuptools import setup, find_packages
import os
# Repository root (kept for backward compatibility; not referenced below).
root_dir = os.path.dirname(os.path.realpath(__file__))

# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
    # RL
    "gym==0.23.1",
    "torch",
    "omegaconf",
    "termcolor",
    "jinja2",
    "hydra-core>=1.2",
    "rl-games>=1.6.0",
    "pyvirtualdisplay",
    "urdfpy==0.0.22",
    "pysdf==0.1.9",
    "warp-lang==0.10.1",
    "trimesh==3.23.5",
]

# Installation operation
setup(
    name="isaacgymenvs",
    author="NVIDIA",
    version="1.5.1",
    description="Benchmark environments for high-speed robot learning in NVIDIA IsaacGym.",
    keywords=["robotics", "rl"],
    include_package_data=True,
    python_requires=">=3.6",
    install_requires=INSTALL_REQUIRES,
    packages=find_packages("."),
    # One trove classifier per entry: a comma-separated version string is not a
    # valid classifier and is ignored/rejected by packaging tools.
    classifiers=[
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    zip_safe=False,
)
# EOF
| 1,107 | Python | 21.612244 | 99 | 0.644986 |
NVIDIA-Omniverse/IsaacGymEnvs/README.md | # Isaac Gym Benchmark Environments
[Website](https://developer.nvidia.com/isaac-gym) | [Technical Paper](https://arxiv.org/abs/2108.10470) | [Videos](https://sites.google.com/view/isaacgym-nvidia)
### About this repository
This repository contains example RL environments for the NVIDIA Isaac Gym high performance environments described [in our NeurIPS 2021 Datasets and Benchmarks paper](https://openreview.net/forum?id=fgFBtYgJQX_)
### Installation
Download the Isaac Gym Preview 4 release from the [website](https://developer.nvidia.com/isaac-gym), then
follow the installation instructions in the documentation. We highly recommend using a conda environment
to simplify set up.
Ensure that Isaac Gym works on your system by running one of the examples from the `python/examples`
directory, like `joint_monkey.py`. Follow troubleshooting steps described in the Isaac Gym Preview 4
install instructions if you have any trouble running the samples.
Once Isaac Gym is installed and samples work within your current python environment, install this repo:
```bash
pip install -e .
```
### Creating an environment
We offer an easy-to-use API for creating preset vectorized environments. For more info on what a vectorized environment is and its usage, please refer to the Gym library [documentation](https://www.gymlibrary.dev/content/vectorising/#vectorized-environments).
```python
import isaacgym
import isaacgymenvs
import torch
num_envs = 2000
envs = isaacgymenvs.make(
seed=0,
task="Ant",
num_envs=num_envs,
sim_device="cuda:0",
rl_device="cuda:0",
)
print("Observation space is", envs.observation_space)
print("Action space is", envs.action_space)
obs = envs.reset()
for _ in range(20):
random_actions = 2.0 * torch.rand((num_envs,) + envs.action_space.shape, device = 'cuda:0') - 1.0
envs.step(random_actions)
```
### Running the benchmarks
To train your first policy, run this line:
```bash
python train.py task=Cartpole
```
Cartpole should train to the point that the pole stays upright within a few seconds of starting.
Here's another example - Ant locomotion:
```bash
python train.py task=Ant
```
Note that by default we show a preview window, which will usually slow down training. You
can use the `v` key while running to disable viewer updates and allow training to proceed
faster. Hit the `v` key again to resume viewing after a few seconds of training, once the
ants have learned to run a bit better.
Use the `esc` key or close the viewer window to stop training early.
Alternatively, you can train headlessly, as follows:
```bash
python train.py task=Ant headless=True
```
Ant may take a minute or two to train a policy you can run. When running headlessly, you
can stop it early using Control-C in the command line window.
### Loading trained models // Checkpoints
Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME`
defaults to the task name, but can also be overridden via the `experiment` argument.
To load a trained checkpoint and continue training, use the `checkpoint` argument:
```bash
python train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth
```
To load a trained checkpoint and only perform inference (no training), pass `test=True`
as an argument, along with the checkpoint name. To avoid rendering overhead, you may
also want to run with fewer environments using `num_envs=64`:
```bash
python train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64
```
Note that If there are special characters such as `[` or `=` in the checkpoint names,
you will need to escape them and put quotes around the string. For example,
`checkpoint="./runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`
### Configuration and command line arguments
We use [Hydra](https://hydra.cc/docs/intro/) to manage the config. Note that this has some
differences from previous incarnations in older versions of Isaac Gym.
Key arguments to the `train.py` script are:
* `task=TASK` - selects which task to use. Any of `AllegroHand`, `AllegroHandDextremeADR`, `AllegroHandDextremeManualDR`, `AllegroKukaLSTM`, `AllegroKukaTwoArmsLSTM`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `FrankaCabinet`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM`, and `Trifinger` (these correspond to the config for each environment in the folder `isaacgymenvs/config/task`)
* `train=TRAIN` - selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - sets a seed value for randomizations, and overrides the default seed set up in the task config
* `sim_device=SIM_DEVICE_TYPE` - Device used for physics simulation. Set to `cuda:0` (default) to use GPU and to `cpu` for CPU. Follows PyTorch-like device syntax.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and also follows PyTorch-like device syntax.
* `graphics_device_id=GRAPHICS_DEVICE_ID` - Which Vulkan graphics device ID to use for rendering. Defaults to 0. **Note** - this may be different from CUDA device ID, and does **not** follow PyTorch-like device syntax.
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU and everything runs as fast as possible. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Set to path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the discount rate for a rl_games training run, you can use `train.params.config.gamma=0.999`. Similarly, variables in task configs can also be set. For example, `task.env.enableDebugVis=True`.
#### Hydra Notes
Default values for each of these are found in the `isaacgymenvs/config/config.yaml` file.
The way that the `task` and `train` portions of the config works are through the use of config groups.
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/)
The actual configs for `task` are in `isaacgymenvs/config/task/<TASK>.yaml` and for train in `isaacgymenvs/config/train/<TASK>PPO.yaml`.
In some places in the config you will find other variables referenced (for example,
`num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy.
This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).
## Tasks
Source code for tasks can be found in `isaacgymenvs/tasks`.
Each task subclasses the `VecEnv` base class in `isaacgymenvs/base/vec_task.py`.
Refer to [docs/framework.md](docs/framework.md) for how to create your own tasks.
Full details on each of the tasks available can be found in the [RL examples documentation](docs/rl_examples.md).
## Domain Randomization
IsaacGymEnvs includes a framework for Domain Randomization to improve Sim-to-Real transfer of trained
RL policies. You can read more about it [here](docs/domain_randomization.md).
## Reproducibility and Determinism
If deterministic training of RL policies is important for your work, you may wish to review our [Reproducibility and Determinism Documentation](docs/reproducibility.md).
## Multi-GPU Training
You can run multi-GPU training using `torchrun` (i.e., `torch.distributed`) using this repository.
Here is an example command for how to run in this way -
`torchrun --standalone --nnodes=1 --nproc_per_node=2 train.py multi_gpu=True task=Ant <OTHER_ARGS>`
Where the `--nproc_per_node=` flag specifies how many processes to run and note the `multi_gpu=True` flag must be set on the train script in order for multi-GPU training to run.
## Population Based Training
You can run population based training to help find good hyperparameters or to train on very difficult environments which would otherwise
be hard to learn anything on without it. See [the readme](docs/pbt.md) for details.
## WandB support
You can run [WandB](https://wandb.ai/) with Isaac Gym Envs by setting `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` set. Make sure you have WandB installed with `pip install wandb` before activating.
## Capture videos
We implement the standard `env.render(mode='rgb_array')` `gym` API to provide an image of the simulator viewer. Additionally, we can leverage `gym.wrappers.RecordVideo` to help record videos that show the agent's gameplay. Consider running the following file, which should produce a video in the `videos` folder.
```python
import gym
import isaacgym
import isaacgymenvs
import torch
num_envs = 64
envs = isaacgymenvs.make(
seed=0,
task="Ant",
num_envs=num_envs,
sim_device="cuda:0",
rl_device="cuda:0",
graphics_device_id=0,
headless=False,
multi_gpu=False,
virtual_screen_capture=True,
force_render=False,
)
envs.is_vector_env = True
envs = gym.wrappers.RecordVideo(
envs,
"./videos",
step_trigger=lambda step: step % 10000 == 0, # record the videos every 10000 steps
video_length=100 # for each video record up to 100 steps
)
envs.reset()
print("the image of Isaac Gym viewer is an array of shape", envs.render(mode="rgb_array").shape)
for _ in range(100):
actions = 2.0 * torch.rand((num_envs,) + envs.action_space.shape, device = 'cuda:0') - 1.0
envs.step(actions)
```
## Capture videos during training
You can automatically capture the videos of the agents gameplay by toggling the `capture_video=True` flag and tune the capture frequency `capture_video_freq=1500` and video length via `capture_video_len=100`. You can set `force_render=False` to disable rendering when the videos are not captured.
```
python train.py capture_video=True capture_video_freq=1500 capture_video_len=100 force_render=False
```
You can also automatically upload the videos to Weights and Biases:
```
python train.py task=Ant wandb_activate=True wandb_entity=nvidia wandb_project=rl_games capture_video=True force_render=False
```
## Pre-commit
We use [pre-commit](https://pre-commit.com/) to helps us automate short tasks that improve code quality. Before making a commit to the repository, please ensure `pre-commit run --all-files` runs without error.
## Troubleshooting
Please review the Isaac Gym installation instructions first if you run into any issues.
You can either submit issues through GitHub or through the [Isaac Gym forum here](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/isaac-gym/322).
## Citing
Please cite this work as:
```
@misc{makoviychuk2021isaac,
title={Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning},
author={Viktor Makoviychuk and Lukasz Wawrzyniak and Yunrong Guo and Michelle Lu and Kier Storey and Miles Macklin and David Hoeller and Nikita Rudin and Arthur Allshire and Ankur Handa and Gavriel State},
year={2021},
journal={arXiv preprint arXiv:2108.10470}
}
```
**Note** if you use the DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training work or the code related to Population Based Training, please cite the following paper:
```
@inproceedings{
petrenko2023dexpbt,
author = {Aleksei Petrenko, Arthur Allshire, Gavriel State, Ankur Handa, Viktor Makoviychuk},
title = {DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training},
booktitle = {RSS},
year = {2023}
}
```
**Note** if you use the DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality work or the code related to Automatic Domain Randomisation, please cite the following paper:
```
@inproceedings{
handa2023dextreme,
author = {Ankur Handa, Arthur Allshire, Viktor Makoviychuk, Aleksei Petrenko, Ritvik Singh, Jingzhou Liu, Denys Makoviichuk, Karl Van Wyk, Alexander Zhurkevich, Balakumar Sundaralingam, Yashraj Narang, Jean-Francois Lafleche, Dieter Fox, Gavriel State},
title = {DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality},
booktitle = {ICRA},
year = {2023}
}
```
**Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:
```
@misc{rudin2021learning,
title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
year={2021},
journal = {arXiv preprint arXiv:2109.11978}
}
```
**Note** if you use the Trifinger environment in your work, please ensure you cite the following work:
```
@misc{isaacgym-trifinger,
title = {{Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger}},
author = {Allshire, Arthur and Mittal, Mayank and Lodaya, Varun and Makoviychuk, Viktor and Makoviichuk, Denys and Widmaier, Felix and Wuthrich, Manuel and Bauer, Stefan and Handa, Ankur and Garg, Animesh},
year = {2021},
journal = {arXiv preprint arXiv:2108.09779}
}
```
**Note** if you use the AMP: Adversarial Motion Priors environment in your work, please ensure you cite the following work:
```
@article{
2021-TOG-AMP,
author = {Peng, Xue Bin and Ma, Ze and Abbeel, Pieter and Levine, Sergey and Kanazawa, Angjoo},
title = {AMP: Adversarial Motion Priors for Stylized Physics-Based Character Control},
journal = {ACM Trans. Graph.},
issue_date = {August 2021},
volume = {40},
number = {4},
month = jul,
year = {2021},
articleno = {1},
numpages = {15},
url = {http://doi.acm.org/10.1145/3450626.3459670},
doi = {10.1145/3450626.3459670},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {motion control, physics-based character animation, reinforcement learning},
}
```
**Note** if you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:
```
@inproceedings{
narang2022factory,
author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
title = {Factory: Fast contact for robotic assembly},
booktitle = {Robotics: Science and Systems},
year = {2022}
}
```
**Note** if you use the IndustReal training environments or algorithms in your work, please cite the following paper:
```
@inproceedings{
tang2023industreal,
author = {Bingjie Tang and Michael A Lin and Iretiayo Akinola and Ankur Handa and Gaurav S Sukhatme and Fabio Ramos and Dieter Fox and Yashraj Narang},
title = {IndustReal: Transferring contact-rich assembly tasks from simulation to reality},
booktitle = {Robotics: Science and Systems},
year = {2023}
}
``` | 15,616 | Markdown | 44.135838 | 455 | 0.75698 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/__init__.py | import hydra
from hydra import compose, initialize
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf
from isaacgymenvs.utils.reformat import omegaconf_to_dict
# Custom OmegaConf resolvers used by the hydra configs in ./cfg:
# ${eq:a,b} -> case-insensitive string equality
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower())
# ${contains:a,b} -> case-insensitive substring test (is `a` contained in `b`?)
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
# ${if:pred,a,b} -> inline conditional expression
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
# ${resolve_default:default,arg} -> fall back to `default` when `arg` is the empty string
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg)
def make(
    seed: int,
    task: str,
    num_envs: int,
    sim_device: str,
    rl_device: str,
    graphics_device_id: int = -1,
    headless: bool = False,
    multi_gpu: bool = False,
    virtual_screen_capture: bool = False,
    force_render: bool = True,
    cfg: DictConfig = None
):
    """Create a vectorized Isaac Gym environment for the given task.

    When ``cfg`` is not supplied, a fresh hydra config is composed from the
    packaged ``./cfg`` directory (re-using the task choice of any previously
    parsed hydra run); otherwise the caller's config is used as-is and only
    its ``task`` section is read.
    """
    from isaacgymenvs.utils.rlgames_utils import get_rlgames_env_creator
    if cfg is not None:
        # A fully-formed config was handed in -- just extract the task section.
        task_config = omegaconf_to_dict(cfg.task)
    else:
        # No config given: compose one via hydra.
        if HydraConfig.initialized():
            # Hydra already parsed a config elsewhere; remember which task was
            # selected there, then wipe the global state so we can re-compose.
            task = HydraConfig.get().runtime.choices['task']
            hydra.core.global_hydra.GlobalHydra.instance().clear()
        with initialize(config_path="./cfg"):
            cfg = compose(config_name="config", overrides=[f"task={task}"])
            task_config = omegaconf_to_dict(cfg.task)
            task_config['env']['numEnvs'] = num_envs
    env_factory = get_rlgames_env_creator(
        seed=seed,
        task_config=task_config,
        task_name=task_config["name"],
        sim_device=sim_device,
        rl_device=rl_device,
        graphics_device_id=graphics_device_id,
        headless=headless,
        multi_gpu=multi_gpu,
        virtual_screen_capture=virtual_screen_capture,
        force_render=force_render,
    )
    return env_factory()
| 1,953 | Python | 33.892857 | 100 | 0.656938 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/train.py | # train.py
# Script to train policies in Isaac Gym
#
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf import DictConfig, OmegaConf
def preprocess_train_config(cfg, config_dict):
    """Inject globally-configured values into the rl_games train config.

    An alternative to this is inferring them in task-specific .yaml files,
    but that would require repeating the same variable interpolations in
    each config. Mutates and returns ``config_dict``.
    """
    rlg_cfg = config_dict['params']['config']
    rlg_cfg['device'] = cfg.rl_device
    rlg_cfg['population_based_training'] = cfg.pbt.enabled
    rlg_cfg['pbt_idx'] = cfg.pbt.policy_idx if cfg.pbt.enabled else None
    rlg_cfg['full_experiment_name'] = cfg.get('full_experiment_name')
    print(f'Using rl_device: {cfg.rl_device}')
    print(f'Using sim_device: {cfg.sim_device}')
    print(rlg_cfg)
    # Optionally scale every MLP layer width by `model_size_multiplier`;
    # the key is absent from most configs, in which case nothing happens.
    try:
        scale = config_dict['params']['network']['mlp']['model_size_multiplier']
        if scale != 1:
            units = config_dict['params']['network']['mlp']['units']
            units[:] = [width * scale for width in units]
            print(f'Modified MLP units by x{scale} to {config_dict["params"]["network"]["mlp"]["units"]}')
    except KeyError:
        pass
    return config_dict
@hydra.main(version_base="1.1", config_name="config", config_path="./cfg")
def launch_rlg_hydra(cfg: DictConfig):
    """Hydra entry point: build the Isaac Gym vec-env and run rl_games training/eval."""
    import logging
    import os
    from datetime import datetime
    # noinspection PyUnresolvedReferences
    import isaacgym
    from isaacgymenvs.pbt.pbt import PbtAlgoObserver, initial_pbt_check
    from isaacgymenvs.utils.rlgames_utils import multi_gpu_get_rank
    from hydra.utils import to_absolute_path
    from isaacgymenvs.tasks import isaacgym_task_map
    import gym
    from isaacgymenvs.utils.reformat import omegaconf_to_dict, print_dict
    from isaacgymenvs.utils.utils import set_np_formatting, set_seed
    # PBT sanity check runs before anything heavyweight is created.
    if cfg.pbt.enabled:
        initial_pbt_check(cfg)
    from isaacgymenvs.utils.rlgames_utils import RLGPUEnv, RLGPUAlgoObserver, MultiObserver, ComplexObsRLGPUEnv
    from isaacgymenvs.utils.wandb_utils import WandbAlgoObserver
    from rl_games.common import env_configurations, vecenv
    from rl_games.torch_runner import Runner
    from rl_games.algos_torch import model_builder
    from isaacgymenvs.learning import amp_continuous
    from isaacgymenvs.learning import amp_players
    from isaacgymenvs.learning import amp_models
    from isaacgymenvs.learning import amp_network_builder
    import isaacgymenvs
    # Unique run name, used for the video-capture directory (and wandb naming).
    time_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    run_name = f"{cfg.wandb_name}_{time_str}"
    # ensure checkpoints can be specified as relative paths
    if cfg.checkpoint:
        cfg.checkpoint = to_absolute_path(cfg.checkpoint)
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)
    # set numpy formatting for printing only
    set_np_formatting()
    # global rank of the GPU (0 for single-GPU runs)
    global_rank = int(os.getenv("RANK", "0"))
    # sets seed. if seed is -1 will pick a random one
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic, rank=global_rank)
    def create_isaacgym_env(**kwargs):
        # Env factory passed to rl_games; optionally wraps for video capture.
        envs = isaacgymenvs.make(
            cfg.seed,
            cfg.task_name,
            cfg.task.env.numEnvs,
            cfg.sim_device,
            cfg.rl_device,
            cfg.graphics_device_id,
            cfg.headless,
            cfg.multi_gpu,
            cfg.capture_video,
            cfg.force_render,
            cfg,
            **kwargs,
        )
        if cfg.capture_video:
            envs.is_vector_env = True
            envs = gym.wrappers.RecordVideo(
                envs,
                f"videos/{run_name}",
                step_trigger=lambda step: step % cfg.capture_video_freq == 0,
                video_length=cfg.capture_video_len,
            )
        return envs
    env_configurations.register('rlgpu', {
        'vecenv_type': 'RLGPU',
        'env_creator': lambda **kwargs: create_isaacgym_env(**kwargs),
    })
    # Tasks that declare `dict_obs_cls` expose dict-shaped observations and need
    # the ComplexObs wrapper; everything else uses the flat RLGPUEnv.
    ige_env_cls = isaacgym_task_map[cfg.task_name]
    dict_cls = ige_env_cls.dict_obs_cls if hasattr(ige_env_cls, 'dict_obs_cls') and ige_env_cls.dict_obs_cls else False
    if dict_cls:
        obs_spec = {}
        actor_net_cfg = cfg.train.params.network
        obs_spec['obs'] = {'names': list(actor_net_cfg.inputs.keys()), 'concat': not actor_net_cfg.name == "complex_net", 'space_name': 'observation_space'}
        if "central_value_config" in cfg.train.params.config:
            critic_net_cfg = cfg.train.params.config.central_value_config.network
            obs_spec['states'] = {'names': list(critic_net_cfg.inputs.keys()), 'concat': not critic_net_cfg.name == "complex_net", 'space_name': 'state_space'}
        vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: ComplexObsRLGPUEnv(config_name, num_actors, obs_spec, **kwargs))
    else:
        vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
    rlg_config_dict = omegaconf_to_dict(cfg.train)
    rlg_config_dict = preprocess_train_config(cfg, rlg_config_dict)
    # Observers collect stats / drive PBT / push metrics to wandb during training.
    observers = [RLGPUAlgoObserver()]
    if cfg.pbt.enabled:
        pbt_observer = PbtAlgoObserver(cfg)
        observers.append(pbt_observer)
    if cfg.wandb_activate:
        cfg.seed += global_rank
        if global_rank == 0:
            # initialize wandb only once per multi-gpu run
            wandb_observer = WandbAlgoObserver(cfg)
            observers.append(wandb_observer)
    # register new AMP network builder and agent
    def build_runner(algo_observer):
        runner = Runner(algo_observer)
        runner.algo_factory.register_builder('amp_continuous', lambda **kwargs : amp_continuous.AMPAgent(**kwargs))
        runner.player_factory.register_builder('amp_continuous', lambda **kwargs : amp_players.AMPPlayerContinuous(**kwargs))
        model_builder.register_model('continuous_amp', lambda network, **kwargs : amp_models.ModelAMPContinuous(network))
        model_builder.register_network('amp', lambda **kwargs : amp_network_builder.AMPBuilder())
        return runner
    # convert CLI arguments into dictionary
    # create runner and set the settings
    runner = build_runner(MultiObserver(observers))
    runner.load(rlg_config_dict)
    runner.reset()
    # dump config dict so the run is reproducible from the experiment directory
    if not cfg.test:
        experiment_dir = os.path.join('runs', cfg.train.params.config.name +
        '_{date:%d-%H-%M-%S}'.format(date=datetime.now()))
        os.makedirs(experiment_dir, exist_ok=True)
        with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
            f.write(OmegaConf.to_yaml(cfg))
    # `test` mode plays the policy from `checkpoint`; otherwise train.
    runner.run({
        'train': not cfg.test,
        'play': cfg.test,
        'checkpoint': cfg.checkpoint,
        'sigma': cfg.sigma if cfg.sigma != '' else None
    })
if __name__ == "__main__":
    launch_rlg_hydra()
| 8,604 | Python | 38.113636 | 159 | 0.675035 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_datasets.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from rl_games.common import datasets
class AMPDataset(datasets.PPODataset):
    """PPO dataset variant used by AMP training.

    Minibatches are drawn through a shuffled index buffer that is re-shuffled
    every time a full pass over the batch completes, so consecutive epochs see
    the samples in a different order.
    """

    def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
        super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len)
        # Random permutation of sample indices, consumed minibatch by minibatch.
        self._idx_buf = torch.randperm(batch_size)

    def update_mu_sigma(self, mu, sigma):
        # AMP does not refresh mu/sigma inside the dataset.
        # (The dead `return` that followed the raise has been removed.)
        raise NotImplementedError()

    def _get_item(self, idx):
        """Return minibatch `idx` as a dict of tensors, indexed via the shuffle buffer."""
        start = idx * self.minibatch_size
        end = (idx + 1) * self.minibatch_size
        sample_idx = self._idx_buf[start:end]

        input_dict = {}
        for k, v in self.values_dict.items():
            if k not in self.special_names and v is not None:
                input_dict[k] = v[sample_idx]

        # Re-shuffle once the last minibatch of this pass has been served.
        if end >= self.batch_size:
            self._shuffle_idx_buf()
        return input_dict

    def _shuffle_idx_buf(self):
        # In-place refill keeps the buffer on its original device.
        self._idx_buf[:] = torch.randperm(self.batch_size)
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/replay_buffer.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
class ReplayBuffer():
    """Fixed-capacity circular buffer of tensor dictionaries with shuffled sampling.

    Storage is lazily allocated from the first batch stored. Sampling walks a
    pre-shuffled index permutation, re-shuffling after each full pass.
    """

    def __init__(self, buffer_size, device):
        self._head = 0                 # next write position in the ring
        self._total_count = 0          # total samples ever stored
        self._buffer_size = buffer_size
        self._device = device
        self._data_buf = None          # allocated on first store()
        self._sample_idx = torch.randperm(buffer_size)
        self._sample_head = 0

    def reset(self):
        """Forget all stored samples and restart the sampling permutation."""
        self._head = 0
        self._total_count = 0
        self._reset_sample_idx()

    def get_buffer_size(self):
        return self._buffer_size

    def get_total_count(self):
        return self._total_count

    def store(self, data_dict):
        """Append a batch of samples (dict of same-length tensors), wrapping at capacity."""
        if self._data_buf is None:
            self._init_data_buf(data_dict)

        n = next(iter(data_dict.values())).shape[0]
        capacity = self.get_buffer_size()
        assert(n < capacity)

        for key, dest in self._data_buf.items():
            src = data_dict[key]
            assert(n == src.shape[0])
            # Write up to the end of the ring, then wrap the remainder to the front.
            first = min(src.shape[0], capacity - self._head)
            dest[self._head:(self._head + first)] = src[:first]
            leftover = src.shape[0] - first
            if leftover > 0:
                dest[0:leftover] = src[first:]

        self._head = (self._head + n) % capacity
        self._total_count += n

    def sample(self, n):
        """Return a dict of n randomly-indexed samples (with replacement across passes)."""
        capacity = self.get_buffer_size()
        # Consume n consecutive entries of the shuffled permutation (wrapping).
        positions = torch.arange(self._sample_head, self._sample_head + n) % capacity
        rand_idx = self._sample_idx[positions]
        if self.get_total_count() < capacity:
            # Buffer not yet full: only indices below the write head hold data.
            rand_idx = rand_idx % self._head

        samples = {key: buf[rand_idx] for key, buf in self._data_buf.items()}

        self._sample_head += n
        if self._sample_head >= capacity:
            self._reset_sample_idx()
        return samples

    def _reset_sample_idx(self):
        # In-place refill keeps the permutation tensor on its original device.
        self._sample_idx[:] = torch.randperm(self.get_buffer_size())
        self._sample_head = 0

    def _init_data_buf(self, data_dict):
        # Allocate one zero tensor per key, shaped (capacity, *sample_shape).
        capacity = self.get_buffer_size()
        self._data_buf = {
            key: torch.zeros((capacity,) + value.shape[1:], device=self._device)
            for key, value in data_dict.items()
        }
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_network_builder.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import layers
from rl_games.algos_torch import network_builder
import torch
import torch.nn as nn
import numpy as np
DISC_LOGIT_INIT_SCALE = 1.0
class AMPBuilder(network_builder.A2CBuilder):
    """rl_games network builder for AMP: the standard A2C actor-critic network
    extended with a discriminator MLP over AMP observations."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        return

    class Network(network_builder.A2CBuilder.Network):
        def __init__(self, params, **kwargs):
            """Build the base A2C network, then attach the AMP discriminator head."""
            super().__init__(params, **kwargs)

            if self.is_continuous:
                if (not self.space_config['learn_sigma']):
                    # Fixed (non-learnable) action std, initialized from config.
                    actions_num = kwargs.get('actions_num')
                    sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
                    self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=False, dtype=torch.float32), requires_grad=False)
                    sigma_init(self.sigma)

            amp_input_shape = kwargs.get('amp_input_shape')
            self._build_disc(amp_input_shape)
            return

        def load(self, params):
            """Read discriminator hyper-parameters from the `disc` config section."""
            super().load(params)
            self._disc_units = params['disc']['units']
            self._disc_activation = params['disc']['activation']
            self._disc_initializer = params['disc']['initializer']
            return

        def eval_critic(self, obs):
            """Run only the critic branch and return the value estimate."""
            c_out = self.critic_cnn(obs)
            c_out = c_out.contiguous().view(c_out.size(0), -1)
            c_out = self.critic_mlp(c_out)
            value = self.value_act(self.value(c_out))
            return value

        def eval_disc(self, amp_obs):
            """Return raw discriminator logits for a batch of AMP observations."""
            disc_mlp_out = self._disc_mlp(amp_obs)
            disc_logits = self._disc_logits(disc_mlp_out)
            return disc_logits

        def get_disc_logit_weights(self):
            # Flattened final-layer weights (used by the logit regularizer).
            return torch.flatten(self._disc_logits.weight)

        def get_disc_weights(self):
            # Flattened weights of every linear layer (used for weight decay).
            weights = []
            for m in self._disc_mlp.modules():
                if isinstance(m, nn.Linear):
                    weights.append(torch.flatten(m.weight))

            weights.append(torch.flatten(self._disc_logits.weight))
            return weights

        def _build_disc(self, input_shape):
            """Construct the discriminator MLP plus its scalar logit layer."""
            self._disc_mlp = nn.Sequential()

            mlp_args = {
                'input_size' : input_shape[0],
                'units' : self._disc_units,
                'activation' : self._disc_activation,
                'dense_func' : torch.nn.Linear
            }
            self._disc_mlp = self._build_mlp(**mlp_args)

            mlp_out_size = self._disc_units[-1]
            self._disc_logits = torch.nn.Linear(mlp_out_size, 1)

            mlp_init = self.init_factory.create(**self._disc_initializer)
            for m in self._disc_mlp.modules():
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)

            # Small uniform init keeps initial discriminator outputs near zero.
            torch.nn.init.uniform_(self._disc_logits.weight, -DISC_LOGIT_INIT_SCALE, DISC_LOGIT_INIT_SCALE)
            torch.nn.init.zeros_(self._disc_logits.bias)
            return

    def build(self, name, **kwargs):
        net = AMPBuilder.Network(self.params, **kwargs)
        return net
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/hrl_continuous.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
from datetime import datetime
from gym import spaces
import numpy as np
import os
import time
import yaml
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import a2c_common
from rl_games.common import datasets
from rl_games.common import schedulers
from rl_games.common import vecenv
import torch
from torch import optim
import isaacgymenvs.learning.common_agent as common_agent
import isaacgymenvs.learning.gen_amp as gen_amp
import isaacgymenvs.learning.gen_amp_models as gen_amp_models
import isaacgymenvs.learning.gen_amp_network_builder as gen_amp_network_builder
from tensorboardX import SummaryWriter
class HRLAgent(common_agent.CommonAgent):
    """Hierarchical RL agent: a high-level PPO policy outputs latent codes that
    drive a frozen, pre-trained low-level controller (LLC) for several sim
    steps per high-level action."""

    def __init__(self, base_name, config):
        # Load the LLC's training config up front to recover its latent
        # dimension and network architecture.
        with open(os.path.join(os.getcwd(), config['llc_config']), 'r') as f:
            llc_config = yaml.load(f, Loader=yaml.SafeLoader)
            llc_config_params = llc_config['params']
            self._latent_dim = llc_config_params['config']['latent_dim']

        super().__init__(base_name, config)

        # Size of the task-specific slice of the observation; it is stripped
        # before observations are fed to the LLC.
        self._task_size = self.vec_env.env.get_task_obs_size()

        self._llc_steps = config['llc_steps']  # sim steps per high-level action
        llc_checkpoint = config['llc_checkpoint']
        assert(llc_checkpoint != "")
        self._build_llc(llc_config_params, llc_checkpoint)

        return

    def env_step(self, actions):
        """Execute one high-level action: run the LLC for `_llc_steps` sim steps,
        average the rewards, and mark done if any sub-step was done."""
        actions = self.preprocess_actions(actions)
        obs = self.obs['obs']

        rewards = 0.0
        done_count = 0.0
        for t in range(self._llc_steps):
            llc_actions = self._compute_llc_action(obs, actions)
            obs, curr_rewards, curr_dones, infos = self.vec_env.step(llc_actions)

            rewards += curr_rewards
            done_count += curr_dones

        rewards /= self._llc_steps
        dones = torch.zeros_like(done_count)
        dones[done_count > 0] = 1.0

        if self.is_tensor_obses:
            if self.value_size == 1:
                rewards = rewards.unsqueeze(1)
            return self.obs_to_tensors(obs), rewards.to(self.ppo_device), dones.to(self.ppo_device), infos
        else:
            # NOTE(review): in this branch curr_dones would be numpy, so the
            # torch.zeros_like(done_count) above and torch.from_numpy(dones)
            # below look inconsistent -- this path appears unexercised; confirm.
            if self.value_size == 1:
                rewards = np.expand_dims(rewards, axis=1)
            return self.obs_to_tensors(obs), torch.from_numpy(rewards).to(self.ppo_device).float(), torch.from_numpy(dones).to(self.ppo_device), infos

    def cast_obs(self, obs):
        # Keep the LLC agent's tensor/numpy bookkeeping in sync with ours.
        obs = super().cast_obs(obs)
        self._llc_agent.is_tensor_obses = self.is_tensor_obses
        return obs

    def preprocess_actions(self, actions):
        # High-level actions are latent codes clamped to [-1, 1].
        clamped_actions = torch.clamp(actions, -1.0, 1.0)
        if not self.is_tensor_obses:
            clamped_actions = clamped_actions.cpu().numpy()
        return clamped_actions

    def _setup_action_space(self):
        # The high-level policy acts in the LLC's latent space.
        super()._setup_action_space()
        self.actions_num = self._latent_dim
        return

    def _build_llc(self, config_params, checkpoint_file):
        """Instantiate the low-level controller agent and restore its weights."""
        network_params = config_params['network']

        network_builder = gen_amp_network_builder.GenAMPBuilder()
        network_builder.load(network_params)

        network = gen_amp_models.ModelGenAMPContinuous(network_builder)
        llc_agent_config = self._build_llc_agent_config(config_params, network)

        self._llc_agent = gen_amp.GenAMPAgent('llc', llc_agent_config)
        self._llc_agent.restore(checkpoint_file)
        print("Loaded LLC checkpoint from {:s}".format(checkpoint_file))
        self._llc_agent.set_eval()  # the LLC stays frozen; inference only
        return

    def _build_llc_agent_config(self, config_params, network):
        """Clone our env info minus the task-observation slice for the LLC agent."""
        llc_env_info = copy.deepcopy(self.env_info)
        obs_space = llc_env_info['observation_space']
        obs_size = obs_space.shape[0]
        obs_size -= self._task_size
        llc_env_info['observation_space'] = spaces.Box(obs_space.low[:obs_size], obs_space.high[:obs_size])

        config = config_params['config']
        config['network'] = network
        config['num_actors'] = self.num_actors
        config['features'] = {'observer' : self.algo_observer}
        config['env_info'] = llc_env_info
        return config

    def _compute_llc_action(self, obs, actions):
        """Map a high-level latent action to a deterministic LLC motor action."""
        llc_obs = self._extract_llc_obs(obs)
        processed_obs = self._llc_agent._preproc_obs(llc_obs)
        # Latents live on the unit hypersphere.
        z = torch.nn.functional.normalize(actions, dim=-1)
        mu, _ = self._llc_agent.model.a2c_network.eval_actor(obs=processed_obs, amp_latents=z)
        llc_action = mu  # deterministic: take the mean action
        llc_action = self._llc_agent.preprocess_actions(llc_action)

        return llc_action

    def _extract_llc_obs(self, obs):
        """Drop the trailing task-observation features from the full observation."""
        obs_size = obs.shape[-1]
        llc_obs = obs[..., :obs_size - self._task_size]
        return llc_obs
| 6,339 | Python | 38.625 | 150 | 0.675974 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_continuous.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.algos_torch import torch_ext
from rl_games.common import a2c_common
from rl_games.common import schedulers
from rl_games.common import vecenv
from isaacgymenvs.utils.torch_jit_utils import to_torch
import time
from datetime import datetime
import numpy as np
from torch import optim
import torch
from torch import nn
import isaacgymenvs.learning.replay_buffer as replay_buffer
import isaacgymenvs.learning.common_agent as common_agent
from tensorboardX import SummaryWriter
class AMPAgent(common_agent.CommonAgent):
    def __init__(self, base_name, params):
        """Create the AMP agent; wires up value normalization and the AMP-input normalizer."""
        super().__init__(base_name, params)

        if self.normalize_value:
            # Central-value nets own their value normalizer; otherwise use the policy model's.
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std

        if self._normalize_amp_input:
            # Running mean/std over discriminator (AMP) observations.
            # NOTE(review): _normalize_amp_input / _amp_observation_space are
            # presumably populated by base-class config loading -- confirm.
            self._amp_input_mean_std = RunningMeanStd(self._amp_observation_space.shape).to(self.ppo_device)

        return
    def init_tensors(self):
        """Allocate the base experience tensors, then the AMP-specific buffers."""
        super().init_tensors()
        self._build_amp_buffers()
        return
    def set_eval(self):
        """Switch to eval mode, including the AMP input normalizer (freezes its stats)."""
        super().set_eval()
        if self._normalize_amp_input:
            self._amp_input_mean_std.eval()
        return
    def set_train(self):
        """Switch to train mode, including the AMP input normalizer (updates its stats)."""
        super().set_train()
        if self._normalize_amp_input:
            self._amp_input_mean_std.train()
        return
    def get_stats_weights(self):
        """Return normalizer state dicts, extended with the AMP input stats (for checkpointing)."""
        state = super().get_stats_weights()
        if self._normalize_amp_input:
            state['amp_input_mean_std'] = self._amp_input_mean_std.state_dict()
        return state
    def set_stats_weights(self, weights):
        """Restore normalizer state, including the AMP input stats (from a checkpoint)."""
        super().set_stats_weights(weights)
        if self._normalize_amp_input:
            self._amp_input_mean_std.load_state_dict(weights['amp_input_mean_std'])
        return
def play_steps(self):
self.set_eval()
epinfos = []
update_list = self.update_list
for n in range(self.horizon_length):
self.obs, done_env_ids = self._env_reset_done()
self.experience_buffer.update_data('obses', n, self.obs['obs'])
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
res_dict = self.get_masked_action_values(self.obs, masks)
else:
res_dict = self.get_action_values(self.obs)
for k in update_list:
self.experience_buffer.update_data(k, n, res_dict[k])
if self.has_central_value:
self.experience_buffer.update_data('states', n, self.obs['states'])
self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
shaped_rewards = self.rewards_shaper(rewards)
self.experience_buffer.update_data('rewards', n, shaped_rewards)
self.experience_buffer.update_data('next_obses', n, self.obs['obs'])
self.experience_buffer.update_data('dones', n, self.dones)
self.experience_buffer.update_data('amp_obs', n, infos['amp_obs'])
terminated = infos['terminate'].float()
terminated = terminated.unsqueeze(-1)
next_vals = self._eval_critic(self.obs)
next_vals *= (1.0 - terminated)
self.experience_buffer.update_data('next_values', n, next_vals)
self.current_rewards += rewards
self.current_lengths += 1
all_done_indices = self.dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
self.algo_observer.process_infos(infos, done_indices)
not_dones = 1.0 - self.dones.float()
self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
self.current_lengths = self.current_lengths * not_dones
if (self.vec_env.env.viewer and (n == (self.horizon_length - 1))):
self._amp_debug(infos)
mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
mb_values = self.experience_buffer.tensor_dict['values']
mb_next_values = self.experience_buffer.tensor_dict['next_values']
mb_rewards = self.experience_buffer.tensor_dict['rewards']
mb_amp_obs = self.experience_buffer.tensor_dict['amp_obs']
amp_rewards = self._calc_amp_rewards(mb_amp_obs)
mb_rewards = self._combine_rewards(mb_rewards, amp_rewards)
mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values)
mb_returns = mb_advs + mb_values
batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list)
batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns)
batch_dict['played_frames'] = self.batch_size
for k, v in amp_rewards.items():
batch_dict[k] = a2c_common.swap_and_flatten01(v)
return batch_dict
def prepare_dataset(self, batch_dict):
super().prepare_dataset(batch_dict)
self.dataset.values_dict['amp_obs'] = batch_dict['amp_obs']
self.dataset.values_dict['amp_obs_demo'] = batch_dict['amp_obs_demo']
self.dataset.values_dict['amp_obs_replay'] = batch_dict['amp_obs_replay']
return
def train_epoch(self):
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self._update_amp_demos()
num_obs_samples = batch_dict['amp_obs'].shape[0]
amp_obs_demo = self._amp_obs_demo_buffer.sample(num_obs_samples)['amp_obs']
batch_dict['amp_obs_demo'] = amp_obs_demo
if (self._amp_replay_buffer.get_total_count() == 0):
batch_dict['amp_obs_replay'] = batch_dict['amp_obs']
else:
batch_dict['amp_obs_replay'] = self._amp_replay_buffer.sample(num_obs_samples)['amp_obs']
self.set_train()
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
if self.has_central_value:
self.train_central_value()
train_info = None
if self.is_rnn:
frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement())
print(frames_mask_ratio)
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
curr_train_info = self.train_actor_critic(self.dataset[i])
if self.schedule_type == 'legacy':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item())
self.update_lr(self.last_lr)
if (train_info is None):
train_info = dict()
for k, v in curr_train_info.items():
train_info[k] = [v]
else:
for k, v in curr_train_info.items():
train_info[k].append(v)
av_kls = torch_ext.mean_list(train_info['kl'])
if self.schedule_type == 'standard':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
if self.schedule_type == 'standard_epoch':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
self._store_replay_amp_obs(batch_dict['amp_obs'])
train_info['play_time'] = play_time
train_info['update_time'] = update_time
train_info['total_time'] = total_time
self._record_train_batch_info(batch_dict, train_info)
return train_info
def calc_gradients(self, input_dict):
self.set_train()
value_preds_batch = input_dict['old_values']
old_action_log_probs_batch = input_dict['old_logp_actions']
advantage = input_dict['advantages']
old_mu_batch = input_dict['mu']
old_sigma_batch = input_dict['sigma']
return_batch = input_dict['returns']
actions_batch = input_dict['actions']
obs_batch = input_dict['obs']
obs_batch = self._preproc_obs(obs_batch)
amp_obs = input_dict['amp_obs'][0:self._amp_minibatch_size]
amp_obs = self._preproc_amp_obs(amp_obs)
amp_obs_replay = input_dict['amp_obs_replay'][0:self._amp_minibatch_size]
amp_obs_replay = self._preproc_amp_obs(amp_obs_replay)
amp_obs_demo = input_dict['amp_obs_demo'][0:self._amp_minibatch_size]
amp_obs_demo = self._preproc_amp_obs(amp_obs_demo)
amp_obs_demo.requires_grad_(True)
lr = self.last_lr
kl = 1.0
lr_mul = 1.0
curr_e_clip = lr_mul * self.e_clip
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
'amp_obs' : amp_obs,
'amp_obs_replay' : amp_obs_replay,
'amp_obs_demo' : amp_obs_demo
}
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = self.model(batch_dict)
action_log_probs = res_dict['prev_neglogp']
values = res_dict['values']
entropy = res_dict['entropy']
mu = res_dict['mus']
sigma = res_dict['sigmas']
disc_agent_logit = res_dict['disc_agent_logit']
disc_agent_replay_logit = res_dict['disc_agent_replay_logit']
disc_demo_logit = res_dict['disc_demo_logit']
a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip)
a_loss = a_info['actor_loss']
c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
c_loss = c_info['critic_loss']
b_loss = self.bound_loss(mu)
losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
disc_agent_cat_logit = torch.cat([disc_agent_logit, disc_agent_replay_logit], dim=0)
disc_info = self._disc_loss(disc_agent_cat_logit, disc_demo_logit, amp_obs_demo)
disc_loss = disc_info['disc_loss']
loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss \
+ self._disc_coef * disc_loss
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
#TODO: Refactor this ugliest code of the year
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
with torch.no_grad():
reduce_kl = not self.is_rnn
kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
if self.is_rnn:
kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
self.train_result = {
'entropy': entropy,
'kl': kl_dist,
'last_lr': self.last_lr,
'lr_mul': lr_mul,
'b_loss': b_loss
}
self.train_result.update(a_info)
self.train_result.update(c_info)
self.train_result.update(disc_info)
return
def _load_config_params(self, config):
super()._load_config_params(config)
self._task_reward_w = config['task_reward_w']
self._disc_reward_w = config['disc_reward_w']
self._amp_observation_space = self.env_info['amp_observation_space']
self._amp_batch_size = int(config['amp_batch_size'])
self._amp_minibatch_size = int(config['amp_minibatch_size'])
assert(self._amp_minibatch_size <= self.minibatch_size)
self._disc_coef = config['disc_coef']
self._disc_logit_reg = config['disc_logit_reg']
self._disc_grad_penalty = config['disc_grad_penalty']
self._disc_weight_decay = config['disc_weight_decay']
self._disc_reward_scale = config['disc_reward_scale']
self._normalize_amp_input = config.get('normalize_amp_input', True)
return
def _build_net_config(self):
config = super()._build_net_config()
config['amp_input_shape'] = self._amp_observation_space.shape
return config
def _init_train(self):
super()._init_train()
self._init_amp_demo_buf()
return
def _disc_loss(self, disc_agent_logit, disc_demo_logit, obs_demo):
# prediction loss
disc_loss_agent = self._disc_loss_neg(disc_agent_logit)
disc_loss_demo = self._disc_loss_pos(disc_demo_logit)
disc_loss = 0.5 * (disc_loss_agent + disc_loss_demo)
# logit reg
logit_weights = self.model.a2c_network.get_disc_logit_weights()
disc_logit_loss = torch.sum(torch.square(logit_weights))
disc_loss += self._disc_logit_reg * disc_logit_loss
# grad penalty
disc_demo_grad = torch.autograd.grad(disc_demo_logit, obs_demo, grad_outputs=torch.ones_like(disc_demo_logit),
create_graph=True, retain_graph=True, only_inputs=True)
disc_demo_grad = disc_demo_grad[0]
disc_demo_grad = torch.sum(torch.square(disc_demo_grad), dim=-1)
disc_grad_penalty = torch.mean(disc_demo_grad)
disc_loss += self._disc_grad_penalty * disc_grad_penalty
# weight decay
if (self._disc_weight_decay != 0):
disc_weights = self.model.a2c_network.get_disc_weights()
disc_weights = torch.cat(disc_weights, dim=-1)
disc_weight_decay = torch.sum(torch.square(disc_weights))
disc_loss += self._disc_weight_decay * disc_weight_decay
disc_agent_acc, disc_demo_acc = self._compute_disc_acc(disc_agent_logit, disc_demo_logit)
disc_info = {
'disc_loss': disc_loss,
'disc_grad_penalty': disc_grad_penalty,
'disc_logit_loss': disc_logit_loss,
'disc_agent_acc': disc_agent_acc,
'disc_demo_acc': disc_demo_acc,
'disc_agent_logit': disc_agent_logit,
'disc_demo_logit': disc_demo_logit
}
return disc_info
def _disc_loss_neg(self, disc_logits):
bce = torch.nn.BCEWithLogitsLoss()
loss = bce(disc_logits, torch.zeros_like(disc_logits))
return loss
def _disc_loss_pos(self, disc_logits):
bce = torch.nn.BCEWithLogitsLoss()
loss = bce(disc_logits, torch.ones_like(disc_logits))
return loss
def _compute_disc_acc(self, disc_agent_logit, disc_demo_logit):
agent_acc = disc_agent_logit < 0
agent_acc = torch.mean(agent_acc.float())
demo_acc = disc_demo_logit > 0
demo_acc = torch.mean(demo_acc.float())
return agent_acc, demo_acc
def _fetch_amp_obs_demo(self, num_samples):
amp_obs_demo = self.vec_env.env.fetch_amp_obs_demo(num_samples)
return amp_obs_demo
def _build_amp_buffers(self):
batch_shape = self.experience_buffer.obs_base_shape
self.experience_buffer.tensor_dict['amp_obs'] = torch.zeros(batch_shape + self._amp_observation_space.shape,
device=self.ppo_device)
amp_obs_demo_buffer_size = int(self.config['amp_obs_demo_buffer_size'])
self._amp_obs_demo_buffer = replay_buffer.ReplayBuffer(amp_obs_demo_buffer_size, self.ppo_device)
self._amp_replay_keep_prob = self.config['amp_replay_keep_prob']
replay_buffer_size = int(self.config['amp_replay_buffer_size'])
self._amp_replay_buffer = replay_buffer.ReplayBuffer(replay_buffer_size, self.ppo_device)
self.tensor_list += ['amp_obs']
return
def _init_amp_demo_buf(self):
buffer_size = self._amp_obs_demo_buffer.get_buffer_size()
num_batches = int(np.ceil(buffer_size / self._amp_batch_size))
for i in range(num_batches):
curr_samples = self._fetch_amp_obs_demo(self._amp_batch_size)
self._amp_obs_demo_buffer.store({'amp_obs': curr_samples})
return
def _update_amp_demos(self):
new_amp_obs_demo = self._fetch_amp_obs_demo(self._amp_batch_size)
self._amp_obs_demo_buffer.store({'amp_obs': new_amp_obs_demo})
return
def _preproc_amp_obs(self, amp_obs):
if self._normalize_amp_input:
amp_obs = self._amp_input_mean_std(amp_obs)
return amp_obs
def _combine_rewards(self, task_rewards, amp_rewards):
disc_r = amp_rewards['disc_rewards']
combined_rewards = self._task_reward_w * task_rewards + \
+ self._disc_reward_w * disc_r
return combined_rewards
def _eval_disc(self, amp_obs):
proc_amp_obs = self._preproc_amp_obs(amp_obs)
return self.model.a2c_network.eval_disc(proc_amp_obs)
def _calc_amp_rewards(self, amp_obs):
disc_r = self._calc_disc_rewards(amp_obs)
output = {
'disc_rewards': disc_r
}
return output
def _calc_disc_rewards(self, amp_obs):
with torch.no_grad():
disc_logits = self._eval_disc(amp_obs)
prob = 1 / (1 + torch.exp(-disc_logits))
disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.ppo_device)))
disc_r *= self._disc_reward_scale
return disc_r
def _store_replay_amp_obs(self, amp_obs):
buf_size = self._amp_replay_buffer.get_buffer_size()
buf_total_count = self._amp_replay_buffer.get_total_count()
if (buf_total_count > buf_size):
keep_probs = to_torch(np.array([self._amp_replay_keep_prob] * amp_obs.shape[0]), device=self.ppo_device)
keep_mask = torch.bernoulli(keep_probs) == 1.0
amp_obs = amp_obs[keep_mask]
self._amp_replay_buffer.store({'amp_obs': amp_obs})
return
def _record_train_batch_info(self, batch_dict, train_info):
train_info['disc_rewards'] = batch_dict['disc_rewards']
return
def _log_train_info(self, train_info, frame):
super()._log_train_info(train_info, frame)
self.writer.add_scalar('losses/disc_loss', torch_ext.mean_list(train_info['disc_loss']).item(), frame)
self.writer.add_scalar('info/disc_agent_acc', torch_ext.mean_list(train_info['disc_agent_acc']).item(), frame)
self.writer.add_scalar('info/disc_demo_acc', torch_ext.mean_list(train_info['disc_demo_acc']).item(), frame)
self.writer.add_scalar('info/disc_agent_logit', torch_ext.mean_list(train_info['disc_agent_logit']).item(), frame)
self.writer.add_scalar('info/disc_demo_logit', torch_ext.mean_list(train_info['disc_demo_logit']).item(), frame)
self.writer.add_scalar('info/disc_grad_penalty', torch_ext.mean_list(train_info['disc_grad_penalty']).item(), frame)
self.writer.add_scalar('info/disc_logit_loss', torch_ext.mean_list(train_info['disc_logit_loss']).item(), frame)
disc_reward_std, disc_reward_mean = torch.std_mean(train_info['disc_rewards'])
self.writer.add_scalar('info/disc_reward_mean', disc_reward_mean.item(), frame)
self.writer.add_scalar('info/disc_reward_std', disc_reward_std.item(), frame)
return
def _amp_debug(self, info):
with torch.no_grad():
amp_obs = info['amp_obs']
amp_obs = amp_obs[0:1]
disc_pred = self._eval_disc(amp_obs)
amp_rewards = self._calc_amp_rewards(amp_obs)
disc_reward = amp_rewards['disc_rewards']
disc_pred = disc_pred.detach().cpu().numpy()[0, 0]
disc_reward = disc_reward.cpu().numpy()[0, 0]
print("disc_pred: ", disc_pred, disc_reward)
        return
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.player import BasePlayer
import isaacgymenvs.learning.common_player as common_player
class AMPPlayerContinuous(common_player.CommonPlayer):
    """Inference-time player for AMP policies.

    Mirrors the training-side discriminator plumbing: restores and applies
    the AMP input normalizer and can optionally print per-step discriminator
    predictions for debugging.
    """

    def __init__(self, params):
        # Pull AMP-specific settings out of the config before the base
        # constructor runs — these fields are read by _build_net below,
        # which presumably runs during base construction (verify in
        # common_player.CommonPlayer).
        config = params['config']
        self._normalize_amp_input = config.get('normalize_amp_input', True)
        self._disc_reward_scale = config['disc_reward_scale']
        self._print_disc_prediction = config.get('print_disc_prediction', False)
        super().__init__(params)

    def restore(self, fn):
        """Load a checkpoint, including the AMP input normalizer state."""
        super().restore(fn)
        if not self._normalize_amp_input:
            return
        checkpoint = torch_ext.load_checkpoint(fn)
        self._amp_input_mean_std.load_state_dict(checkpoint['amp_input_mean_std'])

    def _build_net(self, config):
        """Build the network; attach a frozen AMP input normalizer if enabled."""
        super()._build_net(config)
        if self._normalize_amp_input:
            normalizer = RunningMeanStd(config['amp_input_shape']).to(self.device)
            normalizer.eval()
            self._amp_input_mean_std = normalizer

    def _post_step(self, info):
        """After each env step, optionally print discriminator debug output."""
        super()._post_step(info)
        if self._print_disc_prediction:
            self._amp_debug(info)

    def _build_net_config(self):
        """Add the discriminator input shape to the network build config."""
        config = super()._build_net_config()
        if not hasattr(self, 'env'):
            config['amp_input_shape'] = self.env_info['amp_observation_space']
        else:
            config['amp_input_shape'] = self.env.amp_observation_space.shape
        return config

    def _amp_debug(self, info):
        """Print discriminator prediction and reward for the first env's obs."""
        with torch.no_grad():
            single_obs = info['amp_obs'][0:1].to(self.device)
            disc_pred = self._eval_disc(single_obs)
            disc_reward = self._calc_amp_rewards(single_obs)['disc_rewards']

            disc_pred = disc_pred.detach().cpu().numpy()[0, 0]
            disc_reward = disc_reward.cpu().numpy()[0, 0]
            print("disc_pred: ", disc_pred, disc_reward)

    def _preproc_amp_obs(self, amp_obs):
        """Normalize discriminator inputs when normalization is enabled."""
        if self._normalize_amp_input:
            return self._amp_input_mean_std(amp_obs)
        return amp_obs

    def _eval_disc(self, amp_obs):
        """Run the discriminator on (normalized) AMP observations."""
        return self.model.a2c_network.eval_disc(self._preproc_amp_obs(amp_obs))

    def _calc_amp_rewards(self, amp_obs):
        """Return the AMP reward terms as a dict keyed by 'disc_rewards'."""
        return {
            'disc_rewards': self._calc_disc_rewards(amp_obs)
        }

    def _calc_disc_rewards(self, amp_obs):
        """Discriminator reward: -log(max(1 - sigmoid(logit), 1e-4)) * scale."""
        with torch.no_grad():
            disc_logits = self._eval_disc(amp_obs)
            prob = 1.0 / (1.0 + torch.exp(-disc_logits))
            # Clamp to avoid log(0) when the discriminator saturates.
            disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.device)))
            disc_r *= self._disc_reward_scale
        return disc_r
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
from datetime import datetime
from gym import spaces
import numpy as np
import os
import time
import yaml
from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import a2c_common
from rl_games.common import datasets
from rl_games.common import schedulers
from rl_games.common import vecenv
import torch
from torch import optim
from . import amp_datasets as amp_datasets
from tensorboardX import SummaryWriter
class CommonAgent(a2c_continuous.A2CAgent):
    """Shared PPO agent base for the AMP-style learners.

    Redoes A2C agent construction with overridable hooks
    (_load_config_params, _build_net_config, _init_train, ...) that
    subclasses such as AMPAgent extend.
    """

    def __init__(self, base_name, params):
        # Deliberately bypasses a2c_continuous.A2CAgent.__init__ and calls the
        # A2CBase initializer directly, then performs the continuous-action
        # setup itself so the subclass hooks run at the right points.
        a2c_common.A2CBase.__init__(self, base_name, params)

        config = params['config']
        self._load_config_params(config)

        self.is_discrete = False
        self._setup_action_space()
        self.bounds_loss_coef = config.get('bounds_loss_coef', None)
        self.clip_actions = config.get('clip_actions', True)
        self.network_path = self.nn_dir

        net_config = self._build_net_config()
        self.model = self.network.build(net_config)
        self.model.to(self.ppo_device)
        self.states = None

        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)

        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.has_central_value:
            cv_config = {
                'state_shape' : torch_ext.shape_whc_to_cwh(self.state_shape),
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'num_steps' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'model' : self.central_value_config['network'],
                'config' : self.central_value_config,
                # NOTE(review): 'writter' [sic] — presumably matches the kwarg
                # name expected by CentralValueTrain; verify before renaming.
                'writter' : self.writer,
                'multi_gpu' : self.multi_gpu
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = amp_datasets.AMPDataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)

        self.algo_observer.after_init(self)
        return
def init_tensors(self):
super().init_tensors()
self.experience_buffer.tensor_dict['next_obses'] = torch.zeros_like(self.experience_buffer.tensor_dict['obses'])
self.experience_buffer.tensor_dict['next_values'] = torch.zeros_like(self.experience_buffer.tensor_dict['values'])
self.tensor_list += ['next_obses']
return
    def train(self):
        """Main training loop: collect, update, log, checkpoint.

        Runs until epoch_num exceeds max_epochs, then returns
        (last_mean_rewards, epoch_num).
        """
        self.init_tensors()
        self.last_mean_rewards = -100500
        start_time = time.time()
        total_time = 0
        rep_count = 0
        self.frame = 0
        self.obs = self.env_reset()
        self.curr_frames = self.batch_size_envs

        self.model_output_file = os.path.join(self.network_path,
            self.config['name'] + '_{date:%d-%H-%M-%S}'.format(date=datetime.now()))

        self._init_train()

        # global rank of the GPU
        # multi-gpu training is not currently supported for AMP
        self.global_rank = int(os.getenv("RANK", "0"))

        while True:
            epoch_num = self.update_epoch()
            train_info = self.train_epoch()

            sum_time = train_info['total_time']
            total_time += sum_time
            frame = self.frame
            # NOTE(review): logging, checkpointing and the exit condition all
            # live inside this rank-0 branch — non-zero ranks would never
            # terminate (consistent with the "multi-gpu not supported" note).
            if self.global_rank == 0:
                scaled_time = sum_time
                scaled_play_time = train_info['play_time']
                curr_frames = self.curr_frames
                self.frame += curr_frames
                if self.print_stats:
                    fps_step = curr_frames / scaled_play_time
                    fps_total = curr_frames / scaled_time
                    print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')

                self.writer.add_scalar('performance/total_fps', curr_frames / scaled_time, frame)
                self.writer.add_scalar('performance/step_fps', curr_frames / scaled_play_time, frame)
                self.writer.add_scalar('info/epochs', epoch_num, frame)
                self._log_train_info(train_info, frame)

                self.algo_observer.after_print_stats(frame, epoch_num, total_time)

                if self.game_rewards.current_size > 0:
                    mean_rewards = self.game_rewards.get_mean()
                    mean_lengths = self.game_lengths.get_mean()

                    for i in range(self.value_size):
                        # NOTE(review): .format(i) is a no-op here — the tag
                        # strings contain no placeholder, so multi-value heads
                        # overwrite the same scalar tag.
                        self.writer.add_scalar('rewards/frame'.format(i), mean_rewards[i], frame)
                        self.writer.add_scalar('rewards/iter'.format(i), mean_rewards[i], epoch_num)
                        self.writer.add_scalar('rewards/time'.format(i), mean_rewards[i], total_time)

                    self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
                    self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)

                    if self.has_self_play_config:
                        self.self_play_manager.update(self)

                if self.save_freq > 0:
                    if (epoch_num % self.save_freq == 0):
                        self.save(self.model_output_file + "_" + str(epoch_num))

                if epoch_num > self.max_epochs:
                    self.save(self.model_output_file)
                    print('MAX EPOCHS NUM!')
                    return self.last_mean_rewards, epoch_num

            update_time = 0  # NOTE(review): dead store, never read

        return  # NOTE(review): unreachable — the loop only exits via the return above
def train_epoch(self):
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self.set_train()
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
if self.has_central_value:
self.train_central_value()
train_info = None
if self.is_rnn:
frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement())
print(frames_mask_ratio)
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
curr_train_info = self.train_actor_critic(self.dataset[i])
print(type(curr_train_info))
if self.schedule_type == 'legacy':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item())
self.update_lr(self.last_lr)
if (train_info is None):
train_info = dict()
for k, v in curr_train_info.items():
train_info[k] = [v]
else:
for k, v in curr_train_info.items():
train_info[k].append(v)
av_kls = torch_ext.mean_list(train_info['kl'])
if self.schedule_type == 'standard':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
if self.schedule_type == 'standard_epoch':
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
train_info['play_time'] = play_time
train_info['update_time'] = update_time
train_info['total_time'] = total_time
self._record_train_batch_info(batch_dict, train_info)
return train_info
    def play_steps(self):
        """Collect one horizon of experience and return a flattened batch.

        Writes per-step observations/actions/rewards into the experience
        buffer, zeroes bootstrap values on true terminations, then computes
        GAE advantages and returns.
        """
        self.set_eval()

        epinfos = []  # NOTE(review): never populated or used in this method
        update_list = self.update_list

        for n in range(self.horizon_length):
            self.obs, done_env_ids = self._env_reset_done()
            self.experience_buffer.update_data('obses', n, self.obs['obs'])

            if self.use_action_masks:
                masks = self.vec_env.get_action_masks()
                res_dict = self.get_masked_action_values(self.obs, masks)
            else:
                res_dict = self.get_action_values(self.obs)

            for k in update_list:
                self.experience_buffer.update_data(k, n, res_dict[k])

            if self.has_central_value:
                self.experience_buffer.update_data('states', n, self.obs['states'])

            self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
            shaped_rewards = self.rewards_shaper(rewards)
            self.experience_buffer.update_data('rewards', n, shaped_rewards)
            self.experience_buffer.update_data('next_obses', n, self.obs['obs'])
            self.experience_buffer.update_data('dones', n, self.dones)

            # Zero the bootstrap value on true terminations (not timeouts).
            terminated = infos['terminate'].float()
            terminated = terminated.unsqueeze(-1)
            next_vals = self._eval_critic(self.obs)
            next_vals *= (1.0 - terminated)
            self.experience_buffer.update_data('next_values', n, next_vals)

            self.current_rewards += rewards
            self.current_lengths += 1
            all_done_indices = self.dones.nonzero(as_tuple=False)
            # One entry per environment (strided over agents).
            done_indices = all_done_indices[::self.num_agents]

            self.game_rewards.update(self.current_rewards[done_indices])
            self.game_lengths.update(self.current_lengths[done_indices])
            self.algo_observer.process_infos(infos, done_indices)

            not_dones = 1.0 - self.dones.float()
            self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
            self.current_lengths = self.current_lengths * not_dones

        mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
        mb_values = self.experience_buffer.tensor_dict['values']
        mb_next_values = self.experience_buffer.tensor_dict['next_values']
        mb_rewards = self.experience_buffer.tensor_dict['rewards']

        mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values)
        mb_returns = mb_advs + mb_values

        batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list)
        batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns)
        batch_dict['played_frames'] = self.batch_size

        return batch_dict
    def calc_gradients(self, input_dict):
        """Run one optimizer step on a single minibatch.

        Computes the PPO actor/critic/entropy/bound losses under optional
        mixed precision, backpropagates through the GradScaler, applies
        (optionally clipped) gradients, and stores per-minibatch metrics in
        ``self.train_result``.

        Args:
            input_dict: minibatch dict with keys 'old_values',
                'old_logp_actions', 'advantages', 'mu', 'sigma', 'returns',
                'actions', 'obs', and for RNN policies additionally
                'rnn_masks' and 'rnn_states'.
        """
        self.set_train()
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)
        # lr_mul is fixed at 1.0 here; curr_e_clip therefore equals e_clip.
        lr = self.last_lr
        kl = 1.0
        lr_mul = 1.0
        curr_e_clip = lr_mul * self.e_clip
        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch
        }
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
        # Forward pass and loss computation run under autocast so mixed
        # precision (if enabled) covers the whole loss graph.
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['value']
            entropy = res_dict['entropy']
            mu = res_dict['mu']
            sigma = res_dict['sigma']
            a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip)
            a_loss = a_info['actor_loss']
            c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            c_loss = c_info['critic_loss']
            b_loss = self.bound_loss(mu)
            # apply_masks averages each loss, honoring RNN padding masks.
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
            loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss
            # Setting grads to None (instead of zero) skips a memset; the
            # multi-GPU wrapper requires zero_grad() on its own optimizer.
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                for param in self.model.parameters():
                    param.grad = None
        self.scaler.scale(loss).backward()
        # TODO: refactor — four near-duplicate scaler.step paths below.
        if self.truncate_grads:
            if self.multi_gpu:
                # Horovod-style path: sync grads, unscale, clip, then step
                # inside skip_synchronize to avoid a second allreduce.
                self.optimizer.synchronize()
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
            else:
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
        else:
            self.scaler.step(self.optimizer)
            self.scaler.update()
        # KL between old and new policy, for adaptive-LR schedulers/logging.
        with torch.no_grad():
            reduce_kl = not self.is_rnn
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if self.is_rnn:
                # NOTE(review): divides by numel(), not sum_mask — padded
                # steps slightly dilute the KL estimate; kept as-is.
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
        self.train_result = {
            'entropy': entropy,
            'kl': kl_dist,
            'last_lr': self.last_lr,
            'lr_mul': lr_mul,
            'b_loss': b_loss
        }
        self.train_result.update(a_info)
        self.train_result.update(c_info)
        return
def discount_values(self, mb_fdones, mb_values, mb_rewards, mb_next_values):
lastgaelam = 0
mb_advs = torch.zeros_like(mb_rewards)
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - mb_fdones[t]
not_done = not_done.unsqueeze(1)
delta = mb_rewards[t] + self.gamma * mb_next_values[t] - mb_values[t]
lastgaelam = delta + self.gamma * self.tau * not_done * lastgaelam
mb_advs[t] = lastgaelam
return mb_advs
def bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.ppo_device))**2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.ppo_device))**2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss
def _load_config_params(self, config):
self.last_lr = config['learning_rate']
return
def _build_net_config(self):
obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_actors * self.num_agents,
'value_size': self.env_info.get('value_size', 1),
'normalize_value' : self.normalize_value,
'normalize_input': self.normalize_input,
}
return config
def _setup_action_space(self):
action_space = self.env_info['action_space']
self.actions_num = action_space.shape[0]
# todo introduce device instead of cuda()
self.actions_low = torch.from_numpy(action_space.low.copy()).float().to(self.ppo_device)
self.actions_high = torch.from_numpy(action_space.high.copy()).float().to(self.ppo_device)
return
def _init_train(self):
return
def _env_reset_done(self):
obs, done_env_ids = self.vec_env.reset_done()
return self.obs_to_tensors(obs), done_env_ids
def _eval_critic(self, obs_dict):
self.model.eval()
obs = obs_dict['obs']
processed_obs = self._preproc_obs(obs)
if self.normalize_input:
processed_obs = self.model.norm_obs(processed_obs)
value = self.model.a2c_network.eval_critic(processed_obs)
if self.normalize_value:
value = self.value_mean_std(value, True)
return value
def _actor_loss(self, old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip):
clip_frac = None
if (self.ppo):
ratio = torch.exp(old_action_log_probs_batch - action_log_probs)
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - curr_e_clip,
1.0 + curr_e_clip)
a_loss = torch.max(-surr1, -surr2)
clipped = torch.abs(ratio - 1.0) > curr_e_clip
clip_frac = torch.mean(clipped.float())
clip_frac = clip_frac.detach()
else:
a_loss = (action_log_probs * advantage)
info = {
'actor_loss': a_loss,
'actor_clip_frac': clip_frac
}
return info
def _critic_loss(self, value_preds_batch, values, curr_e_clip, return_batch, clip_value):
if clip_value:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip)
value_losses = (values - return_batch)**2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - values)**2
info = {
'critic_loss': c_loss
}
return info
def _record_train_batch_info(self, batch_dict, train_info):
return
def _log_train_info(self, train_info, frame):
self.writer.add_scalar('performance/update_time', train_info['update_time'], frame)
self.writer.add_scalar('performance/play_time', train_info['play_time'], frame)
self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(train_info['actor_loss']).item(), frame)
self.writer.add_scalar('losses/c_loss', torch_ext.mean_list(train_info['critic_loss']).item(), frame)
self.writer.add_scalar('losses/bounds_loss', torch_ext.mean_list(train_info['b_loss']).item(), frame)
self.writer.add_scalar('losses/entropy', torch_ext.mean_list(train_info['entropy']).item(), frame)
self.writer.add_scalar('info/last_lr', train_info['last_lr'][-1] * train_info['lr_mul'][-1], frame)
self.writer.add_scalar('info/lr_mul', train_info['lr_mul'][-1], frame)
self.writer.add_scalar('info/e_clip', self.e_clip * train_info['lr_mul'][-1], frame)
self.writer.add_scalar('info/clip_frac', torch_ext.mean_list(train_info['actor_clip_frac']).item(), frame)
self.writer.add_scalar('info/kl', torch_ext.mean_list(train_info['kl']).item(), frame)
return
| 21,575 | Python | 39.863636 | 157 | 0.585724 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/common_player.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time

import torch

from rl_games.algos_torch import players
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.player import BasePlayer
class CommonPlayer(players.PpoPlayerContinuous):
    """Inference-time player that rolls out a trained continuous-action policy.

    Differs from the stock rl_games player in that it builds its own network
    from the config, wraps observations in a dict, and resets only the
    environments flagged done between steps (via ``_env_reset_done``).
    """

    def __init__(self, params):
        """Build the player: read config flags, set up the action space,
        and construct the inference network.

        Note: deliberately calls ``BasePlayer.__init__`` (skipping the
        ``PpoPlayerContinuous`` initializer) so the network can be built
        with this class's own config.
        """
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config['normalize_value']

        self._setup_action_space()
        self.mask = [False]

        net_config = self._build_net_config()
        self._build_net(net_config)

    def run(self):
        """Play ``games_num * n_game_life`` episodes and print reward stats.

        Accumulates per-episode reward/step totals, optionally renders
        (fix: ``time`` is now imported at module level — previously
        rendering raised NameError on ``time.sleep``), and prints running
        and final averages. Win-rate info is printed when the env reports
        'battle_won' or 'scores' in its info dict.
        """
        render = self.render_env
        n_game_life = self.n_game_life
        n_games = self.games_num * n_game_life
        is_deterministic = self.is_deterministic
        sum_rewards = 0
        sum_steps = 0
        sum_game_res = 0
        games_played = 0

        has_masks = False
        if getattr(self.env, "has_action_mask", None) is not None:
            has_masks = self.env.has_action_mask()

        need_init_rnn = self.is_rnn
        for _ in range(n_games):
            if games_played >= n_games:
                break

            obs_dict = self.env_reset(self.env)
            batch_size = self.get_batch_size(obs_dict['obs'], 1)

            if need_init_rnn:
                self.init_rnn()
                need_init_rnn = False

            cr = torch.zeros(batch_size, dtype=torch.float32)
            steps = torch.zeros(batch_size, dtype=torch.float32)
            print_game_res = False

            for n in range(self.max_steps):
                # Reset only the envs that finished last step.
                obs_dict, done_env_ids = self._env_reset_done()

                if has_masks:
                    masks = self.env.get_action_mask()
                    action = self.get_masked_action(obs_dict, masks, is_deterministic)
                else:
                    action = self.get_action(obs_dict, is_deterministic)
                obs_dict, r, done, info = self.env_step(self.env, action)
                cr += r
                steps += 1

                self._post_step(info)

                if render:
                    self.env.render(mode='human')
                    time.sleep(self.render_sleep)

                all_done_indices = done.nonzero(as_tuple=False)
                # One entry per game (agents of the same game share a done flag).
                done_indices = all_done_indices[::self.num_agents]
                done_count = len(done_indices)
                games_played += done_count

                if done_count > 0:
                    if self.is_rnn:
                        # Zero the hidden state of finished episodes.
                        for s in self.states:
                            s[:, all_done_indices, :] = s[:, all_done_indices, :] * 0.0

                    cur_rewards = cr[done_indices].sum().item()
                    cur_steps = steps[done_indices].sum().item()

                    cr = cr * (1.0 - done.float())
                    steps = steps * (1.0 - done.float())
                    sum_rewards += cur_rewards
                    sum_steps += cur_steps

                    game_res = 0.0
                    if isinstance(info, dict):
                        if 'battle_won' in info:
                            print_game_res = True
                            game_res = info.get('battle_won', 0.5)
                        if 'scores' in info:
                            print_game_res = True
                            game_res = info.get('scores', 0.5)
                    if self.print_stats:
                        if print_game_res:
                            print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count, 'w:', game_res)
                        else:
                            print('reward:', cur_rewards/done_count, 'steps:', cur_steps/done_count)

                    sum_game_res += game_res
                    if batch_size // self.num_agents == 1 or games_played >= n_games:
                        break

        print(sum_rewards)
        if print_game_res:
            print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
        else:
            print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life)

    def obs_to_torch(self, obs):
        """Convert raw observations to tensors and wrap them in an obs dict."""
        return {'obs': super().obs_to_torch(obs)}

    def get_action(self, obs_dict, is_determenistic=False):
        """Query the policy for an action given a wrapped observation dict."""
        return super().get_action(obs_dict['obs'], is_determenistic)

    def _build_net(self, config):
        """Instantiate the network in eval mode on the inference device."""
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

    def _env_reset_done(self):
        """Reset only done environments; return (obs tensors, reset env ids)."""
        obs, done_env_ids = self.env.reset_done()
        return self.obs_to_torch(obs), done_env_ids

    def _post_step(self, info):
        """Hook for subclasses to react to the env info dict; no-op here."""
        pass

    def _build_net_config(self):
        """Assemble the keyword config used to build the inference network."""
        obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
        return {
            'actions_num': self.actions_num,
            'input_shape': obs_shape,
            'num_seqs': self.num_agents,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }

    def _setup_action_space(self):
        """Cache action dimensionality and float bounds on the inference device."""
        self.actions_num = self.action_space.shape[0]
        self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
        self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_hand.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from isaacgymenvs.tasks.base.vec_task import VecTask
class AllegroHand(VecTask):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Build the Allegro-hand in-hand object reorientation task.

        Reads reward shaping, reset-noise, random-force, and observation
        settings from ``cfg["env"]``, then lets ``VecTask.__init__`` create
        the sim before acquiring and wrapping the GPU state tensors.
        """
        self.cfg = cfg
        self.aggregate_mode = self.cfg["env"]["aggregateMode"]
        # Reward shaping coefficients.
        self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
        self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
        self.success_tolerance = self.cfg["env"]["successTolerance"]
        self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
        self.fall_dist = self.cfg["env"]["fallDistance"]
        self.fall_penalty = self.cfg["env"]["fallPenalty"]
        self.rot_eps = self.cfg["env"]["rotEps"]
        self.vel_obs_scale = 0.2  # scale factor of velocity based observations
        self.force_torque_obs_scale = 10.0  # scale factor of force/torque based observations
        # Randomization ranges applied on reset.
        self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
        self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
        self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
        self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
        # Random disturbance forces applied to the object during episodes.
        self.force_scale = self.cfg["env"].get("forceScale", 0.0)
        self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
        self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
        self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
        self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
        self.use_relative_control = self.cfg["env"]["useRelativeControl"]
        self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
        self.debug_viz = self.cfg["env"]["enableDebugVis"]
        self.max_episode_length = self.cfg["env"]["episodeLength"]
        self.reset_time = self.cfg["env"].get("resetTime", -1.0)
        self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
        self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
        self.av_factor = self.cfg["env"].get("averFactor", 0.1)
        self.object_type = self.cfg["env"]["objectType"]
        assert self.object_type in ["block", "egg", "pen"]
        # Pen orientation around its long axis is irrelevant for success.
        self.ignore_z = (self.object_type == "pen")
        self.asset_files_dict = {
            "block": "urdf/objects/cube_multicolor.urdf",
            "egg": "mjcf/open_ai_assets/hand/egg.xml",
            "pen": "mjcf/open_ai_assets/hand/pen.xml"
        }
        if "asset" in self.cfg["env"]:
            self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
            self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
            self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"])
        # can be "full_no_vel", "full", "full_state"
        self.obs_type = self.cfg["env"]["observationType"]
        if not (self.obs_type in ["full_no_vel", "full", "full_state"]):
            raise Exception(
                "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]")
        print("Obs type:", self.obs_type)
        # Observation sizes per mode (see compute_full_observations/state).
        self.num_obs_dict = {
            "full_no_vel": 50,
            "full": 72,
            "full_state": 88
        }
        self.up_axis = 'z'
        self.use_vel_obs = False
        self.fingertip_obs = True
        self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]
        num_states = 0
        if self.asymmetric_obs:
            num_states = 88
        self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
        self.cfg["env"]["numStates"] = num_states
        self.cfg["env"]["numActions"] = 16
        # VecTask.__init__ creates the sim (via create_sim) and the buffers.
        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
        self.dt = self.sim_params.dt
        control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
        if self.reset_time > 0.0:
            # A wall-clock reset time overrides the configured episode length.
            self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
            print("Reset time: ", self.reset_time)
            print("New episode length: ", self.max_episode_length)
        if self.viewer != None:
            cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
            cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
        # get gym GPU state tensors
        actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
        if self.obs_type == "full_state" or self.asymmetric_obs:
            # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            # self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
            dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        # create some wrapper tensors for different slices
        self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, device=self.device)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs]
        self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0]
        self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1]
        self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
        self.num_bodies = self.rigid_body_states.shape[1]
        self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
        self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
        print("Num dofs: ", self.num_dofs)
        self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
        self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
        # 3 actors per env: hand, object, goal.
        self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
        self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.reset_goal_buf = self.reset_buf.clone()
        self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
        self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
        self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
        self.total_successes = 0
        self.total_resets = 0
        # object apply random forces parameters
        self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
        self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
        # Per-env force probability sampled log-uniformly from the range.
        self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
                                           * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))
        self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
def create_sim(self):
self.dt = self.sim_params.dt
self.up_axis_idx = 2 # index of up axis: Y=1, Z=2
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load assets and instantiate hand, object, and goal actors per env.

        Also records DOF limits/properties, start poses, per-actor sim
        indices, and rigid-body handles/masses used later for resets and
        random-force application.
        """
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)
        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
        allegro_hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf"
        if "asset" in self.cfg["env"]:
            asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
            allegro_hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", allegro_hand_asset_file)
        object_asset_file = self.asset_files_dict[self.object_type]
        # load allegro hand asset
        asset_options = gymapi.AssetOptions()
        asset_options.flip_visual_attachments = False
        asset_options.fix_base_link = True
        asset_options.collapse_fixed_joints = True
        asset_options.disable_gravity = True
        asset_options.thickness = 0.001
        asset_options.angular_damping = 0.01
        if self.physics_engine == gymapi.SIM_PHYSX:
            asset_options.use_physx_armature = True
        asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
        allegro_hand_asset = self.gym.load_asset(self.sim, asset_root, allegro_hand_asset_file, asset_options)
        self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(allegro_hand_asset)
        self.num_shadow_hand_shapes = self.gym.get_asset_rigid_shape_count(allegro_hand_asset)
        self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(allegro_hand_asset)
        print("Num dofs: ", self.num_shadow_hand_dofs)
        self.num_shadow_hand_actuators = self.num_shadow_hand_dofs
        self.actuated_dof_indices = [i for i in range(self.num_shadow_hand_dofs)]
        # set allegro_hand dof properties (limits read from asset; gains overridden)
        shadow_hand_dof_props = self.gym.get_asset_dof_properties(allegro_hand_asset)
        self.shadow_hand_dof_lower_limits = []
        self.shadow_hand_dof_upper_limits = []
        self.shadow_hand_dof_default_pos = []
        self.shadow_hand_dof_default_vel = []
        self.sensors = []
        sensor_pose = gymapi.Transform()
        for i in range(self.num_shadow_hand_dofs):
            self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i])
            self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i])
            self.shadow_hand_dof_default_pos.append(0.0)
            self.shadow_hand_dof_default_vel.append(0.0)
            print("Max effort: ", shadow_hand_dof_props['effort'][i])
            # Hand-tuned PD/drive parameters applied uniformly to all DOFs.
            shadow_hand_dof_props['effort'][i] = 0.5
            shadow_hand_dof_props['stiffness'][i] = 3
            shadow_hand_dof_props['damping'][i] = 0.1
            shadow_hand_dof_props['friction'][i] = 0.01
            shadow_hand_dof_props['armature'][i] = 0.001
        self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device)
        self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device)
        self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device)
        self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device)
        self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device)
        # load manipulated object and goal assets (goal copy has no gravity)
        object_asset_options = gymapi.AssetOptions()
        object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
        object_asset_options.disable_gravity = True
        goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
        shadow_hand_start_pose = gymapi.Transform()
        shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx))
        shadow_hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi)
        # Object starts slightly offset from the palm.
        object_start_pose = gymapi.Transform()
        object_start_pose.p = gymapi.Vec3()
        object_start_pose.p.x = shadow_hand_start_pose.p.x
        pose_dy, pose_dz = -0.2, 0.06
        object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy
        object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz
        if self.object_type == "pen":
            object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02
        # The goal marker hovers next to the hand for visualization.
        self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12)
        self.goal_displacement_tensor = to_torch(
            [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)
        goal_start_pose = gymapi.Transform()
        goal_start_pose.p = object_start_pose.p + self.goal_displacement
        goal_start_pose.p.z -= 0.04
        # compute aggregate size (hand bodies/shapes + object + goal)
        max_agg_bodies = self.num_shadow_hand_bodies + 2
        max_agg_shapes = self.num_shadow_hand_shapes + 2
        self.allegro_hands = []
        self.envs = []
        self.object_init_state = []
        self.hand_start_states = []
        self.hand_indices = []
        self.fingertip_indices = []
        self.object_indices = []
        self.goal_object_indices = []
        shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(allegro_hand_asset)
        object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
        self.object_rb_handles = list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count))
        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            if self.aggregate_mode >= 1:
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
            # add hand - collision filter = -1 to use asset collision filters set in mjcf loader
            allegro_hand_actor = self.gym.create_actor(env_ptr, allegro_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0)
            self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z,
                                           shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w,
                                           0, 0, 0, 0, 0, 0])
            self.gym.set_actor_dof_properties(env_ptr, allegro_hand_actor, shadow_hand_dof_props)
            hand_idx = self.gym.get_actor_index(env_ptr, allegro_hand_actor, gymapi.DOMAIN_SIM)
            self.hand_indices.append(hand_idx)
            # add object
            object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0)
            self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
                                           object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w,
                                           0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            # add goal object (collision group i + num_envs so it never collides)
            goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)
            if self.object_type != "block":
                self.gym.set_rigid_body_color(
                    env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
                self.gym.set_rigid_body_color(
                    env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)
            self.envs.append(env_ptr)
            self.allegro_hands.append(allegro_hand_actor)
        # Masses read from the last env (identical across envs).
        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13)
        self.goal_states = self.object_init_state.clone()
        self.goal_states[:, self.up_axis_idx] -= 0.04
        self.goal_init_state = self.goal_states.clone()
        self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)
        self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
        self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
        self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device)
        self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)
        self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
    def compute_reward(self, actions):
        """Compute per-env rewards and reset/success bookkeeping in place.

        Delegates the math to the module-level ``compute_hand_reward``
        (defined elsewhere in this file) and writes its outputs directly
        into the reward/reset/progress/success buffers. Optionally prints
        running success statistics.
        """
        self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward(
            self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes,
            self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,
            self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale,
            self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty,
            self.max_consecutive_successes, self.av_factor, (self.object_type == "pen")
        )
        self.extras['consecutive_successes'] = self.consecutive_successes.mean()
        if self.print_success_stat:
            self.total_resets = self.total_resets + self.reset_buf.sum()
            direct_average_successes = self.total_successes + self.successes.sum()
            self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
            # The direct average shows the overall result more quickly, but slightly undershoots long term
            # policy performance.
            print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
            if self.total_resets > 0:
                print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))
    def compute_observations(self):
        """Refresh sim state tensors and fill the observation buffer(s).

        Slices object/goal state out of the root-state tensor, then
        dispatches to the observation builder matching ``self.obs_type``.
        When asymmetric observations are enabled, the privileged full state
        is additionally written into ``states_buf``.
        """
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        if self.obs_type == "full_state" or self.asymmetric_obs:
            self.gym.refresh_force_sensor_tensor(self.sim)
            self.gym.refresh_dof_force_tensor(self.sim)
        # Root state layout: pos(3), quat(4), linvel(3), angvel(3).
        self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
        self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
        self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
        self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
        self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
        self.goal_pose = self.goal_states[:, 0:7]
        self.goal_pos = self.goal_states[:, 0:3]
        self.goal_rot = self.goal_states[:, 3:7]
        if self.obs_type == "full_no_vel":
            self.compute_full_observations(True)
        elif self.obs_type == "full":
            self.compute_full_observations()
        elif self.obs_type == "full_state":
            self.compute_full_state()
        else:
            print("Unknown observations type!")
        if self.asymmetric_obs:
            self.compute_full_state(True)
    def compute_full_observations(self, no_vel=False):
        """Populate obs_buf with hand DOFs, object/goal poses, and actions.

        Layout (no_vel=True, 50 dims): dof_pos(16) | object_pose(7) |
        goal_pose(7) | relative quat(4) | actions(16).
        Layout (no_vel=False, 72 dims): dof_pos(16) | dof_vel(16) |
        object_pose(7) | object_linvel(3) | object_angvel(3) |
        goal_pose(7) | relative quat(4) | actions(16).
        DOF positions are normalized to [-1, 1] via their joint limits.
        """
        if no_vel:
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
                                                                   self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.obs_buf[:, 16:23] = self.object_pose
            self.obs_buf[:, 23:30] = self.goal_pose
            # Rotation of the object relative to the goal.
            self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 34:50] = self.actions
        else:
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
                                                                   self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
            # 2*16 = 32 -16
            self.obs_buf[:, 32:39] = self.object_pose
            self.obs_buf[:, 39:42] = self.object_linvel
            self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel
            self.obs_buf[:, 45:52] = self.goal_pose
            self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            self.obs_buf[:, 56:72] = self.actions
    def compute_full_state(self, asymm_obs=False):
        """Write the "full_state" layout: DOF pos/vel/torques + object + goal + actions.

        With asymm_obs=True the result goes into self.states_buf (privileged
        state for the critic in asymmetric actor-critic training); otherwise
        into self.obs_buf.  The two branches are identical except for the
        destination buffer.  Inline offsets assume num_shadow_hand_dofs == 16.
        """
        if asymm_obs:
            # 0:16 DOF positions (unscaled), 16:32 scaled DOF velocities
            self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
                                                                      self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
            # 32:48 joint torques read back from the sim, scaled down
            self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
            obj_obs_start = 3*self.num_shadow_hand_dofs  # 48
            self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
            self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
            self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
            goal_obs_start = obj_obs_start + 13  # 61
            self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
            self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            fingertip_obs_start = goal_obs_start + 11  # 72
            # Fingertip state / force-torque terms are not packed in this
            # variant, so obs_end stays at fingertip_obs_start (72);
            # total width = 72 + num_actions.
            obs_end = fingertip_obs_start
            self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions
        else:
            # Same layout, written to the regular observation buffer.
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos,
                                                                   self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
            self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
            obj_obs_start = 3*self.num_shadow_hand_dofs  # 48
            self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
            self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
            self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
            goal_obs_start = obj_obs_start + 13  # 61
            self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
            self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
            fingertip_obs_start = goal_obs_start + 11  # 72
            # No fingertip state/force-torque terms here either; see above.
            obs_end = fingertip_obs_start  # + num_ft_states + num_ft_force_torques (not used)
            self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions
    def reset_target_pose(self, env_ids, apply_reset=False):
        """Sample a new random goal orientation for the given envs.

        Args:
            env_ids: env indices whose goal should be re-randomized.
            apply_reset: if True, push the updated goal root states to the
                sim immediately (used when *only* goals reset this step);
                otherwise the caller batches the root-state write (reset_idx).
        """
        rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
        new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
        # Goal position stays at its initial value; only orientation changes.
        self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
        self.goal_states[env_ids, 3:7] = new_rot
        # The goal marker actor is rendered at a fixed display offset from the
        # logical goal position.
        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
        self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
        # Zero the marker's linear and angular velocities.
        self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])
        if apply_reset:
            goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
            self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                         gymtorch.unwrap_tensor(self.root_state_tensor),
                                                         gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
        self.reset_goal_buf[env_ids] = 0
    def reset_idx(self, env_ids, goal_env_ids):
        """Fully reset the given envs: object pose, goal, hand DOFs, forces.

        Args:
            env_ids: envs to reset completely.
            goal_env_ids: envs whose goal was already re-sampled this step but
                not yet pushed to the sim — their goal actor indices are
                folded into the single batched root-state write below.
        """
        # generate random values
        # Column layout of rand_floats: 0:2 object xy noise, up_axis_idx height
        # noise, 3:4 object rotation, 5:5+ndofs DOF position noise,
        # 5+ndofs:5+2*ndofs DOF velocity noise.
        rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device)
        # randomize start object poses
        self.reset_target_pose(env_ids)
        # reset rigid body forces
        self.rb_forces[env_ids, :, :] = 0.0
        # reset object to its initial state plus position noise
        self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
        self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
            self.reset_position_noise * rand_floats[:, 0:2]
        self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
            self.reset_position_noise * rand_floats[:, self.up_axis_idx]
        new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
        if self.object_type == "pen":
            # Pens use a constrained rotation so they start roughly horizontal.
            rand_angle_y = torch.tensor(0.3)
            new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y,
                                                    self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids])
        self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
        self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])
        # One batched root-state write for objects + goal markers (both the
        # fully-reset envs and the goal-only-reset envs).
        object_indices = torch.unique(torch.cat([self.object_indices[env_ids],
                                                 self.goal_object_indices[env_ids],
                                                 self.goal_object_indices[goal_env_ids]]).to(torch.int32))
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state_tensor),
                                                     gymtorch.unwrap_tensor(object_indices), len(object_indices))
        # reset random force probabilities: log-uniform in force_prob_range
        self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
                                                    * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))
        # reset shadow hand: default DOF pose plus noise scaled into the
        # remaining range toward each joint limit
        delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos
        delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos
        rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1)
        pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
        self.shadow_hand_dof_pos[env_ids, :] = pos
        self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \
            self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2]
        # Start both PD target buffers at the reset pose.
        self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos
        self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos
        hand_indices = self.hand_indices[env_ids].to(torch.int32)
        self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                        gymtorch.unwrap_tensor(self.prev_targets),
                                                        gymtorch.unwrap_tensor(hand_indices), len(env_ids))
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(hand_indices), len(env_ids))
        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0
        self.successes[env_ids] = 0
    def pre_physics_step(self, actions):
        """Apply one step of actions: handle resets, set DOF position targets,
        and optionally apply random disturbance forces to the object.

        Args:
            actions: per-env action tensor in [-1, 1] for the actuated DOFs.
        """
        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
        # if only goals need reset, then call set API
        if len(goal_env_ids) > 0 and len(env_ids) == 0:
            self.reset_target_pose(goal_env_ids, apply_reset=True)
        # if goals need reset in addition to other envs, call set API in reset()
        elif len(goal_env_ids) > 0:
            self.reset_target_pose(goal_env_ids)
        if len(env_ids) > 0:
            self.reset_idx(env_ids, goal_env_ids)
        self.actions = actions.clone().to(self.device)
        if self.use_relative_control:
            # Relative mode: actions are velocity-like deltas on the targets.
            targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions
            self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets,
                                                                          self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
        else:
            # Absolute mode: actions map directly onto the joint range, then
            # an exponential moving average smooths the targets over time.
            self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions,
                                                                   self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
            self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,
                                                                                                        self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
            self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices],
                                                                          self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
        self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
        if self.force_scale > 0.0:
            # Decay existing disturbance forces each step.
            self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)
            # apply new forces to randomly selected envs (per-env probability)
            force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
            self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
                self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale
            self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)
    def post_physics_step(self):
        """After the physics step: advance counters, refresh observations and
        rewards, and optionally draw goal/object axes in the viewer."""
        self.progress_buf += 1
        self.randomize_buf += 1
        self.compute_observations()
        self.compute_reward(self.actions)
        if self.viewer and self.debug_viz:
            # draw axes on target object
            self.gym.clear_lines(self.viewer)
            self.gym.refresh_rigid_body_state_tensor(self.sim)
            for i in range(self.num_envs):
                # Endpoints of the goal frame's x/y/z axes (0.2 m long).
                targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
                p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])
                # Same axes for the object's current frame.
                objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
                p0 = self.object_pos[i].cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_hand_reward(
    rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes,
    max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
    dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
    actions, action_penalty_scale: float,
    success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
    fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool
):
    # Per-env reward and reset computation for the in-hand reorientation task.
    # Returns (reward, resets, goal_resets, progress_buf, successes,
    # consecutive_successes).  TorchScript-compiled, so all tensors stay on
    # device and the Python branches are on scalar arguments only.
    # Distance from the hand to the object
    goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
    if ignore_z_rot:
        success_tolerance = 2.0 * success_tolerance
    # Orientation alignment for the cube in hand and goal cube
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    # Angle of the relative rotation: 2*asin(|vec part|), clamped for safety.
    rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
    dist_rew = goal_dist * dist_reward_scale
    rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale
    action_penalty = torch.sum(actions ** 2, dim=-1)
    # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
    # (action_penalty_scale is expected to carry the sign of the penalty)
    reward = dist_rew + rot_rew + action_penalty * action_penalty_scale
    # Find out which envs hit the goal and update successes count
    goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
    successes = successes + goal_resets
    # Success bonus: orientation is within `success_tolerance` of goal orientation
    reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
    # Fall penalty: distance to the goal is larger than a threshold
    reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)
    # Check env termination conditions, including maximum success number
    resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
    if max_consecutive_successes > 0:
        # Reset progress buffer on goal envs if max_consecutive_successes > 0
        progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
        resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
    timed_out = progress_buf >= max_episode_length - 1
    resets = torch.where(timed_out, torch.ones_like(resets), resets)
    # Apply penalty for not reaching the goal
    if max_consecutive_successes > 0:
        reward = torch.where(timed_out, reward + 0.5 * fall_penalty, reward)
    # Running average of successes-per-finished-episode across the batch.
    num_resets = torch.sum(resets)
    finished_cons_successes = torch.sum(successes * resets.float())
    cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)
    return reward, resets, goal_resets, progress_buf, successes, cons_successes
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
    """Compose a random rotation from two axis-angle turns.

    rand0 / rand1 in [-1, 1] are mapped to angles in [-pi, pi] about the
    per-env x and y unit axes; the two quaternions are then multiplied.
    """
    about_x = quat_from_angle_axis(rand0 * np.pi, x_unit_tensor)
    about_y = quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
    return quat_mul(about_x, about_y)
@torch.jit.script
def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor):
    """Random pen orientation: near-horizontal tilt about x plus a spin about z.

    NOTE(review): rand1 and y_unit_tensor are accepted but never used, and
    rand0 drives both angles — behavior preserved exactly from the original.
    """
    tilt = quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor)
    spin = quat_from_angle_axis(rand0 * np.pi, z_unit_tensor)
    return quat_mul(tilt, spin)
| 40,972 | Python | 54.897681 | 223 | 0.622157 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ball_balance.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float, tensor_clamp, torch_random_dir_2
from .base.vec_task import VecTask
def _indent_xml(elem, level=0):
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent_xml(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class BallBalance(VecTask):
    """Isaac Gym "balance bot" task: a three-legged tray robot keeps a ball
    near a target point by driving its three actuated leg joints.

    Each env contains two actors (bbot, ball), 6 DOFs (3 actuated upper-leg
    hinges + 3 passive lower-leg hinges) and 3 force sensors under the tray.
    The robot asset is generated procedurally as MJCF in
    _create_balance_bot_asset.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.cfg = cfg
        self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
        self.action_speed_scale = self.cfg["env"]["actionSpeedScale"]
        self.debug_viz = self.cfg["env"]["enableDebugVis"]
        # Per-env layout constants; must match the procedurally built MJCF.
        sensors_per_env = 3
        actors_per_env = 2
        dofs_per_env = 6
        bodies_per_env = 7 + 1
        # Observations:
        # 0:3 - activated DOF positions
        # 3:6 - activated DOF velocities
        # 6:9 - ball position
        # 9:12 - ball linear velocity
        # 12:15 - sensor force (same for each sensor)
        # 15:18 - sensor torque 1
        # 18:21 - sensor torque 2
        # 21:24 - sensor torque 3
        self.cfg["env"]["numObservations"] = 24
        # Actions: target velocities for the 3 actuated DOFs
        self.cfg["env"]["numActions"] = 3
        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
        # Raw sim-state buffers shared with the physics engine.
        self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        self.sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
        vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, actors_per_env, 13)
        vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)
        vec_sensor_tensor = gymtorch.wrap_tensor(self.sensor_tensor).view(self.num_envs, sensors_per_env, 6)
        # Views into the root-state tensor (actor 0 = bbot, actor 1 = ball).
        self.root_states = vec_root_tensor
        self.tray_positions = vec_root_tensor[..., 0, 0:3]
        self.ball_positions = vec_root_tensor[..., 1, 0:3]
        self.ball_orientations = vec_root_tensor[..., 1, 3:7]
        self.ball_linvels = vec_root_tensor[..., 1, 7:10]
        self.ball_angvels = vec_root_tensor[..., 1, 10:13]
        self.dof_states = vec_dof_tensor
        self.dof_positions = vec_dof_tensor[..., 0]
        self.dof_velocities = vec_dof_tensor[..., 1]
        self.sensor_forces = vec_sensor_tensor[..., 0:3]
        self.sensor_torques = vec_sensor_tensor[..., 3:6]
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        # Snapshots used to restore state on env reset.
        self.initial_dof_states = self.dof_states.clone()
        self.initial_root_states = vec_root_tensor.clone()
        self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False)
        # Global actor indices for the indexed set_* sim APIs.
        self.all_actor_indices = torch.arange(actors_per_env * self.num_envs, dtype=torch.int32, device=self.device).view(self.num_envs, actors_per_env)
        self.all_bbot_indices = actors_per_env * torch.arange(self.num_envs, dtype=torch.int32, device=self.device)
        # vis
        self.axes_geom = gymutil.AxesGeometry(0.2)

    def create_sim(self):
        """Create the sim (z-up, standard gravity), build the robot asset,
        the ground plane, and all envs."""
        self.dt = self.sim_params.dt
        self.sim_params.up_axis = gymapi.UP_AXIS_Z
        self.sim_params.gravity.x = 0
        self.sim_params.gravity.y = 0
        self.sim_params.gravity.z = -9.81
        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_balance_bot_asset()
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

    def _create_balance_bot_asset(self):
        """Procedurally generate the balance-bot MJCF and write it to
        "balance_bot.xml" in the working directory for load_asset to read."""
        # there is an asset balance_bot.xml, here we override some features.
        tray_radius = 0.5
        tray_thickness = 0.02
        leg_radius = 0.02
        leg_outer_offset = tray_radius - 0.1
        leg_length = leg_outer_offset - 2 * leg_radius
        leg_inner_offset = leg_outer_offset - leg_length / math.sqrt(2)
        tray_height = leg_length * math.sqrt(2) + 2 * leg_radius + 0.5 * tray_thickness
        root = ET.Element('mujoco')
        root.attrib["model"] = "BalanceBot"
        compiler = ET.SubElement(root, "compiler")
        compiler.attrib["angle"] = "degree"
        compiler.attrib["coordinate"] = "local"
        compiler.attrib["inertiafromgeom"] = "true"
        worldbody = ET.SubElement(root, "worldbody")
        # Free-floating tray body with a cylinder geom.
        tray = ET.SubElement(worldbody, "body")
        tray.attrib["name"] = "tray"
        tray.attrib["pos"] = "%g %g %g" % (0, 0, tray_height)
        tray_joint = ET.SubElement(tray, "joint")
        tray_joint.attrib["name"] = "root_joint"
        tray_joint.attrib["type"] = "free"
        tray_geom = ET.SubElement(tray, "geom")
        tray_geom.attrib["type"] = "cylinder"
        tray_geom.attrib["size"] = "%g %g" % (tray_radius, 0.5 * tray_thickness)
        tray_geom.attrib["pos"] = "0 0 0"
        tray_geom.attrib["density"] = "100"
        # Three legs at 120-degree spacing, each an upper + lower capsule
        # connected by hinge joints.
        leg_angles = [0.0, 2.0 / 3.0 * math.pi, 4.0 / 3.0 * math.pi]
        for i in range(len(leg_angles)):
            angle = leg_angles[i]
            upper_leg_from = gymapi.Vec3()
            upper_leg_from.x = leg_outer_offset * math.cos(angle)
            upper_leg_from.y = leg_outer_offset * math.sin(angle)
            upper_leg_from.z = -leg_radius - 0.5 * tray_thickness
            upper_leg_to = gymapi.Vec3()
            upper_leg_to.x = leg_inner_offset * math.cos(angle)
            upper_leg_to.y = leg_inner_offset * math.sin(angle)
            upper_leg_to.z = upper_leg_from.z - leg_length / math.sqrt(2)
            upper_leg_pos = (upper_leg_from + upper_leg_to) * 0.5
            upper_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.75 * math.pi, angle)
            upper_leg = ET.SubElement(tray, "body")
            upper_leg.attrib["name"] = "upper_leg" + str(i)
            upper_leg.attrib["pos"] = "%g %g %g" % (upper_leg_pos.x, upper_leg_pos.y, upper_leg_pos.z)
            upper_leg.attrib["quat"] = "%g %g %g %g" % (upper_leg_quat.w, upper_leg_quat.x, upper_leg_quat.y, upper_leg_quat.z)
            upper_leg_geom = ET.SubElement(upper_leg, "geom")
            upper_leg_geom.attrib["type"] = "capsule"
            upper_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length)
            upper_leg_geom.attrib["density"] = "1000"
            # Actuated hinge between tray and upper leg.
            upper_leg_joint = ET.SubElement(upper_leg, "joint")
            upper_leg_joint.attrib["name"] = "upper_leg_joint" + str(i)
            upper_leg_joint.attrib["type"] = "hinge"
            upper_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length)
            upper_leg_joint.attrib["axis"] = "0 1 0"
            upper_leg_joint.attrib["limited"] = "true"
            upper_leg_joint.attrib["range"] = "-45 45"
            lower_leg_pos = gymapi.Vec3(-0.5 * leg_length, 0, 0.5 * leg_length)
            lower_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.5 * math.pi, 0)
            lower_leg = ET.SubElement(upper_leg, "body")
            lower_leg.attrib["name"] = "lower_leg" + str(i)
            lower_leg.attrib["pos"] = "%g %g %g" % (lower_leg_pos.x, lower_leg_pos.y, lower_leg_pos.z)
            lower_leg.attrib["quat"] = "%g %g %g %g" % (lower_leg_quat.w, lower_leg_quat.x, lower_leg_quat.y, lower_leg_quat.z)
            lower_leg_geom = ET.SubElement(lower_leg, "geom")
            lower_leg_geom.attrib["type"] = "capsule"
            lower_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length)
            lower_leg_geom.attrib["density"] = "1000"
            # Passive hinge between upper and lower leg.
            lower_leg_joint = ET.SubElement(lower_leg, "joint")
            lower_leg_joint.attrib["name"] = "lower_leg_joint" + str(i)
            lower_leg_joint.attrib["type"] = "hinge"
            lower_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length)
            lower_leg_joint.attrib["axis"] = "0 1 0"
            lower_leg_joint.attrib["limited"] = "true"
            lower_leg_joint.attrib["range"] = "-70 90"
        _indent_xml(root)
        ET.ElementTree(root).write("balance_bot.xml")
        # save some useful robot parameters
        self.tray_height = tray_height
        self.leg_radius = leg_radius
        self.leg_length = leg_length
        self.leg_outer_offset = leg_outer_offset
        self.leg_angles = leg_angles

    def _create_ground_plane(self):
        """Add a flat z-up ground plane to the sim."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the generated asset, attach force sensors, then build every
        env: one bbot actor (with foot attractors) plus one ball actor."""
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)
        asset_root = "."
        asset_file = "balance_bot.xml"
        asset_path = os.path.join(asset_root, asset_file)
        asset_root = os.path.dirname(asset_path)
        asset_file = os.path.basename(asset_path)
        bbot_options = gymapi.AssetOptions()
        bbot_options.fix_base_link = False
        bbot_options.slices_per_cylinder = 40
        bbot_asset = self.gym.load_asset(self.sim, asset_root, asset_file, bbot_options)
        # printed view of asset built
        # self.gym.debug_print_asset(bbot_asset)
        self.num_bbot_dofs = self.gym.get_asset_dof_count(bbot_asset)
        bbot_dof_props = self.gym.get_asset_dof_properties(bbot_asset)
        self.bbot_dof_lower_limits = []
        self.bbot_dof_upper_limits = []
        for i in range(self.num_bbot_dofs):
            self.bbot_dof_lower_limits.append(bbot_dof_props['lower'][i])
            self.bbot_dof_upper_limits.append(bbot_dof_props['upper'][i])
        self.bbot_dof_lower_limits = to_torch(self.bbot_dof_lower_limits, device=self.device)
        self.bbot_dof_upper_limits = to_torch(self.bbot_dof_upper_limits, device=self.device)
        bbot_pose = gymapi.Transform()
        bbot_pose.p.z = self.tray_height
        # create force sensors attached to the tray body, one above each leg
        bbot_tray_idx = self.gym.find_asset_rigid_body_index(bbot_asset, "tray")
        for angle in self.leg_angles:
            sensor_pose = gymapi.Transform()
            sensor_pose.p.x = self.leg_outer_offset * math.cos(angle)
            sensor_pose.p.y = self.leg_outer_offset * math.sin(angle)
            self.gym.create_asset_force_sensor(bbot_asset, bbot_tray_idx, sensor_pose)
        # create ball asset
        self.ball_radius = 0.1
        ball_options = gymapi.AssetOptions()
        ball_options.density = 200
        ball_asset = self.gym.create_sphere(self.sim, self.ball_radius, ball_options)
        self.envs = []
        self.bbot_handles = []
        self.obj_handles = []
        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            bbot_handle = self.gym.create_actor(env_ptr, bbot_asset, bbot_pose, "bbot", i, 0, 0)
            # Odd DOFs (upper-leg hinges) are position-driven; even DOFs
            # (lower-leg hinges) are left passive.
            actuated_dofs = np.array([1, 3, 5])
            free_dofs = np.array([0, 2, 4])
            dof_props = self.gym.get_actor_dof_properties(env_ptr, bbot_handle)
            dof_props['driveMode'][actuated_dofs] = gymapi.DOF_MODE_POS
            dof_props['stiffness'][actuated_dofs] = 4000.0
            dof_props['damping'][actuated_dofs] = 100.0
            dof_props['driveMode'][free_dofs] = gymapi.DOF_MODE_NONE
            dof_props['stiffness'][free_dofs] = 0
            dof_props['damping'][free_dofs] = 0
            self.gym.set_actor_dof_properties(env_ptr, bbot_handle, dof_props)
            lower_leg_handles = []
            lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg0"))
            lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg1"))
            lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg2"))
            # create attractors to hold the feet in place
            attractor_props = gymapi.AttractorProperties()
            attractor_props.stiffness = 5e7
            attractor_props.damping = 5e3
            attractor_props.axes = gymapi.AXIS_TRANSLATION
            for j in range(3):
                angle = self.leg_angles[j]
                attractor_props.rigid_handle = lower_leg_handles[j]
                # attractor world pose to keep the feet in place
                attractor_props.target.p.x = self.leg_outer_offset * math.cos(angle)
                attractor_props.target.p.z = self.leg_radius
                attractor_props.target.p.y = self.leg_outer_offset * math.sin(angle)
                # attractor local pose in lower leg body
                attractor_props.offset.p.z = 0.5 * self.leg_length
                self.gym.create_rigid_body_attractor(env_ptr, attractor_props)
            # Ball starts slightly off-center, above the tray.
            ball_pose = gymapi.Transform()
            ball_pose.p.x = 0.2
            ball_pose.p.z = 2.0
            ball_handle = self.gym.create_actor(env_ptr, ball_asset, ball_pose, "ball", i, 0, 0)
            self.obj_handles.append(ball_handle)
            # pretty colors
            self.gym.set_rigid_body_color(env_ptr, ball_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.99, 0.66, 0.25))
            self.gym.set_rigid_body_color(env_ptr, bbot_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.48, 0.65, 0.8))
            for j in range(1, 7):
                self.gym.set_rigid_body_color(env_ptr, bbot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.15, 0.2, 0.3))
            self.envs.append(env_ptr)
            self.bbot_handles.append(bbot_handle)

    def compute_observations(self):
        """Pack the 24-dim observation vector (see layout in __init__)."""
        #print("~!~!~!~! Computing obs")
        actuated_dof_indices = torch.tensor([1, 3, 5], device=self.device)
        #print(self.dof_states[:, actuated_dof_indices, :])
        self.obs_buf[..., 0:3] = self.dof_positions[..., actuated_dof_indices]
        self.obs_buf[..., 3:6] = self.dof_velocities[..., actuated_dof_indices]
        self.obs_buf[..., 6:9] = self.ball_positions
        self.obs_buf[..., 9:12] = self.ball_linvels
        # NOTE(review): [..., 0] selects the x-component of each of the 3
        # sensors (shape (envs, 3)), not sensor 0's full vector — the layout
        # comment in __init__ suggests the latter was intended; verify.
        self.obs_buf[..., 12:15] = self.sensor_forces[..., 0] / 20  # !!! lousy normalization
        self.obs_buf[..., 15:18] = self.sensor_torques[..., 0] / 20  # !!! lousy normalization
        self.obs_buf[..., 18:21] = self.sensor_torques[..., 1] / 20  # !!! lousy normalization
        self.obs_buf[..., 21:24] = self.sensor_torques[..., 2] / 20  # !!! lousy normalization
        return self.obs_buf

    def compute_reward(self):
        """Fill rew_buf / reset_buf via the jit-scripted reward function."""
        self.rew_buf[:], self.reset_buf[:] = compute_bbot_reward(
            self.tray_positions,
            self.ball_positions,
            self.ball_linvels,
            self.ball_radius,
            self.reset_buf, self.progress_buf, self.max_episode_length
        )

    def reset_idx(self, env_ids):
        """Reset the given envs: restore initial states, then launch the ball
        from a random position toward the tray center."""
        num_resets = len(env_ids)
        # reset bbot and ball root states
        self.root_states[env_ids] = self.initial_root_states[env_ids]
        min_d = 0.001  # min horizontal dist from origin
        max_d = 0.5  # max horizontal dist from origin
        min_height = 1.0
        max_height = 2.0
        min_horizontal_speed = 0
        max_horizontal_speed = 5
        dists = torch_rand_float(min_d, max_d, (num_resets, 1), self.device)
        dirs = torch_random_dir_2((num_resets, 1), self.device)
        hpos = dists * dirs
        # Farther starts get proportionally more inward velocity.
        speedscales = (dists - min_d) / (max_d - min_d)
        hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self.device)
        hvels = -speedscales * hspeeds * dirs
        # Constant -5 m/s downward (the [5.0, 5.0] range is degenerate).
        vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self.device).squeeze()
        self.ball_positions[env_ids, 0] = hpos[..., 0]
        self.ball_positions[env_ids, 2] = torch_rand_float(min_height, max_height, (num_resets, 1), self.device).squeeze()
        self.ball_positions[env_ids, 1] = hpos[..., 1]
        self.ball_orientations[env_ids, 0:3] = 0
        self.ball_orientations[env_ids, 3] = 1
        self.ball_linvels[env_ids, 0] = hvels[..., 0]
        self.ball_linvels[env_ids, 2] = vspeeds
        self.ball_linvels[env_ids, 1] = hvels[..., 1]
        self.ball_angvels[env_ids] = 0
        # reset root state for bbots and balls in selected envs
        actor_indices = self.all_actor_indices[env_ids].flatten()
        self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), len(actor_indices))
        # reset DOF states for bbots in selected envs
        bbot_indices = self.all_bbot_indices[env_ids].flatten()
        self.dof_states[env_ids] = self.initial_dof_states[env_ids]
        self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(bbot_indices), len(bbot_indices))
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def pre_physics_step(self, _actions):
        """Integrate actions into the DOF position targets and push them to
        the sim (actions act as target velocities; see action_speed_scale)."""
        # resets
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)
        actions = _actions.to(self.device)
        actuated_indices = torch.LongTensor([1, 3, 5])
        # update position targets from actions
        self.dof_position_targets[..., actuated_indices] += self.dt * self.action_speed_scale * actions
        self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits)
        # reset position targets for reset envs
        self.dof_position_targets[reset_env_ids] = 0
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets))

    def post_physics_step(self):
        """Refresh sim state, recompute observations and rewards, and draw
        debug axes on the upper legs when the viewer is active."""
        self.progress_buf += 1
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_force_sensor_tensor(self.sim)
        self.compute_observations()
        self.compute_reward()
        # vis
        if self.viewer and self.debug_viz:
            self.gym.clear_lines(self.viewer)
            for i in range(self.num_envs):
                env = self.envs[i]
                bbot_handle = self.bbot_handles[i]
                body_handles = []
                body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg0"))
                body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg1"))
                body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg2"))
                for lhandle in body_handles:
                    lpose = self.gym.get_rigid_transform(env, lhandle)
                    gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, env, lpose)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_bbot_reward(tray_positions, ball_positions, ball_velocities, ball_radius, reset_buf, progress_buf, max_episode_length):
    # type: (Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
    # Reward peaks when the ball sits motionless at the target point (0, 0, 0.7).
    dx = ball_positions[..., 0]
    dy = ball_positions[..., 1]
    dz = ball_positions[..., 2] - 0.7
    ball_dist = torch.sqrt(dx * dx + dy * dy + dz * dz)

    vx = ball_velocities[..., 0]
    vy = ball_velocities[..., 1]
    vz = ball_velocities[..., 2]
    ball_speed = torch.sqrt(vx * vx + vy * vy + vz * vz)

    # Both factors lie in (0, 1]; their product is 1 only for a centered, still ball.
    reward = (1.0 / (1.0 + ball_dist)) * (1.0 / (1.0 + ball_speed))

    # Reset on episode timeout, or when the ball has dropped near/below tray level.
    reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
    reset = torch.where(ball_positions[..., 2] < ball_radius * 1.5, torch.ones_like(reset_buf), reset)
    return reward, reset
| 22,414 | Python | 45.991614 | 217 | 0.605559 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal_terrain.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import time
from typing import Tuple, Dict

import numpy as np
import torch

from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgym import gymutil

from .base.vec_task import VecTask
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, normalize, quat_apply, quat_rotate_inverse
from isaacgymenvs.tasks.base.vec_task import VecTask
class AnymalTerrain(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
    """ANYmal locomotion task on procedurally generated rough terrain.

    Reads normalization, reward, command and init-state settings from ``cfg``,
    lets ``VecTask.__init__`` create the simulation (it calls ``create_sim``),
    wraps the GPU state tensors, and performs an initial reset of every env.
    """
    self.cfg = cfg

    self.height_samples = None
    self.custom_origins = False
    self.debug_viz = self.cfg["env"]["enableDebugVis"]
    self.init_done = False  # update_terrain_level() skips curriculum until init finishes

    # normalization: scales applied to observation components
    self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
    self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
    self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"]
    self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"]
    self.height_meas_scale = self.cfg["env"]["learn"]["heightMeasurementScale"]
    self.action_scale = self.cfg["env"]["control"]["actionScale"]

    # reward scales (converted to per-step values below by multiplying with dt)
    self.rew_scales = {}
    self.rew_scales["termination"] = self.cfg["env"]["learn"]["terminalReward"]
    self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"]
    self.rew_scales["lin_vel_z"] = self.cfg["env"]["learn"]["linearVelocityZRewardScale"]
    self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"]
    self.rew_scales["ang_vel_xy"] = self.cfg["env"]["learn"]["angularVelocityXYRewardScale"]
    self.rew_scales["orient"] = self.cfg["env"]["learn"]["orientationRewardScale"]
    self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]
    self.rew_scales["joint_acc"] = self.cfg["env"]["learn"]["jointAccRewardScale"]
    self.rew_scales["base_height"] = self.cfg["env"]["learn"]["baseHeightRewardScale"]
    self.rew_scales["air_time"] = self.cfg["env"]["learn"]["feetAirTimeRewardScale"]
    self.rew_scales["collision"] = self.cfg["env"]["learn"]["kneeCollisionRewardScale"]
    self.rew_scales["stumble"] = self.cfg["env"]["learn"]["feetStumbleRewardScale"]
    self.rew_scales["action_rate"] = self.cfg["env"]["learn"]["actionRateRewardScale"]
    self.rew_scales["hip"] = self.cfg["env"]["learn"]["hipRewardScale"]

    # command ranges for randomly sampled velocity commands
    self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
    self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
    self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]

    # base init state: pos(3) + quat(4) + lin vel(3) + ang vel(3) = 13 values
    pos = self.cfg["env"]["baseInitState"]["pos"]
    rot = self.cfg["env"]["baseInitState"]["rot"]
    v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
    v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
    self.base_init_state = pos + rot + v_lin + v_ang

    # default joint positions, looked up by DOF name
    self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]

    # other
    self.decimation = self.cfg["env"]["control"]["decimation"]
    self.dt = self.decimation * self.cfg["sim"]["dt"]  # control-step duration
    self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
    self.max_episode_length = int(self.max_episode_length_s/ self.dt + 0.5)
    self.push_interval = int(self.cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5)
    self.allow_knee_contacts = self.cfg["env"]["learn"]["allowKneeContacts"]
    self.Kp = self.cfg["env"]["control"]["stiffness"]
    self.Kd = self.cfg["env"]["control"]["damping"]
    self.curriculum = self.cfg["env"]["terrain"]["curriculum"]

    # make the reward scales per-step so episode totals are step-rate independent
    for key in self.rew_scales.keys():
        self.rew_scales[key] *= self.dt

    super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

    # position the viewer camera when rendering is enabled
    if self.graphics_device_id != -1:
        p = self.cfg["env"]["viewer"]["pos"]
        lookat = self.cfg["env"]["viewer"]["lookat"]
        cam_pos = gymapi.Vec3(p[0], p[1], p[2])
        cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

    # get gym GPU state tensors
    actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
    dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
    net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
    self.gym.refresh_dof_state_tensor(self.sim)
    self.gym.refresh_actor_root_state_tensor(self.sim)
    self.gym.refresh_net_contact_force_tensor(self.sim)

    # create some wrapper tensors for different slices
    self.root_states = gymtorch.wrap_tensor(actor_root_state)
    self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
    self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
    self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
    self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3)  # shape: num_envs, num_bodies, xyz axis

    # initialize some data used later on
    self.common_step_counter = 0
    self.extras = {}
    self.noise_scale_vec = self._get_noise_scale_vec(self.cfg)
    self.commands = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)  # x vel, y vel, yaw vel, heading
    self.commands_scale = torch.tensor([self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False,)
    self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
    self.forward_vec = to_torch([1., 0., 0.], device=self.device).repeat((self.num_envs, 1))
    self.torques = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
    self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
    self.last_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
    self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)
    self.last_dof_vel = torch.zeros_like(self.dof_vel)
    self.height_points = self.init_height_points()
    self.measured_heights = None

    # joint positions offsets
    self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False)
    for i in range(self.num_actions):
        name = self.dof_names[i]
        angle = self.named_default_joint_angles[name]
        self.default_dof_pos[:, i] = angle

    # reward episode sums (used to report per-term averages via `extras`)
    torch_zeros = lambda : torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
    self.episode_sums = {"lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(),
                         "orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(),
                         "air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros()}

    self.reset_idx(torch.arange(self.num_envs, device=self.device))
    self.init_done = True
def create_sim(self):
    """Create the physics sim, the configured terrain, and all environments."""
    self.up_axis_idx = 2  # index of up axis: Y=1, Z=2
    self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)

    terrain_kind = self.cfg["env"]["terrain"]["terrainType"]
    if terrain_kind == 'plane':
        self._create_ground_plane()
    elif terrain_kind == 'trimesh':
        # Procedural heightfield terrain: envs get per-patch origins.
        self._create_trimesh()
        self.custom_origins = True

    self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _get_noise_scale_vec(self, cfg):
    """Per-component observation-noise scales, laid out exactly like obs_buf.

    Layout: [0:3] lin vel, [3:6] ang vel, [6:9] gravity, [9:12] commands,
    [12:24] dof pos, [24:36] dof vel, [36:176] heights, [176:188] prev actions.
    """
    learn_cfg = self.cfg["env"]["learn"]
    self.add_noise = learn_cfg["addNoise"]
    level = learn_cfg["noiseLevel"]

    noise_vec = torch.zeros_like(self.obs_buf[0])
    noise_vec[:3] = learn_cfg["linearVelocityNoise"] * level * self.lin_vel_scale
    noise_vec[3:6] = learn_cfg["angularVelocityNoise"] * level * self.ang_vel_scale
    noise_vec[6:9] = learn_cfg["gravityNoise"] * level
    noise_vec[9:12] = 0.  # commands are noise-free
    noise_vec[12:24] = learn_cfg["dofPositionNoise"] * level * self.dof_pos_scale
    noise_vec[24:36] = learn_cfg["dofVelocityNoise"] * level * self.dof_vel_scale
    noise_vec[36:176] = learn_cfg["heightMeasurementNoise"] * level * self.height_meas_scale
    noise_vec[176:188] = 0.  # previous actions are noise-free
    return noise_vec
def _create_ground_plane(self):
    """Add an infinite static ground plane using friction/restitution from the config."""
    terrain_cfg = self.cfg["env"]["terrain"]
    params = gymapi.PlaneParams()
    params.normal = gymapi.Vec3(0.0, 0.0, 1.0)  # z-up
    params.static_friction = terrain_cfg["staticFriction"]
    params.dynamic_friction = terrain_cfg["dynamicFriction"]
    params.restitution = terrain_cfg["restitution"]
    self.gym.add_ground(self.sim, params)
def _create_trimesh(self):
    """Generate procedural terrain and add its triangle mesh to the simulation."""
    self.terrain = Terrain(self.cfg["env"]["terrain"], num_robots=self.num_envs)

    mesh_params = gymapi.TriangleMeshParams()
    mesh_params.nb_vertices = self.terrain.vertices.shape[0]
    mesh_params.nb_triangles = self.terrain.triangles.shape[0]
    # Shift by the border so the first terrain patch starts at the world origin.
    mesh_params.transform.p.x = -self.terrain.border_size
    mesh_params.transform.p.y = -self.terrain.border_size
    mesh_params.transform.p.z = 0.0

    terrain_cfg = self.cfg["env"]["terrain"]
    mesh_params.static_friction = terrain_cfg["staticFriction"]
    mesh_params.dynamic_friction = terrain_cfg["dynamicFriction"]
    mesh_params.restitution = terrain_cfg["restitution"]

    self.gym.add_triangle_mesh(self.sim, self.terrain.vertices.flatten(order='C'), self.terrain.triangles.flatten(order='C'), mesh_params)
    # Keep the raw heightfield around for fast height queries in get_heights().
    self.height_samples = torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device)
def _create_envs(self, num_envs, spacing, num_per_row):
    """Load the ANYmal asset, randomize per-env friction, create all actors, cache body indices."""
    asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
    asset_file = self.cfg["env"]["urdfAsset"]["file"]
    asset_path = os.path.join(asset_root, asset_file)
    asset_root = os.path.dirname(asset_path)
    asset_file = os.path.basename(asset_path)

    asset_options = gymapi.AssetOptions()
    asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT  # torques are applied in pre_physics_step
    asset_options.collapse_fixed_joints = True
    asset_options.replace_cylinder_with_capsule = True
    asset_options.flip_visual_attachments = True
    asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"]
    asset_options.density = 0.001
    asset_options.angular_damping = 0.0
    asset_options.linear_damping = 0.0
    asset_options.armature = 0.0
    asset_options.thickness = 0.01
    asset_options.disable_gravity = False

    anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
    self.num_dof = self.gym.get_asset_dof_count(anymal_asset)
    self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset)

    # prepare friction randomization: each env draws one of 100 pre-sampled values
    rigid_shape_prop = self.gym.get_asset_rigid_shape_properties(anymal_asset)
    friction_range = self.cfg["env"]["learn"]["frictionRange"]
    num_buckets = 100
    friction_buckets = torch_rand_float(friction_range[0], friction_range[1], (num_buckets,1), device=self.device)

    self.base_init_state = to_torch(self.base_init_state, device=self.device, requires_grad=False)
    start_pose = gymapi.Transform()
    start_pose.p = gymapi.Vec3(*self.base_init_state[:3])

    # resolve feet/knee body names by substring match against the config
    body_names = self.gym.get_asset_rigid_body_names(anymal_asset)
    self.dof_names = self.gym.get_asset_dof_names(anymal_asset)
    foot_name = self.cfg["env"]["urdfAsset"]["footName"]
    knee_name = self.cfg["env"]["urdfAsset"]["kneeName"]
    feet_names = [s for s in body_names if foot_name in s]
    self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False)
    knee_names = [s for s in body_names if knee_name in s]
    self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False)
    self.base_index = 0

    dof_props = self.gym.get_asset_dof_properties(anymal_asset)

    # env origins: on trimesh terrain each env spawns at its (level, type) patch origin
    self.env_origins = torch.zeros(self.num_envs, 3, device=self.device, requires_grad=False)
    if not self.curriculum: self.cfg["env"]["terrain"]["maxInitMapLevel"] = self.cfg["env"]["terrain"]["numLevels"] - 1
    self.terrain_levels = torch.randint(0, self.cfg["env"]["terrain"]["maxInitMapLevel"]+1, (self.num_envs,), device=self.device)
    self.terrain_types = torch.randint(0, self.cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device)
    if self.custom_origins:
        self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float)
        spacing = 0.  # patch origins replace the regular env-grid spacing

    env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
    env_upper = gymapi.Vec3(spacing, spacing, spacing)
    self.anymal_handles = []
    self.envs = []
    for i in range(self.num_envs):
        # create env instance
        env_handle = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row)
        if self.custom_origins:
            self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]]
            pos = self.env_origins[i].clone()
            pos[:2] += torch_rand_float(-1., 1., (2, 1), device=self.device).squeeze(1)
            start_pose.p = gymapi.Vec3(*pos)
        # assign this env's friction bucket on the asset before creating its actor
        for s in range(len(rigid_shape_prop)):
            rigid_shape_prop[s].friction = friction_buckets[i % num_buckets]
        self.gym.set_asset_rigid_shape_properties(anymal_asset, rigid_shape_prop)
        anymal_handle = self.gym.create_actor(env_handle, anymal_asset, start_pose, "anymal", i, 0, 0)
        self.gym.set_actor_dof_properties(env_handle, anymal_handle, dof_props)
        self.envs.append(env_handle)
        self.anymal_handles.append(anymal_handle)

    # cache rigid-body indices once; identical for all envs
    for i in range(len(feet_names)):
        self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i])
    for i in range(len(knee_names)):
        self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i])
    self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base")
def check_termination(self):
    """Flag envs for reset: base contact, (optionally) knee contact, or episode timeout."""
    base_contact = torch.norm(self.contact_forces[:, self.base_index, :], dim=1) > 1.
    self.reset_buf = base_contact
    if not self.allow_knee_contacts:
        knee_forces = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2)
        self.reset_buf |= torch.any(knee_forces > 1., dim=1)
    self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1,
                                 torch.ones_like(self.reset_buf), self.reset_buf)
def compute_observations(self):
    """Assemble obs_buf: base vels, gravity, commands, DOF state, heights, last actions."""
    self.measured_heights = self.get_heights()
    # Heights relative to a nominal base height of 0.5 m, clipped to [-1, 1] then scaled.
    heights = torch.clip(self.root_states[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.) * self.height_meas_scale
    pieces = [
        self.base_lin_vel * self.lin_vel_scale,
        self.base_ang_vel * self.ang_vel_scale,
        self.projected_gravity,
        self.commands[:, :3] * self.commands_scale,
        self.dof_pos * self.dof_pos_scale,
        self.dof_vel * self.dof_vel_scale,
        heights,
        self.actions,
    ]
    self.obs_buf = torch.cat(pieces, dim=-1)
def compute_reward(self):
    """Compute the per-step locomotion reward and accumulate per-term episode sums.

    Also mutates `feet_air_time` (incremented each step, zeroed on contact).
    All `rew_scales` were pre-multiplied by dt in __init__.
    """
    # velocity tracking reward (exponential kernel around the commanded velocities)
    lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
    ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
    rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * self.rew_scales["lin_vel_xy"]
    rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * self.rew_scales["ang_vel_z"]

    # other base velocity penalties
    rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
    rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"]

    # orientation penalty: non-zero gravity projection on base x/y means a tilted base
    rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"]

    # base height penalty
    rew_base_height = torch.square(self.root_states[:, 2] - 0.52) * self.rew_scales["base_height"] # TODO add target base height to cfg

    # torque penalty
    rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"]

    # joint acc penalty (finite difference of DOF velocities)
    rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"]

    # collision penalty
    knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1.
    rew_collision = torch.sum(knee_contact, dim=1) * self.rew_scales["collision"] # sum vs any ?

    # stumbling penalty: large tangential foot force with near-zero normal force
    stumble = (torch.norm(self.contact_forces[:, self.feet_indices, :2], dim=2) > 5.) * (torch.abs(self.contact_forces[:, self.feet_indices, 2]) < 1.)
    rew_stumble = torch.sum(stumble, dim=1) * self.rew_scales["stumble"]

    # action rate penalty
    rew_action_rate = torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]

    # air time reward
    # contact = torch.norm(contact_forces[:, feet_indices, :], dim=2) > 1.
    contact = self.contact_forces[:, self.feet_indices, 2] > 1.
    first_contact = (self.feet_air_time > 0.) * contact
    self.feet_air_time += self.dt
    rew_airTime = torch.sum((self.feet_air_time - 0.5) * first_contact, dim=1) * self.rew_scales["air_time"] # reward only on first contact with the ground
    rew_airTime *= torch.norm(self.commands[:, :2], dim=1) > 0.1 #no reward for zero command
    self.feet_air_time *= ~contact

    # cosmetic penalty for hip motion (DOF indices 0, 3, 6, 9)
    rew_hip = torch.sum(torch.abs(self.dof_pos[:, [0, 3, 6, 9]] - self.default_dof_pos[:, [0, 3, 6, 9]]), dim=1)* self.rew_scales["hip"]

    # total reward, clipped below at zero
    self.rew_buf = rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height +\
                   rew_torque + rew_joint_acc + rew_collision + rew_action_rate + rew_airTime + rew_hip + rew_stumble
    self.rew_buf = torch.clip(self.rew_buf, min=0., max=None)

    # add termination reward (only for true failures, excluded on timeouts)
    self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf

    # log episode reward sums
    self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy
    self.episode_sums["ang_vel_z"] += rew_ang_vel_z
    self.episode_sums["lin_vel_z"] += rew_lin_vel_z
    self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy
    self.episode_sums["orient"] += rew_orient
    self.episode_sums["torques"] += rew_torque
    self.episode_sums["joint_acc"] += rew_joint_acc
    self.episode_sums["collision"] += rew_collision
    self.episode_sums["stumble"] += rew_stumble
    self.episode_sums["action_rate"] += rew_action_rate
    self.episode_sums["air_time"] += rew_airTime
    self.episode_sums["base_height"] += rew_base_height
    self.episode_sums["hip"] += rew_hip
def reset_idx(self, env_ids):
    """Reset the given envs: randomized DOF state, new origin (curriculum), new commands, logging."""
    # randomize initial DOF positions (0.5x-1.5x default) and small velocities
    positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
    velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)

    self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
    self.dof_vel[env_ids] = velocities

    env_ids_int32 = env_ids.to(dtype=torch.int32)

    if self.custom_origins:
        # curriculum terrain: possibly promote/demote, then spawn at the patch origin
        self.update_terrain_level(env_ids)
        self.root_states[env_ids] = self.base_init_state
        self.root_states[env_ids, :3] += self.env_origins[env_ids]
        self.root_states[env_ids, :2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device)
    else:
        self.root_states[env_ids] = self.base_init_state

    self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                 gymtorch.unwrap_tensor(self.root_states),
                                                 gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
    self.gym.set_dof_state_tensor_indexed(self.sim,
                                          gymtorch.unwrap_tensor(self.dof_state),
                                          gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))

    # sample new commands; index 3 is a heading converted to yaw rate in post_physics_step
    self.commands[env_ids, 0] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze()
    self.commands[env_ids, 1] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze()
    self.commands[env_ids, 3] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze()
    self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(1) # set small commands to zero

    self.last_actions[env_ids] = 0.
    self.last_dof_vel[env_ids] = 0.
    self.feet_air_time[env_ids] = 0.
    self.progress_buf[env_ids] = 0
    self.reset_buf[env_ids] = 1

    # fill extras: per-term episode reward averages normalized by episode seconds
    self.extras["episode"] = {}
    for key in self.episode_sums.keys():
        self.extras["episode"]['rew_' + key] = torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s
        self.episode_sums[key][env_ids] = 0.
    self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float())
def update_terrain_level(self, env_ids):
    """Terrain curriculum: promote robots that crossed their patch, demote slow ones."""
    if not self.init_done or not self.curriculum:
        # don't change on initial reset
        return
    distance = torch.norm(self.root_states[env_ids, :2] - self.env_origins[env_ids, :2], dim=1)
    # NOTE(review): this norm has no `dim`, so it reduces over ALL selected envs'
    # commands to one scalar -- confirm whether `dim=1` was intended.
    commanded = torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s
    # demote when less than 25% of the commanded distance was covered
    self.terrain_levels[env_ids] -= 1 * (distance < commanded * 0.25)
    # promote when the robot walked past half the patch length
    self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2)
    self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows
    self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]]
def push_robots(self):
    """Perturb every robot's base x/y linear velocity to train push recovery."""
    push_vel = torch_rand_float(-1., 1., (self.num_envs, 2), device=self.device)
    self.root_states[:, 7:9] = push_vel  # lin vel x/y
    self.gym.set_actor_root_state_tensor(self.sim, gymtorch.unwrap_tensor(self.root_states))
def pre_physics_step(self, actions):
    """Apply PD torques for `decimation` physics substeps per control step."""
    self.actions = actions.clone().to(self.device)
    for _ in range(self.decimation):
        # PD law around the action-scaled target pose, clamped to +/-80 (actuator limit)
        torques = torch.clip(
            self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos)
            - self.Kd * self.dof_vel,
            -80., 80.)
        self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(torques))
        self.torques = torques.view(self.torques.shape)
        self.gym.simulate(self.sim)
        if self.device == 'cpu':
            self.gym.fetch_results(self.sim, True)
        self.gym.refresh_dof_state_tensor(self.sim)
def post_physics_step(self):
    """Refresh state, push robots periodically, compute reward/termination/obs, reset done envs."""
    # self.gym.refresh_dof_state_tensor(self.sim) # done in step
    self.gym.refresh_actor_root_state_tensor(self.sim)
    self.gym.refresh_net_contact_force_tensor(self.sim)

    self.progress_buf += 1
    self.randomize_buf += 1
    self.common_step_counter += 1
    # occasionally shove the robots to train push recovery
    if self.common_step_counter % self.push_interval == 0:
        self.push_robots()

    # prepare quantities: base-frame velocities and gravity direction
    self.base_quat = self.root_states[:, 3:7]
    self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 7:10])
    self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 10:13])
    self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)

    # turn the commanded heading (commands[:, 3]) into a clipped yaw-rate command
    forward = quat_apply(self.base_quat, self.forward_vec)
    heading = torch.atan2(forward[:, 1], forward[:, 0])
    self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.)

    # compute observations, rewards, resets, ...
    self.check_termination()
    self.compute_reward()
    env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
    if len(env_ids) > 0:
        self.reset_idx(env_ids)

    self.compute_observations()
    if self.add_noise:
        # uniform noise in [-noise_scale, noise_scale] per component
        self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec

    self.last_actions[:] = self.actions[:]
    self.last_dof_vel[:] = self.dof_vel[:]

    if self.viewer and self.enable_viewer_sync and self.debug_viz:
        # draw height lines (debug-only; uses gymutil drawing helpers)
        self.gym.clear_lines(self.viewer)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        sphere_geom = gymutil.WireframeSphereGeometry(0.02, 4, 4, None, color=(1, 1, 0))
        for i in range(self.num_envs):
            base_pos = (self.root_states[i, :3]).cpu().numpy()
            heights = self.measured_heights[i].cpu().numpy()
            height_points = quat_apply_yaw(self.base_quat[i].repeat(heights.shape[0]), self.height_points[i]).cpu().numpy()
            for j in range(heights.shape[0]):
                x = height_points[j, 0] + base_pos[0]
                y = height_points[j, 1] + base_pos[1]
                z = heights[j]
                sphere_pose = gymapi.Transform(gymapi.Vec3(x, y, z), r=None)
                gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], sphere_pose)
def init_height_points(self):
    """Build the 14x10 grid of terrain sample points around each robot's base.

    x spans +/-0.2..0.8 m, y spans +/-0.1..0.5 m (center line excluded);
    z is filled in later by get_heights().
    """
    xs = 0.1 * torch.tensor([-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False)
    ys = 0.1 * torch.tensor([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False)
    grid_x, grid_y = torch.meshgrid(xs, ys)

    self.num_height_points = grid_x.numel()
    pts = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False)
    pts[:, :, 0] = grid_x.flatten()
    pts[:, :, 1] = grid_y.flatten()
    return pts
def get_heights(self, env_ids=None):
    """Sample terrain heights under each robot's height-point grid.

    Args:
        env_ids: optional tensor of env indices; ``None`` queries all envs.

    Returns:
        (num_envs, num_height_points) tensor of heights in meters
        (zeros on a flat plane).

    Raises:
        NameError: if the terrain type is 'none'.
    """
    if self.cfg["env"]["terrain"]["terrainType"] == 'plane':
        return torch.zeros(self.num_envs, self.num_height_points, device=self.device, requires_grad=False)
    elif self.cfg["env"]["terrain"]["terrainType"] == 'none':
        raise NameError("Can't measure height with terrain type 'none'")

    # Bug fix: the original `if env_ids:` applied Python truthiness to a tensor,
    # which raises for multi-element tensors and is False for tensor([0]);
    # compare against None instead.
    if env_ids is not None:
        points = quat_apply_yaw(self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]) + (self.root_states[env_ids, :3]).unsqueeze(1)
    else:
        points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (self.root_states[:, :3]).unsqueeze(1)

    # world coords -> heightfield pixel indices (border offset, then scale)
    points += self.terrain.border_size
    points = (points/self.terrain.horizontal_scale).long()
    px = points[:, :, 0].view(-1)
    py = points[:, :, 1].view(-1)
    px = torch.clip(px, 0, self.height_samples.shape[0]-2)
    py = torch.clip(py, 0, self.height_samples.shape[1]-2)

    # conservative estimate: the lower of the pixel and its diagonal neighbour
    heights1 = self.height_samples[px, py]
    heights2 = self.height_samples[px+1, py+1]
    heights = torch.min(heights1, heights2)

    return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
# terrain generator
from isaacgym.terrain_utils import *
class Terrain:
def __init__(self, cfg, num_robots) -> None:
    """Procedural multi-patch heightfield terrain laid out as a (levels x types) grid."""
    self.type = cfg["terrainType"]
    if self.type in ["none", 'plane']:
        # nothing to generate for flat/absent terrain
        return
    self.horizontal_scale = 0.1  # meters per heightfield pixel
    self.vertical_scale = 0.005  # meters per heightfield unit
    self.border_size = 20  # meters of flat border around the patch grid
    self.num_per_env = 2  # NOTE: overwritten below; this initial value is unused
    self.env_length = cfg["mapLength"]
    self.env_width = cfg["mapWidth"]
    # cumulative proportions used to pick a terrain kind from `choice` in [0, 1)
    self.proportions = [np.sum(cfg["terrainProportions"][:i+1]) for i in range(len(cfg["terrainProportions"]))]

    self.env_rows = cfg["numLevels"]
    self.env_cols = cfg["numTerrains"]
    self.num_maps = self.env_rows * self.env_cols
    self.num_per_env = int(num_robots / self.num_maps)
    self.env_origins = np.zeros((self.env_rows, self.env_cols, 3))

    self.width_per_env_pixels = int(self.env_width / self.horizontal_scale)
    self.length_per_env_pixels = int(self.env_length / self.horizontal_scale)

    self.border = int(self.border_size/self.horizontal_scale)
    self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border
    self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border

    self.height_field_raw = np.zeros((self.tot_rows , self.tot_cols), dtype=np.int16)
    if cfg["curriculum"]:
        self.curiculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows)
    else:
        self.randomized_terrain()
    self.heightsamples = self.height_field_raw
    self.vertices, self.triangles = convert_heightfield_to_trimesh(self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"])
def randomized_terrain(self):
    """Fill each grid patch with a randomly chosen terrain kind (slope/stairs/obstacles)."""
    for k in range(self.num_maps):
        # Env coordinates in the world
        (i, j) = np.unravel_index(k, (self.env_rows, self.env_cols))

        # Heightfield coordinate system from now on
        start_x = self.border + i * self.length_per_env_pixels
        end_x = self.border + (i + 1) * self.length_per_env_pixels
        start_y = self.border + j * self.width_per_env_pixels
        end_y = self.border + (j + 1) * self.width_per_env_pixels

        # NOTE(review): `length` is set from width_per_env_pixels; presumably it should
        # be length_per_env_pixels when mapLength != mapWidth -- confirm before changing.
        terrain = SubTerrain("terrain",
                             width=self.width_per_env_pixels,
                             length=self.width_per_env_pixels,
                             vertical_scale=self.vertical_scale,
                             horizontal_scale=self.horizontal_scale)
        choice = np.random.uniform(0, 1)
        if choice < 0.1:
            if np.random.choice([0, 1]):
                pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
                random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2)
            else:
                pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
        elif choice < 0.6:
            # step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18])
            step_height = np.random.choice([-0.15, 0.15])
            pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.)
        elif choice < 1.:
            discrete_obstacles_terrain(terrain, 0.15, 1., 2., 40, platform_size=3.)

        self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw

        # env origin: center of the patch; z is the highest point of the central 2x2 m area
        env_origin_x = (i + 0.5) * self.env_length
        env_origin_y = (j + 0.5) * self.env_width
        x1 = int((self.env_length/2. - 1) / self.horizontal_scale)
        x2 = int((self.env_length/2. + 1) / self.horizontal_scale)
        y1 = int((self.env_width/2. - 1) / self.horizontal_scale)
        y2 = int((self.env_width/2. + 1) / self.horizontal_scale)
        env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale
        self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
def curiculum(self, num_robots, num_terrains, num_levels):
    """Build a (levels x terrains) grid whose difficulty grows with the level index.

    Method name kept misspelled (sic) for compatibility with existing callers.
    The terrain kind per column is chosen from the cumulative `proportions`.
    """
    num_robots_per_map = int(num_robots / num_terrains)
    left_over = num_robots % num_terrains
    idx = 0  # NOTE(review): never used below
    for j in range(num_terrains):
        for i in range(num_levels):
            # NOTE(review): `length` uses width_per_env_pixels; see randomized_terrain.
            terrain = SubTerrain("terrain",
                                 width=self.width_per_env_pixels,
                                 length=self.width_per_env_pixels,
                                 vertical_scale=self.vertical_scale,
                                 horizontal_scale=self.horizontal_scale)
            difficulty = i / num_levels
            choice = j / num_terrains

            # difficulty-scaled terrain parameters
            slope = difficulty * 0.4
            step_height = 0.05 + 0.175 * difficulty
            discrete_obstacles_height = 0.025 + difficulty * 0.15
            stepping_stones_size = 2 - 1.8 * difficulty
            if choice < self.proportions[0]:
                if choice < 0.05:
                    slope *= -1
                pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
            elif choice < self.proportions[1]:
                if choice < 0.15:
                    slope *= -1
                pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
                random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2)
            elif choice < self.proportions[3]:
                if choice<self.proportions[2]:
                    step_height *= -1
                pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.)
            elif choice < self.proportions[4]:
                discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1., 2., 40, platform_size=3.)
            else:
                stepping_stones_terrain(terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0., platform_size=3.)

            # Heightfield coordinate system
            start_x = self.border + i * self.length_per_env_pixels
            end_x = self.border + (i + 1) * self.length_per_env_pixels
            start_y = self.border + j * self.width_per_env_pixels
            end_y = self.border + (j + 1) * self.width_per_env_pixels
            self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw

            robots_in_map = num_robots_per_map  # NOTE(review): computed but not used
            if j < left_over:
                robots_in_map +=1

            # env origin: patch center; z is the highest point of the central 2x2 m area
            env_origin_x = (i + 0.5) * self.env_length
            env_origin_y = (j + 0.5) * self.env_width
            x1 = int((self.env_length/2. - 1) / self.horizontal_scale)
            x2 = int((self.env_length/2. + 1) / self.horizontal_scale)
            y1 = int((self.env_width/2. - 1) / self.horizontal_scale)
            y2 = int((self.env_width/2. + 1) / self.horizontal_scale)
            env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale
            self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
@torch.jit.script
def quat_apply_yaw(quat, vec):
    """Rotate ``vec`` using only the yaw component of ``quat``.

    The first two quaternion components are zeroed (removing the roll/pitch
    contribution under the (x, y, z, w) layout used here — confirm against the
    quaternion convention of ``quat_apply``), the quaternion is re-normalized,
    and the rotation is then applied to ``vec``.
    """
    yaw_only = quat.clone().view(-1, 4)
    yaw_only[:, 0] = 0.
    yaw_only[:, 1] = 0.
    yaw_only = normalize(yaw_only)
    return quat_apply(yaw_only, vec)
@torch.jit.script
def wrap_to_pi(angles):
    """Wrap angles (radians) into the interval (-pi, pi].

    Args:
        angles: Tensor of angles in radians, any shape.

    Returns:
        A new tensor of the same shape with every angle wrapped to (-pi, pi].

    Fix: the previous implementation used in-place operators (``%=``, ``-=``)
    and therefore silently mutated the caller's tensor; this version is pure
    and leaves the input untouched.
    """
    # modulo with a positive divisor maps into [0, 2*pi)
    wrapped = angles % (2 * np.pi)
    # shift the (pi, 2*pi) portion down by 2*pi to land in (-pi, pi]
    wrapped = wrapped - 2 * np.pi * (wrapped > np.pi)
    return wrapped
# ---- file: NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/trifinger.py ----
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import quat_mul
from collections import OrderedDict
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
from isaacgymenvs.utils.torch_jit_utils import *
from isaacgymenvs.tasks.base.vec_task import VecTask
from types import SimpleNamespace
from collections import deque
from typing import Deque, Dict, Tuple, Union
# python
import enum
import numpy as np
# ################### #
# Dimensions of robot #
# ################### #
class TrifingerDimensions(enum.Enum):
    """
    Dimensions of the tri-finger robot.

    Note: While it may not seem necessary for tri-finger robot since it is fixed base, for floating
    base systems having this dimensions class is useful.
    """
    # general state
    # cartesian position + quaternion orientation
    # Fix: this was previously written ``PoseDim = 7,`` — the trailing comma
    # made the member value the tuple ``(7,)`` instead of the int 7. With the
    # fix, ObjectPoseDim (also 7) becomes an enum alias of PoseDim; that is
    # harmless here since this enum already relies on aliases
    # (e.g. JointVelocityDim/JointPositionDim share the value 9).
    PoseDim = 7
    # linear velocity + angular velocity
    VelocityDim = 6
    # state: pose + velocity
    StateDim = 13
    # force + torque
    WrenchDim = 6
    # for robot
    # number of fingers
    NumFingers = 3
    # for three fingers
    JointPositionDim = 9
    JointVelocityDim = 9
    JointTorqueDim = 9
    # generalized coordinates
    GeneralizedCoordinatesDim = JointPositionDim
    GeneralizedVelocityDim = JointVelocityDim
    # for objects
    ObjectPoseDim = 7
    ObjectVelocityDim = 6
# ################# #
# Different objects #
# ################# #
# radius of the area
# Radius of the circular arena the tri-finger robot operates in (presumably
# meters, matching the URDF assets — TODO confirm). Consumed by
# CuboidalObject.__compute to bound how far an object's centre of mass may
# sit from the arena centre.
ARENA_RADIUS = 0.195
class CuboidalObject:
    """
    Fields for a cuboidal object.

    @note Motivation for this class is that if domain randomization is performed over the
          size of the cuboid, then its attributes are automatically updated as well.
    """
    # 3D radius of the cuboid (half the space diagonal of a cube whose edge
    # is the longest side; see ``__compute``)
    radius_3d: float
    # distance from wall to the center
    max_com_distance_to_center: float
    # minimum and maximum height for spawning the object
    min_height: float
    max_height = 0.1

    NumKeypoints = 8
    ObjectPositionDim = 3
    KeypointsCoordsDim = NumKeypoints * ObjectPositionDim

    def __init__(self, size: Union[float, Tuple[float, float, float]]):
        """Initialize the cuboidal object.

        Args:
            size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that
                  object is a cube.
        """
        # Delegate to the ``size`` property setter so the normalization
        # ("a float means a cube") and the derived-attribute computation
        # live in exactly one place instead of being duplicated here.
        self.size = size

    """
    Properties
    """

    @property
    def size(self) -> Tuple[float, float, float]:
        """
        Returns the dimensions of the cuboid object (x, y, z) in meters.
        """
        return self._size

    """
    Configurations
    """

    @size.setter
    def size(self, size: Union[float, Tuple[float, float, float]]):
        """Set size of the object.

        Args:
            size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed
                  that object is a cube.
        """
        # decide the size depending on input type
        if isinstance(size, float):
            self._size = (size, size, size)
        else:
            self._size = size
        # recompute the derived attributes
        self.__compute()

    """
    Private members
    """

    def __compute(self):
        """Compute the derived attributes for the object."""
        # 3D radius of the cuboid: half the space diagonal of a cube with
        # edge length equal to the longest side
        max_len = max(self._size)
        self.radius_3d = max_len * np.sqrt(3) / 2
        # compute distance from wall to the center
        self.max_com_distance_to_center = ARENA_RADIUS - self.radius_3d
        # minimum height for spawning the object
        self.min_height = self._size[2] / 2
class Trifinger(VecTask):
    """IsaacGym RL task: a tri-finger robot manipulating a cuboidal object toward a goal pose.

    Only class-level constants, limit tables and typed buffer declarations are
    defined at this level; the tensor buffers themselves are allocated and
    wired up in ``__init__`` once the simulation exists.
    """
    # constants
    # directory where assets for the simulator are present
    _trifinger_assets_dir = os.path.join(project_dir, "../", "assets", "trifinger")
    # robot urdf (path relative to `_trifinger_assets_dir`)
    _robot_urdf_file = "robot_properties_fingers/urdf/pro/trifingerpro.urdf"
    # stage urdf (path relative to `_trifinger_assets_dir`)
    # _stage_urdf_file = "robot_properties_fingers/urdf/trifinger_stage.urdf"
    _table_urdf_file = "robot_properties_fingers/urdf/table_without_border.urdf"
    _boundary_urdf_file = "robot_properties_fingers/urdf/high_table_boundary.urdf"
    # object urdf (path relative to `_trifinger_assets_dir`)
    # TODO: Make object URDF configurable.
    _object_urdf_file = "objects/urdf/cube_multicolor_rrc.urdf"
    # physical dimensions of the object
    # TODO: Make object dimensions configurable.
    _object_dims = CuboidalObject(0.065)
    # dimensions of the system
    _dims = TrifingerDimensions
    # Constants for limits
    # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/trifinger_platform.py#L68
    # maximum joint torque (in N-m) applicable on each actuator
    _max_torque_Nm = 0.36
    # maximum joint velocity (in rad/s) on each actuator
    _max_velocity_radps = 10
    # History of state: Number of timesteps to save history for
    # Note: Currently used only to manage history of object and frame states.
    #       This can be extended to other observations (as done in ANYmal).
    _state_history_len = 2
    # buffers to store the simulation data
    # goal poses for the object [num. of instances, 7] where 7: (x, y, z, quat)
    _object_goal_poses_buf: torch.Tensor
    # DOF state of the system [num. of instances, num. of dof, 2] where last index: pos, vel
    _dof_state: torch.Tensor
    # Rigid body state of the system [num. of instances, num. of bodies, 13] where 13: (x, y, z, quat, v, omega)
    _rigid_body_state: torch.Tensor
    # Root prim states [num. of actors, 13] where 13: (x, y, z, quat, v, omega)
    _actors_root_state: torch.Tensor
    # Force-torque sensor array [num. of instances, num. of bodies * wrench]
    _ft_sensors_values: torch.Tensor
    # DOF position of the system [num. of instances, num. of dof]
    _dof_position: torch.Tensor
    # DOF velocity of the system [num. of instances, num. of dof]
    _dof_velocity: torch.Tensor
    # DOF torque of the system [num. of instances, num. of dof]
    _dof_torque: torch.Tensor
    # Fingertip links state list([num. of instances, num. of fingers, 13]) where 13: (x, y, z, quat, v, omega)
    # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step.
    # NOTE: these deques are class-level (shared) until re-filled; entries are
    # appended per-step in compute_observations.
    _fingertips_frames_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len)
    # Object prim state [num. of instances, 13] where 13: (x, y, z, quat, v, omega)
    # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step.
    _object_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len)
    # stores the last action output
    _last_action: torch.Tensor
    # keeps track of the number of goal resets
    _successes: torch.Tensor
    # keeps track of number of consecutive successes
    _consecutive_successes: float
    # Hard joint/fingertip limits used for clamping and observation scaling
    # (converted to torch tensors on the sim device in __init__).
    _robot_limits: dict = {
        "joint_position": SimpleNamespace(
            # matches those on the real robot
            low=np.array([-0.33, 0.0, -2.7] * _dims.NumFingers.value, dtype=np.float32),
            high=np.array([1.0, 1.57, 0.0] * _dims.NumFingers.value, dtype=np.float32),
            default=np.array([0.0, 0.9, -2.0] * _dims.NumFingers.value, dtype=np.float32),
        ),
        "joint_velocity": SimpleNamespace(
            low=np.full(_dims.JointVelocityDim.value, -_max_velocity_radps, dtype=np.float32),
            high=np.full(_dims.JointVelocityDim.value, _max_velocity_radps, dtype=np.float32),
            default=np.zeros(_dims.JointVelocityDim.value, dtype=np.float32),
        ),
        "joint_torque": SimpleNamespace(
            low=np.full(_dims.JointTorqueDim.value, -_max_torque_Nm, dtype=np.float32),
            high=np.full(_dims.JointTorqueDim.value, _max_torque_Nm, dtype=np.float32),
            default=np.zeros(_dims.JointTorqueDim.value, dtype=np.float32),
        ),
        "fingertip_position": SimpleNamespace(
            low=np.array([-0.4, -0.4, 0], dtype=np.float32),
            high=np.array([0.4, 0.4, 0.5], dtype=np.float32),
        ),
        "fingertip_orientation": SimpleNamespace(
            low=-np.ones(4, dtype=np.float32),
            high=np.ones(4, dtype=np.float32),
        ),
        "fingertip_velocity": SimpleNamespace(
            low=np.full(_dims.VelocityDim.value, -0.2, dtype=np.float32),
            high=np.full(_dims.VelocityDim.value, 0.2, dtype=np.float32),
        ),
        "fingertip_wrench": SimpleNamespace(
            low=np.full(_dims.WrenchDim.value, -1.0, dtype=np.float32),
            high=np.full(_dims.WrenchDim.value, 1.0, dtype=np.float32),
        ),
        # used if we want to have joint stiffness/damping as parameters
        "joint_stiffness": SimpleNamespace(
            low=np.array([1.0, 1.0, 1.0] * _dims.NumFingers.value, dtype=np.float32),
            high=np.array([50.0, 50.0, 50.0] * _dims.NumFingers.value, dtype=np.float32),
        ),
        "joint_damping": SimpleNamespace(
            low=np.array([0.01, 0.03, 0.0001] * _dims.NumFingers.value, dtype=np.float32),
            high=np.array([1.0, 3.0, 0.01] * _dims.NumFingers.value, dtype=np.float32),
        ),
    }
    # limits of the object (mapped later: str -> torch.tensor)
    _object_limits: dict = {
        "position": SimpleNamespace(
            low=np.array([-0.3, -0.3, 0], dtype=np.float32),
            high=np.array([0.3, 0.3, 0.3], dtype=np.float32),
            default=np.array([0, 0, _object_dims.min_height], dtype=np.float32)
        ),
        # difference between two positions
        "position_delta": SimpleNamespace(
            low=np.array([-0.6, -0.6, 0], dtype=np.float32),
            high=np.array([0.6, 0.6, 0.3], dtype=np.float32),
            default=np.array([0, 0, 0], dtype=np.float32)
        ),
        "orientation": SimpleNamespace(
            low=-np.ones(4, dtype=np.float32),
            high=np.ones(4, dtype=np.float32),
            default=np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32),
        ),
        "velocity": SimpleNamespace(
            low=np.full(_dims.VelocityDim.value, -0.5, dtype=np.float32),
            high=np.full(_dims.VelocityDim.value, 0.5, dtype=np.float32),
            default=np.zeros(_dims.VelocityDim.value, dtype=np.float32)
        ),
        "scale": SimpleNamespace(
            low=np.full(1, 0.0, dtype=np.float32),
            high=np.full(1, 1.0, dtype=np.float32),
        ),
    }
    # PD gains for the robot (mapped later: str -> torch.tensor)
    # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L49-L65
    _robot_dof_gains = {
        # The kp and kd gains of the PD control of the fingers.
        # Note: This depends on simulation step size and is set for a rate of 250 Hz.
        "stiffness": [10.0, 10.0, 10.0] * _dims.NumFingers.value,
        "damping": [0.1, 0.3, 0.001] * _dims.NumFingers.value,
        # The kd gains used for damping the joint motor velocities during the
        # safety torque check on the joint motors.
        "safety_damping": [0.08, 0.08, 0.04] * _dims.NumFingers.value
    }
    # action space size: one torque command per joint
    action_dim = _dims.JointTorqueDim.value
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize the Trifinger task: spec dicts, sim creation (via VecTask), GPU tensor views.

        Args:
            cfg: Task configuration dictionary ("env"/"sim"/"task" sections).
            rl_device: Device for RL-algorithm tensors.
            sim_device: Device the simulation runs on.
            graphics_device_id: Graphics device id for rendering.
            headless: If True, run without a viewer.
            virtual_screen_capture: Enables virtual-display capture (passed through to VecTask).
            force_render: Forces rendering every step (passed through to VecTask).
        """
        self.cfg = cfg
        # Observation layout — order sensitive; must match the packing in
        # compute_trifinger_observations_states and the scaling tensors built
        # in __configure_mdp_spaces.
        self.obs_spec = {
            "robot_q": self._dims.GeneralizedCoordinatesDim.value,
            "robot_u": self._dims.GeneralizedVelocityDim.value,
            "object_q": self._dims.ObjectPoseDim.value,
            "object_q_des": self._dims.ObjectPoseDim.value,
            "command": self.action_dim
        }
        if self.cfg["env"]["asymmetric_obs"]:
            self.state_spec = {
                # observations spec
                **self.obs_spec,
                # extra observations (added separately to make computations simpler)
                "object_u": self._dims.ObjectVelocityDim.value,
                "fingertip_state": self._dims.NumFingers.value * self._dims.StateDim.value,
                "robot_a": self._dims.GeneralizedVelocityDim.value,
                "fingertip_wrench": self._dims.NumFingers.value * self._dims.WrenchDim.value,
            }
        else:
            self.state_spec = self.obs_spec
        self.action_spec = {
            "command": self.action_dim
        }
        # VecTask reads these sizes from the config during super().__init__.
        self.cfg["env"]["numObservations"] = sum(self.obs_spec.values())
        self.cfg["env"]["numStates"] = sum(self.state_spec.values())
        self.cfg["env"]["numActions"] = sum(self.action_spec.values())
        self.max_episode_length = self.cfg["env"]["episodeLength"]
        self.randomize = self.cfg["task"]["randomize"]
        self.randomization_params = self.cfg["task"]["randomization_params"]
        # define prims present in the scene
        prim_names = ["robot", "table", "boundary", "object", "goal_object"]
        # mapping from name to asset instance
        self.gym_assets = dict.fromkeys(prim_names)
        # mapping from name to gym indices
        self.gym_indices = dict.fromkeys(prim_names)
        # mapping from name to gym rigid body handles
        # name of finger tips links i.e. end-effector frames
        fingertips_frames = ["finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"]
        self._fingertips_handles = OrderedDict.fromkeys(fingertips_frames, None)
        # mapping from name to gym dof index
        robot_dof_names = list()
        for finger_pos in ['0', '120', '240']:
            robot_dof_names += [f'finger_base_to_upper_joint_{finger_pos}',
                                f'finger_upper_to_middle_joint_{finger_pos}',
                                f'finger_middle_to_lower_joint_{finger_pos}']
        self._robot_dof_indices = OrderedDict.fromkeys(robot_dof_names, None)
        # VecTask.__init__ creates the sim (calls create_sim) and allocates
        # obs/state/reward buffers.
        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
        if self.viewer != None:
            cam_pos = gymapi.Vec3(0.7, 0.0, 0.7)
            cam_target = gymapi.Vec3(0.0, 0.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
        # change constant buffers from numpy/lists into torch tensors
        # limits for robot
        for limit_name in self._robot_limits:
            # extract limit simple-namespace
            limit_dict = self._robot_limits[limit_name].__dict__
            # iterate over namespace attributes
            for prop, value in limit_dict.items():
                limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device)
        # limits for the object
        for limit_name in self._object_limits:
            # extract limit simple-namespace
            limit_dict = self._object_limits[limit_name].__dict__
            # iterate over namespace attributes
            for prop, value in limit_dict.items():
                limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device)
        # PD gains for actuation
        for gain_name, value in self._robot_dof_gains.items():
            self._robot_dof_gains[gain_name] = torch.tensor(value, dtype=torch.float, device=self.device)
        # store the sampled goal poses for the object: [num. of instances, 7]
        self._object_goal_poses_buf = torch.zeros((self.num_envs, 7), device=self.device, dtype=torch.float)
        # get force torque sensor if enabled
        if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
            # # joint torques
            # dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            # self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,
            #                                                                self._dims.JointTorqueDim.value)
            # # force-torque sensor
            num_ft_dims = self._dims.NumFingers.value * self._dims.WrenchDim.value
            # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            # self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims)
            sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims)
            dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self._dims.JointTorqueDim.value)
        # get gym GPU state tensors
        actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
        # refresh the buffer (to copy memory?)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        # create wrapper tensors for reference (consider everything as pointer to actual memory)
        # DOF
        self._dof_state = gymtorch.wrap_tensor(dof_state_tensor).view(self.num_envs, -1, 2)
        self._dof_position = self._dof_state[..., 0]
        self._dof_velocity = self._dof_state[..., 1]
        # rigid body
        self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
        # root actors
        self._actors_root_state = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
        # frames history
        action_dim = sum(self.action_spec.values())
        self._last_action = torch.zeros(self.num_envs, action_dim, dtype=torch.float, device=self.device)
        fingertip_handles_indices = list(self._fingertips_handles.values())
        object_indices = self.gym_indices["object"]
        # timestep 0 is current tensor
        curr_history_length = 0
        # pre-fill the fixed-length history deques so index [1] (t-1) is valid
        # on the very first step
        while curr_history_length < self._state_history_len:
            # add tensors to history list
            # NOTE(review): debug print — consider removing
            print(self._rigid_body_state.shape)
            self._fingertips_frames_state_history.append(self._rigid_body_state[:, fingertip_handles_indices])
            self._object_state_history.append(self._actors_root_state[object_indices])
            # update current history length
            curr_history_length += 1
        self._observations_scale = SimpleNamespace(low=None, high=None)
        self._states_scale = SimpleNamespace(low=None, high=None)
        self._action_scale = SimpleNamespace(low=None, high=None)
        # per-env success counters (full pose / position-only / orientation-only)
        self._successes = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
        self._successes_pos = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
        self._successes_quat = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
        self.__configure_mdp_spaces()
    def create_sim(self):
        """Create the simulation: sim instance, ground plane, scene assets and env instances.

        Called by VecTask during ``__init__``. Order matters: the sim must
        exist before the ground plane, assets and envs are added to it.
        """
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2
        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_scene_assets()
        self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize:
            self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.distance = 0.013
plane_params.static_friction = 1.0
plane_params.dynamic_friction = 1.0
self.gym.add_ground(self.sim, plane_params)
    def _create_scene_assets(self):
        """ Define Gym assets for stage, robot and object.

        Populates ``self.gym_assets`` with the five scene assets and prints a
        short summary of body/shape/dof counts for debugging.
        """
        # define assets
        self.gym_assets["robot"] = self.__define_robot_asset()
        self.gym_assets["table"] = self.__define_table_asset()
        self.gym_assets["boundary"] = self.__define_boundary_asset()
        self.gym_assets["object"] = self.__define_object_asset()
        self.gym_assets["goal_object"] = self.__define_goal_object_asset()
        # display the properties (only for debugging)
        # robot
        print("Trifinger Robot Asset: ")
        print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["robot"])}')
        print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["robot"])}')
        print(f'\t Number of dofs: {self.gym.get_asset_dof_count(self.gym_assets["robot"])}')
        print(f'\t Number of actuated dofs: {self._dims.JointTorqueDim.value}')
        # stage
        print("Trifinger Table Asset: ")
        print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["table"])}')
        print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["table"])}')
        print("Trifinger Boundary Asset: ")
        print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["boundary"])}')
        print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["boundary"])}')
    def _create_envs(self, num_envs, spacing, num_per_row):
        """Instantiate all environment instances and their actors.

        NOTE(review): the ``num_envs``, ``spacing`` and ``num_per_row``
        parameters are not referenced in the body — it reads ``self.num_envs``
        and ``self.cfg`` instead; the signature matches the call site in
        ``create_sim``.

        For each env: creates the env space, spawns robot/table/boundary/
        object/goal-object actors, applies DOF properties and colors, and
        records per-actor sim indices in ``self.gym_indices``.
        """
        # define the dof properties for the robot
        robot_dof_props = self.gym.get_asset_dof_properties(self.gym_assets["robot"])
        # set dof properties based on the control mode
        for k, dof_index in enumerate(self._robot_dof_indices.values()):
            # note: since safety checks are employed, the simulator PD controller is not
            # used. Instead the torque is computed manually and applied, even if the
            # command mode is 'position'.
            robot_dof_props['driveMode'][dof_index] = gymapi.DOF_MODE_EFFORT
            robot_dof_props['stiffness'][dof_index] = 0.0
            robot_dof_props['damping'][dof_index] = 0.0
            # set dof limits
            robot_dof_props['effort'][dof_index] = self._max_torque_Nm
            robot_dof_props['velocity'][dof_index] = self._max_velocity_radps
            robot_dof_props['lower'][dof_index] = float(self._robot_limits["joint_position"].low[k])
            robot_dof_props['upper'][dof_index] = float(self._robot_limits["joint_position"].high[k])
        self.envs = []
        # define lower and upper region bound for each environment
        env_lower_bound = gymapi.Vec3(-self.cfg["env"]["envSpacing"], -self.cfg["env"]["envSpacing"], 0.0)
        env_upper_bound = gymapi.Vec3(self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"])
        num_envs_per_row = int(np.sqrt(self.num_envs))
        # initialize gym indices buffer as a list
        # note: later the list is converted to torch tensor for ease in interfacing with IsaacGym.
        for asset_name in self.gym_indices.keys():
            self.gym_indices[asset_name] = list()
        # count number of shapes and bodies
        max_agg_bodies = 0
        max_agg_shapes = 0
        for asset in self.gym_assets.values():
            max_agg_bodies += self.gym.get_asset_rigid_body_count(asset)
            max_agg_shapes += self.gym.get_asset_rigid_shape_count(asset)
        # iterate and create environment instances
        for env_index in range(self.num_envs):
            # create environment
            env_ptr = self.gym.create_env(self.sim, env_lower_bound, env_upper_bound, num_envs_per_row)
            # begin aggregation mode if enabled - this can improve simulation performance
            if self.cfg["env"]["aggregate_mode"]:
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
            # add trifinger robot to environment
            trifinger_actor = self.gym.create_actor(env_ptr, self.gym_assets["robot"], gymapi.Transform(),
                                                    "robot", env_index, 0, 0)
            trifinger_idx = self.gym.get_actor_index(env_ptr, trifinger_actor, gymapi.DOMAIN_SIM)
            # add table to environment
            table_handle = self.gym.create_actor(env_ptr, self.gym_assets["table"], gymapi.Transform(),
                                                 "table", env_index, 1, 0)
            table_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)
            # add stage to environment
            boundary_handle = self.gym.create_actor(env_ptr, self.gym_assets["boundary"], gymapi.Transform(),
                                                    "boundary", env_index, 1, 0)
            boundary_idx = self.gym.get_actor_index(env_ptr, boundary_handle, gymapi.DOMAIN_SIM)
            # add object to environment
            object_handle = self.gym.create_actor(env_ptr, self.gym_assets["object"], gymapi.Transform(),
                                                  "object", env_index, 0, 0)
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            # add goal object to environment
            # note: collision group env_index + num_envs keeps the goal marker
            # from colliding with anything in its own env
            goal_handle = self.gym.create_actor(env_ptr, self.gym_assets["goal_object"], gymapi.Transform(),
                                                "goal_object", env_index + self.num_envs, 0, 0)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            # change settings of DOF
            self.gym.set_actor_dof_properties(env_ptr, trifinger_actor, robot_dof_props)
            # add color to instances
            stage_color = gymapi.Vec3(0.73, 0.68, 0.72)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color)
            self.gym.set_rigid_body_color(env_ptr, boundary_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color)
            # end aggregation mode if enabled
            if self.cfg["env"]["aggregate_mode"]:
                self.gym.end_aggregate(env_ptr)
            # add instances to list
            self.envs.append(env_ptr)
            self.gym_indices["robot"].append(trifinger_idx)
            self.gym_indices["table"].append(table_idx)
            self.gym_indices["boundary"].append(boundary_idx)
            self.gym_indices["object"].append(object_idx)
            self.gym_indices["goal_object"].append(goal_object_idx)
        # convert gym indices from list to tensor
        for asset_name, asset_indices in self.gym_indices.items():
            self.gym_indices[asset_name] = torch.tensor(asset_indices, dtype=torch.long, device=self.device)
    def __configure_mdp_spaces(self):
        """
        Configures the observations, state and action spaces.

        Builds the low/high scaling tensors for actions, observations and
        (optionally) asymmetric states, then sanity-checks that their lengths
        match the sizes declared in obs_spec/state_spec/action_spec.
        Raises ValueError for an unknown command mode and AssertionError on a
        dimension mismatch.
        """
        # Action scale for the MDP
        # Note: This is order sensitive.
        if self.cfg["env"]["command_mode"] == "position":
            # action space is joint positions
            self._action_scale.low = self._robot_limits["joint_position"].low
            self._action_scale.high = self._robot_limits["joint_position"].high
        elif self.cfg["env"]["command_mode"] == "torque":
            # action space is joint torques
            self._action_scale.low = self._robot_limits["joint_torque"].low
            self._action_scale.high = self._robot_limits["joint_torque"].high
        else:
            msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']."
            raise ValueError(msg)
        # Observations scale for the MDP
        # check if policy outputs normalized action [-1, 1] or not.
        if self.cfg["env"]["normalize_action"]:
            obs_action_scale = SimpleNamespace(
                low=torch.full((self.action_dim,), -1, dtype=torch.float, device=self.device),
                high=torch.full((self.action_dim,), 1, dtype=torch.float, device=self.device)
            )
        else:
            obs_action_scale = self._action_scale
        # object pose bounds repeated twice: current pose and desired pose
        object_obs_low = torch.cat([
            self._object_limits["position"].low,
            self._object_limits["orientation"].low,
        ]*2)
        object_obs_high = torch.cat([
            self._object_limits["position"].high,
            self._object_limits["orientation"].high,
        ]*2)
        # Note: This is order sensitive (must match obs_spec packing order).
        self._observations_scale.low = torch.cat([
            self._robot_limits["joint_position"].low,
            self._robot_limits["joint_velocity"].low,
            object_obs_low,
            obs_action_scale.low
        ])
        self._observations_scale.high = torch.cat([
            self._robot_limits["joint_position"].high,
            self._robot_limits["joint_velocity"].high,
            object_obs_high,
            obs_action_scale.high
        ])
        # State scale for the MDP
        if self.cfg["env"]["asymmetric_obs"]:
            # finger tip scaling
            fingertip_state_scale = SimpleNamespace(
                low=torch.cat([
                    self._robot_limits["fingertip_position"].low,
                    self._robot_limits["fingertip_orientation"].low,
                    self._robot_limits["fingertip_velocity"].low,
                ]),
                high=torch.cat([
                    self._robot_limits["fingertip_position"].high,
                    self._robot_limits["fingertip_orientation"].high,
                    self._robot_limits["fingertip_velocity"].high,
                ])
            )
            states_low = [
                self._observations_scale.low,
                self._object_limits["velocity"].low,
                fingertip_state_scale.low.repeat(self._dims.NumFingers.value),
                self._robot_limits["joint_torque"].low,
                self._robot_limits["fingertip_wrench"].low.repeat(self._dims.NumFingers.value),
            ]
            states_high = [
                self._observations_scale.high,
                self._object_limits["velocity"].high,
                fingertip_state_scale.high.repeat(self._dims.NumFingers.value),
                self._robot_limits["joint_torque"].high,
                self._robot_limits["fingertip_wrench"].high.repeat(self._dims.NumFingers.value),
            ]
            # Note: This is order sensitive (must match state_spec packing order).
            self._states_scale.low = torch.cat(states_low)
            self._states_scale.high = torch.cat(states_high)
        # check that dimensions of scalings are correct
        # count number of dimensions
        state_dim = sum(self.state_spec.values())
        obs_dim = sum(self.obs_spec.values())
        action_dim = sum(self.action_spec.values())
        # check that dimensions match
        # observations
        if self._observations_scale.low.shape[0] != obs_dim or self._observations_scale.high.shape[0] != obs_dim:
            msg = f"Observation scaling dimensions mismatch. " \
                  f"\tLow: {self._observations_scale.low.shape[0]}, " \
                  f"\tHigh: {self._observations_scale.high.shape[0]}, " \
                  f"\tExpected: {obs_dim}."
            raise AssertionError(msg)
        # state
        if self.cfg["env"]["asymmetric_obs"] \
                and (self._states_scale.low.shape[0] != state_dim or self._states_scale.high.shape[0] != state_dim):
            msg = f"States scaling dimensions mismatch. " \
                  f"\tLow: {self._states_scale.low.shape[0]}, " \
                  f"\tHigh: {self._states_scale.high.shape[0]}, " \
                  f"\tExpected: {state_dim}."
            raise AssertionError(msg)
        # actions
        if self._action_scale.low.shape[0] != action_dim or self._action_scale.high.shape[0] != action_dim:
            msg = f"Actions scaling dimensions mismatch. " \
                  f"\tLow: {self._action_scale.low.shape[0]}, " \
                  f"\tHigh: {self._action_scale.high.shape[0]}, " \
                  f"\tExpected: {action_dim}."
            raise AssertionError(msg)
        # print the scaling
        print(f'MDP Raw observation bounds\n'
              f'\tLow: {self._observations_scale.low}\n'
              f'\tHigh: {self._observations_scale.high}')
        print(f'MDP Raw state bounds\n'
              f'\tLow: {self._states_scale.low}\n'
              f'\tHigh: {self._states_scale.high}')
        print(f'MDP Raw action bounds\n'
              f'\tLow: {self._action_scale.low}\n'
              f'\tHigh: {self._action_scale.high}')
    def compute_reward(self, actions):
        """Compute per-env reward and reset flags into ``rew_buf``/``reset_buf``.

        Delegates to the jitted ``compute_trifinger_reward`` (positional
        argument order is critical) and logs per-term reward means to
        ``self.extras``.

        NOTE(review): the ``actions`` parameter is not referenced in the body;
        the reward reads fingertip/object state history instead.
        """
        # cleared first; the jitted function returns the fresh values
        self.rew_buf[:] = 0.
        self.reset_buf[:] = 0.
        self.rew_buf[:], self.reset_buf[:], log_dict = compute_trifinger_reward(
            self.obs_buf,
            self.reset_buf,
            self.progress_buf,
            self.max_episode_length,
            self.cfg["sim"]["dt"],
            self.cfg["env"]["reward_terms"]["finger_move_penalty"]["weight"],
            self.cfg["env"]["reward_terms"]["finger_reach_object_rate"]["weight"],
            self.cfg["env"]["reward_terms"]["object_dist"]["weight"],
            self.cfg["env"]["reward_terms"]["object_rot"]["weight"],
            self.env_steps_count,
            self._object_goal_poses_buf,
            self._object_state_history[0],
            self._object_state_history[1],
            self._fingertips_frames_state_history[0],
            self._fingertips_frames_state_history[1],
            self.cfg["env"]["reward_terms"]["keypoints_dist"]["activate"]
        )
        # expose per-term reward means for logging
        self.extras.update({"env/rewards/"+k: v.mean() for k, v in log_dict.items()})
    def compute_observations(self):
        """Refresh sim tensors, update state histories and fill obs/state buffers.

        Order matters: the GPU tensors are refreshed first, the newest
        fingertip/object states are pushed onto the history deques (index 0 =
        current step, 1 = previous), and only then are the observation and
        state buffers packed. Observations are optionally normalized to the
        scaling tensors built in ``__configure_mdp_spaces``.
        """
        # refresh memory buffers
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
            self.gym.refresh_dof_force_tensor(self.sim)
            self.gym.refresh_force_sensor_tensor(self.sim)
            joint_torques = self._dof_torque
            tip_wrenches = self._ft_sensors_values
        else:
            # sensors disabled: feed zeros so the packed layout stays fixed
            joint_torques = torch.zeros(self.num_envs, self._dims.JointTorqueDim.value, dtype=torch.float32, device=self.device)
            tip_wrenches = torch.zeros(self.num_envs, self._dims.NumFingers.value * self._dims.WrenchDim.value, dtype=torch.float32, device=self.device)
        # extract frame handles
        fingertip_handles_indices = list(self._fingertips_handles.values())
        object_indices = self.gym_indices["object"]
        # update state histories
        self._fingertips_frames_state_history.appendleft(self._rigid_body_state[:, fingertip_handles_indices])
        self._object_state_history.appendleft(self._actors_root_state[object_indices])
        # fill the observations and states buffer
        self.obs_buf[:], self.states_buf[:] = compute_trifinger_observations_states(
            self.cfg["env"]["asymmetric_obs"],
            self._dof_position,
            self._dof_velocity,
            self._object_state_history[0],
            self._object_goal_poses_buf,
            self.actions,
            self._fingertips_frames_state_history[0],
            joint_torques,
            tip_wrenches,
        )
        # normalize observations if flag is enabled
        if self.cfg["env"]["normalize_obs"]:
            # for normal obs
            self.obs_buf = scale_transform(
                self.obs_buf,
                lower=self._observations_scale.low,
                upper=self._observations_scale.high
            )
    def reset_idx(self, env_ids):
        """Reset the environments listed in ``env_ids``.

        Clears episode statistics, samples new robot/object/goal states according to the
        configured distributions, and writes the results into the simulator via the
        indexed tensor-set API.
        """
        # randomization can happen only at reset time, since it can reset actor positions on GPU
        if self.randomize:
            self.apply_randomizations(self.randomization_params)
        # A) Reset episode stats buffers
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0
        self._successes[env_ids] = 0
        self._successes_pos[env_ids] = 0
        self._successes_quat[env_ids] = 0
        # B) Various randomizations at the start of the episode:
        # -- Robot base position.
        # -- Stage position.
        # -- Coefficient of restituion and friction for robot, object, stage.
        # -- Mass and size of the object
        # -- Mass of robot links
        # -- Robot joint state
        robot_initial_state_config = self.cfg["env"]["reset_distribution"]["robot_initial_state"]
        self._sample_robot_state(
            env_ids,
            distribution=robot_initial_state_config["type"],
            dof_pos_stddev=robot_initial_state_config["dof_pos_stddev"],
            dof_vel_stddev=robot_initial_state_config["dof_vel_stddev"]
        )
        # -- Sampling of initial pose of the object
        object_initial_state_config = self.cfg["env"]["reset_distribution"]["object_initial_state"]
        self._sample_object_poses(
            env_ids,
            distribution=object_initial_state_config["type"],
        )
        # -- Sampling of goal pose of the object
        self._sample_object_goal_poses(
            env_ids,
            difficulty=self.cfg["env"]["task_difficulty"]
        )
        # C) Extract trifinger indices to reset
        robot_indices = self.gym_indices["robot"][env_ids].to(torch.int32)
        object_indices = self.gym_indices["object"][env_ids].to(torch.int32)
        goal_object_indices = self.gym_indices["goal_object"][env_ids].to(torch.int32)
        all_indices = torch.unique(torch.cat([robot_indices, object_indices, goal_object_indices]))
        # D) Set values into simulator
        # -- DOF
        self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
                                              gymtorch.unwrap_tensor(robot_indices), len(robot_indices))
        # -- actor root states
        self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._actors_root_state),
                                                     gymtorch.unwrap_tensor(all_indices), len(all_indices))
    def _sample_robot_state(self, instances: torch.Tensor, distribution: str = 'default',
                            dof_pos_stddev: float = 0.0, dof_vel_stddev: float = 0.0):
        """Samples the robot DOF state based on the settings.
        Type of robot initial state distribution: ["default", "random"]
             - "default" means that robot is in default configuration.
             - "random" means that noise is added to default configuration
             - "none" means that the robot configuration is not reset between episodes.
        Args:
            instances: A tensor containing indices of environment instances to reset.
            distribution: Name of distribution to sample initial state from: ['default', 'random']
            dof_pos_stddev: Noise scale to DOF position (used if 'type' is 'random')
            dof_vel_stddev: Noise scale to DOF velocity (used if 'type' is 'random')
        """
        # number of samples to generate
        num_samples = instances.size()[0]
        # sample dof state based on distribution type
        if distribution == "none":
            return
        elif distribution == "default":
            # set to default configuration
            self._dof_position[instances] = self._robot_limits["joint_position"].default
            self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default
        elif distribution == "random":
            # sample uniform random from (-1, 1)
            dof_state_dim = self._dims.JointPositionDim.value + self._dims.JointVelocityDim.value
            dof_state_noise = 2 * torch.rand((num_samples, dof_state_dim,), dtype=torch.float,
                                             device=self.device) - 1
            # set to default configuration
            self._dof_position[instances] = self._robot_limits["joint_position"].default
            self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default
            # add noise
            # DOF position
            start_offset = 0
            end_offset = self._dims.JointPositionDim.value
            self._dof_position[instances] += dof_pos_stddev * dof_state_noise[:, start_offset:end_offset]
            # DOF velocity
            start_offset = end_offset
            end_offset += self._dims.JointVelocityDim.value
            self._dof_velocity[instances] += dof_vel_stddev * dof_state_noise[:, start_offset:end_offset]
        else:
            msg = f"Invalid robot initial state distribution. Input: {distribution} not in [`default`, `random`]."
            raise ValueError(msg)
        # reset robot fingertips state history (index 0 is rewritten by compute_observations)
        for idx in range(1, self._state_history_len):
            self._fingertips_frames_state_history[idx][instances] = 0.0
    def _sample_object_poses(self, instances: torch.Tensor, distribution: str):
        """Sample poses for the cube.
        Type of distribution: ["default", "random", "none"]
             - "default" means that pose is default configuration.
             - "random" means that pose is randomly sampled on the table.
             - "none" means no resetting of object pose between episodes.
        Args:
            instances: A tensor containing indices of environment instances to reset.
            distribution: Name of distribution to sample initial state from: ['default', 'random']
        """
        # number of samples to generate
        num_samples = instances.size()[0]
        # sample poses based on distribution type
        if distribution == "none":
            return
        elif distribution == "default":
            pos_x, pos_y, pos_z = self._object_limits["position"].default
            orientation = self._object_limits["orientation"].default
        elif distribution == "random":
            # For initialization
            pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
            # add a small offset to the height to account for scale randomisation (prevent ground intersection)
            pos_z = self._object_dims.size[2] / 2 + 0.0015
            orientation = random_yaw_orientation(num_samples, self.device)
        else:
            msg = f"Invalid object initial state distribution. Input: {distribution} " \
                  "not in [`default`, `random`, `none`]."
            raise ValueError(msg)
        # set buffers into simulator
        # extract indices for goal object
        object_indices = self.gym_indices["object"][instances]
        # set values into buffer
        # object buffer: [0:3] position, [3:7] quaternion, [7:13] linear+angular velocity (zeroed)
        self._object_state_history[0][instances, 0] = pos_x
        self._object_state_history[0][instances, 1] = pos_y
        self._object_state_history[0][instances, 2] = pos_z
        self._object_state_history[0][instances, 3:7] = orientation
        self._object_state_history[0][instances, 7:13] = 0
        # reset object state history
        for idx in range(1, self._state_history_len):
            self._object_state_history[idx][instances] = 0.0
        # root actor buffer
        self._actors_root_state[object_indices] = self._object_state_history[0][instances]
    def _sample_object_goal_poses(self, instances: torch.Tensor, difficulty: int):
        """Sample goal poses for the cube and sets them into the desired goal pose buffer.
        Args:
            instances: A tensor containing indices of environment instances to reset.
            difficulty: Difficulty level. The higher, the more difficult is the goal.
        Possible levels are:
            - -1:  Random goal position on the table, including yaw orientation.
            - 1: Random goal position on the table, no orientation.
            - 2: Fixed goal position in the air with x,y = 0.  No orientation.
            - 3: Random goal position in the air, no orientation.
            - 4: Random goal pose in the air, including orientation.
        """
        # number of samples to generate
        num_samples = instances.size()[0]
        # sample poses based on task difficulty
        if difficulty == -1:
            # For initialization
            pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
            pos_z = self._object_dims.size[2] / 2
            orientation = random_yaw_orientation(num_samples, self.device)
        elif difficulty == 1:
            # Random goal position on the table, no orientation.
            pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
            pos_z = self._object_dims.size[2] / 2
            orientation = default_orientation(num_samples, self.device)
        elif difficulty == 2:
            # Fixed goal position in the air with x,y = 0. No orientation.
            pos_x, pos_y = 0.0, 0.0
            pos_z = self._object_dims.min_height + 0.05
            orientation = default_orientation(num_samples, self.device)
        elif difficulty == 3:
            # Random goal position in the air, no orientation.
            pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device)
            pos_z = random_z(num_samples, self._object_dims.min_height, self._object_dims.max_height, self.device)
            orientation = default_orientation(num_samples, self.device)
        elif difficulty == 4:
            # Random goal pose in the air, including orientation.
            # Note: Set minimum height such that the cube does not intersect with the
            #       ground in any orientation
            max_goal_radius = self._object_dims.max_com_distance_to_center
            max_height = self._object_dims.max_height
            orientation = random_orientation(num_samples, self.device)
            # pick x, y, z according to the maximum height / radius at the current point
            # in the curriculum
            pos_x, pos_y = random_xy(num_samples, max_goal_radius, self.device)
            pos_z = random_z(num_samples, self._object_dims.radius_3d, max_height, self.device)
        else:
            msg = f"Invalid difficulty index for task: {difficulty}."
            raise ValueError(msg)
        # extract indices for goal object
        goal_object_indices = self.gym_indices["goal_object"][instances]
        # set values into buffer
        # object goal buffer: [0:3] position, [3:7] quaternion
        self._object_goal_poses_buf[instances, 0] = pos_x
        self._object_goal_poses_buf[instances, 1] = pos_y
        self._object_goal_poses_buf[instances, 2] = pos_z
        self._object_goal_poses_buf[instances, 3:7] = orientation
        # root actor buffer (goal marker actor mirrors the goal pose)
        self._actors_root_state[goal_object_indices, 0:7] = self._object_goal_poses_buf[instances]
        # self._actors_root_state[goal_object_indices, 2] = -10
    def pre_physics_step(self, actions):
        """Apply the policy actions as joint torques before the physics step.

        Resets any terminated environments, optionally denormalizes actions, converts
        position commands to torques via a PD law, clamps to actuator limits and writes
        the result into the simulator.
        """
        env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
        if len(env_ids) > 0:
            self.reset_idx(env_ids)
            # step once so the freshly-set reset states propagate before applying actions
            self.gym.simulate(self.sim)
        self.actions = actions.clone().to(self.device)
        # if normalized_action is true, then denormalize them.
        if self.cfg["env"]["normalize_action"]:
            # TODO: Default action should correspond to normalized value of 0.
            action_transformed = unscale_transform(
                self.actions,
                lower=self._action_scale.low,
                upper=self._action_scale.high
            )
        else:
            action_transformed = self.actions
        # compute command on the basis of mode selected
        if self.cfg["env"]["command_mode"] == 'torque':
            # command is the desired joint torque
            computed_torque = action_transformed
        elif self.cfg["env"]["command_mode"] == 'position':
            # command is the desired joint positions
            desired_dof_position = action_transformed
            # compute torque to apply (PD control: stiffness on position error, damping on velocity)
            computed_torque = self._robot_dof_gains["stiffness"] * (desired_dof_position - self._dof_position)
            computed_torque -= self._robot_dof_gains["damping"] * self._dof_velocity
        else:
            msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']."
            raise ValueError(msg)
        # apply clamping of computed torque to actuator limits
        applied_torque = saturate(
            computed_torque,
            lower=self._robot_limits["joint_torque"].low,
            upper=self._robot_limits["joint_torque"].high
        )
        # apply safety damping and clamping of the action torque if enabled
        if self.cfg["env"]["apply_safety_damping"]:
            # apply damping by joint velocity
            applied_torque -= self._robot_dof_gains["safety_damping"] * self._dof_velocity
            # clamp input
            applied_torque = saturate(
                applied_torque,
                lower=self._robot_limits["joint_torque"].low,
                upper=self._robot_limits["joint_torque"].high
            )
        # set computed torques to simulator buffer.
        self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(applied_torque))
    def post_physics_step(self):
        """Advance bookkeeping after physics: observations, rewards, termination and stats."""
        self._step_info = {}
        self.progress_buf += 1
        self.randomize_buf += 1
        self.compute_observations()
        self.compute_reward(self.actions)
        # check termination conditions (success only)
        self._check_termination()
        # aggregate success stats only when at least one env is resetting this step
        if torch.sum(self.reset_buf) > 0:
            self._step_info['consecutive_successes'] = np.mean(self._successes.float().cpu().numpy())
            self._step_info['consecutive_successes_pos'] = np.mean(self._successes_pos.float().cpu().numpy())
            self._step_info['consecutive_successes_quat'] = np.mean(self._successes_quat.float().cpu().numpy())
    def _check_termination(self):
        """Check whether the episode is done per environment.

        Success is defined by position (and, for difficulty 4+, orientation) tolerance;
        results are stored in the ``_successes*`` buffers for logging.
        """
        # Extract configuration for termination conditions
        termination_config = self.cfg["env"]["termination_conditions"]
        # Termination condition - successful completion
        # Calculate distance between current object and goal
        object_goal_position_dist = torch.norm(
            self._object_goal_poses_buf[:, 0:3] - self._object_state_history[0][:, 0:3],
            p=2, dim=-1
        )
        # log the theoretical number of resets (position-goal hits)
        goal_position_reset = torch.le(object_goal_position_dist,
                                       termination_config["success"]["position_tolerance"])
        self._step_info['env/current_position_goal/per_env'] = np.mean(goal_position_reset.float().cpu().numpy())
        # For task with difficulty 4, we need to check if orientation matches as well.
        # Compute the difference in orientation between object and goal pose
        object_goal_orientation_dist = quat_diff_rad(self._object_state_history[0][:, 3:7],
                                                     self._object_goal_poses_buf[:, 3:7])
        # Check for distance within tolerance
        goal_orientation_reset = torch.le(object_goal_orientation_dist,
                                          termination_config["success"]["orientation_tolerance"])
        self._step_info['env/current_orientation_goal/per_env'] = np.mean(goal_orientation_reset.float().cpu().numpy())
        if self.cfg["env"]['task_difficulty'] < 4:
            # Check for task completion if position goal is within a threshold
            task_completion_reset = goal_position_reset
        elif self.cfg["env"]['task_difficulty'] == 4:
            # Check for task completion if both position + orientation goal is within a threshold
            task_completion_reset = torch.logical_and(goal_position_reset, goal_orientation_reset)
        else:
            # Check for task completion if both orientation goal is within a threshold
            task_completion_reset = goal_orientation_reset
        self._successes = task_completion_reset
        self._successes_pos = goal_position_reset
        self._successes_quat = goal_orientation_reset
"""
Helper functions - define assets
"""
    def __define_robot_asset(self):
        """ Define Gym asset for robot.

        Loads the tri-finger URDF with V-HACD convex decomposition for collisions,
        sets contact properties, resolves fingertip body handles and actuated DOF
        indices, and (optionally) attaches force-torque sensors at the fingertips.
        """
        # define tri-finger asset
        robot_asset_options = gymapi.AssetOptions()
        robot_asset_options.flip_visual_attachments = False
        robot_asset_options.fix_base_link = True
        robot_asset_options.collapse_fixed_joints = False
        robot_asset_options.disable_gravity = False
        robot_asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
        robot_asset_options.thickness = 0.001
        robot_asset_options.angular_damping = 0.01
        # V-HACD: approximate the meshes with convex hulls for collision
        robot_asset_options.vhacd_enabled = True
        robot_asset_options.vhacd_params = gymapi.VhacdParams()
        robot_asset_options.vhacd_params.resolution = 100000
        robot_asset_options.vhacd_params.concavity = 0.0025
        robot_asset_options.vhacd_params.alpha = 0.04
        robot_asset_options.vhacd_params.beta = 1.0
        robot_asset_options.vhacd_params.convex_hull_downsampling = 4
        robot_asset_options.vhacd_params.max_num_vertices_per_ch = 256
        if self.physics_engine == gymapi.SIM_PHYSX:
            robot_asset_options.use_physx_armature = True
        # load tri-finger asset
        trifinger_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
                                              self._robot_urdf_file, robot_asset_options)
        # set the link properties for the robot
        # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L563
        trifinger_props = self.gym.get_asset_rigid_shape_properties(trifinger_asset)
        for p in trifinger_props:
            p.friction = 1.0
            p.torsion_friction = 1.0
            p.restitution = 0.8
        self.gym.set_asset_rigid_shape_properties(trifinger_asset, trifinger_props)
        # extract the frame handles
        for frame_name in self._fingertips_handles.keys():
            self._fingertips_handles[frame_name] = self.gym.find_asset_rigid_body_index(trifinger_asset,
                                                                                        frame_name)
            # check valid handle
            if self._fingertips_handles[frame_name] == gymapi.INVALID_HANDLE:
                msg = f"Invalid handle received for frame: `{frame_name}`."
                print(msg)
        if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]:
            # identity transform: sensor frame coincides with the fingertip body frame
            sensor_pose = gymapi.Transform()
            for fingertip_handle in self._fingertips_handles.values():
                self.gym.create_asset_force_sensor(trifinger_asset, fingertip_handle, sensor_pose)
        # extract the dof indices
        # Note: need to write actuated dofs manually since the system contains fixed joints as well which show up.
        for dof_name in self._robot_dof_indices.keys():
            self._robot_dof_indices[dof_name] = self.gym.find_asset_dof_index(trifinger_asset, dof_name)
            # check valid handle
            if self._robot_dof_indices[dof_name] == gymapi.INVALID_HANDLE:
                msg = f"Invalid index received for DOF: `{dof_name}`."
                print(msg)
        # return the asset
        return trifinger_asset
    def __define_table_asset(self):
        """ Define Gym asset for stage.

        Loads the static table URDF and applies low-friction contact properties.
        """
        # define stage asset
        table_asset_options = gymapi.AssetOptions()
        table_asset_options.disable_gravity = True
        table_asset_options.fix_base_link = True
        table_asset_options.thickness = 0.001
        # load stage asset
        table_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
                                          self._table_urdf_file, table_asset_options)
        # set stage properties
        table_props = self.gym.get_asset_rigid_shape_properties(table_asset)
        # iterate over each mesh
        for p in table_props:
            p.friction = 0.1
            p.torsion_friction = 0.1
        self.gym.set_asset_rigid_shape_properties(table_asset, table_props)
        # return the asset
        return table_asset
    def __define_boundary_asset(self):
        """ Define Gym asset for the arena boundary.

        Loads the static boundary URDF with V-HACD convex decomposition enabled.
        """
        # define boundary asset
        boundary_asset_options = gymapi.AssetOptions()
        boundary_asset_options.disable_gravity = True
        boundary_asset_options.fix_base_link = True
        boundary_asset_options.thickness = 0.001
        # V-HACD: approximate the mesh with convex hulls for collision
        boundary_asset_options.vhacd_enabled = True
        boundary_asset_options.vhacd_params = gymapi.VhacdParams()
        boundary_asset_options.vhacd_params.resolution = 100000
        boundary_asset_options.vhacd_params.concavity = 0.0
        boundary_asset_options.vhacd_params.alpha = 0.04
        boundary_asset_options.vhacd_params.beta = 1.0
        boundary_asset_options.vhacd_params.max_num_vertices_per_ch = 1024
        # load boundary asset
        boundary_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
                                             self._boundary_urdf_file, boundary_asset_options)
        # set boundary properties (read-modify-write; no fields are changed here)
        boundary_props = self.gym.get_asset_rigid_shape_properties(boundary_asset)
        self.gym.set_asset_rigid_shape_properties(boundary_asset, boundary_props)
        # return the asset
        return boundary_asset
    def __define_object_asset(self):
        """ Define Gym asset for object.

        Loads the manipulated cube URDF and applies contact properties matching the
        rrc_simulation reference implementation.
        """
        # define object asset
        object_asset_options = gymapi.AssetOptions()
        object_asset_options.disable_gravity = False
        object_asset_options.thickness = 0.001
        object_asset_options.flip_visual_attachments = True
        # load object asset
        object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
                                           self._object_urdf_file, object_asset_options)
        # set object properties
        # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/collision_objects.py#L96
        object_props = self.gym.get_asset_rigid_shape_properties(object_asset)
        for p in object_props:
            p.friction = 1.0
            p.torsion_friction = 0.001
            p.restitution = 0.0
        self.gym.set_asset_rigid_shape_properties(object_asset, object_props)
        # return the asset
        return object_asset
    def __define_goal_object_asset(self):
        """ Define Gym asset for goal object.

        Same URDF as the manipulated object, but fixed in place and gravity-free so it
        acts purely as a visual goal marker.
        """
        # define object asset
        object_asset_options = gymapi.AssetOptions()
        object_asset_options.disable_gravity = True
        object_asset_options.fix_base_link = True
        object_asset_options.thickness = 0.001
        object_asset_options.flip_visual_attachments = True
        # load object asset
        goal_object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir,
                                                self._object_urdf_file, object_asset_options)
        # return the asset
        return goal_object_asset
    @property
    def env_steps_count(self) -> int:
        """Returns the total number of environment steps aggregated across parallel environments."""
        # each simulated frame advances every parallel environment by one step
        return self.gym.get_frame_count(self.sim) * self.num_envs
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def lgsk_kernel(x: torch.Tensor, scale: float = 50.0, eps: float = 2) -> torch.Tensor:
    """Logistic kernel mapping distances to a bounded, symmetric reward.

    Values lie in (0, 1/(2+eps)], peaking when x == 0 (0.25 for the default eps).
    Ref: https://arxiv.org/abs/1901.08652 (page 15)

    Args:
        x: Input tensor (typically a distance).
        scale: Width control - larger values narrow the 'bell'.
        eps: Height control - larger values flatten the peak.
    Returns:
        Tensor of kernel values, same shape as `x`.
    """
    z = x * scale
    denom = torch.exp(z) + eps + torch.exp(-z)
    return denom.reciprocal()
@torch.jit.script
def gen_keypoints(pose: torch.Tensor, num_keypoints: int = 8, size: Tuple[float, float, float] = (0.065, 0.065, 0.065)):
    """Generate the world-frame corner keypoints of a cuboid of dimensions `size`.

    Args:
        pose: (num_envs, 7) tensor of object poses (position + quaternion).
        num_keypoints: Number of corners (8 covers every sign combination of a box).
        size: Cuboid edge lengths (x, y, z).
    Returns:
        (num_envs, num_keypoints, 3) tensor of keypoint positions in world frame.
    """
    num_envs = pose.shape[0]
    keypoints_buf = torch.ones(num_envs, num_keypoints, 3, dtype=torch.float32, device=pose.device)
    for i in range(num_keypoints):
        # bit k of i selects the sign of local axis k
        n = [((i >> k) & 1) == 0 for k in range(3)]
        # FIX: removed stray trailing comma that wrapped the list in a 1-tuple,
        # making torch.tensor build a (1, 3) tensor that only broadcasting rescued.
        corner_loc = [(1 if n[k] else -1) * s / 2 for k, s in enumerate(size)]
        corner = torch.tensor(corner_loc, dtype=torch.float32, device=pose.device) * keypoints_buf[:, i, :]
        # transform local corner offsets into world frame using the object pose
        keypoints_buf[:, i, :] = local_to_world_space(corner, pose)
    return keypoints_buf
@torch.jit.script
def compute_trifinger_reward(
    obs_buf: torch.Tensor,
    reset_buf: torch.Tensor,
    progress_buf: torch.Tensor,
    episode_length: int,
    dt: float,
    finger_move_penalty_weight: float,
    finger_reach_object_weight: float,
    object_dist_weight: float,
    object_rot_weight: float,
    env_steps_count: int,
    object_goal_poses_buf: torch.Tensor,
    object_state: torch.Tensor,
    last_object_state: torch.Tensor,
    fingertip_state: torch.Tensor,
    last_fingertip_state: torch.Tensor,
    use_keypoints: bool
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]:
    """Compute the trifinger task reward, the per-env reset flags and a logging dict.

    Reward = fingertip movement penalty + (scheduled) reach-object shaping +
    pose reward (keypoint kernel sum, or distance/rotation kernels).
    Episodes reset when `progress_buf` reaches `episode_length - 1`.
    """
    ft_sched_start = 0
    ft_sched_end = 5e7
    # Reward penalising finger movement (squared fingertip velocities)
    fingertip_vel = (fingertip_state[:, :, 0:3] - last_fingertip_state[:, :, 0:3]) / dt
    finger_movement_penalty = finger_move_penalty_weight * fingertip_vel.pow(2).view(-1, 9).sum(dim=-1)
    # Reward for finger reaching the object
    # distance from each finger to the centroid of the object, shape (N, 3).
    curr_norms = torch.stack([
        torch.norm(fingertip_state[:, i, 0:3] - object_state[:, 0:3], p=2, dim=-1)
        for i in range(3)
    ], dim=-1)
    # distance from each finger to the centroid of the object in the last timestep, shape (N, 3).
    prev_norms = torch.stack([
        torch.norm(last_fingertip_state[:, i, 0:3] - last_object_state[:, 0:3], p=2, dim=-1)
        for i in range(3)
    ], dim=-1)
    # reach shaping only active inside the schedule window of total env steps
    ft_sched_val = 1.0 if ft_sched_start <= env_steps_count <= ft_sched_end else 0.0
    finger_reach_object_reward = finger_reach_object_weight * ft_sched_val * (curr_norms - prev_norms).sum(dim=-1)
    if use_keypoints:
        # pose reward via mean logistic kernel over the 8 cuboid corner distances
        object_keypoints = gen_keypoints(object_state[:, 0:7])
        goal_keypoints = gen_keypoints(object_goal_poses_buf[:, 0:7])
        delta = object_keypoints - goal_keypoints
        dist_l2 = torch.norm(delta, p=2, dim=-1)
        keypoints_kernel_sum = lgsk_kernel(dist_l2, scale=30., eps=2.).mean(dim=-1)
        pose_reward = object_dist_weight * dt * keypoints_kernel_sum
    else:
        # Reward for object distance
        object_dist = torch.norm(object_state[:, 0:3] - object_goal_poses_buf[:, 0:3], p=2, dim=-1)
        object_dist_reward = object_dist_weight * dt * lgsk_kernel(object_dist, scale=50., eps=2.)
        # Reward for object rotation
        # extract quaternion orientation
        quat_a = object_state[:, 3:7]
        quat_b = object_goal_poses_buf[:, 3:7]
        angles = quat_diff_rad(quat_a, quat_b)
        object_rot_reward = object_rot_weight * dt / (3. * torch.abs(angles) + 0.01)
        pose_reward = object_dist_reward + object_rot_reward
    total_reward = (
        finger_movement_penalty
        + finger_reach_object_reward
        + pose_reward
    )
    # reset agents at episode timeout
    reset = torch.zeros_like(reset_buf)
    reset = torch.where(progress_buf >= episode_length - 1, torch.ones_like(reset_buf), reset)
    info: Dict[str, torch.Tensor] = {
        'finger_movement_penalty': finger_movement_penalty,
        'finger_reach_object_reward': finger_reach_object_reward,
        # FIX: previously logged finger_reach_object_reward under this key
        'pose_reward': pose_reward,
        'reward': total_reward,
    }
    return total_reward, reset, info
@torch.jit.script
def compute_trifinger_observations_states(
    asymmetric_obs: bool,
    dof_position: torch.Tensor,
    dof_velocity: torch.Tensor,
    object_state: torch.Tensor,
    object_goal_poses: torch.Tensor,
    actions: torch.Tensor,
    fingertip_state: torch.Tensor,
    joint_torques: torch.Tensor,
    tip_wrenches: torch.Tensor
):
    """Assemble the policy observation and (optionally privileged) state vectors.

    The observation concatenates joint state, object pose, goal pose and last
    actions. With `asymmetric_obs`, the state additionally appends object
    velocities, flattened fingertip states, joint torques and tip wrenches;
    otherwise the state is the observation itself.
    """
    num_envs = dof_position.shape[0]
    # policy-visible observation
    obs_parts = [
        dof_position,
        dof_velocity,
        object_state[:, 0:7],  # pose
        object_goal_poses,
        actions
    ]
    obs_buf = torch.cat(obs_parts, dim=-1)
    states_buf = obs_buf
    if asymmetric_obs:
        # privileged critic state: observation plus simulator-only quantities
        priv_parts = [
            obs_buf,
            object_state[:, 7:13],  # linear / angular velocity
            fingertip_state.reshape(num_envs, -1),
            joint_torques,
            tip_wrenches
        ]
        states_buf = torch.cat(priv_parts, dim=-1)
    return obs_buf, states_buf
"""
Sampling of cuboidal object
"""
@torch.jit.script
def random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]:
    """Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)"""
    # uniform-in-area radius: take sqrt of a uniform sample, then scale to the max radius
    radius = max_com_distance_to_center * torch.sqrt(torch.rand(num, dtype=torch.float, device=device))
    # uniform angle around the circle
    theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
    # convert polar coordinates to cartesian x,y of the cube
    return radius * torch.cos(theta), radius * torch.sin(theta)
@torch.jit.script
def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:
    """Returns sampled height of the goal object, uniform in [min_height, max_height]."""
    # affine map of a uniform [0, 1) sample onto the height interval
    u = torch.rand(num, dtype=torch.float, device=device)
    return min_height + (max_height - min_height) * u
@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
    """Returns `num` identity rotation quaternions (x, y, z, w layout)."""
    identity = torch.zeros((num, 4), dtype=torch.float, device=device)
    # w component set to 1 -> no rotation
    identity[:, 3] = 1.0
    return identity
@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation in 3D as quaternion.

    Normalizing an isotropic Gaussian 4-vector yields a uniformly distributed
    unit quaternion.
    Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
    """
    raw = torch.randn((num, 4), dtype=torch.float, device=device)
    return torch.nn.functional.normalize(raw, p=2., dim=-1, eps=1e-12)
@torch.jit.script
def random_orientation_within_angle(num: int, device: str, base: torch.Tensor, max_angle: float):
    """Generates random quaternions within `max_angle` radians of `base`.

    Builds a small random rotation from three uniform samples and composes it
    with `base`.
    Ref: https://math.stackexchange.com/a/3448434
    """
    delta = torch.zeros((num, 4,), dtype=torch.float, device=device)
    u = torch.rand((num, 3), dtype=torch.float, device=device)
    # cosine of the sampled rotation angle, bounded by max_angle
    cos_a = torch.cos(u[:, 0] * max_angle)
    half_sin = torch.sqrt((1. - cos_a) / 2.)
    delta[:, 3] = torch.sqrt((1 + cos_a) / 2.)
    delta[:, 2] = (u[:, 1] * 2. - 1.) * half_sin
    # remaining magnitude is split between x and y by a uniform azimuth angle
    phi = 2 * np.pi * u[:, 2]
    xy_mag = torch.sqrt(1 - delta[:, 2] ** 2.) * half_sin
    delta[:, 0] = xy_mag * torch.cos(phi)
    delta[:, 1] = xy_mag * torch.sin(phi)
    # floating point errors can cause it to be slightly off, re-normalise
    delta = torch.nn.functional.normalize(delta, p=2., dim=-1, eps=1e-12)
    return quat_mul(delta, base)
@torch.jit.script
def random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor:
    """Samples a random angular velocity: a uniform random axis scaled by a
    zero-mean Gaussian magnitude with standard deviation `magnitude_stdev`."""
    direction = torch.randn((num, 3,), dtype=torch.float, device=device)
    # project onto the unit sphere to get an unbiased axis
    direction = direction / torch.norm(direction, p=2, dim=-1).view(-1, 1)
    speed = magnitude_stdev * torch.randn((num, 1,), dtype=torch.float, device=device)
    return speed * direction
@torch.jit.script
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
    """Returns rotations sampled uniformly about the z-axis only (zero roll/pitch)."""
    roll = torch.zeros(num, dtype=torch.float, device=device)
    pitch = torch.zeros(num, dtype=torch.float, device=device)
    # only the yaw angle is randomised
    yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
    return quat_from_euler_xyz(roll, pitch, yaw)
| 70,571 | Python | 45.643754 | 217 | 0.611568 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cabinet.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, tensor_clamp, \
tf_vector, tf_combine
from .base.vec_task import VecTask
class FrankaCabinet(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.action_scale = self.cfg["env"]["actionScale"]
self.start_position_noise = self.cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self.cfg["env"]["startRotationNoise"]
self.num_props = self.cfg["env"]["numProps"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self.cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self.cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self.cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.up_axis = "z"
self.up_axis_idx = 2
self.distX_offset = 0.04
self.dt = 1/60.
# prop dimensions
self.prop_width = 0.08
self.prop_height = 0.08
self.prop_length = 0.08
self.prop_spacing = 0.09
num_obs = 23
num_acts = 9
self.cfg["env"]["numObservations"] = 23
self.cfg["env"]["numActions"] = 9
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
# get gym GPU state tensors
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# create some wrapper tensors for different slices
self.franka_default_dof_pos = to_torch([1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self.device)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.franka_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_franka_dofs]
self.franka_dof_pos = self.franka_dof_state[..., 0]
self.franka_dof_vel = self.franka_dof_state[..., 1]
self.cabinet_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, self.num_franka_dofs:]
self.cabinet_dof_pos = self.cabinet_dof_state[..., 0]
self.cabinet_dof_vel = self.cabinet_dof_state[..., 1]
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(self.num_envs, -1, 13)
if self.num_props > 0:
self.prop_states = self.root_state_tensor[:, 2:]
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
self.franka_dof_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * (2 + self.num_props), dtype=torch.int32, device=self.device).view(self.num_envs, -1)
self.reset_idx(torch.arange(self.num_envs, device=self.device))
def create_sim(self):
    """Create the physics simulation, then populate it with the ground plane and envs.

    Sim parameters (up-axis, gravity) must be configured on ``self.sim_params``
    *before* the base-class ``create_sim`` call consumes them.
    """
    # Z-up world with standard gravity along -Z.
    self.sim_params.up_axis = gymapi.UP_AXIS_Z
    self.sim_params.gravity.x = 0
    self.sim_params.gravity.y = 0
    self.sim_params.gravity.z = -9.81
    self.sim = super().create_sim(
        self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
    self._create_ground_plane()
    # Envs are laid out on a roughly square grid (sqrt(num_envs) per row).
    self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
    """Add a flat ground plane (normal pointing up along +Z) to the simulation."""
    params = gymapi.PlaneParams()
    params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
    self.gym.add_ground(self.sim, params)
def _create_envs(self, num_envs, spacing, num_per_row):
    """Build every environment: one Franka arm, one sektion cabinet, optional prop boxes.

    Loads the two URDF assets, configures per-DOF drive properties, then
    instantiates the actors on a grid of `num_per_row` envs per row.

    Args:
        num_envs: number of parallel environments to create.
        spacing: half-extent of each env's bounding volume.
        num_per_row: envs per row in the grid layout.
    """
    # Env bounds; Z lower bound is 0 so actors sit on the ground plane.
    lower = gymapi.Vec3(-spacing, -spacing, 0.0)
    upper = gymapi.Vec3(spacing, spacing, spacing)

    # Default asset locations, overridable via the task config.
    asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
    franka_asset_file = "urdf/franka_description/robots/franka_panda.urdf"
    cabinet_asset_file = "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf"
    if "asset" in self.cfg["env"]:
        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
        franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file)
        cabinet_asset_file = self.cfg["env"]["asset"].get("assetFileNameCabinet", cabinet_asset_file)

    # Load franka asset: fixed base, gravity disabled, position-drive DOFs.
    asset_options = gymapi.AssetOptions()
    asset_options.flip_visual_attachments = True
    asset_options.fix_base_link = True
    asset_options.collapse_fixed_joints = True
    asset_options.disable_gravity = True
    asset_options.thickness = 0.001
    asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
    asset_options.use_mesh_materials = True
    franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options)

    # Load cabinet asset, reusing (and partially overriding) the same options object.
    asset_options.flip_visual_attachments = False
    asset_options.collapse_fixed_joints = True
    asset_options.disable_gravity = False
    asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
    asset_options.armature = 0.005
    cabinet_asset = self.gym.load_asset(self.sim, asset_root, cabinet_asset_file, asset_options)

    # Per-DOF PD gains: 7 arm joints + 2 very stiff gripper fingers.
    franka_dof_stiffness = to_torch([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float, device=self.device)
    franka_dof_damping = to_torch([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device)

    self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
    self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset)
    self.num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset)
    self.num_cabinet_dofs = self.gym.get_asset_dof_count(cabinet_asset)
    print("num franka bodies: ", self.num_franka_bodies)
    print("num franka dofs: ", self.num_franka_dofs)
    print("num cabinet bodies: ", self.num_cabinet_bodies)
    print("num cabinet dofs: ", self.num_cabinet_dofs)

    # Set franka DOF drive properties and record joint limits.
    franka_dof_props = self.gym.get_asset_dof_properties(franka_asset)
    self.franka_dof_lower_limits = []
    self.franka_dof_upper_limits = []
    for i in range(self.num_franka_dofs):
        franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS
        if self.physics_engine == gymapi.SIM_PHYSX:
            franka_dof_props['stiffness'][i] = franka_dof_stiffness[i]
            franka_dof_props['damping'][i] = franka_dof_damping[i]
        else:
            # Fallback gains for non-PhysX backends.
            franka_dof_props['stiffness'][i] = 7000.0
            franka_dof_props['damping'][i] = 50.0
        self.franka_dof_lower_limits.append(franka_dof_props['lower'][i])
        self.franka_dof_upper_limits.append(franka_dof_props['upper'][i])

    self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device)
    self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device)
    self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
    # Gripper DOFs (indices 7, 8) move slower but with higher effort limits.
    self.franka_dof_speed_scales[[7, 8]] = 0.1
    franka_dof_props['effort'][7] = 200
    franka_dof_props['effort'][8] = 200

    # Set cabinet DOF properties (passive joints, lightly damped).
    cabinet_dof_props = self.gym.get_asset_dof_properties(cabinet_asset)
    for i in range(self.num_cabinet_dofs):
        cabinet_dof_props['damping'][i] = 10.0

    # Create prop (box) asset.
    box_opts = gymapi.AssetOptions()
    box_opts.density = 400
    prop_asset = self.gym.create_box(self.sim, self.prop_width, self.prop_height, self.prop_width, box_opts)

    # Franka faces the cabinet (rotated 180 degrees about Z via the quaternion).
    franka_start_pose = gymapi.Transform()
    franka_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0)
    franka_start_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
    cabinet_start_pose = gymapi.Transform()
    cabinet_start_pose.p = gymapi.Vec3(*get_axis_params(0.4, self.up_axis_idx))

    # Compute aggregate size so all actors of an env can share one aggregate.
    num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
    num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset)
    num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset)
    num_cabinet_shapes = self.gym.get_asset_rigid_shape_count(cabinet_asset)
    num_prop_bodies = self.gym.get_asset_rigid_body_count(prop_asset)
    num_prop_shapes = self.gym.get_asset_rigid_shape_count(prop_asset)
    max_agg_bodies = num_franka_bodies + num_cabinet_bodies + self.num_props * num_prop_bodies
    max_agg_shapes = num_franka_shapes + num_cabinet_shapes + self.num_props * num_prop_shapes

    self.frankas = []
    self.cabinets = []
    self.default_prop_states = []
    self.prop_start = []
    self.envs = []

    for i in range(self.num_envs):
        # Create env instance.
        env_ptr = self.gym.create_env(
            self.sim, lower, upper, num_per_row
        )

        # aggregate_mode controls how many actors go into one collision aggregate:
        # >=3 wraps everything, 2 wraps cabinet+props, 1 wraps props only.
        if self.aggregate_mode >= 3:
            self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

        franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 1, 0)
        self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props)

        if self.aggregate_mode == 2:
            self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

        # NOTE(review): this aliases (does not copy) cabinet_start_pose, so the
        # position noise below accumulates across envs — confirm intended.
        cabinet_pose = cabinet_start_pose
        cabinet_pose.p.x += self.start_position_noise * (np.random.rand() - 0.5)
        dz = 0.5 * np.random.rand()
        dy = np.random.rand() - 0.5
        cabinet_pose.p.y += self.start_position_noise * dy
        cabinet_pose.p.z += self.start_position_noise * dz
        cabinet_actor = self.gym.create_actor(env_ptr, cabinet_asset, cabinet_pose, "cabinet", i, 2, 0)
        self.gym.set_actor_dof_properties(env_ptr, cabinet_actor, cabinet_dof_props)

        if self.aggregate_mode == 1:
            self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

        if self.num_props > 0:
            # Remember the sim-wide actor index where this env's props begin.
            self.prop_start.append(self.gym.get_sim_actor_count(self.sim))
            drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top")
            drawer_pose = self.gym.get_rigid_transform(env_ptr, drawer_handle)

            # Arrange props in a square grid centered on the drawer.
            props_per_row = int(np.ceil(np.sqrt(self.num_props)))
            xmin = -0.5 * self.prop_spacing * (props_per_row - 1)
            yzmin = -0.5 * self.prop_spacing * (props_per_row - 1)

            prop_count = 0
            for j in range(props_per_row):
                prop_up = yzmin + j * self.prop_spacing
                for k in range(props_per_row):
                    if prop_count >= self.num_props:
                        break
                    propx = xmin + k * self.prop_spacing
                    prop_state_pose = gymapi.Transform()
                    prop_state_pose.p.x = drawer_pose.p.x + propx
                    propz, propy = 0, prop_up
                    prop_state_pose.p.y = drawer_pose.p.y + propy
                    prop_state_pose.p.z = drawer_pose.p.z + propz
                    prop_state_pose.r = gymapi.Quat(0, 0, 0, 1)
                    # NOTE(review): prop_handle and prop_idx below are assigned but unused.
                    prop_handle = self.gym.create_actor(env_ptr, prop_asset, prop_state_pose, "prop{}".format(prop_count), i, 0, 0)
                    prop_count += 1

                    prop_idx = j * props_per_row + k
                    # 13-value rigid-body state: pos(3) + quat(4) + lin/ang vel(6).
                    self.default_prop_states.append([prop_state_pose.p.x, prop_state_pose.p.y, prop_state_pose.p.z,
                                                     prop_state_pose.r.x, prop_state_pose.r.y, prop_state_pose.r.z, prop_state_pose.r.w,
                                                     0, 0, 0, 0, 0, 0])
        if self.aggregate_mode > 0:
            self.gym.end_aggregate(env_ptr)

        self.envs.append(env_ptr)
        self.frankas.append(franka_actor)
        self.cabinets.append(cabinet_actor)

    # Handles are identical across envs, so the ones from the last env suffice.
    self.hand_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_link7")
    self.drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top")
    self.lfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_leftfinger")
    self.rfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_rightfinger")
    self.default_prop_states = to_torch(self.default_prop_states, device=self.device, dtype=torch.float).view(self.num_envs, self.num_props, 13)

    self.init_data()
def init_data(self):
    """Precompute local grasp frames and allocate the pose tensors used each step.

    The franka grasp frame is placed midway between the two finger bodies
    (expressed in the hand's local frame); the drawer grasp frame is a fixed
    offset on the drawer handle. Env 0 is used as the template since all envs
    share identical kinematics.
    """
    hand = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_link7")
    lfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_leftfinger")
    rfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_rightfinger")

    hand_pose = self.gym.get_rigid_transform(self.envs[0], hand)
    lfinger_pose = self.gym.get_rigid_transform(self.envs[0], lfinger)
    rfinger_pose = self.gym.get_rigid_transform(self.envs[0], rfinger)

    # Midpoint between the fingers, oriented like the left finger.
    finger_pose = gymapi.Transform()
    finger_pose.p = (lfinger_pose.p + rfinger_pose.p) * 0.5
    finger_pose.r = lfinger_pose.r

    # Express the finger midpoint in the hand's local frame, then push it
    # 0.04 along the grasp axis so the grasp point sits between the fingertips.
    hand_pose_inv = hand_pose.inverse()
    grasp_pose_axis = 1
    franka_local_grasp_pose = hand_pose_inv * finger_pose
    franka_local_grasp_pose.p += gymapi.Vec3(*get_axis_params(0.04, grasp_pose_axis))
    self.franka_local_grasp_pos = to_torch([franka_local_grasp_pose.p.x, franka_local_grasp_pose.p.y,
                                            franka_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1))
    self.franka_local_grasp_rot = to_torch([franka_local_grasp_pose.r.x, franka_local_grasp_pose.r.y,
                                            franka_local_grasp_pose.r.z, franka_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1))

    # Fixed grasp offset on the drawer handle (identity rotation).
    drawer_local_grasp_pose = gymapi.Transform()
    drawer_local_grasp_pose.p = gymapi.Vec3(*get_axis_params(0.01, grasp_pose_axis, 0.3))
    drawer_local_grasp_pose.r = gymapi.Quat(0, 0, 0, 1)
    self.drawer_local_grasp_pos = to_torch([drawer_local_grasp_pose.p.x, drawer_local_grasp_pose.p.y,
                                            drawer_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1))
    self.drawer_local_grasp_rot = to_torch([drawer_local_grasp_pose.r.x, drawer_local_grasp_pose.r.y,
                                            drawer_local_grasp_pose.r.z, drawer_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1))

    # Reference axes used by the reward's alignment terms.
    self.gripper_forward_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1))
    self.drawer_inward_axis = to_torch([-1, 0, 0], device=self.device).repeat((self.num_envs, 1))
    self.gripper_up_axis = to_torch([0, 1, 0], device=self.device).repeat((self.num_envs, 1))
    self.drawer_up_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1))

    # World-space grasp/finger pose buffers, refreshed in compute_observations.
    self.franka_grasp_pos = torch.zeros_like(self.franka_local_grasp_pos)
    self.franka_grasp_rot = torch.zeros_like(self.franka_local_grasp_rot)
    self.franka_grasp_rot[..., -1] = 1  # xyzw
    self.drawer_grasp_pos = torch.zeros_like(self.drawer_local_grasp_pos)
    self.drawer_grasp_rot = torch.zeros_like(self.drawer_local_grasp_rot)
    self.drawer_grasp_rot[..., -1] = 1
    self.franka_lfinger_pos = torch.zeros_like(self.franka_local_grasp_pos)
    self.franka_rfinger_pos = torch.zeros_like(self.franka_local_grasp_pos)
    self.franka_lfinger_rot = torch.zeros_like(self.franka_local_grasp_rot)
    self.franka_rfinger_rot = torch.zeros_like(self.franka_local_grasp_rot)
def compute_reward(self, actions):
    """Fill ``self.rew_buf`` and ``self.reset_buf`` via the jitted reward kernel.

    All inputs are forwarded from instance state; ``actions`` itself is unused
    here because ``self.actions`` was already stored in ``pre_physics_step``.
    """
    self.rew_buf[:], self.reset_buf[:] = compute_franka_reward(
        self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos,
        self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot,
        self.franka_lfinger_pos, self.franka_rfinger_pos,
        self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis,
        self.num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale,
        self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self.max_episode_length
    )
def compute_observations(self):
    """Refresh simulator state tensors and assemble the 23-dim observation buffer.

    Layout: scaled franka DOF positions (9), scaled DOF velocities (9),
    vector from grasp point to drawer handle (3), drawer joint position (1)
    and velocity (1).
    """
    # Pull the latest state from the simulator into the wrapped tensors.
    self.gym.refresh_actor_root_state_tensor(self.sim)
    self.gym.refresh_dof_state_tensor(self.sim)
    self.gym.refresh_rigid_body_state_tensor(self.sim)

    # World-space pose of the hand and the drawer-handle body.
    hand_state = self.rigid_body_states[:, self.hand_handle]
    drawer_state = self.rigid_body_states[:, self.drawer_handle]
    hand_pos, hand_rot = hand_state[:, 0:3], hand_state[:, 3:7]
    drawer_pos, drawer_rot = drawer_state[:, 0:3], drawer_state[:, 3:7]

    grasp_frames = compute_grasp_transforms(
        hand_rot, hand_pos, self.franka_local_grasp_rot, self.franka_local_grasp_pos,
        drawer_rot, drawer_pos, self.drawer_local_grasp_rot, self.drawer_local_grasp_pos
    )
    self.franka_grasp_rot[:], self.franka_grasp_pos[:], self.drawer_grasp_rot[:], self.drawer_grasp_pos[:] = grasp_frames

    self.franka_lfinger_pos = self.rigid_body_states[:, self.lfinger_handle, 0:3]
    self.franka_rfinger_pos = self.rigid_body_states[:, self.rfinger_handle, 0:3]
    self.franka_lfinger_rot = self.rigid_body_states[:, self.lfinger_handle, 3:7]
    self.franka_rfinger_rot = self.rigid_body_states[:, self.rfinger_handle, 3:7]

    # Normalize joint positions to [-1, 1] over their limit range.
    limit_span = self.franka_dof_upper_limits - self.franka_dof_lower_limits
    dof_pos_scaled = 2.0 * (self.franka_dof_pos - self.franka_dof_lower_limits) / limit_span - 1.0
    to_target = self.drawer_grasp_pos - self.franka_grasp_pos

    self.obs_buf = torch.cat(
        (dof_pos_scaled, self.franka_dof_vel * self.dof_vel_scale, to_target,
         self.cabinet_dof_pos[:, 3].unsqueeze(-1), self.cabinet_dof_vel[:, 3].unsqueeze(-1)),
        dim=-1)
    return self.obs_buf
def reset_idx(self, env_ids):
    """Reset the selected environments to randomized initial states.

    Fix: removed the dead local ``env_ids_int32 = env_ids.to(dtype=torch.int32)``,
    which was computed and never used (``self.global_indices`` already provides
    the int32 actor indices consumed by the indexed-tensor API).

    Args:
        env_ids: 1-D tensor of environment indices to reset.
    """
    # Reset franka: default pose plus uniform noise in [-0.125, 0.125] rad,
    # clamped to the joint limits. Targets follow the new pose.
    pos = tensor_clamp(
        self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self.device) - 0.5),
        self.franka_dof_lower_limits, self.franka_dof_upper_limits)
    self.franka_dof_pos[env_ids, :] = pos
    self.franka_dof_vel[env_ids, :] = torch.zeros_like(self.franka_dof_vel[env_ids])
    self.franka_dof_targets[env_ids, :self.num_franka_dofs] = pos

    # Reset cabinet joints to fully closed, zero velocity.
    self.cabinet_dof_state[env_ids, :] = torch.zeros_like(self.cabinet_dof_state[env_ids])

    # Reset props (actors 2.. in each env) back to their spawn states.
    if self.num_props > 0:
        prop_indices = self.global_indices[env_ids, 2:].flatten()
        self.prop_states[env_ids] = self.default_prop_states[env_ids]
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state_tensor),
                                                     gymtorch.unwrap_tensor(prop_indices), len(prop_indices))

    # Push DOF targets and DOF state for the franka+cabinet actors (indices 0, 1).
    multi_env_ids_int32 = self.global_indices[env_ids, :2].flatten()
    self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                    gymtorch.unwrap_tensor(self.franka_dof_targets),
                                                    gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
    self.gym.set_dof_state_tensor_indexed(self.sim,
                                          gymtorch.unwrap_tensor(self.dof_state),
                                          gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))

    self.progress_buf[env_ids] = 0
    self.reset_buf[env_ids] = 0
def pre_physics_step(self, actions):
    """Convert policy actions into clamped DOF position targets and push them to the sim.

    Fix: removed the dead local ``env_ids_int32 = torch.arange(...)`` that was
    computed and never used (the non-indexed target-setter below applies to
    all envs and takes no index tensor).

    Args:
        actions: per-env action tensor, interpreted as scaled DOF deltas.
    """
    self.actions = actions.clone().to(self.device)
    # Integrate actions as velocity-scaled deltas on the current targets.
    targets = self.franka_dof_targets[:, :self.num_franka_dofs] + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
    self.franka_dof_targets[:, :self.num_franka_dofs] = tensor_clamp(
        targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
    self.gym.set_dof_position_target_tensor(self.sim,
                                            gymtorch.unwrap_tensor(self.franka_dof_targets))
def post_physics_step(self):
    """Per-step bookkeeping after simulation: resets, observations, reward, debug viz."""
    self.progress_buf += 1

    # Reset any envs flagged for reset before computing fresh observations.
    env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    if len(env_ids) > 0:
        self.reset_idx(env_ids)

    self.compute_observations()
    self.compute_reward(self.actions)

    # Debug viz: draw local coordinate frames (RGB = XYZ axes, 0.2 m long)
    # at the grasp points and both fingertips of every env.
    if self.viewer and self.debug_viz:
        self.gym.clear_lines(self.viewer)
        self.gym.refresh_rigid_body_state_tensor(self.sim)

        for i in range(self.num_envs):
            # Franka grasp frame.
            px = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            py = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            pz = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

            p0 = self.franka_grasp_pos[i].cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85])

            # Drawer grasp frame.
            px = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            py = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            pz = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

            p0 = self.drawer_grasp_pos[i].cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])

            # Left fingertip frame.
            px = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            py = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            pz = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

            p0 = self.franka_lfinger_pos[i].cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])

            # Right fingertip frame.
            px = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            py = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            pz = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

            p0 = self.franka_rfinger_pos[i].cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_franka_reward(
    reset_buf, progress_buf, actions, cabinet_dof_pos,
    franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot,
    franka_lfinger_pos, franka_rfinger_pos,
    gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis,
    num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale,
    finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length
):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float) -> Tuple[Tensor, Tensor]
    # TorchScript reward kernel. Returns (rewards, reset_buf) per env.

    # Distance from hand grasp point to the drawer grasp point: squared inverse-
    # quadratic shaping, doubled when within 2 cm.
    d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
    dist_reward = 1.0 / (1.0 + d ** 2)
    dist_reward *= dist_reward
    dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)

    # Rotate reference axes into world space and measure alignment via dot products.
    axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
    axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
    axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
    axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)

    dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)  # alignment of forward axis for gripper
    dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)  # alignment of up axis for gripper
    # reward for matching the orientation of the hand to the drawer (fingers wrapped)
    rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2)

    # bonus if left finger is above the drawer handle and right below
    around_handle_reward = torch.zeros_like(rot_reward)
    around_handle_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
                                       torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
                                                   around_handle_reward + 0.5, around_handle_reward), around_handle_reward)
    # reward for distance of each finger from the drawer (only when straddling the handle)
    finger_dist_reward = torch.zeros_like(rot_reward)
    lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
    rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
    finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
                                     torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
                                                 (0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward), finger_dist_reward)

    # regularization on the actions (summed for each environment)
    action_penalty = torch.sum(actions ** 2, dim=-1)

    # how far the cabinet has been opened out (DOF index 3 is the drawer_top_joint)
    open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3]  # drawer_top_joint

    rewards = dist_reward_scale * dist_reward + rot_reward_scale * rot_reward \
        + around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward \
        + finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty

    # bonus for opening drawer properly (staged thresholds on the drawer joint)
    rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
    rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
    rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)

    # prevent bad style in opening drawer: penalize fingers reaching past the
    # handle along X by more than distX_offset
    rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
                          torch.ones_like(rewards) * -1, rewards)
    rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
                          torch.ones_like(rewards) * -1, rewards)

    # reset if drawer is open or max length reached
    reset_buf = torch.where(cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(reset_buf), reset_buf)
    reset_buf = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)

    return rewards, reset_buf
@torch.jit.script
def compute_grasp_transforms(hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos,
                             drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
                             ):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
    # Compose each body's world pose with its local grasp offset to get the
    # world-space grasp frames for the franka hand and the drawer handle.
    grasp_rot_franka, grasp_pos_franka = tf_combine(
        hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos)
    grasp_rot_drawer, grasp_pos_drawer = tf_combine(
        drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos)
    return grasp_rot_franka, grasp_pos_franka, grasp_rot_drawer, grasp_pos_drawer
| 32,782 | Python | 56.716549 | 217 | 0.613141 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/__init__.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .ant import Ant
from .anymal import Anymal
from .anymal_terrain import AnymalTerrain
from .ball_balance import BallBalance
from .cartpole import Cartpole
from .factory.factory_task_gears import FactoryTaskGears
from .factory.factory_task_insertion import FactoryTaskInsertion
from .factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick
from .factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace
from .factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew
from .franka_cabinet import FrankaCabinet
from .franka_cube_stack import FrankaCubeStack
from .humanoid import Humanoid
from .humanoid_amp import HumanoidAMP
from .ingenuity import Ingenuity
from .quadcopter import Quadcopter
from .shadow_hand import ShadowHand
from .allegro_hand import AllegroHand
from .dextreme.allegro_hand_dextreme import AllegroHandDextremeManualDR, AllegroHandDextremeADR
from .trifinger import Trifinger
from .allegro_kuka.allegro_kuka_reorientation import AllegroKukaReorientation
from .allegro_kuka.allegro_kuka_regrasping import AllegroKukaRegrasping
from .allegro_kuka.allegro_kuka_throw import AllegroKukaThrow
from .allegro_kuka.allegro_kuka_two_arms_regrasping import AllegroKukaTwoArmsRegrasping
from .allegro_kuka.allegro_kuka_two_arms_reorientation import AllegroKukaTwoArmsReorientation
from .industreal.industreal_task_pegs_insert import IndustRealTaskPegsInsert
from .industreal.industreal_task_gears_insert import IndustRealTaskGearsInsert
def resolve_allegro_kuka(cfg, *args, **kwargs):
    """Instantiate the single-arm AllegroKuka task selected by ``cfg["env"]["subtask"]``.

    Fix: removed a stray debug ``print("!!!!!")`` left in the error branch;
    the function now matches the style of ``resolve_allegro_kuka_two_arms``.

    Raises:
        ValueError: if the subtask name is not one of the supported variants.
    """
    subtask_name: str = cfg["env"]["subtask"]
    subtask_map = dict(
        reorientation=AllegroKukaReorientation,
        throw=AllegroKukaThrow,
        regrasping=AllegroKukaRegrasping,
    )

    if subtask_name not in subtask_map:
        raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")

    return subtask_map[subtask_name](cfg, *args, **kwargs)
def resolve_allegro_kuka_two_arms(cfg, *args, **kwargs):
    """Instantiate the two-arm AllegroKuka task selected by ``cfg["env"]["subtask"]``."""
    subtask_map = dict(
        reorientation=AllegroKukaTwoArmsReorientation,
        regrasping=AllegroKukaTwoArmsRegrasping,
    )
    subtask_name: str = cfg["env"]["subtask"]

    task_cls = subtask_map.get(subtask_name)
    if task_cls is None:
        raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")
    return task_cls(cfg, *args, **kwargs)
# Mappings from strings to environments. Values are either task classes
# (instantiated directly) or resolver factories that pick a class from
# cfg["env"]["subtask"].
isaacgym_task_map = {
    "AllegroHand": AllegroHand,
    "AllegroKuka": resolve_allegro_kuka,
    "AllegroKukaTwoArms": resolve_allegro_kuka_two_arms,
    "AllegroHandManualDR": AllegroHandDextremeManualDR,
    "AllegroHandADR": AllegroHandDextremeADR,
    "Ant": Ant,
    "Anymal": Anymal,
    "AnymalTerrain": AnymalTerrain,
    "BallBalance": BallBalance,
    "Cartpole": Cartpole,
    "FactoryTaskGears": FactoryTaskGears,
    "FactoryTaskInsertion": FactoryTaskInsertion,
    "FactoryTaskNutBoltPick": FactoryTaskNutBoltPick,
    "FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace,
    "FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew,
    "IndustRealTaskPegsInsert": IndustRealTaskPegsInsert,
    "IndustRealTaskGearsInsert": IndustRealTaskGearsInsert,
    "FrankaCabinet": FrankaCabinet,
    "FrankaCubeStack": FrankaCubeStack,
    "Humanoid": Humanoid,
    "HumanoidAMP": HumanoidAMP,
    "Ingenuity": Ingenuity,
    "Quadcopter": Quadcopter,
    "ShadowHand": ShadowHand,
    "Trifinger": Trifinger,
}
| 4,960 | Python | 42.13913 | 95 | 0.777218 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid_amp.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE..
from enum import Enum
import numpy as np
import torch
import os
from gym import spaces
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgymenvs.tasks.amp.humanoid_amp_base import HumanoidAMPBase, dof_to_obs
from isaacgymenvs.tasks.amp.utils_amp import gym_util
from isaacgymenvs.tasks.amp.utils_amp.motion_lib import MotionLib
from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, calc_heading_quat_inv, quat_to_tan_norm, my_quat_rotate
NUM_AMP_OBS_PER_STEP = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos]
class HumanoidAMP(HumanoidAMPBase):
class StateInit(Enum):
    """Strategy for choosing an episode's initial character state."""
    Default = 0  # default pose
    Start = 1    # start of a reference motion
    Random = 2   # random time within a reference motion
    Hybrid = 3   # mix of Default and reference-state init (see hybridInitProb)
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
    """Set up the AMP humanoid task: config, motion library, and AMP observation buffers.

    Reads stateInit / hybridInitProb / numAMPObsSteps from cfg["env"], then
    defers simulation construction to the base class before loading the
    reference-motion library and allocating the AMP observation history.
    """
    self.cfg = cfg

    state_init = cfg["env"]["stateInit"]
    self._state_init = HumanoidAMP.StateInit[state_init]
    self._hybrid_init_prob = cfg["env"]["hybridInitProb"]
    self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"]
    # At least current + one history frame is required.
    assert(self._num_amp_obs_steps >= 2)

    self._reset_default_env_ids = []
    self._reset_ref_env_ids = []

    super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

    motion_file = cfg['env'].get('motion_file', "amp_humanoid_backflip.npy")
    motion_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets/amp/motions/" + motion_file)
    self._load_motion(motion_file_path)

    self.num_amp_obs = self._num_amp_obs_steps * NUM_AMP_OBS_PER_STEP
    self._amp_obs_space = spaces.Box(np.ones(self.num_amp_obs) * -np.Inf, np.ones(self.num_amp_obs) * np.Inf)

    # History buffer of AMP frames; slot 0 is the current frame, the rest are
    # views into the same storage (no copies).
    self._amp_obs_buf = torch.zeros((self.num_envs, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float)
    self._curr_amp_obs_buf = self._amp_obs_buf[:, 0]
    self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:]

    # Demo staging buffer is allocated lazily on the first fetch_amp_obs_demo call.
    self._amp_obs_demo_buf = None

    return
def post_physics_step(self):
    """Run the base-class step bookkeeping, then refresh and publish AMP features."""
    super().post_physics_step()

    # Shift history before writing the newest frame.
    self._update_hist_amp_obs()
    self._compute_amp_observations()

    # Expose the flattened (num_envs, steps * frame_size) AMP window to the trainer.
    flat_amp_obs = self._amp_obs_buf.view(-1, self.get_num_amp_obs())
    self.extras["amp_obs"] = flat_amp_obs
def get_num_amp_obs(self):
    """Return the flattened AMP observation size (steps * per-step frame size)."""
    return self.num_amp_obs
@property
def amp_observation_space(self):
    """Gym Box space describing the flattened AMP observation."""
    return self._amp_obs_space
def fetch_amp_obs_demo(self, num_samples):
    """Sample ``num_samples`` AMP observation windows from the reference motion library.

    Fix: removed a duplicate, dead definition of this method that immediately
    preceded this one — it delegated to a nonexistent ``self.task`` attribute
    and was always shadowed by this implementation.

    Args:
        num_samples: number of demo windows to draw; must match the staging
            buffer size after the first call.

    Returns:
        Tensor of shape (num_samples, num_amp_obs) with flattened demo windows.
    """
    dt = self.dt
    motion_ids = self._motion_lib.sample_motions(num_samples)

    # Lazily allocate the staging buffer on first use; afterwards the sample
    # count must stay constant.
    if self._amp_obs_demo_buf is None:
        self._build_amp_obs_demo_buf(num_samples)
    else:
        assert(self._amp_obs_demo_buf.shape[0] == num_samples)

    # For each sampled motion time, step backwards dt at a time to form a
    # window of _num_amp_obs_steps frames.
    motion_times0 = self._motion_lib.sample_time(motion_ids)
    motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps])
    motion_times = np.expand_dims(motion_times0, axis=-1)
    time_steps = -dt * np.arange(0, self._num_amp_obs_steps)
    motion_times = motion_times + time_steps

    motion_ids = motion_ids.flatten()
    motion_times = motion_times.flatten()
    root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
        = self._motion_lib.get_motion_state(motion_ids, motion_times)
    root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1)
    amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos,
                                          self._local_root_obs)
    self._amp_obs_demo_buf[:] = amp_obs_demo.view(self._amp_obs_demo_buf.shape)

    amp_obs_demo_flat = self._amp_obs_demo_buf.view(-1, self.get_num_amp_obs())
    return amp_obs_demo_flat
def _build_amp_obs_demo_buf(self, num_samples):
self._amp_obs_demo_buf = torch.zeros((num_samples, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float)
return
def _load_motion(self, motion_file):
self._motion_lib = MotionLib(motion_file=motion_file,
num_dofs=self.num_dof,
key_body_ids=self._key_body_ids.cpu().numpy(),
device=self.device)
return
def reset_idx(self, env_ids):
super().reset_idx(env_ids)
self._init_amp_obs(env_ids)
return
def _reset_actors(self, env_ids):
if (self._state_init == HumanoidAMP.StateInit.Default):
self._reset_default(env_ids)
elif (self._state_init == HumanoidAMP.StateInit.Start
or self._state_init == HumanoidAMP.StateInit.Random):
self._reset_ref_state_init(env_ids)
elif (self._state_init == HumanoidAMP.StateInit.Hybrid):
self._reset_hybrid_state_init(env_ids)
else:
assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init))
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self._terminate_buf[env_ids] = 0
return
def _reset_default(self, env_ids):
self._dof_pos[env_ids] = self._initial_dof_pos[env_ids]
self._dof_vel[env_ids] = self._initial_dof_vel[env_ids]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self._reset_default_env_ids = env_ids
return
def _reset_ref_state_init(self, env_ids):
num_envs = env_ids.shape[0]
motion_ids = self._motion_lib.sample_motions(num_envs)
if (self._state_init == HumanoidAMP.StateInit.Random
or self._state_init == HumanoidAMP.StateInit.Hybrid):
motion_times = self._motion_lib.sample_time(motion_ids)
elif (self._state_init == HumanoidAMP.StateInit.Start):
motion_times = np.zeros(num_envs)
else:
assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init))
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
= self._motion_lib.get_motion_state(motion_ids, motion_times)
self._set_env_state(env_ids=env_ids,
root_pos=root_pos,
root_rot=root_rot,
dof_pos=dof_pos,
root_vel=root_vel,
root_ang_vel=root_ang_vel,
dof_vel=dof_vel)
self._reset_ref_env_ids = env_ids
self._reset_ref_motion_ids = motion_ids
self._reset_ref_motion_times = motion_times
return
def _reset_hybrid_state_init(self, env_ids):
num_envs = env_ids.shape[0]
ref_probs = to_torch(np.array([self._hybrid_init_prob] * num_envs), device=self.device)
ref_init_mask = torch.bernoulli(ref_probs) == 1.0
ref_reset_ids = env_ids[ref_init_mask]
if (len(ref_reset_ids) > 0):
self._reset_ref_state_init(ref_reset_ids)
default_reset_ids = env_ids[torch.logical_not(ref_init_mask)]
if (len(default_reset_ids) > 0):
self._reset_default(default_reset_ids)
return
def _init_amp_obs(self, env_ids):
self._compute_amp_observations(env_ids)
if (len(self._reset_default_env_ids) > 0):
self._init_amp_obs_default(self._reset_default_env_ids)
if (len(self._reset_ref_env_ids) > 0):
self._init_amp_obs_ref(self._reset_ref_env_ids, self._reset_ref_motion_ids,
self._reset_ref_motion_times)
return
def _init_amp_obs_default(self, env_ids):
curr_amp_obs = self._curr_amp_obs_buf[env_ids].unsqueeze(-2)
self._hist_amp_obs_buf[env_ids] = curr_amp_obs
return
def _init_amp_obs_ref(self, env_ids, motion_ids, motion_times):
dt = self.dt
motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps - 1])
motion_times = np.expand_dims(motion_times, axis=-1)
time_steps = -dt * (np.arange(0, self._num_amp_obs_steps - 1) + 1)
motion_times = motion_times + time_steps
motion_ids = motion_ids.flatten()
motion_times = motion_times.flatten()
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \
= self._motion_lib.get_motion_state(motion_ids, motion_times)
root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1)
amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos,
self._local_root_obs)
self._hist_amp_obs_buf[env_ids] = amp_obs_demo.view(self._hist_amp_obs_buf[env_ids].shape)
return
def _set_env_state(self, env_ids, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel):
self._root_states[env_ids, 0:3] = root_pos
self._root_states[env_ids, 3:7] = root_rot
self._root_states[env_ids, 7:10] = root_vel
self._root_states[env_ids, 10:13] = root_ang_vel
self._dof_pos[env_ids] = dof_pos
self._dof_vel[env_ids] = dof_vel
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
return
def _update_hist_amp_obs(self, env_ids=None):
if (env_ids is None):
for i in reversed(range(self._amp_obs_buf.shape[1] - 1)):
self._amp_obs_buf[:, i + 1] = self._amp_obs_buf[:, i]
else:
for i in reversed(range(self._amp_obs_buf.shape[1] - 1)):
self._amp_obs_buf[env_ids, i + 1] = self._amp_obs_buf[env_ids, i]
return
def _compute_amp_observations(self, env_ids=None):
key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :]
if (env_ids is None):
self._curr_amp_obs_buf[:] = build_amp_observations(self._root_states, self._dof_pos, self._dof_vel, key_body_pos,
self._local_root_obs)
else:
self._curr_amp_obs_buf[env_ids] = build_amp_observations(self._root_states[env_ids], self._dof_pos[env_ids],
self._dof_vel[env_ids], key_body_pos[env_ids],
self._local_root_obs)
return
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def build_amp_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs):
    # type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor
    """Build one AMP observation frame per env.

    Concatenates (in order): root height, root rotation as a 6D
    tangent/normal encoding, root linear and angular velocity rotated into
    the heading frame, dof_to_obs(dof_pos), dof velocities, and key-body
    positions expressed relative to the root in the heading frame.

    root_states packs [pos(3), rot quat(4), lin vel(3), ang vel(3)] per env.
    If local_root_obs, the root rotation itself is also expressed relative
    to the heading.  (TorchScript function — kept structurally unchanged.)
    """
    root_pos = root_states[:, 0:3]
    root_rot = root_states[:, 3:7]
    root_vel = root_states[:, 7:10]
    root_ang_vel = root_states[:, 10:13]

    root_h = root_pos[:, 2:3]
    # Inverse heading rotation: maps world-frame vectors into a frame with
    # the heading (yaw) removed.
    heading_rot = calc_heading_quat_inv(root_rot)

    if (local_root_obs):
        root_rot_obs = quat_mul(heading_rot, root_rot)
    else:
        root_rot_obs = root_rot
    # 6D tangent/normal encoding avoids quaternion double-cover ambiguity.
    root_rot_obs = quat_to_tan_norm(root_rot_obs)

    local_root_vel = my_quat_rotate(heading_rot, root_vel)
    local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel)

    # Key-body positions relative to the root, rotated into the heading
    # frame; flattened to 2D because my_quat_rotate expects [N, 3]/[N, 4].
    root_pos_expand = root_pos.unsqueeze(-2)
    local_key_body_pos = key_body_pos - root_pos_expand

    heading_rot_expand = heading_rot.unsqueeze(-2)
    heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1))
    flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2])
    flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
                                               heading_rot_expand.shape[2])
    local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos)
    flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2])

    dof_obs = dof_to_obs(dof_pos)

    obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1)
    return obs
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp, compute_heading_and_up, compute_rot, normalize_angle
from isaacgymenvs.tasks.base.vec_task import VecTask
class Humanoid(VecTask):
    """Humanoid locomotion task: apply joint torques to run toward a fixed
    distant target (+x, 1000 m away).

    Observation (108-dim) layout is defined by compute_humanoid_observations;
    actions (21-dim) are per-actuator torques scaled by motor effort and
    ``powerScale``.

    Fix vs. previous revision: ``self.viewer != None`` -> ``is not None``
    (PEP 8 identity comparison with None); no behavioral change.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Read task config, build the sim via VecTask, then acquire and
        wrap the GPU state tensors and precompute constant helper tensors."""
        self.cfg = cfg

        self.randomization_params = self.cfg["task"]["randomization_params"]
        self.randomize = self.cfg["task"]["randomize"]
        self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
        self.angular_velocity_scale = self.cfg["env"].get("angularVelocityScale", 0.1)
        self.contact_force_scale = self.cfg["env"]["contactForceScale"]
        self.power_scale = self.cfg["env"]["powerScale"]
        self.heading_weight = self.cfg["env"]["headingWeight"]
        self.up_weight = self.cfg["env"]["upWeight"]
        self.actions_cost_scale = self.cfg["env"]["actionsCost"]
        self.energy_cost_scale = self.cfg["env"]["energyCost"]
        self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"]
        self.death_cost = self.cfg["env"]["deathCost"]
        self.termination_height = self.cfg["env"]["terminationHeight"]

        self.debug_viz = self.cfg["env"]["enableDebugVis"]
        self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
        self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
        self.plane_restitution = self.cfg["env"]["plane"]["restitution"]

        self.max_episode_length = self.cfg["env"]["episodeLength"]

        self.cfg["env"]["numObservations"] = 108
        self.cfg["env"]["numActions"] = 21

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        if self.viewer is not None:  # fixed: was `!= None`
            cam_pos = gymapi.Vec3(50.0, 25.0, 2.4)
            cam_target = gymapi.Vec3(45.0, 25.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # get gym GPU state tensors
        actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)

        # Two foot force sensors per env, 6 values (force + torque) each.
        sensors_per_env = 2
        self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6)

        dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
        self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof)

        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)

        self.root_states = gymtorch.wrap_tensor(actor_root_state)
        self.initial_root_states = self.root_states.clone()
        self.initial_root_states[:, 7:13] = 0  # zero initial velocities

        # create some wrapper tensors for different slices
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
        self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]

        # Initial DOF position: 0 when 0 is inside the joint limits,
        # otherwise clamp to the nearer limit.
        self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float)
        zero_tensor = torch.tensor([0.0], device=self.device)
        self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower,
                                           torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos))
        self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float)

        # initialize some data used later on
        self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
        self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
        self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))

        self.basis_vec0 = self.heading_vec.clone()
        self.basis_vec1 = self.up_vec.clone()

        self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1))
        self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
        self.dt = self.cfg["sim"]["dt"]
        # Potential = -distance/dt; progress reward is its increase per step.
        self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs)
        self.prev_potentials = self.potentials.clone()

    def create_sim(self):
        """Create the sim, ground plane and envs; apply initial randomization."""
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2
        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)

        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

    def _create_ground_plane(self):
        """Add a z-up ground plane with the configured friction/restitution."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        plane_params.static_friction = self.plane_static_friction
        plane_params.dynamic_friction = self.plane_dynamic_friction
        plane_params.restitution = self.plane_restitution
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the humanoid MJCF asset, attach foot force sensors, and
        instantiate one actor per env, recording DOF limits along the way."""
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
        asset_file = "mjcf/nv_humanoid.xml"

        if "asset" in self.cfg["env"]:
            asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)

        asset_path = os.path.join(asset_root, asset_file)
        asset_root = os.path.dirname(asset_path)
        asset_file = os.path.basename(asset_path)

        asset_options = gymapi.AssetOptions()
        asset_options.angular_damping = 0.01
        asset_options.max_angular_velocity = 100.0
        # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym
        asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
        humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)

        # Note - for this asset we are loading the actuator info from the MJCF
        actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset)
        motor_efforts = [prop.motor_effort for prop in actuator_props]

        # create force sensors at the feet
        right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot")
        left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot")
        sensor_pose = gymapi.Transform()
        self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose)
        self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose)

        self.max_motor_effort = max(motor_efforts)
        self.motor_efforts = to_torch(motor_efforts, device=self.device)

        self.torso_index = 0
        self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset)
        self.num_dof = self.gym.get_asset_dof_count(humanoid_asset)
        self.num_joints = self.gym.get_asset_joint_count(humanoid_asset)

        start_pose = gymapi.Transform()
        start_pose.p = gymapi.Vec3(*get_axis_params(1.34, self.up_axis_idx))
        start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)

        self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device)

        self.humanoid_handles = []
        self.envs = []
        self.dof_limits_lower = []
        self.dof_limits_upper = []

        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, 0, 0)

            self.gym.enable_actor_dof_force_sensors(env_ptr, handle)

            for j in range(self.num_bodies):
                self.gym.set_rigid_body_color(
                    env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))

            self.envs.append(env_ptr)
            self.humanoid_handles.append(handle)

            # Some assets report limits swapped; normalize lower <= upper.
            dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle)
            for j in range(self.num_dof):
                if dof_prop['lower'][j] > dof_prop['upper'][j]:
                    self.dof_limits_lower.append(dof_prop['upper'][j])
                    self.dof_limits_upper.append(dof_prop['lower'][j])
                else:
                    self.dof_limits_lower.append(dof_prop['lower'][j])
                    self.dof_limits_upper.append(dof_prop['upper'][j])

        self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device)
        self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device)

        self.extremities = to_torch([5, 8], device=self.device, dtype=torch.long)

    def compute_reward(self, actions):
        """Fill rew_buf/reset_buf via the scripted reward function."""
        self.rew_buf[:], self.reset_buf = compute_humanoid_reward(
            self.obs_buf,
            self.reset_buf,
            self.progress_buf,
            self.actions,
            self.up_weight,
            self.heading_weight,
            self.potentials,
            self.prev_potentials,
            self.actions_cost_scale,
            self.energy_cost_scale,
            self.joints_at_limit_cost_scale,
            self.max_motor_effort,
            self.motor_efforts,
            self.termination_height,
            self.death_cost,
            self.max_episode_length
        )

    def compute_observations(self):
        """Refresh sim state tensors and fill obs_buf (also updates the
        potentials and heading/up vectors as side outputs)."""
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_force_sensor_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)

        self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_humanoid_observations(
            self.obs_buf, self.root_states, self.targets, self.potentials,
            self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_force_tensor,
            self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
            self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.angular_velocity_scale,
            self.basis_vec0, self.basis_vec1)

    def reset_idx(self, env_ids):
        """Reset the given envs with a small random perturbation around the
        initial pose, push state to the sim, and re-seed the potentials."""
        # Randomization can happen only at reset time, since it can reset actor positions on GPU
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

        positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device)
        velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)

        self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper)
        self.dof_vel[env_ids] = velocities

        env_ids_int32 = env_ids.to(dtype=torch.int32)
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.initial_root_states),
                                                     gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))

        to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3]
        to_target[:, self.up_axis_idx] = 0
        self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
        self.potentials[env_ids] = self.prev_potentials[env_ids].clone()

        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0

    def pre_physics_step(self, actions):
        """Convert normalized actions into joint torques and apply them."""
        self.actions = actions.to(self.device).clone()
        forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale
        force_tensor = gymtorch.unwrap_tensor(forces)
        self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor)

    def post_physics_step(self):
        """Advance counters, reset finished envs, compute obs/reward, and
        optionally draw heading/up debug lines."""
        self.progress_buf += 1
        self.randomize_buf += 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.compute_observations()
        self.compute_reward(self.actions)

        # debug viz
        if self.viewer and self.debug_viz:
            self.gym.clear_lines(self.viewer)

            points = []
            colors = []
            for i in range(self.num_envs):
                origin = self.gym.get_env_origin(self.envs[i])
                pose = self.root_states[:, 0:3][i].cpu().numpy()
                glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2])
                points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(),
                               glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(),
                               glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()])
                colors.append([0.97, 0.1, 0.06])
                points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(),
                               glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()])
                colors.append([0.05, 0.99, 0.04])

            self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_humanoid_reward(
    obs_buf,
    reset_buf,
    progress_buf,
    actions,
    up_weight,
    heading_weight,
    potentials,
    prev_potentials,
    actions_cost_scale,
    energy_cost_scale,
    joints_at_limit_cost_scale,
    max_motor_effort,
    motor_efforts,
    termination_height,
    death_cost,
    max_episode_length
):
    # type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, Tensor, float, float, float) -> Tuple[Tensor, Tensor]
    """Per-env humanoid reward and reset flags.

    Observation indices follow the layout built by
    compute_humanoid_observations: [0] torso height, [10] up projection,
    [11] heading projection, [12:33] scaled DOF positions,
    [33:54] scaled DOF velocities.  (TorchScript function — kept
    structurally unchanged.)
    """
    # reward from the direction headed: full weight once heading_proj > 0.8,
    # proportional below that.
    heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
    heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)

    # reward for being upright (bonus only above the 0.93 up-projection threshold)
    up_reward = torch.zeros_like(heading_reward)
    up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)

    actions_cost = torch.sum(actions ** 2, dim=-1)

    # energy cost reward: penalties weighted by each joint's relative motor effort
    motor_effort_ratio = motor_efforts / max_motor_effort
    # cost ramps in over the last 2% of the joint range (|scaled pos| > 0.98)
    scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
    dof_at_limit_cost = torch.sum((torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1)

    # electricity ~ |torque * velocity|
    electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 33:54]) * motor_effort_ratio.unsqueeze(0), dim=-1)

    # reward for duration of being alive
    alive_reward = torch.ones_like(potentials) * 2.0
    # progress toward the target = increase in potential since last step
    progress_reward = potentials - prev_potentials

    total_reward = progress_reward + alive_reward + up_reward + heading_reward - \
        actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost

    # adjust reward for fallen agents (torso below termination height)
    total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward)

    # reset agents: fallen, or episode time limit reached
    reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
    reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)

    return total_reward, reset
@torch.jit.script
def compute_humanoid_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel,
                                  dof_force, dof_limits_lower, dof_limits_upper, dof_vel_scale,
                                  sensor_force_torques, actions, dt, contact_force_scale, angular_velocity_scale,
                                  basis_vec0, basis_vec1):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, float, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
    """Build the 108-dim humanoid observation and updated potentials.

    Returns (obs, potentials, prev_potentials, up_vec, heading_vec).
    root_states packs [pos(3), rot quat(4), lin vel(3), ang vel(3)] per env.
    (TorchScript function — kept structurally unchanged.)
    """
    torso_position = root_states[:, 0:3]
    torso_rotation = root_states[:, 3:7]
    velocity = root_states[:, 7:10]
    ang_velocity = root_states[:, 10:13]

    # Distance potential ignores the vertical (z) component.
    to_target = targets - torso_position
    to_target[:, 2] = 0

    prev_potentials_new = potentials.clone()
    potentials = -torch.norm(to_target, p=2, dim=-1) / dt

    torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
        torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)

    vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
        torso_quat, velocity, ang_velocity, targets, torso_position)

    roll = normalize_angle(roll).unsqueeze(-1)
    yaw = normalize_angle(yaw).unsqueeze(-1)
    angle_to_target = normalize_angle(angle_to_target).unsqueeze(-1)
    # Map DOF positions into [-1, 1] relative to their limits.
    dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)

    # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs (21), num_dofs (21), 6, num_acts (21)
    obs = torch.cat((torso_position[:, 2].view(-1, 1), vel_loc, angvel_loc * angular_velocity_scale,
                     yaw, roll, angle_to_target, up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1),
                     dof_pos_scaled, dof_vel * dof_vel_scale, dof_force * contact_force_scale,
                     sensor_force_torques.view(-1, 12) * contact_force_scale, actions), dim=-1)

    return obs, potentials, prev_potentials_new, up_vec, heading_vec
| 20,168 | Python | 47.717391 | 217 | 0.631743 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/cartpole.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from .base.vec_task import VecTask
class Cartpole(VecTask):
    """Cartpole balancing task: apply an effort to the cart DOF to keep the
    pole upright and the cart near the origin.

    Observation (4): cart position, cart velocity, pole angle, pole angular
    velocity.  Action (1): normalized cart effort in [-1, 1], scaled by
    ``maxEffort``.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Read task config, build the sim via VecTask, and wrap the DOF
        state tensor into position/velocity views."""
        self.cfg = cfg

        self.reset_dist = self.cfg["env"]["resetDist"]

        self.max_push_effort = self.cfg["env"]["maxEffort"]
        self.max_episode_length = 500

        self.cfg["env"]["numObservations"] = 4
        self.cfg["env"]["numActions"] = 1

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        # Views share storage with dof_state: [env, dof, {pos, vel}].
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
        self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]

    def create_sim(self):
        """Create the sim, ground plane, and the grid of envs."""
        # set the up axis to be z-up given that assets are y-up by default
        self.up_axis = self.cfg["sim"]["up_axis"]

        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

    def _create_ground_plane(self):
        """Add a ground plane whose normal matches the configured up axis."""
        plane_params = gymapi.PlaneParams()
        # set the normal force to be z dimension
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0)
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the cartpole URDF and instantiate one actor per env, with
        the cart DOF effort-driven and the pole DOF free."""
        # define plane on which environments are initialized
        lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing)
        upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)

        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
        asset_file = "urdf/cartpole.urdf"

        if "asset" in self.cfg["env"]:
            asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
            asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)

        asset_path = os.path.join(asset_root, asset_file)
        asset_root = os.path.dirname(asset_path)
        asset_file = os.path.basename(asset_path)

        asset_options = gymapi.AssetOptions()
        asset_options.fix_base_link = True
        cartpole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
        self.num_dof = self.gym.get_asset_dof_count(cartpole_asset)

        pose = gymapi.Transform()
        if self.up_axis == 'z':
            pose.p.z = 2.0
            # asset is rotated z-up by default, no additional rotations needed
            pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        else:
            pose.p.y = 2.0
            # rotate the y-up asset to stand upright for a y-up sim
            pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2)

        self.cartpole_handles = []
        self.envs = []
        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            cartpole_handle = self.gym.create_actor(env_ptr, cartpole_asset, pose, "cartpole", i, 1, 0)

            # DOF 0 (cart) takes efforts; DOF 1 (pole) swings freely.
            dof_props = self.gym.get_actor_dof_properties(env_ptr, cartpole_handle)
            dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT
            dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE
            dof_props['stiffness'][:] = 0.0
            dof_props['damping'][:] = 0.0
            self.gym.set_actor_dof_properties(env_ptr, cartpole_handle, dof_props)

            self.envs.append(env_ptr)
            self.cartpole_handles.append(cartpole_handle)

    def compute_reward(self):
        """Fill rew_buf/reset_buf via the scripted reward function."""
        # retrieve environment observations from buffer
        pole_angle = self.obs_buf[:, 2]
        pole_vel = self.obs_buf[:, 3]
        cart_vel = self.obs_buf[:, 1]
        cart_pos = self.obs_buf[:, 0]

        self.rew_buf[:], self.reset_buf[:] = compute_cartpole_reward(
            pole_angle, pole_vel, cart_vel, cart_pos,
            self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length
        )

    def compute_observations(self, env_ids=None):
        """Refresh DOF state and copy [cart pos, cart vel, pole angle,
        pole ang vel] into obs_buf for the given envs (all by default)."""
        if env_ids is None:
            env_ids = np.arange(self.num_envs)

        self.gym.refresh_dof_state_tensor(self.sim)

        self.obs_buf[env_ids, 0] = self.dof_pos[env_ids, 0].squeeze()
        self.obs_buf[env_ids, 1] = self.dof_vel[env_ids, 0].squeeze()
        self.obs_buf[env_ids, 2] = self.dof_pos[env_ids, 1].squeeze()
        self.obs_buf[env_ids, 3] = self.dof_vel[env_ids, 1].squeeze()

        return self.obs_buf

    def reset_idx(self, env_ids):
        """Reset the given envs to small random positions/velocities and
        push the DOF state to the simulator."""
        positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)
        velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)

        self.dof_pos[env_ids, :] = positions[:]
        self.dof_vel[env_ids, :] = velocities[:]

        env_ids_int32 = env_ids.to(dtype=torch.int32)
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def pre_physics_step(self, actions):
        """Scale actions to efforts and apply them to the cart DOF only
        (strided assignment leaves the pole DOF at zero effort)."""
        actions_tensor = torch.zeros(self.num_envs * self.num_dof, device=self.device, dtype=torch.float)
        actions_tensor[::self.num_dof] = actions.to(self.device).squeeze() * self.max_push_effort
        forces = gymtorch.unwrap_tensor(actions_tensor)
        self.gym.set_dof_actuation_force_tensor(self.sim, forces)

    def post_physics_step(self):
        """Advance counters, reset finished envs, and compute obs/reward."""
        self.progress_buf += 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.compute_observations()
        self.compute_reward()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_cartpole_reward(pole_angle, pole_vel, cart_vel, cart_pos,
                            reset_dist, reset_buf, progress_buf, max_episode_length):
    # type: (Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
    # Reward an upright pole while lightly penalizing cart and pole motion.
    reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)

    # Failure: cart outside the track bounds, or pole past horizontal.
    cart_out = torch.abs(cart_pos) > reset_dist
    pole_down = torch.abs(pole_angle) > np.pi / 2
    failed = cart_out | pole_down

    # Failed envs get a fixed -2 penalty and are flagged for reset.
    reward = torch.where(failed, torch.ones_like(reward) * -2.0, reward)
    reset = torch.where(failed, torch.ones_like(reset_buf), reset_buf)
    # Episodes also end on timeout.
    reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)

    return reward, reset
| 9,134 | Python | 45.370558 | 217 | 0.629297 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cube_stack.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os

import numpy as np
import torch

from isaacgym import gymapi
from isaacgym import gymtorch

from isaacgymenvs.tasks.base.vec_task import VecTask
from isaacgymenvs.utils.torch_jit_utils import quat_apply, quat_mul, tensor_clamp, to_torch
@torch.jit.script
def axisangle2quat(vec, eps=1e-6):
    """
    Convert scaled axis-angle exponential coordinates to a quaternion.

    Args:
        vec (tensor): (..., 3) tensor where the final dim is (ax, ay, az) axis-angle coordinates
        eps (float): angles with magnitude below this are treated as zero (identity rotation)

    Returns:
        tensor: (..., 4) tensor where the final dim is an (x, y, z, w) float quaternion
    """
    # type: (Tensor, float) -> Tensor
    # Remember the batch shape, then work on a flat (N, 3) view
    batch_shape = vec.shape[:-1]
    flat = vec.reshape(-1, 3)

    # Rotation magnitude of each entry
    theta = torch.norm(flat, dim=-1, keepdim=True)

    # Start from the identity quaternion everywhere
    out = torch.zeros(torch.prod(torch.tensor(batch_shape)), 4, device=vec.device)
    out[:, 3] = 1.0

    # Entries with a non-negligible angle get a real rotation:
    # q = (axis * sin(theta/2), cos(theta/2))
    nonzero = theta.reshape(-1) > eps
    out[nonzero, :] = torch.cat([
        flat[nonzero, :] * torch.sin(theta[nonzero, :] / 2.0) / theta[nonzero, :],
        torch.cos(theta[nonzero, :] / 2.0)
    ], dim=-1)

    # Restore the batch shape with a trailing quaternion dim
    return out.reshape(list(batch_shape) + [4, ])
class FrankaCubeStack(VecTask):
    """Franka Panda cube-stacking task.

    The robot must pick up the small cube (A) and stack it on the larger cube (B).
    Control is either operational-space ("osc": 6-DOF delta EEF actions) or direct
    joint torques ("joint_tor"), plus one binary gripper action in both modes.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Parse the task config, create the sim via VecTask, and set up buffers/gains."""
        self.cfg = cfg

        self.max_episode_length = self.cfg["env"]["episodeLength"]

        self.action_scale = self.cfg["env"]["actionScale"]
        self.start_position_noise = self.cfg["env"]["startPositionNoise"]
        self.start_rotation_noise = self.cfg["env"]["startRotationNoise"]
        self.franka_position_noise = self.cfg["env"]["frankaPositionNoise"]
        self.franka_rotation_noise = self.cfg["env"]["frankaRotationNoise"]
        self.franka_dof_noise = self.cfg["env"]["frankaDofNoise"]
        self.aggregate_mode = self.cfg["env"]["aggregateMode"]

        # Create dicts to pass to reward function
        self.reward_settings = {
            "r_dist_scale": self.cfg["env"]["distRewardScale"],
            "r_lift_scale": self.cfg["env"]["liftRewardScale"],
            "r_align_scale": self.cfg["env"]["alignRewardScale"],
            "r_stack_scale": self.cfg["env"]["stackRewardScale"],
        }

        # Controller type
        self.control_type = self.cfg["env"]["controlType"]
        assert self.control_type in {"osc", "joint_tor"},\
            "Invalid control type specified. Must be one of: {osc, joint_tor}"

        # dimensions
        # obs include: cubeA_pose (7) + cubeB_pos (3) + eef_pose (7) + q_gripper (2)
        self.cfg["env"]["numObservations"] = 19 if self.control_type == "osc" else 26
        # actions include: delta EEF if OSC (6) or joint torques (7) + bool gripper (1)
        self.cfg["env"]["numActions"] = 7 if self.control_type == "osc" else 8

        # Values to be filled in at runtime
        self.states = {}            # will be dict filled with relevant states to use for reward calculation
        self.handles = {}           # will be dict mapping names to relevant sim handles
        self.num_dofs = None        # Total number of DOFs per env
        self.actions = None         # Current actions to be deployed
        self._init_cubeA_state = None  # Initial state of cubeA for the current env
        self._init_cubeB_state = None  # Initial state of cubeB for the current env
        self._cubeA_state = None    # Current state of cubeA for the current env
        self._cubeB_state = None    # Current state of cubeB for the current env
        self._cubeA_id = None       # Actor ID corresponding to cubeA for a given env
        self._cubeB_id = None       # Actor ID corresponding to cubeB for a given env

        # Tensor placeholders
        self._root_state = None         # State of root body (n_envs, 13)
        self._dof_state = None          # State of all joints (n_envs, n_dof)
        self._q = None                  # Joint positions (n_envs, n_dof)
        self._qd = None                 # Joint velocities (n_envs, n_dof)
        self._rigid_body_state = None   # State of all rigid bodies (n_envs, n_bodies, 13)
        self._contact_forces = None     # Contact forces in sim
        self._eef_state = None          # end effector state (at grasping point)
        self._eef_lf_state = None       # end effector state (at left fingertip)
        self._eef_rf_state = None       # end effector state (at right fingertip)
        self._j_eef = None              # Jacobian for end effector
        self._mm = None                 # Mass matrix
        self._arm_control = None        # Tensor buffer for controlling arm
        self._gripper_control = None    # Tensor buffer for controlling gripper
        self._pos_control = None        # Position actions
        self._effort_control = None     # Torque actions
        self._franka_effort_limits = None  # Actuator effort limits for franka
        self._global_indices = None     # Unique indices corresponding to all envs in flattened array

        self.debug_viz = self.cfg["env"]["enableDebugVis"]

        self.up_axis = "z"
        self.up_axis_idx = 2

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        # Franka defaults
        self.franka_default_dof_pos = to_torch(
            [0, 0.1963, 0, -2.6180, 0, 2.9416, 0.7854, 0.035, 0.035], device=self.device
        )

        # OSC Gains (critically damped: kd = 2 * sqrt(kp))
        self.kp = to_torch([150.] * 6, device=self.device)
        self.kd = 2 * torch.sqrt(self.kp)
        self.kp_null = to_torch([10.] * 7, device=self.device)
        self.kd_null = 2 * torch.sqrt(self.kp_null)

        # Set control limits
        self.cmd_limit = to_torch([0.1, 0.1, 0.1, 0.5, 0.5, 0.5], device=self.device).unsqueeze(0) if \
            self.control_type == "osc" else self._franka_effort_limits[:7].unsqueeze(0)

        # Reset all environments
        self.reset_idx(torch.arange(self.num_envs, device=self.device))

        # Refresh tensors
        self._refresh()

    def create_sim(self):
        """Create the physics sim (z-up, gravity -9.81), ground plane, and all envs."""
        self.sim_params.up_axis = gymapi.UP_AXIS_Z
        self.sim_params.gravity.x = 0
        self.sim_params.gravity.y = 0
        self.sim_params.gravity.z = -9.81
        self.sim = super().create_sim(
            self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

    def _create_ground_plane(self):
        """Add a flat ground plane with +z normal."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load assets and instantiate franka, table, table stand, and both cubes per env."""
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
        franka_asset_file = "urdf/franka_description/robots/franka_panda_gripper.urdf"

        if "asset" in self.cfg["env"]:
            asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
            franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file)

        # load franka asset
        asset_options = gymapi.AssetOptions()
        asset_options.flip_visual_attachments = True
        asset_options.fix_base_link = True
        asset_options.collapse_fixed_joints = False
        asset_options.disable_gravity = True
        asset_options.thickness = 0.001
        asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
        asset_options.use_mesh_materials = True
        franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options)

        # Arm joints are effort-driven (zero PD gains); only the two gripper DOFs get PD gains.
        franka_dof_stiffness = to_torch([0, 0, 0, 0, 0, 0, 0, 5000., 5000.], dtype=torch.float, device=self.device)
        franka_dof_damping = to_torch([0, 0, 0, 0, 0, 0, 0, 1.0e2, 1.0e2], dtype=torch.float, device=self.device)

        # Create table asset
        table_pos = [0.0, 0.0, 1.0]
        table_thickness = 0.05
        table_opts = gymapi.AssetOptions()
        table_opts.fix_base_link = True
        table_asset = self.gym.create_box(self.sim, *[1.2, 1.2, table_thickness], table_opts)

        # Create table stand asset
        table_stand_height = 0.1
        table_stand_pos = [-0.5, 0.0, 1.0 + table_thickness / 2 + table_stand_height / 2]
        table_stand_opts = gymapi.AssetOptions()
        table_stand_opts.fix_base_link = True
        # FIX: previously passed table_opts here, leaving table_stand_opts unused
        # (behavior was identical since both only set fix_base_link=True)
        table_stand_asset = self.gym.create_box(self.sim, *[0.2, 0.2, table_stand_height], table_stand_opts)

        self.cubeA_size = 0.050
        self.cubeB_size = 0.070

        # Create cubeA asset
        cubeA_opts = gymapi.AssetOptions()
        cubeA_asset = self.gym.create_box(self.sim, *([self.cubeA_size] * 3), cubeA_opts)
        cubeA_color = gymapi.Vec3(0.6, 0.1, 0.0)

        # Create cubeB asset
        cubeB_opts = gymapi.AssetOptions()
        cubeB_asset = self.gym.create_box(self.sim, *([self.cubeB_size] * 3), cubeB_opts)
        cubeB_color = gymapi.Vec3(0.0, 0.4, 0.1)

        self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
        self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset)

        print("num franka bodies: ", self.num_franka_bodies)
        print("num franka dofs: ", self.num_franka_dofs)

        # set franka dof properties
        franka_dof_props = self.gym.get_asset_dof_properties(franka_asset)
        self.franka_dof_lower_limits = []
        self.franka_dof_upper_limits = []
        self._franka_effort_limits = []
        for i in range(self.num_franka_dofs):
            # DOFs 0-6 (arm) are effort-controlled; DOFs 7-8 (gripper) are position-controlled
            franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if i > 6 else gymapi.DOF_MODE_EFFORT
            if self.physics_engine == gymapi.SIM_PHYSX:
                franka_dof_props['stiffness'][i] = franka_dof_stiffness[i]
                franka_dof_props['damping'][i] = franka_dof_damping[i]
            else:
                franka_dof_props['stiffness'][i] = 7000.0
                franka_dof_props['damping'][i] = 50.0

            self.franka_dof_lower_limits.append(franka_dof_props['lower'][i])
            self.franka_dof_upper_limits.append(franka_dof_props['upper'][i])
            self._franka_effort_limits.append(franka_dof_props['effort'][i])

        self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device)
        self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device)
        self._franka_effort_limits = to_torch(self._franka_effort_limits, device=self.device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        self.franka_dof_speed_scales[[7, 8]] = 0.1
        franka_dof_props['effort'][7] = 200
        franka_dof_props['effort'][8] = 200

        # Define start pose for franka
        franka_start_pose = gymapi.Transform()
        franka_start_pose.p = gymapi.Vec3(-0.45, 0.0, 1.0 + table_thickness / 2 + table_stand_height)
        franka_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)

        # Define start pose for table
        table_start_pose = gymapi.Transform()
        table_start_pose.p = gymapi.Vec3(*table_pos)
        table_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        self._table_surface_pos = np.array(table_pos) + np.array([0, 0, table_thickness / 2])
        self.reward_settings["table_height"] = self._table_surface_pos[2]

        # Define start pose for table stand
        table_stand_start_pose = gymapi.Transform()
        table_stand_start_pose.p = gymapi.Vec3(*table_stand_pos)
        table_stand_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)

        # Define start pose for cubes (doesn't really matter since they get overridden during reset() anyways)
        cubeA_start_pose = gymapi.Transform()
        cubeA_start_pose.p = gymapi.Vec3(-1.0, 0.0, 0.0)
        cubeA_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        cubeB_start_pose = gymapi.Transform()
        cubeB_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0)
        cubeB_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)

        # compute aggregate size
        num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset)
        num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset)
        max_agg_bodies = num_franka_bodies + 4  # 1 for table, table stand, cubeA, cubeB
        max_agg_shapes = num_franka_shapes + 4  # 1 for table, table stand, cubeA, cubeB

        self.frankas = []
        self.envs = []

        # Create environments
        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)

            # Create actors and define aggregate group appropriately depending on setting
            # NOTE: franka should ALWAYS be loaded first in sim!
            if self.aggregate_mode >= 3:
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            # Create franka
            # Potentially randomize start pose
            if self.franka_position_noise > 0:
                rand_xy = self.franka_position_noise * (-1. + np.random.rand(2) * 2.0)
                franka_start_pose.p = gymapi.Vec3(-0.45 + rand_xy[0], 0.0 + rand_xy[1],
                                                  1.0 + table_thickness / 2 + table_stand_height)
            if self.franka_rotation_noise > 0:
                rand_rot = torch.zeros(1, 3)
                rand_rot[:, -1] = self.franka_rotation_noise * (-1. + np.random.rand() * 2.0)
                new_quat = axisangle2quat(rand_rot).squeeze().numpy().tolist()
                franka_start_pose.r = gymapi.Quat(*new_quat)
            franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 0, 0)
            self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props)

            if self.aggregate_mode == 2:
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            # Create table
            table_actor = self.gym.create_actor(env_ptr, table_asset, table_start_pose, "table", i, 1, 0)
            table_stand_actor = self.gym.create_actor(env_ptr, table_stand_asset, table_stand_start_pose, "table_stand",
                                                      i, 1, 0)

            if self.aggregate_mode == 1:
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            # Create cubes
            self._cubeA_id = self.gym.create_actor(env_ptr, cubeA_asset, cubeA_start_pose, "cubeA", i, 2, 0)
            self._cubeB_id = self.gym.create_actor(env_ptr, cubeB_asset, cubeB_start_pose, "cubeB", i, 4, 0)
            # Set colors
            self.gym.set_rigid_body_color(env_ptr, self._cubeA_id, 0, gymapi.MESH_VISUAL, cubeA_color)
            self.gym.set_rigid_body_color(env_ptr, self._cubeB_id, 0, gymapi.MESH_VISUAL, cubeB_color)

            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)

            # Store the created env pointers
            self.envs.append(env_ptr)
            self.frankas.append(franka_actor)

        # Setup init state buffer (13 = pos 3 + quat 4 + lin vel 3 + ang vel 3)
        self._init_cubeA_state = torch.zeros(self.num_envs, 13, device=self.device)
        self._init_cubeB_state = torch.zeros(self.num_envs, 13, device=self.device)

        # Setup data
        self.init_data()

    def init_data(self):
        """Cache sim handles and wrap simulation state tensors into PyTorch views."""
        # Setup sim handles
        env_ptr = self.envs[0]
        franka_handle = 0
        self.handles = {
            # Franka
            "hand": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_hand"),
            "leftfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_leftfinger_tip"),
            "rightfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_rightfinger_tip"),
            "grip_site": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_grip_site"),
            # Cubes
            "cubeA_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeA_id, "box"),
            "cubeB_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeB_id, "box"),
        }

        # Get total DOFs
        self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs

        # Setup tensor buffers (views into the sim's flat state tensors)
        _actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        _dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        _rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
        self._root_state = gymtorch.wrap_tensor(_actor_root_state_tensor).view(self.num_envs, -1, 13)
        self._dof_state = gymtorch.wrap_tensor(_dof_state_tensor).view(self.num_envs, -1, 2)
        self._rigid_body_state = gymtorch.wrap_tensor(_rigid_body_state_tensor).view(self.num_envs, -1, 13)
        self._q = self._dof_state[..., 0]
        self._qd = self._dof_state[..., 1]
        self._eef_state = self._rigid_body_state[:, self.handles["grip_site"], :]
        self._eef_lf_state = self._rigid_body_state[:, self.handles["leftfinger_tip"], :]
        self._eef_rf_state = self._rigid_body_state[:, self.handles["rightfinger_tip"], :]
        _jacobian = self.gym.acquire_jacobian_tensor(self.sim, "franka")
        jacobian = gymtorch.wrap_tensor(_jacobian)
        hand_joint_index = self.gym.get_actor_joint_dict(env_ptr, franka_handle)['panda_hand_joint']
        self._j_eef = jacobian[:, hand_joint_index, :, :7]
        _massmatrix = self.gym.acquire_mass_matrix_tensor(self.sim, "franka")
        mm = gymtorch.wrap_tensor(_massmatrix)
        self._mm = mm[:, :7, :7]
        self._cubeA_state = self._root_state[:, self._cubeA_id, :]
        self._cubeB_state = self._root_state[:, self._cubeB_id, :]

        # Initialize states
        self.states.update({
            "cubeA_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeA_size,
            "cubeB_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeB_size,
        })

        # Initialize actions
        self._pos_control = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
        self._effort_control = torch.zeros_like(self._pos_control)

        # Initialize control (views into the action buffers)
        self._arm_control = self._effort_control[:, :7]
        self._gripper_control = self._pos_control[:, 7:9]

        # Initialize indices (5 actors per env: franka, table, stand, cubeA, cubeB)
        self._global_indices = torch.arange(self.num_envs * 5, dtype=torch.int32,
                                            device=self.device).view(self.num_envs, -1)

    def _update_states(self):
        """Refresh the derived state dict used by observations and the reward."""
        self.states.update({
            # Franka
            "q": self._q[:, :],
            "q_gripper": self._q[:, -2:],
            "eef_pos": self._eef_state[:, :3],
            "eef_quat": self._eef_state[:, 3:7],
            "eef_vel": self._eef_state[:, 7:],
            "eef_lf_pos": self._eef_lf_state[:, :3],
            "eef_rf_pos": self._eef_rf_state[:, :3],
            # Cubes
            "cubeA_quat": self._cubeA_state[:, 3:7],
            "cubeA_pos": self._cubeA_state[:, :3],
            "cubeA_pos_relative": self._cubeA_state[:, :3] - self._eef_state[:, :3],
            "cubeB_quat": self._cubeB_state[:, 3:7],
            "cubeB_pos": self._cubeB_state[:, :3],
            "cubeA_to_cubeB_pos": self._cubeB_state[:, :3] - self._cubeA_state[:, :3],
        })

    def _refresh(self):
        """Pull the latest sim tensors and rebuild the state dict."""
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        self.gym.refresh_jacobian_tensors(self.sim)
        self.gym.refresh_mass_matrix_tensors(self.sim)

        # Refresh states
        self._update_states()

    def compute_reward(self, actions):
        """Populate rew_buf / reset_buf from the current task state."""
        self.rew_buf[:], self.reset_buf[:] = compute_franka_reward(
            self.reset_buf, self.progress_buf, self.actions, self.states, self.reward_settings, self.max_episode_length
        )

    def compute_observations(self):
        """Refresh state tensors and assemble the observation vector."""
        self._refresh()
        obs = ["cubeA_quat", "cubeA_pos", "cubeA_to_cubeB_pos", "eef_pos", "eef_quat"]
        obs += ["q_gripper"] if self.control_type == "osc" else ["q"]
        self.obs_buf = torch.cat([self.states[ob] for ob in obs], dim=-1)
        # NOTE: removed an unused per-step dict of per-key maxima (dead code that
        # ran a torch.max over every obs component each step).
        return self.obs_buf

    def reset_idx(self, env_ids):
        """Re-sample cube poses and franka DOF state for the given envs."""
        # Reset cubes, sampling cube B first, then A
        self._reset_init_cube_state(cube='B', env_ids=env_ids, check_valid=False)
        self._reset_init_cube_state(cube='A', env_ids=env_ids, check_valid=True)

        # Write these new init states to the sim states
        self._cubeA_state[env_ids] = self._init_cubeA_state[env_ids]
        self._cubeB_state[env_ids] = self._init_cubeB_state[env_ids]

        # Reset agent
        reset_noise = torch.rand((len(env_ids), 9), device=self.device)
        pos = tensor_clamp(
            self.franka_default_dof_pos.unsqueeze(0) +
            self.franka_dof_noise * 2.0 * (reset_noise - 0.5),
            self.franka_dof_lower_limits.unsqueeze(0), self.franka_dof_upper_limits)

        # Overwrite gripper init pos (no noise since these are always position controlled)
        pos[:, -2:] = self.franka_default_dof_pos[-2:]

        # Reset the internal obs accordingly
        self._q[env_ids, :] = pos
        self._qd[env_ids, :] = torch.zeros_like(self._qd[env_ids])

        # Set any position control to the current position, and any vel / effort control to be 0
        # NOTE: Task takes care of actually propagating these controls in sim using the SimActions API
        self._pos_control[env_ids, :] = pos
        self._effort_control[env_ids, :] = torch.zeros_like(pos)

        # Deploy updates (franka is actor 0 of each env)
        multi_env_ids_int32 = self._global_indices[env_ids, 0].flatten()
        self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                        gymtorch.unwrap_tensor(self._pos_control),
                                                        gymtorch.unwrap_tensor(multi_env_ids_int32),
                                                        len(multi_env_ids_int32))
        self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
                                                        gymtorch.unwrap_tensor(self._effort_control),
                                                        gymtorch.unwrap_tensor(multi_env_ids_int32),
                                                        len(multi_env_ids_int32))
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self._dof_state),
                                              gymtorch.unwrap_tensor(multi_env_ids_int32),
                                              len(multi_env_ids_int32))

        # Update cube states (cubes are the last two actors of each env)
        multi_env_ids_cubes_int32 = self._global_indices[env_ids, -2:].flatten()
        self.gym.set_actor_root_state_tensor_indexed(
            self.sim, gymtorch.unwrap_tensor(self._root_state),
            gymtorch.unwrap_tensor(multi_env_ids_cubes_int32), len(multi_env_ids_cubes_int32))

        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0

    def _reset_init_cube_state(self, cube, env_ids, check_valid=True):
        """
        Simple method to sample @cube's position based on self.startPositionNoise and self.startRotationNoise, and
        automatically reset the pose internally. Populates the appropriate self._init_cubeX_state
        If @check_valid is True, then this will also make sure that the sampled position is not in contact with the
        other cube.
        Args:
            cube(str): Which cube to sample location for. Either 'A' or 'B'
            env_ids (tensor or None): Specific environments to reset cube for
            check_valid (bool): Whether to make sure sampled position is collision-free with the other cube.
        """
        # If env_ids is None, we reset all the envs
        if env_ids is None:
            env_ids = torch.arange(start=0, end=self.num_envs, device=self.device, dtype=torch.long)

        # Initialize buffer to hold sampled values
        num_resets = len(env_ids)
        sampled_cube_state = torch.zeros(num_resets, 13, device=self.device)

        # Get correct references depending on which one was selected
        if cube.lower() == 'a':
            this_cube_state_all = self._init_cubeA_state
            other_cube_state = self._init_cubeB_state[env_ids, :]
            cube_heights = self.states["cubeA_size"]
        elif cube.lower() == 'b':
            this_cube_state_all = self._init_cubeB_state
            other_cube_state = self._init_cubeA_state[env_ids, :]
            # FIX: previously used cubeA_size here, which spawned cube B 1 cm too
            # low (intersecting the table) since cube B is 0.07 m vs cube A's 0.05 m
            cube_heights = self.states["cubeB_size"]
        else:
            raise ValueError(f"Invalid cube specified, options are 'A' and 'B'; got: {cube}")

        # Minimum cube distance for guaranteed collision-free sampling is the sum of each cube's effective radius
        min_dists = (self.states["cubeA_size"] + self.states["cubeB_size"])[env_ids] * np.sqrt(2) / 2.0

        # We scale the min dist by 2 so that the cubes aren't too close together
        min_dists = min_dists * 2.0

        # Sampling is "centered" around middle of table
        centered_cube_xy_state = torch.tensor(self._table_surface_pos[:2], device=self.device, dtype=torch.float32)

        # Set z value, which is fixed height
        sampled_cube_state[:, 2] = self._table_surface_pos[2] + cube_heights.squeeze(-1)[env_ids] / 2

        # Initialize rotation, which is no rotation (quat w = 1)
        sampled_cube_state[:, 6] = 1.0

        # If we're verifying valid sampling, we need to check and re-sample if any are not collision-free
        # We use a simple heuristic of checking based on cubes' radius to determine if a collision would occur
        if check_valid:
            success = False
            # Indexes corresponding to envs we're still actively sampling for
            active_idx = torch.arange(num_resets, device=self.device)
            num_active_idx = len(active_idx)
            for i in range(100):
                # Sample x y values
                sampled_cube_state[active_idx, :2] = centered_cube_xy_state + \
                    2.0 * self.start_position_noise * (
                        torch.rand_like(sampled_cube_state[active_idx, :2]) - 0.5)
                # Check if sampled values are valid
                cube_dist = torch.linalg.norm(sampled_cube_state[:, :2] - other_cube_state[:, :2], dim=-1)
                active_idx = torch.nonzero(cube_dist < min_dists, as_tuple=True)[0]
                num_active_idx = len(active_idx)
                # If active idx is empty, then all sampling is valid :D
                if num_active_idx == 0:
                    success = True
                    break
            # Make sure we succeeded at sampling
            assert success, "Sampling cube locations was unsuccessful! ):"
        else:
            # We just directly sample
            sampled_cube_state[:, :2] = centered_cube_xy_state.unsqueeze(0) + \
                2.0 * self.start_position_noise * (
                    torch.rand(num_resets, 2, device=self.device) - 0.5)

        # Sample rotation value
        if self.start_rotation_noise > 0:
            aa_rot = torch.zeros(num_resets, 3, device=self.device)
            aa_rot[:, 2] = 2.0 * self.start_rotation_noise * (torch.rand(num_resets, device=self.device) - 0.5)
            sampled_cube_state[:, 3:7] = quat_mul(axisangle2quat(aa_rot), sampled_cube_state[:, 3:7])

        # Lastly, set these sampled values as the new init state
        this_cube_state_all[env_ids, :] = sampled_cube_state

    def _compute_osc_torques(self, dpose):
        """Map a delta-EEF-pose command to joint torques via Operational Space Control.

        Paper: khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf
        Helpful resource: studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/
        """
        q, qd = self._q[:, :7], self._qd[:, :7]
        mm_inv = torch.inverse(self._mm)
        m_eef_inv = self._j_eef @ mm_inv @ torch.transpose(self._j_eef, 1, 2)
        m_eef = torch.inverse(m_eef_inv)

        # Transform our cartesian action `dpose` into joint torques `u`
        u = torch.transpose(self._j_eef, 1, 2) @ m_eef @ (
            self.kp * dpose - self.kd * self.states["eef_vel"]).unsqueeze(-1)

        # Nullspace control torques `u_null` prevents large changes in joint configuration
        # They are added into the nullspace of OSC so that the end effector orientation remains constant
        # roboticsproceedings.org/rss07/p31.pdf
        j_eef_inv = m_eef @ self._j_eef @ mm_inv
        # Wrap joint error into (-pi, pi] so the nullspace term takes the short way around
        u_null = self.kd_null * -qd + self.kp_null * (
            (self.franka_default_dof_pos[:7] - q + np.pi) % (2 * np.pi) - np.pi)
        u_null[:, 7:] *= 0
        u_null = self._mm @ u_null.unsqueeze(-1)
        u += (torch.eye(7, device=self.device).unsqueeze(0) - torch.transpose(self._j_eef, 1, 2) @ j_eef_inv) @ u_null

        # Clip the values to be within valid effort range
        u = tensor_clamp(u.squeeze(-1),
                         -self._franka_effort_limits[:7].unsqueeze(0), self._franka_effort_limits[:7].unsqueeze(0))

        return u

    def pre_physics_step(self, actions):
        """Scale and deploy the policy's arm + gripper actions for this physics step."""
        self.actions = actions.clone().to(self.device)

        # Split arm and gripper command
        u_arm, u_gripper = self.actions[:, :-1], self.actions[:, -1]

        # Control arm (scale value first)
        u_arm = u_arm * self.cmd_limit / self.action_scale
        if self.control_type == "osc":
            u_arm = self._compute_osc_torques(dpose=u_arm)
        self._arm_control[:, :] = u_arm

        # Control gripper: sign of the last action selects fully open vs fully closed
        u_fingers = torch.zeros_like(self._gripper_control)
        u_fingers[:, 0] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-2].item(),
                                      self.franka_dof_lower_limits[-2].item())
        u_fingers[:, 1] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-1].item(),
                                      self.franka_dof_lower_limits[-1].item())
        # Write gripper command to appropriate tensor buffer
        self._gripper_control[:, :] = u_fingers

        # Deploy actions
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self._pos_control))
        self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(self._effort_control))

    def post_physics_step(self):
        """Advance counters, reset finished envs, refresh obs/reward, and optionally draw debug axes."""
        self.progress_buf += 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.compute_observations()
        self.compute_reward(self.actions)

        # debug viz: draw body-frame axes for the EEF and both cubes
        if self.viewer and self.debug_viz:
            self.gym.clear_lines(self.viewer)
            self.gym.refresh_rigid_body_state_tensor(self.sim)

            # Grab relevant states to visualize
            eef_pos = self.states["eef_pos"]
            eef_rot = self.states["eef_quat"]
            cubeA_pos = self.states["cubeA_pos"]
            cubeA_rot = self.states["cubeA_quat"]
            cubeB_pos = self.states["cubeB_pos"]
            cubeB_rot = self.states["cubeB_quat"]

            # Plot visualizations
            for i in range(self.num_envs):
                for pos, rot in zip((eef_pos, cubeA_pos, cubeB_pos), (eef_rot, cubeA_rot, cubeB_rot)):
                    px = (pos[i] + quat_apply(rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                    py = (pos[i] + quat_apply(rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                    pz = (pos[i] + quat_apply(rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

                    p0 = pos[i].cpu().numpy()
                    self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1])
                    self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1])
                    self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85])
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_franka_reward(
    reset_buf, progress_buf, actions, states, reward_settings, max_episode_length
):
    # type: (Tensor, Tensor, Tensor, Dict[str, Tensor], Dict[str, float], float) -> Tuple[Tensor, Tensor]
    # Per-env cube dimensions; the stacked configuration puts cubeA's center at
    # cubeB's full height plus half of cubeA's height above the table.
    size_a = states["cubeA_size"]
    size_b = states["cubeB_size"]
    target_height = size_b + size_a / 2.0

    # Reaching term: mean of hand and both fingertip distances to cubeA,
    # squashed into (0, 1] with tanh.
    dist_hand = torch.norm(states["cubeA_pos_relative"], dim=-1)
    dist_lf = torch.norm(states["cubeA_pos"] - states["eef_lf_pos"], dim=-1)
    dist_rf = torch.norm(states["cubeA_pos"] - states["eef_rf_pos"], dim=-1)
    dist_reward = 1 - torch.tanh(10.0 * (dist_hand + dist_lf + dist_rf) / 3)

    # Lifting term: binary bonus once cubeA's bottom clears the table by a margin.
    height_a = states["cubeA_pos"][:, 2] - reward_settings["table_height"]
    lifted = (height_a - size_a) > 0.04
    lift_reward = lifted

    # Alignment term: pull cubeA toward a point directly above cubeB's top face;
    # only granted while cubeA is lifted.
    offset = torch.zeros_like(states["cubeA_to_cubeB_pos"])
    offset[:, 2] = (size_a + size_b) / 2
    dist_ab = torch.norm(states["cubeA_to_cubeB_pos"] + offset, dim=-1)
    align_reward = (1 - torch.tanh(10.0 * dist_ab)) * lifted

    # Reaching and aligning share one slot: keep whichever is larger.
    dist_reward = torch.max(dist_reward, align_reward)

    # Success: cubeA centered over cubeB in xy, resting at the target height,
    # and the gripper withdrawn (not still grasping).
    aligned_xy = torch.norm(states["cubeA_to_cubeB_pos"][:, :2], dim=-1) < 0.02
    on_top = torch.abs(height_a - target_height) < 0.02
    hand_clear = dist_hand > 0.04
    stack_reward = aligned_xy & on_top & hand_clear

    # Sparse stacking bonus replaces the dense shaping terms once achieved.
    rewards = torch.where(
        stack_reward,
        reward_settings["r_stack_scale"] * stack_reward,
        reward_settings["r_dist_scale"] * dist_reward
        + reward_settings["r_lift_scale"] * lift_reward
        + reward_settings["r_align_scale"] * align_reward,
    )

    # Episodes terminate on success or on timeout.
    reset_buf = torch.where(
        (progress_buf >= max_episode_length - 1) | (stack_reward > 0),
        torch.ones_like(reset_buf),
        reset_buf,
    )
    return rewards, reset_buf
| 37,426 | Python | 49.036096 | 217 | 0.595816 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/quadcopter.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgym import gymutil, gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import *
from .base.vec_task import VecTask
class Quadcopter(VecTask):
    """Vectorized quadcopter hover task.

    Builds a quadcopter procedurally as an MJCF file (``quadcopter.xml``): a
    cylindrical chassis with four rotor arms, each carrying a pitch and a roll
    hinge joint (8 DOFs total) plus a rotor body. Actions are 8 rotor DOF
    position targets and 4 thrust magnitudes; the reward (see
    ``compute_quadcopter_reward``) drives the copter to hover at (0, 0, 1) in
    each environment's local frame.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Create the task, wrap the simulation state tensors, and allocate control buffers."""
        self.cfg = cfg

        self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
        self.debug_viz = self.cfg["env"]["enableDebugVis"]

        # 4 rotor arms x 2 hinge joints (pitch + roll) each.
        dofs_per_env = 8
        # chassis + 4 x (rotor arm + rotor).
        bodies_per_env = 9

        # Observations:
        # 0:13 - root state
        # 13:21 - DOF positions (see compute_observations; DOF velocities are not observed)
        num_obs = 21

        # Actions:
        # 0:8 - rotor DOF position targets
        # 8:12 - rotor thrust magnitudes
        num_acts = 12

        self.cfg["env"]["numObservations"] = num_obs
        self.cfg["env"]["numActions"] = num_acts

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)

        # Torch views over the simulator's buffers; the slices below alias the
        # same memory, so in-place writes propagate back to the simulation.
        vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 13)
        vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)

        self.root_states = vec_root_tensor
        self.root_positions = vec_root_tensor[..., 0:3]
        self.root_quats = vec_root_tensor[..., 3:7]
        self.root_linvels = vec_root_tensor[..., 7:10]
        self.root_angvels = vec_root_tensor[..., 10:13]

        self.dof_states = vec_dof_tensor
        self.dof_positions = vec_dof_tensor[..., 0]
        self.dof_velocities = vec_dof_tensor[..., 1]

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)

        # Snapshot of the freshly created state, used as the reset baseline.
        self.initial_root_states = vec_root_tensor.clone()
        self.initial_dof_states = vec_dof_tensor.clone()

        # Per-rotor thrust is clamped to [0, max_thrust] in pre_physics_step.
        max_thrust = 2
        self.thrust_lower_limits = torch.zeros(4, device=self.device, dtype=torch.float32)
        self.thrust_upper_limits = max_thrust * torch.ones(4, device=self.device, dtype=torch.float32)

        # control tensors
        self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False)
        self.thrusts = torch.zeros((self.num_envs, 4), dtype=torch.float32, device=self.device, requires_grad=False)
        self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False)

        # One actor per env, so actor indices are just 0..num_envs-1.
        self.all_actor_indices = torch.arange(self.num_envs, dtype=torch.int32, device=self.device)

        if self.viewer:
            cam_pos = gymapi.Vec3(1.0, 1.0, 1.8)
            cam_target = gymapi.Vec3(2.2, 2.0, 1.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

            # need rigid body states for visualizing thrusts
            self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
            self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13)
            self.rb_positions = self.rb_states[..., 0:3]
            self.rb_quats = self.rb_states[..., 3:7]

    def create_sim(self):
        """Configure gravity/up-axis, create the sim, and populate it with assets and envs."""
        self.sim_params.up_axis = gymapi.UP_AXIS_Z
        self.sim_params.gravity.x = 0
        self.sim_params.gravity.y = 0
        self.sim_params.gravity.z = -9.81
        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self.dt = self.sim_params.dt
        # The MJCF asset must exist on disk before _create_envs loads it.
        self._create_quadcopter_asset()
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

    def _create_quadcopter_asset(self):
        """Procedurally generate the quadcopter MJCF and write it to ./quadcopter.xml.

        The model is a cylinder chassis with a free root joint and four rotor
        arms placed at 45/135/225/315 degrees; each arm carries a pitch hinge
        and its rotor body carries a roll hinge (both limited to +/-30 deg).
        """
        chassis_radius = 0.1
        chassis_thickness = 0.03
        rotor_radius = 0.04
        rotor_thickness = 0.01
        rotor_arm_radius = 0.01

        root = ET.Element('mujoco')
        root.attrib["model"] = "Quadcopter"
        compiler = ET.SubElement(root, "compiler")
        compiler.attrib["angle"] = "degree"
        compiler.attrib["coordinate"] = "local"
        compiler.attrib["inertiafromgeom"] = "true"
        worldbody = ET.SubElement(root, "worldbody")

        chassis = ET.SubElement(worldbody, "body")
        chassis.attrib["name"] = "chassis"
        chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0)
        chassis_geom = ET.SubElement(chassis, "geom")
        chassis_geom.attrib["type"] = "cylinder"
        chassis_geom.attrib["size"] = "%g %g" % (chassis_radius, 0.5 * chassis_thickness)
        chassis_geom.attrib["pos"] = "0 0 0"
        chassis_geom.attrib["density"] = "50"
        chassis_joint = ET.SubElement(chassis, "joint")
        chassis_joint.attrib["name"] = "root_joint"
        chassis_joint.attrib["type"] = "free"

        zaxis = gymapi.Vec3(0, 0, 1)
        rotor_arm_offset = gymapi.Vec3(chassis_radius + 0.25 * rotor_arm_radius, 0, 0)
        pitch_joint_offset = gymapi.Vec3(0, 0, 0)
        rotor_offset = gymapi.Vec3(rotor_radius + 0.25 * rotor_arm_radius, 0, 0)

        # Arms at the four diagonal directions.
        rotor_angles = [0.25 * math.pi, 0.75 * math.pi, 1.25 * math.pi, 1.75 * math.pi]
        for i in range(len(rotor_angles)):
            angle = rotor_angles[i]

            rotor_arm_quat = gymapi.Quat.from_axis_angle(zaxis, angle)
            rotor_arm_pos = rotor_arm_quat.rotate(rotor_arm_offset)
            pitch_joint_pos = pitch_joint_offset
            rotor_pos = rotor_offset
            rotor_quat = gymapi.Quat()

            rotor_arm = ET.SubElement(chassis, "body")
            rotor_arm.attrib["name"] = "rotor_arm" + str(i)
            rotor_arm.attrib["pos"] = "%g %g %g" % (rotor_arm_pos.x, rotor_arm_pos.y, rotor_arm_pos.z)
            rotor_arm.attrib["quat"] = "%g %g %g %g" % (rotor_arm_quat.w, rotor_arm_quat.x, rotor_arm_quat.y, rotor_arm_quat.z)
            rotor_arm_geom = ET.SubElement(rotor_arm, "geom")
            rotor_arm_geom.attrib["type"] = "sphere"
            rotor_arm_geom.attrib["size"] = "%g" % rotor_arm_radius
            rotor_arm_geom.attrib["density"] = "200"

            pitch_joint = ET.SubElement(rotor_arm, "joint")
            pitch_joint.attrib["name"] = "rotor_pitch" + str(i)
            pitch_joint.attrib["type"] = "hinge"
            pitch_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
            pitch_joint.attrib["axis"] = "0 1 0"
            pitch_joint.attrib["limited"] = "true"
            pitch_joint.attrib["range"] = "-30 30"

            rotor = ET.SubElement(rotor_arm, "body")
            rotor.attrib["name"] = "rotor" + str(i)
            rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
            rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)
            rotor_geom = ET.SubElement(rotor, "geom")
            rotor_geom.attrib["type"] = "cylinder"
            rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness)
            #rotor_geom.attrib["type"] = "box"
            #rotor_geom.attrib["size"] = "%g %g %g" % (rotor_radius, rotor_radius, 0.5 * rotor_thickness)
            rotor_geom.attrib["density"] = "1000"

            roll_joint = ET.SubElement(rotor, "joint")
            roll_joint.attrib["name"] = "rotor_roll" + str(i)
            roll_joint.attrib["type"] = "hinge"
            roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)
            roll_joint.attrib["axis"] = "1 0 0"
            roll_joint.attrib["limited"] = "true"
            roll_joint.attrib["range"] = "-30 30"

        gymutil._indent_xml(root)
        ET.ElementTree(root).write("quadcopter.xml")

    def _create_ground_plane(self):
        """Add a flat ground plane with +Z as the up direction."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the generated asset, create one quadcopter actor per env, and color its bodies.

        Also caches per-env origins for rotor debug visualization when enabled.
        """
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        # Asset written by _create_quadcopter_asset into the working directory.
        asset_root = "."
        asset_file = "quadcopter.xml"

        asset_options = gymapi.AssetOptions()
        asset_options.fix_base_link = False
        asset_options.angular_damping = 0.0
        asset_options.max_angular_velocity = 4 * math.pi
        asset_options.slices_per_cylinder = 40
        asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)

        self.num_dofs = self.gym.get_asset_dof_count(asset)

        dof_props = self.gym.get_asset_dof_properties(asset)
        self.dof_lower_limits = []
        self.dof_upper_limits = []
        for i in range(self.num_dofs):
            self.dof_lower_limits.append(dof_props['lower'][i])
            self.dof_upper_limits.append(dof_props['upper'][i])

        self.dof_lower_limits = to_torch(self.dof_lower_limits, device=self.device)
        self.dof_upper_limits = to_torch(self.dof_upper_limits, device=self.device)
        self.dof_ranges = self.dof_upper_limits - self.dof_lower_limits

        default_pose = gymapi.Transform()
        default_pose.p.z = 1.0

        self.envs = []
        for i in range(self.num_envs):
            # create env instance
            env = self.gym.create_env(self.sim, lower, upper, num_per_row)
            actor_handle = self.gym.create_actor(env, asset, default_pose, "quadcopter", i, 1, 0)

            # Position-drive all rotor joints; stiffness is the PD gain used by
            # set_dof_position_target_tensor in pre_physics_step.
            dof_props = self.gym.get_actor_dof_properties(env, actor_handle)
            dof_props['driveMode'].fill(gymapi.DOF_MODE_POS)
            dof_props['stiffness'].fill(1000.0)
            dof_props['damping'].fill(0.0)
            self.gym.set_actor_dof_properties(env, actor_handle, dof_props)

            # pretty colors
            # Body index layout: 0 = chassis, odd = rotor arms, even (>0) = rotors.
            chassis_color = gymapi.Vec3(0.8, 0.6, 0.2)
            rotor_color = gymapi.Vec3(0.1, 0.2, 0.6)
            arm_color = gymapi.Vec3(0.0, 0.0, 0.0)
            self.gym.set_rigid_body_color(env, actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, chassis_color)
            self.gym.set_rigid_body_color(env, actor_handle, 1, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
            self.gym.set_rigid_body_color(env, actor_handle, 3, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
            self.gym.set_rigid_body_color(env, actor_handle, 5, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
            self.gym.set_rigid_body_color(env, actor_handle, 7, gymapi.MESH_VISUAL_AND_COLLISION, arm_color)
            self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
            self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
            self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
            self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color)
            #self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0))
            #self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 1, 0))
            #self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 0, 1))
            #self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0))

            self.envs.append(env)

        if self.debug_viz:
            # need env offsets for the rotors
            self.rotor_env_offsets = torch.zeros((self.num_envs, 4, 3), device=self.device)
            for i in range(self.num_envs):
                env_origin = self.gym.get_env_origin(self.envs[i])
                self.rotor_env_offsets[i, ..., 0] = env_origin.x
                self.rotor_env_offsets[i, ..., 1] = env_origin.y
                self.rotor_env_offsets[i, ..., 2] = env_origin.z

    def reset_idx(self, env_ids):
        """Reset the given environments to their initial state with a randomized root position."""
        num_resets = len(env_ids)

        self.dof_states[env_ids] = self.initial_dof_states[env_ids]

        actor_indices = self.all_actor_indices[env_ids].flatten()

        # Start from the initial pose and jitter x/y by +/-1.5 m, z by (-0.2, 1.5) m.
        self.root_states[env_ids] = self.initial_root_states[env_ids]
        self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
        self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
        self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten()
        # root_states aliases the buffer behind self.root_tensor, so pushing the
        # raw handle with the selected indices applies the writes above.
        self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)

        self.dof_positions[env_ids] = torch_rand_float(-0.2, 0.2, (num_resets, 8), self.device)
        self.dof_velocities[env_ids] = 0.0
        self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def pre_physics_step(self, _actions):
        """Integrate actions into DOF targets and per-rotor thrusts, then apply them.

        Actions are rates: they are scaled by dt and accumulated into the
        current targets/thrusts rather than interpreted as absolute commands.
        """
        # resets
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)

        actions = _actions.to(self.device)

        # First 8 action channels move the rotor joint targets, clamped to limits.
        dof_action_speed_scale = 8 * math.pi
        self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]
        self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits)

        # Last 4 channels adjust thrust magnitudes, clamped to [0, max_thrust].
        thrust_action_speed_scale = 200
        self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]
        self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)

        # Thrust acts along local z of each rotor body (bodies 2, 4, 6, 8).
        self.forces[:, 2, 2] = self.thrusts[:, 0]
        self.forces[:, 4, 2] = self.thrusts[:, 1]
        self.forces[:, 6, 2] = self.thrusts[:, 2]
        self.forces[:, 8, 2] = self.thrusts[:, 3]

        # clear actions for reset envs
        self.thrusts[reset_env_ids] = 0.0
        self.forces[reset_env_ids] = 0.0
        self.dof_position_targets[reset_env_ids] = self.dof_positions[reset_env_ids]

        # apply actions
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets))
        self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE)

    def post_physics_step(self):
        """Refresh state tensors, compute observations/rewards, and optionally draw thrust lines."""
        self.progress_buf += 1

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)

        self.compute_observations()
        self.compute_reward()

        # debug viz
        if self.viewer and self.debug_viz:
            # compute start and end positions for visualizing thrust lines
            self.gym.refresh_rigid_body_state_tensor(self.sim)
            rotor_indices = torch.LongTensor([2, 4, 6, 8])
            quats = self.rb_quats[:, rotor_indices]
            # Lines point opposite the rotor's local z (against the thrust direction).
            dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3)
            starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets
            ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs

            # submit debug line geometry
            verts = torch.stack([starts, ends], dim=2).cpu().numpy()
            colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32)
            colors[..., 0] = 1.0
            self.gym.clear_lines(self.viewer)
            self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors)

    def compute_observations(self):
        """Fill obs_buf: normalized offset to the hover target, root quat,
        scaled linear/angular velocities, and the 8 DOF positions."""
        target_x = 0.0
        target_y = 0.0
        target_z = 1.0
        self.obs_buf[..., 0] = (target_x - self.root_positions[..., 0]) / 3
        self.obs_buf[..., 1] = (target_y - self.root_positions[..., 1]) / 3
        self.obs_buf[..., 2] = (target_z - self.root_positions[..., 2]) / 3
        self.obs_buf[..., 3:7] = self.root_quats
        self.obs_buf[..., 7:10] = self.root_linvels / 2
        self.obs_buf[..., 10:13] = self.root_angvels / math.pi
        self.obs_buf[..., 13:21] = self.dof_positions
        return self.obs_buf

    def compute_reward(self):
        """Compute rewards and reset flags via the jitted helper, writing them in place."""
        self.rew_buf[:], self.reset_buf[:] = compute_quadcopter_reward(
            self.root_positions,
            self.root_quats,
            self.root_linvels,
            self.root_angvels,
            self.reset_buf, self.progress_buf, self.max_episode_length
        )
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_quadcopter_reward(root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
    # Distance to the hover target at (0, 0, 1), rewarded with an
    # inverse-quadratic falloff in [0, 1].
    dx = root_positions[..., 0]
    dy = root_positions[..., 1]
    dz = 1 - root_positions[..., 2]
    target_dist = torch.sqrt(dx * dx + dy * dy + dz * dz)
    pos_reward = 1.0 / (1.0 + target_dist * target_dist)

    # Uprightness: penalize how far the body z-axis leans away from world up.
    ups = quat_axis(root_quats, 2)
    tiltage = torch.abs(1 - ups[..., 2])
    up_reward = 1.0 / (1.0 + tiltage * tiltage)

    # Spin: penalize angular velocity about the vertical axis.
    spinnage = torch.abs(root_angvels[..., 2])
    spinnage_reward = 1.0 / (1.0 + spinnage * spinnage)

    # Uprightness and spin shaping only matter near the target, so both are
    # gated by the position reward.
    reward = pos_reward + pos_reward * (up_reward + spinnage_reward)

    # Terminate on misbehavior (too far away or too low) or on timeout.
    ones = torch.ones_like(reset_buf)
    die = torch.zeros_like(reset_buf)
    die = torch.where(target_dist > 3.0, ones, die)
    die = torch.where(root_positions[..., 2] < 0.3, ones, die)
    reset = torch.where(progress_buf >= max_episode_length - 1, ones, die)

    return reward, reset
| 19,725 | Python | 46.078759 | 217 | 0.61308 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ingenuity.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import os
import torch
import xml.etree.ElementTree as ET
from isaacgymenvs.utils.torch_jit_utils import *
from .base.vec_task import VecTask
from isaacgym import gymutil, gymtorch, gymapi
class Ingenuity(VecTask):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Create the Ingenuity task, wrap the simulation state tensors, and allocate control buffers."""
        self.cfg = cfg

        self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
        self.debug_viz = self.cfg["env"]["enableDebugVis"]

        # Observations:
        # 0:13 - root state
        self.cfg["env"]["numObservations"] = 13

        # Actions:
        # 0:3 - xyz force vector for lower rotor
        # 4:6 - xyz force vector for upper rotor
        self.cfg["env"]["numActions"] = 6

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        # 2 rotors x 2 hinge joints each (physics + visual bodies; see _create_ingenuity_asset).
        dofs_per_env = 4
        bodies_per_env = 6

        self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)

        # Each env has 2 actors (copter + target marker); the slices below alias
        # the simulator's buffers, so in-place writes propagate back to the sim.
        vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 2, 13)
        vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2)

        self.root_states = vec_root_tensor[:, 0, :]
        self.root_positions = self.root_states[:, 0:3]
        # Hover targets start directly above each env origin at z = 1.
        self.target_root_positions = torch.zeros((self.num_envs, 3), device=self.device, dtype=torch.float32)
        self.target_root_positions[:, 2] = 1
        self.root_quats = self.root_states[:, 3:7]
        self.root_linvels = self.root_states[:, 7:10]
        self.root_angvels = self.root_states[:, 10:13]

        self.marker_states = vec_root_tensor[:, 1, :]
        self.marker_positions = self.marker_states[:, 0:3]

        self.dof_states = vec_dof_tensor
        self.dof_positions = vec_dof_tensor[..., 0]
        self.dof_velocities = vec_dof_tensor[..., 1]

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)

        # Snapshot of the freshly created state, used as the reset baseline.
        self.initial_root_states = self.root_states.clone()
        self.initial_dof_states = self.dof_states.clone()

        # Thrust clamping parameters used in pre_physics_step.
        self.thrust_lower_limit = 0
        self.thrust_upper_limit = 2000
        self.thrust_lateral_component = 0.2

        # control tensors
        self.thrusts = torch.zeros((self.num_envs, 2, 3), dtype=torch.float32, device=self.device, requires_grad=False)
        self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False)

        # Column 0: copter actor indices; column 1: marker actor indices.
        self.all_actor_indices = torch.arange(self.num_envs * 2, dtype=torch.int32, device=self.device).reshape((self.num_envs, 2))

        if self.viewer:
            cam_pos = gymapi.Vec3(2.25, 2.25, 3.0)
            cam_target = gymapi.Vec3(3.5, 4.0, 1.9)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

            # need rigid body states for visualizing thrusts
            self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
            self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13)
            self.rb_positions = self.rb_states[..., 0:3]
            self.rb_quats = self.rb_states[..., 3:7]
    def create_sim(self):
        """Configure up-axis and Martian gravity, create the sim, and populate it."""
        self.sim_params.up_axis = gymapi.UP_AXIS_Z

        # Mars gravity
        self.sim_params.gravity.x = 0
        self.sim_params.gravity.y = 0
        self.sim_params.gravity.z = -3.721

        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self.dt = self.sim_params.dt

        # The MJCF asset must exist on disk before _create_envs loads it.
        self._create_ingenuity_asset()
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
    def _create_ingenuity_asset(self):
        """Procedurally generate the Ingenuity MJCF and write it to ./ingenuity.xml.

        The model is a box chassis (with a decorative mesh) carrying, per rotor,
        a collidable "physics" cylinder body with a locked hinge and a
        non-colliding "visual" mesh body with a free-spinning hinge. The visual
        rotors spin purely for looks; thrust is applied as forces on the
        physics bodies (see pre_physics_step).
        """
        chassis_size = 0.06
        rotor_axis_length = 0.2
        rotor_radius = 0.15
        rotor_thickness = 0.01
        rotor_arm_radius = 0.01

        root = ET.Element('mujoco')
        root.attrib["model"] = "Ingenuity"
        compiler = ET.SubElement(root, "compiler")
        compiler.attrib["angle"] = "degree"
        compiler.attrib["coordinate"] = "local"
        compiler.attrib["inertiafromgeom"] = "true"
        mesh_asset = ET.SubElement(root, "asset")

        # Visual meshes, resolved relative to the generated XML's location.
        model_path = "../assets/glb/ingenuity/"
        mesh = ET.SubElement(mesh_asset, "mesh")
        mesh.attrib["file"] = model_path + "chassis.glb"
        mesh.attrib["name"] = "ingenuity_mesh"
        lower_prop_mesh = ET.SubElement(mesh_asset, "mesh")
        lower_prop_mesh.attrib["file"] = model_path + "lower_prop.glb"
        lower_prop_mesh.attrib["name"] = "lower_prop_mesh"
        upper_prop_mesh = ET.SubElement(mesh_asset, "mesh")
        upper_prop_mesh.attrib["file"] = model_path + "upper_prop.glb"
        upper_prop_mesh.attrib["name"] = "upper_prop_mesh"

        worldbody = ET.SubElement(root, "worldbody")

        chassis = ET.SubElement(worldbody, "body")
        chassis.attrib["name"] = "chassis"
        chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0)
        chassis_geom = ET.SubElement(chassis, "geom")
        chassis_geom.attrib["type"] = "box"
        chassis_geom.attrib["size"] = "%g %g %g" % (chassis_size, chassis_size, chassis_size)
        chassis_geom.attrib["pos"] = "0 0 0"
        chassis_geom.attrib["density"] = "50"

        # Display-only chassis mesh: contype/conaffinity 0 disables collisions.
        mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0)
        mesh_geom = ET.SubElement(chassis, "geom")
        mesh_geom.attrib["type"] = "mesh"
        mesh_geom.attrib["quat"] = "%g %g %g %g" % (mesh_quat.w, mesh_quat.x, mesh_quat.y, mesh_quat.z)
        mesh_geom.attrib["mesh"] = "ingenuity_mesh"
        mesh_geom.attrib["pos"] = "%g %g %g" % (0, 0, 0)
        mesh_geom.attrib["contype"] = "0"
        mesh_geom.attrib["conaffinity"] = "0"

        # Root hinge with zero range: effectively locks the joint.
        chassis_joint = ET.SubElement(chassis, "joint")
        chassis_joint.attrib["name"] = "root_joint"
        chassis_joint.attrib["type"] = "hinge"
        chassis_joint.attrib["limited"] = "true"
        chassis_joint.attrib["range"] = "0 0"

        zaxis = gymapi.Vec3(0, 0, 1)
        low_rotor_pos = gymapi.Vec3(0, 0, 0)
        rotor_separation = gymapi.Vec3(0, 0, 0.025)

        for i, mesh_name in enumerate(["lower_prop_mesh", "upper_prop_mesh"]):

            angle = 0

            rotor_quat = gymapi.Quat.from_axis_angle(zaxis, angle)
            rotor_pos = low_rotor_pos + (rotor_separation * i)

            # Physics rotor: collidable cylinder with a locked (range "0 0") hinge.
            rotor = ET.SubElement(chassis, "body")
            rotor.attrib["name"] = "rotor_physics_" + str(i)
            rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
            rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)

            rotor_geom = ET.SubElement(rotor, "geom")
            rotor_geom.attrib["type"] = "cylinder"
            rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness)
            rotor_geom.attrib["density"] = "1000"

            roll_joint = ET.SubElement(rotor, "joint")
            roll_joint.attrib["name"] = "rotor_roll" + str(i)
            roll_joint.attrib["type"] = "hinge"
            roll_joint.attrib["limited"] = "true"
            roll_joint.attrib["range"] = "0 0"
            roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)

            # Visual rotor: non-colliding mesh with an unlimited z-hinge so it
            # can spin (velocities are set in reset_idx).
            rotor_dummy = ET.SubElement(chassis, "body")
            rotor_dummy.attrib["name"] = "rotor_visual_" + str(i)
            rotor_dummy.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z)
            rotor_dummy.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z)

            rotor_mesh_geom = ET.SubElement(rotor_dummy, "geom")
            rotor_mesh_geom.attrib["type"] = "mesh"
            rotor_mesh_geom.attrib["mesh"] = mesh_name
            rotor_mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0)
            rotor_mesh_geom.attrib["quat"] = "%g %g %g %g" % (rotor_mesh_quat.w, rotor_mesh_quat.x, rotor_mesh_quat.y, rotor_mesh_quat.z)
            rotor_mesh_geom.attrib["contype"] = "0"
            rotor_mesh_geom.attrib["conaffinity"] = "0"

            # NOTE(review): this joint reuses the name "rotor_roll{i}" already
            # given to the physics rotor's joint above — duplicate joint names
            # in the same MJCF. Presumably tolerated by the Isaac Gym importer;
            # confirm before renaming, as DOF ordering depends on it.
            dummy_roll_joint = ET.SubElement(rotor_dummy, "joint")
            dummy_roll_joint.attrib["name"] = "rotor_roll" + str(i)
            dummy_roll_joint.attrib["type"] = "hinge"
            dummy_roll_joint.attrib["axis"] = "0 0 1"
            dummy_roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0)

        gymutil._indent_xml(root)
        ET.ElementTree(root).write("ingenuity.xml")
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the generated asset and create one copter plus one red target
        marker sphere per environment.

        Also caches per-env origins for rotor debug visualization when enabled.
        """
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        # Asset written by _create_ingenuity_asset into the working directory.
        asset_root = "./"
        asset_file = "ingenuity.xml"

        asset_options = gymapi.AssetOptions()
        asset_options.fix_base_link = False
        asset_options.angular_damping = 0.0
        asset_options.max_angular_velocity = 4 * math.pi
        asset_options.slices_per_cylinder = 40
        asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)

        # The marker is fixed in place; only its root state is moved (set_targets).
        asset_options.fix_base_link = True
        marker_asset = self.gym.create_sphere(self.sim, 0.1, asset_options)

        default_pose = gymapi.Transform()
        default_pose.p.z = 1.0

        self.envs = []
        self.actor_handles = []
        for i in range(self.num_envs):
            # create env instance
            env = self.gym.create_env(self.sim, lower, upper, num_per_row)
            actor_handle = self.gym.create_actor(env, asset, default_pose, "ingenuity", i, 1, 1)

            # Zero gains: the rotor DOFs are not actively driven.
            dof_props = self.gym.get_actor_dof_properties(env, actor_handle)
            dof_props['stiffness'].fill(0)
            dof_props['damping'].fill(0)
            self.gym.set_actor_dof_properties(env, actor_handle, dof_props)

            marker_handle = self.gym.create_actor(env, marker_asset, default_pose, "marker", i, 1, 1)
            self.gym.set_rigid_body_color(env, marker_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0))

            self.actor_handles.append(actor_handle)
            self.envs.append(env)

        if self.debug_viz:
            # need env offsets for the rotors
            self.rotor_env_offsets = torch.zeros((self.num_envs, 2, 3), device=self.device)
            for i in range(self.num_envs):
                env_origin = self.gym.get_env_origin(self.envs[i])
                self.rotor_env_offsets[i, ..., 0] = env_origin.x
                self.rotor_env_offsets[i, ..., 1] = env_origin.y
                self.rotor_env_offsets[i, ..., 2] = env_origin.z
def set_targets(self, env_ids):
num_sets = len(env_ids)
# set target position randomly with x, y in (-5, 5) and z in (1, 2)
self.target_root_positions[env_ids, 0:2] = (torch.rand(num_sets, 2, device=self.device) * 10) - 5
self.target_root_positions[env_ids, 2] = torch.rand(num_sets, device=self.device) + 1
self.marker_positions[env_ids] = self.target_root_positions[env_ids]
# copter "position" is at the bottom of the legs, so shift the target up so it visually aligns better
self.marker_positions[env_ids, 2] += 0.4
actor_indices = self.all_actor_indices[env_ids, 1].flatten()
return actor_indices
    def reset_idx(self, env_ids):
        """Reset the given envs: new targets, randomized start pose, spinning props.

        Root-state changes made here are NOT pushed to the sim in this method;
        the combined (copter + marker) actor indices are returned so
        pre_physics_step can apply them in a single indexed call.
        """
        # set rotor speeds
        # NOTE(review): writes [:, ...] — constant visual prop spin is set for
        # ALL envs on every reset, not just env_ids; presumably intentional
        # since the visual rotor joints are undriven — confirm.
        self.dof_velocities[:, 1] = -50
        self.dof_velocities[:, 3] = 50

        num_resets = len(env_ids)

        target_actor_indices = self.set_targets(env_ids)

        actor_indices = self.all_actor_indices[env_ids, 0].flatten()

        # Start from the initial pose and jitter x/y by +/-1.5 m, z by (-0.2, 1.5) m.
        self.root_states[env_ids] = self.initial_root_states[env_ids]
        self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
        self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten()
        self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten()

        self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets)

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

        return torch.unique(torch.cat([target_actor_indices, actor_indices]))
    def pre_physics_step(self, _actions):
        """Handle per-step resets/retargets, then map policy actions to rotor forces.

        ``_actions`` uses six columns per env: [0:2] lateral fraction and [2]
        vertical thrust for rotor 0, [3:5] and [5] likewise for rotor 1.
        """
        # resets: resample the target every 500 steps of episode progress
        set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
        target_actor_indices = torch.tensor([], device=self.device, dtype=torch.int32)
        if len(set_target_ids) > 0:
            target_actor_indices = self.set_targets(set_target_ids)

        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        actor_indices = torch.tensor([], device=self.device, dtype=torch.int32)
        if len(reset_env_ids) > 0:
            actor_indices = self.reset_idx(reset_env_ids)

        # push all modified root states (retargeted markers + reset copters) in one call
        reset_indices = torch.unique(torch.cat([target_actor_indices, actor_indices]))
        if len(reset_indices) > 0:
            self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(reset_indices), len(reset_indices))

        actions = _actions.to(self.device)

        thrust_action_speed_scale = 2000
        # vertical thrust per rotor, clamped to the actuator limit
        vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit)
        vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit)
        # lateral thrust expressed as a clamped fraction of the vertical component
        lateral_fraction_prop_0 = torch.clamp(actions[:, 0:2], -self.thrust_lateral_component, self.thrust_lateral_component)
        lateral_fraction_prop_1 = torch.clamp(actions[:, 3:5], -self.thrust_lateral_component, self.thrust_lateral_component)

        self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0
        self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0
        self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1
        self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1

        # map per-rotor thrust onto rigid bodies 1 and 3
        # NOTE(review): presumably the rotor link indices in the asset — confirm.
        self.forces[:, 1] = self.thrusts[:, 0]
        self.forces[:, 3] = self.thrusts[:, 1]

        # clear actions for reset envs
        self.thrusts[reset_env_ids] = 0.0
        self.forces[reset_env_ids] = 0.0

        # apply actions (forces are expressed in each body's local frame)
        self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE)
    def post_physics_step(self):
        """Advance counters, refresh sim state tensors, compute obs/reward, draw viz."""
        self.progress_buf += 1

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)

        self.compute_observations()
        self.compute_reward()

        # debug viz
        if self.viewer and self.debug_viz:
            # compute start and end positions for visualizing thrust lines
            self.gym.refresh_rigid_body_state_tensor(self.sim)
            # NOTE(review): rigid-body indices of four rotor blades — confirm
            # against the asset; earlier code in this class only fills two
            # thrust slots (self.thrusts[:, 0] / [:, 1]).
            rotor_indices = torch.LongTensor([2, 4, 6, 8])
            quats = self.rb_quats[:, rotor_indices]
            dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3)
            # NOTE(review): rotor_env_offsets is allocated as (num_envs, 2, 3)
            # above, and thrusts appears to be (num_envs, 2, 3) but is viewed as
            # (num_envs, 4, 1) here — looks like a 2-rotor/4-rotor mismatch; verify.
            starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets
            ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs

            # submit debug line geometry
            verts = torch.stack([starts, ends], dim=2).cpu().numpy()
            colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32)
            colors[..., 0] = 1.0  # red thrust lines
            self.gym.clear_lines(self.viewer)
            self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors)
def compute_observations(self):
self.obs_buf[..., 0:3] = (self.target_root_positions - self.root_positions) / 3
self.obs_buf[..., 3:7] = self.root_quats
self.obs_buf[..., 7:10] = self.root_linvels / 2
self.obs_buf[..., 10:13] = self.root_angvels / math.pi
return self.obs_buf
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:] = compute_ingenuity_reward(
self.root_positions,
self.target_root_positions,
self.root_quats,
self.root_linvels,
self.root_angvels,
self.reset_buf, self.progress_buf, self.max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_ingenuity_reward(root_positions, target_root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]
    # shaped reward: reach the target, stay upright, avoid yaw spin
    dist_to_target = torch.sqrt(torch.square(target_root_positions - root_positions).sum(-1))
    pos_reward = 1.0 / (1.0 + dist_to_target * dist_to_target)

    # uprightness: body z-axis should point up
    up_vec = quat_axis(root_quats, 2)
    tilt = torch.abs(1 - up_vec[..., 2])
    up_reward = 5.0 / (1.0 + tilt * tilt)

    # yaw-spin term decays with spin rate
    spin = torch.abs(root_angvels[..., 2])
    spin_reward = 1.0 / (1.0 + spin * spin)

    # uprightness and spin only matter when close to the target
    reward = pos_reward + pos_reward * (up_reward + spin_reward)

    # terminate on flying too far, dropping too low, or episode timeout
    ones = torch.ones_like(reset_buf)
    die = torch.zeros_like(reset_buf)
    die = torch.where(dist_to_target > 8.0, ones, die)
    die = torch.where(root_positions[..., 2] < 0.5, ones, die)
    reset = torch.where(progress_buf >= max_episode_length - 1, ones, die)

    return reward, reset
| 19,671 | Python | 43.60771 | 217 | 0.614763 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse
from isaacgymenvs.tasks.base.vec_task import VecTask
from typing import Tuple, Dict
class Anymal(VecTask):
    """ANYmal quadruped locomotion task.

    The policy tracks randomly sampled base velocity commands (linear x/y and
    yaw rate) on a flat ground plane; the reward combines exponential velocity
    tracking terms with a joint-torque penalty.
    """

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Parse the task config, build the sim via VecTask, and wrap gym state tensors."""
        self.cfg = cfg

        # normalization (observation scaling factors)
        self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
        self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
        self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"]
        self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"]
        self.action_scale = self.cfg["env"]["control"]["actionScale"]

        # reward scales
        self.rew_scales = {}
        self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"]
        self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"]
        self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]

        # randomization
        self.randomization_params = self.cfg["task"]["randomization_params"]
        self.randomize = self.cfg["task"]["randomize"]

        # command ranges
        self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
        self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
        self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]

        # plane params
        self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
        self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
        self.plane_restitution = self.cfg["env"]["plane"]["restitution"]

        # base init state: 13-dim root state [pos(3), quat(4), linvel(3), angvel(3)]
        pos = self.cfg["env"]["baseInitState"]["pos"]
        rot = self.cfg["env"]["baseInitState"]["rot"]
        v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
        v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
        state = pos + rot + v_lin + v_ang
        self.base_init_state = state

        # default joint positions, keyed by DOF name
        self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]

        self.cfg["env"]["numObservations"] = 48
        self.cfg["env"]["numActions"] = 12

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        # other
        self.dt = self.sim_params.dt
        self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
        self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
        self.Kp = self.cfg["env"]["control"]["stiffness"]
        self.Kd = self.cfg["env"]["control"]["damping"]

        # pre-multiply reward weights by dt so each term is scaled per step
        for key in self.rew_scales.keys():
            self.rew_scales[key] *= self.dt

        if self.viewer != None:
            p = self.cfg["env"]["viewer"]["pos"]
            lookat = self.cfg["env"]["viewer"]["lookat"]
            cam_pos = gymapi.Vec3(p[0], p[1], p[2])
            cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # get gym state tensors
        actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
        torques = self.gym.acquire_dof_force_tensor(self.sim)

        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_net_contact_force_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)

        # create some wrapper tensors for different slices
        self.root_states = gymtorch.wrap_tensor(actor_root_state)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
        self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
        self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3)  # shape: num_envs, num_bodies, xyz axis
        self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof)

        # per-env command [lin_x, lin_y, yaw]; the *_x/_y/_yaw tensors are
        # column views into self.commands, so writes alias the same storage
        self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False)
        self.commands_y = self.commands.view(self.num_envs, 3)[..., 1]
        self.commands_x = self.commands.view(self.num_envs, 3)[..., 0]
        self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2]
        self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False)

        # fill the default pose from the named config angles, in DOF order
        for i in range(self.cfg["env"]["numActions"]):
            name = self.dof_names[i]
            angle = self.named_default_joint_angles[name]
            self.default_dof_pos[:, i] = angle

        # initialize some data used later on
        self.extras = {}
        self.initial_root_states = self.root_states.clone()
        self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
        self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
        self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)

        self.reset_idx(torch.arange(self.num_envs, device=self.device))

    def create_sim(self):
        """Create the physics sim, the ground plane, and envs on a square grid."""
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2
        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

        # If randomizing, apply once immediately on startup before the fist sim step
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

    def _create_ground_plane(self):
        """Add a flat ground plane (+Z normal) with configured friction."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        plane_params.static_friction = self.plane_static_friction
        plane_params.dynamic_friction = self.plane_dynamic_friction
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        """Load the ANYmal URDF once and instantiate one actor per env."""
        asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets')
        asset_file = "urdf/anymal_c/urdf/anymal.urdf"

        asset_options = gymapi.AssetOptions()
        asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
        asset_options.collapse_fixed_joints = True
        asset_options.replace_cylinder_with_capsule = True
        asset_options.flip_visual_attachments = True
        asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"]
        asset_options.density = 0.001
        asset_options.angular_damping = 0.0
        asset_options.linear_damping = 0.0
        asset_options.armature = 0.0
        asset_options.thickness = 0.01
        asset_options.disable_gravity = False

        anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
        self.num_dof = self.gym.get_asset_dof_count(anymal_asset)
        self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset)

        start_pose = gymapi.Transform()
        start_pose.p = gymapi.Vec3(*self.base_init_state[:3])

        body_names = self.gym.get_asset_rigid_body_names(anymal_asset)
        self.dof_names = self.gym.get_asset_dof_names(anymal_asset)
        # collapsing fixed joints merges FOOT into SHANK, so track the surviving link
        extremity_name = "SHANK" if asset_options.collapse_fixed_joints else "FOOT"
        feet_names = [s for s in body_names if extremity_name in s]
        self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False)
        knee_names = [s for s in body_names if "THIGH" in s]
        self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False)
        self.base_index = 0

        dof_props = self.gym.get_asset_dof_properties(anymal_asset)
        for i in range(self.num_dof):
            dof_props['driveMode'][i] = gymapi.DOF_MODE_POS
            dof_props['stiffness'][i] = self.cfg["env"]["control"]["stiffness"] #self.Kp
            dof_props['damping'][i] = self.cfg["env"]["control"]["damping"] #self.Kd

        env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        env_upper = gymapi.Vec3(spacing, spacing, spacing)
        self.anymal_handles = []
        self.envs = []

        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row)
            anymal_handle = self.gym.create_actor(env_ptr, anymal_asset, start_pose, "anymal", i, 1, 0)
            self.gym.set_actor_dof_properties(env_ptr, anymal_handle, dof_props)
            self.gym.enable_actor_dof_force_sensors(env_ptr, anymal_handle)
            self.envs.append(env_ptr)
            self.anymal_handles.append(anymal_handle)

        # resolve body handles once from env 0 (identical across envs)
        for i in range(len(feet_names)):
            self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i])
        for i in range(len(knee_names)):
            self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i])
        self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base")

    def pre_physics_step(self, actions):
        """Convert policy actions into PD position targets around the default pose."""
        self.actions = actions.clone().to(self.device)
        targets = self.action_scale * self.actions + self.default_dof_pos
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(targets))

    def post_physics_step(self):
        """Advance episode counters, reset finished envs, then compute obs/reward."""
        self.progress_buf += 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.compute_observations()
        self.compute_reward(self.actions)

    def compute_reward(self, actions):
        """Fill rew_buf and reset_buf via the scripted reward function."""
        self.rew_buf[:], self.reset_buf[:] = compute_anymal_reward(
            # tensors
            self.root_states,
            self.commands,
            self.torques,
            self.contact_forces,
            self.knee_indices,
            self.progress_buf,
            # Dict
            self.rew_scales,
            # other
            self.base_index,
            self.max_episode_length,
        )

    def compute_observations(self):
        """Refresh gym tensors and fill obs_buf via the scripted observation function."""
        self.gym.refresh_dof_state_tensor(self.sim)  # done in step
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_net_contact_force_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)

        self.obs_buf[:] = compute_anymal_observations(  # tensors
                                                        self.root_states,
                                                        self.commands,
                                                        self.dof_pos,
                                                        self.default_dof_pos,
                                                        self.dof_vel,
                                                        self.gravity_vec,
                                                        self.actions,
                                                        # scales
                                                        self.lin_vel_scale,
                                                        self.ang_vel_scale,
                                                        self.dof_pos_scale,
                                                        self.dof_vel_scale
        )

    def reset_idx(self, env_ids):
        """Re-initialize the given envs with jittered joint state and fresh commands."""
        # Randomization can happen only at reset time, since it can reset actor positions on GPU
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

        # scale the default pose by (0.5, 1.5) and draw small joint velocities
        positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
        velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)

        self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
        self.dof_vel[env_ids] = velocities

        env_ids_int32 = env_ids.to(dtype=torch.int32)
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.initial_root_states),
                                                     gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))

        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))

        # sample fresh velocity commands (the *_x/_y/_yaw tensors alias self.commands)
        self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze()
        self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze()
        self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze()

        self.progress_buf[env_ids] = 0
        # NOTE(review): reset_buf is set to 1 (not 0) here — presumably consumed
        # and cleared by the base VecTask step loop; confirm.
        self.reset_buf[env_ids] = 1
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_anymal_reward(
    # tensors
    root_states,
    commands,
    torques,
    contact_forces,
    knee_indices,
    episode_lengths,
    # Dict
    rew_scales,
    # other
    base_index,
    max_episode_length
):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], int, int) -> Tuple[Tensor, Tensor]
    # express base velocities in the base frame
    base_quat = root_states[:, 3:7]
    lin_vel_body = quat_rotate_inverse(base_quat, root_states[:, 7:10])
    ang_vel_body = quat_rotate_inverse(base_quat, root_states[:, 10:13])

    # velocity tracking: exponential kernel on the squared command error
    err_lin = torch.sum(torch.square(commands[:, :2] - lin_vel_body[:, :2]), dim=1)
    err_ang = torch.square(commands[:, 2] - ang_vel_body[:, 2])
    rew_lin = torch.exp(-err_lin/0.25) * rew_scales["lin_vel_xy"]
    rew_ang = torch.exp(-err_ang/0.25) * rew_scales["ang_vel_z"]

    # joint-effort penalty (negative scale in config)
    rew_tau = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"]

    # clip so penalties cannot push the reward below zero
    total_reward = torch.clip(rew_lin + rew_ang + rew_tau, 0., None)

    # terminate on base contact, knee contact, or timeout
    base_contact = torch.norm(contact_forces[:, base_index, :], dim=1) > 1.
    knee_contact = torch.any(torch.norm(contact_forces[:, knee_indices, :], dim=2) > 1., dim=1)
    time_out = episode_lengths >= max_episode_length - 1  # no terminal reward for time-outs
    reset = (base_contact | knee_contact) | time_out

    return total_reward.detach(), reset
@torch.jit.script
def compute_anymal_observations(root_states,
                                commands,
                                dof_pos,
                                default_dof_pos,
                                dof_vel,
                                gravity_vec,
                                actions,
                                lin_vel_scale,
                                ang_vel_scale,
                                dof_pos_scale,
                                dof_vel_scale
                                ):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> Tensor
    # rotate world-frame velocities and gravity into the base frame, then scale
    base_quat = root_states[:, 3:7]
    vel_lin = quat_rotate_inverse(base_quat, root_states[:, 7:10]) * lin_vel_scale
    vel_ang = quat_rotate_inverse(base_quat, root_states[:, 10:13]) * ang_vel_scale
    grav_body = quat_rotate(base_quat, gravity_vec)

    # joint positions relative to the default pose, scaled
    joints_scaled = (dof_pos - default_dof_pos) * dof_pos_scale

    # commands scaled per channel: [lin_x, lin_y, yaw]
    cmd_scale = torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device)
    commands_scaled = commands*cmd_scale

    return torch.cat((vel_lin,
                      vel_ang,
                      grav_body,
                      commands_scaled,
                      joints_scaled,
                      dof_vel*dof_vel_scale,
                      actions
                      ), dim=-1)
| 18,546 | Python | 46.925064 | 217 | 0.602071 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
from typing import Tuple, List
import itertools
from itertools import permutations
from tkinter import W
from typing import Tuple, Dict, List, Set
import numpy as np
import torch
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
to_torch, get_axis_params, torch_rand_float, tensor_clamp
from torch import Tensor
from isaacgymenvs.tasks.dextreme.adr_vec_task import ADRVecTask
from isaacgymenvs.utils.torch_jit_utils import quaternion_to_matrix, matrix_to_quaternion
from isaacgymenvs.utils.rna_util import RandomNetworkAdversary
class AllegroHandDextreme(ADRVecTask):
dict_obs_cls = True
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        '''
        Obligatory constructor: fills in class variables and sets up the simulation.

        self._read_cfg() initialises class variables from the task config file.
        self._init_pre_sim_buffers() initialises tensors that store various
        states, randomised or otherwise, before the sim exists.
        self._init_post_sim_buffers() initialises the root tensors and other
        auxiliary variables provided as input to the controller and the
        value function.
        '''
        self.cfg = cfg

        # Read the task config file and store all the relevant variables in the class
        self._read_cfg()

        self.fingertips = [s+"_link_3" for s in ["index", "middle", "ring", "thumb"]]
        self.num_fingertips = len(self.fingertips)
        num_dofs = 16

        self.num_obs_dict = self.get_num_obs_dict(num_dofs)

        self.cfg["env"]["obsDims"] = {}

        # NOTE(review): this membership check can never fail — the loop iterates
        # the dict's own keys. Presumably `o` was meant to come from a
        # config-specified observation list; confirm the intended source.
        for o in self.num_obs_dict.keys():
            if o not in self.num_obs_dict:
                raise Exception(f"Unknown type of observation {o}!")
            self.cfg["env"]["obsDims"][o] = (self.num_obs_dict[o],)

        self.up_axis = 'z'

        self.use_vel_obs = False
        self.fingertip_obs = True
        self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]

        self.cfg["env"]["numActions"] = 16

        self.sim_device = sim_device
        # NOTE(review): the `rl_device` constructor argument is shadowed by the
        # config value here (default "cuda:0") — confirm this is intentional.
        rl_device = self.cfg.get("rl_device", "cuda:0")

        self._init_pre_sim_buffers()
        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, use_dict_obs=True)
        self._init_post_sim_buffers()

        # per-episode accumulators for each reward term, used for logging
        reward_keys = ['dist_rew', 'rot_rew', 'action_penalty', 'action_delta_penalty',
                       'velocity_penalty', 'reach_goal_rew', 'fall_rew', 'timeout_rew']

        self.rewards_episode = {key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys}

        if self.use_adr:
            self.apply_reset_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device)

        if self.print_success_stat:
            self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.last_ep_successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.successes_count = torch.zeros(self.max_consecutive_successes + 1, dtype=torch.float, device=self.device)
            from tensorboardX import SummaryWriter
            self.eval_summary_dir = './eval_summaries'
            # remove the old directory if it exists
            if os.path.exists(self.eval_summary_dir):
                import shutil
                shutil.rmtree(self.eval_summary_dir)
            self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)
def get_env_state(self):
env_dict=dict(act_moving_average=self.act_moving_average)
if self.use_adr:
env_dict = dict(**env_dict, **super().get_env_state())
return env_dict
def get_save_tensors(self):
if hasattr(self, 'actions'):
actions = self.actions
else:
actions = torch.zeros((self.num_envs, self.cfg["env"]["numActions"])).to(self.device)
# scale is [-1, 1] -> [low, upper]
# unscale is [low, upper] -> [-1, 1]
# self.actions are in [-1, 1] as they are raw
# actions returned by the policy
return {
# 'observations': self.obs_buf,
'actions': actions,
'cube_state': self.root_state_tensor[self.object_indices],
'goal_state': self.goal_states,
'joint_positions': self.dof_pos,
'joint_velocities': self.dof_vel,
'root_state': self.root_state_tensor[self.hand_indices],
}
    def save_step(self):
        """Append the current transition (actions, cube/goal/joint/root state) to the capture buffer."""
        self.capture.append_experience(self.get_save_tensors())
def get_num_obs_dict(self, num_dofs):
# This is what we use for ADR
num_obs = {
"dof_pos": num_dofs,
"dof_pos_randomized": num_dofs,
"dof_vel": num_dofs,
"dof_force": num_dofs, # generalised forces
"object_vels": 6,
"last_actions": num_dofs,
"cube_random_params": 3,
"hand_random_params": 1,
"gravity_vec": 3,
"ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip
"ft_force_torques": 6 * self.num_fingertips, # wrenches
"rb_forces": 3, # random forces being applied to the cube
"rot_dist": 2,
"stochastic_delay_params": 4, # cube obs + action delay prob, action fixed latency, pose refresh rate
"affine_params": 16*2 + 7*2 + 16*2,
"object_pose": 7,
"goal_pose": 7,
"goal_relative_rot": 4,
"object_pose_cam_randomized": 7,
"goal_relative_rot_cam_randomized": 4,
}
return num_obs
    def create_sim(self):
        """Create the physics sim, the ground plane, and all hand/object envs."""
        self.dt = self.sim_params.dt
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2

        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
    def _create_ground_plane(self):
        """Add a flat ground plane with +Z normal (default friction/restitution)."""
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets')
hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf"
if "asset" in self.cfg["env"]:
asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", hand_asset_file)
object_asset_file = self.asset_files_dict[self.object_type]
# load allegro hand_ asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = False
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = False
asset_options.disable_gravity = False
asset_options.thickness = 0.001
asset_options.angular_damping = 0.01
if self.physics_engine == gymapi.SIM_PHYSX:
asset_options.use_physx_armature = True
# The control interface i.e. we will be sending target positions to the robot
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
hand_asset = self.gym.load_asset(self.sim, asset_root, hand_asset_file, asset_options)
self.num_hand_bodies = self.gym.get_asset_rigid_body_count(hand_asset)
self.num_hand_shapes = self.gym.get_asset_rigid_shape_count(hand_asset)
self.num_hand_dofs = self.gym.get_asset_dof_count(hand_asset)
print("Num dofs: ", self.num_hand_dofs)
self.num_hand_actuators = self.num_hand_dofs
self.actuated_dof_indices = [i for i in range(self.num_hand_dofs)]
# set allegro_hand dof properties
hand_dof_props = self.gym.get_asset_dof_properties(hand_asset)
self.hand_dof_lower_limits = []
self.hand_dof_upper_limits = []
self.hand_dof_default_pos = []
self.hand_dof_default_vel = []
self.sensors = []
sensor_pose = gymapi.Transform()
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips]
# create fingertip force sensors
sensor_pose = gymapi.Transform()
for ft_handle in self.fingertip_handles:
self.gym.create_asset_force_sensor(hand_asset, ft_handle, sensor_pose)
for i in range(self.num_hand_dofs):
self.hand_dof_lower_limits.append(hand_dof_props['lower'][i])
self.hand_dof_upper_limits.append(hand_dof_props['upper'][i])
self.hand_dof_default_pos.append(0.0)
self.hand_dof_default_vel.append(0.0)
hand_dof_props['effort'][i] = self.max_effort
hand_dof_props['stiffness'][i] = 2
hand_dof_props['damping'][i] = 0.1
hand_dof_props['friction'][i] = 0.01
hand_dof_props['armature'][i] = 0.002
self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device)
self.hand_dof_lower_limits = to_torch(self.hand_dof_lower_limits, device=self.device)
self.hand_dof_upper_limits = to_torch(self.hand_dof_upper_limits, device=self.device)
self.hand_dof_default_pos = to_torch(self.hand_dof_default_pos, device=self.device)
self.hand_dof_default_vel = to_torch(self.hand_dof_default_vel, device=self.device)
# load manipulated object and goal assets
object_asset_options = gymapi.AssetOptions()
object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
object_asset_options.disable_gravity = True
goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
hand_start_pose = gymapi.Transform()
hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx))
hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * \
gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * \
gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi)
object_start_pose = gymapi.Transform()
object_start_pose.p = gymapi.Vec3()
object_start_pose.p.x = hand_start_pose.p.x
pose_dy, pose_dz = self.start_object_pose_dy, self.start_object_pose_dz
object_start_pose.p.y = hand_start_pose.p.y + pose_dy
object_start_pose.p.z = hand_start_pose.p.z + pose_dz
self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12)
self.goal_displacement_tensor = to_torch(
[self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)
goal_start_pose = gymapi.Transform()
goal_start_pose.p = object_start_pose.p + self.goal_displacement
goal_start_pose.p.y -= 0.02
goal_start_pose.p.z -= 0.04
# compute aggregate size
max_agg_bodies = self.num_hand_bodies + 2
max_agg_shapes = self.num_hand_shapes + 2
self.allegro_hands = []
self.object_handles = []
self.envs = []
self.object_init_state = []
self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips]
hand_rb_count = self.gym.get_asset_rigid_body_count(hand_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
self.object_rb_handles = list(range(hand_rb_count, hand_rb_count + object_rb_count))
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
# add hand - collision filter = -1 to use asset collision filters set in mjcf loader
hand_actor = self.gym.create_actor(env_ptr, hand_asset, hand_start_pose, "hand", i, -1, 0)
self.hand_start_states.append([hand_start_pose.p.x, hand_start_pose.p.y, hand_start_pose.p.z,
hand_start_pose.r.x, hand_start_pose.r.y, hand_start_pose.r.z, hand_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, hand_actor, hand_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, hand_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
self.gym.enable_actor_dof_force_sensors(env_ptr, hand_actor)
# add object
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
# add goal object
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.allegro_hands.append(hand_actor)
self.object_handles.append(object_handle)
self.palm_link_handle = self.gym.find_actor_rigid_body_handle(env_ptr, hand_actor, "palm_link"),
object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
self.object_rb_masses = [prop.mass for prop in object_rb_props]
self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13)
self.goal_states = self.object_init_state.clone()
self.goal_states[:, self.up_axis_idx] -= 0.04
self.goal_init_state = self.goal_states.clone()
self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)
self.goal_pose = self.goal_states[:, 0:7]
self.goal_pos = self.goal_states[:, 0:3]
self.goal_rot = self.goal_states[:, 3:7]
self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)
self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device)
self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device)
self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)
self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)
# Random Network Adversary
# As mentioned in OpenAI et al. 2019 (Appendix B.3) https://arxiv.org/abs/1910.07113
# and DeXtreme, 2022 (Section 2.6.2) https://arxiv.org/abs/2210.13702
if self.enable_rna:
softmax_bins = 32
num_dofs = len(self.hand_dof_lower_limits)
self.discretised_dofs = torch.zeros((num_dofs, softmax_bins)).to(self.device)
# Discretising the joing angles into 32 bins
for i in range(0, len(self.hand_dof_lower_limits)):
self.discretised_dofs[i] = torch.linspace(self.hand_dof_lower_limits[i],
self.hand_dof_upper_limits[i], steps=softmax_bins).to(self.device)
# input is the joint angles and cube pose (pos: 3 + quat: 4), therefore a total of 16+7 dimensions
self.rna_network = RandomNetworkAdversary(num_envs=self.num_envs, in_dims=num_dofs+7, \
out_dims=num_dofs, softmax_bins=softmax_bins, device=self.device)
# Random cube observations. Need this tensor for Random Cube Pose Injection
self.random_cube_poses = torch.zeros(self.num_envs, 7, device=self.device)
def compute_reward(self, actions):
    """Compute rewards, reset flags and success statistics for every env.

    The per-env reward math is delegated to the free function
    ``compute_hand_reward`` (defined elsewhere in this file); the individual
    reward terms it returns are stored in ``self.extras`` so they can be
    logged per step.  When ``self.print_success_stat`` is on, success and
    policy-speed statistics are additionally printed every 100 frames and
    written to the eval summary writer.
    """
    self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], \
    self.hold_count_buf[:], self.successes[:], self.consecutive_successes[:], \
    dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew = compute_hand_reward(
        self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.hold_count_buf, self.cur_targets, self.prev_targets,
        self.dof_vel, self.successes, self.consecutive_successes, self.max_episode_length,
        self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps,
        self.actions, self.action_penalty_scale, self.action_delta_penalty_scale,
        self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty,
        self.max_consecutive_successes, self.av_factor, self.num_success_hold_steps
    )
    # update best rotation distance in the current episode
    self.best_rotation_dist = torch.minimum(self.best_rotation_dist, self.curr_rotation_dist)
    self.extras['consecutive_successes'] = self.consecutive_successes.mean()
    self.extras['true_objective'] = self.successes
    # expose each reward component separately for per-step logging
    episode_cumulative = dict()
    episode_cumulative['dist_rew'] = dist_rew
    episode_cumulative['rot_rew'] = rot_rew
    episode_cumulative['action_penalty'] = action_penalty
    episode_cumulative['action_delta_penalty'] = action_delta_penalty
    episode_cumulative['velocity_penalty'] = velocity_penalty
    episode_cumulative['reach_goal_rew'] = reach_goal_rew
    episode_cumulative['fall_rew'] = fall_rew
    episode_cumulative['timeout_rew'] = timeout_rew
    self.extras['episode_cumulative'] = episode_cumulative
    if self.print_success_stat:
        is_success = self.reset_goal_buf.to(torch.bool)
        # frames elapsed since the previous success, tracked per env
        frame_ = torch.empty_like(self.last_success_step).fill_(self.frame)
        self.success_time = torch.where(is_success, frame_ - self.last_success_step, self.success_time)
        self.last_success_step = torch.where(is_success, frame_, self.last_success_step)
        mask_ = self.success_time > 0
        if any(mask_):
            avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
        else:
            avg_time_mean = math.nan
        envs_reset = self.reset_buf
        if self.use_adr:
            # under ADR, envs flagged for a randomisation reset are excluded here
            envs_reset = self.reset_buf & ~self.apply_reset_buf
        self.total_resets = self.total_resets + envs_reset.sum()
        direct_average_successes = self.total_successes + self.successes.sum()
        self.total_successes = self.total_successes + (self.successes * envs_reset).sum()
        self.total_num_resets += envs_reset
        self.last_ep_successes = torch.where(envs_reset > 0, self.successes, self.last_ep_successes)
        reset_ids = envs_reset.nonzero().squeeze()
        last_successes = self.successes[reset_ids].long()
        self.successes_count[last_successes] += 1
        if self.frame % 100 == 0:
            # The direct average shows the overall result more quickly, but slightly undershoots long term
            # policy performance.
            print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
            if self.total_resets > 0:
                print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))
            print(f"Max num successes: {self.successes.max().item()}")
            print(f"Average consecutive successes: {self.consecutive_successes.mean().item():.2f}")
            print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
            print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
            print(f"Last ep successes: {self.last_ep_successes.mean().item():.2f} {self.last_ep_successes}")
            self.eval_summaries.add_scalar("consecutive_successes", self.consecutive_successes.mean().item(), self.frame)
            self.eval_summaries.add_scalar("last_ep_successes", self.last_ep_successes.mean().item(), self.frame)
            self.eval_summaries.add_scalar("reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, self.frame)
            self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), self.frame)
            self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, self.frame)
            frame_time = self.control_freq_inv * self.dt
            self.eval_summaries.add_scalar("policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, self.frame)
            self.eval_summaries.add_scalar("policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), self.frame)
            print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")
            dof_delta = self.dof_delta.abs()
            print(f"Max dof deltas: {dof_delta.max(dim=0).values}, max across dofs: {self.dof_delta.abs().max().item():.2f}, mean: {self.dof_delta.abs().mean().item():.2f}")
            print(f"Max dof delta radians per sec: {dof_delta.max().item() / frame_time:.2f}, mean: {dof_delta.mean().item() / frame_time:.2f}")
            # create a matplotlib bar chart of the self.successes_count
            import matplotlib.pyplot as plt
            plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
            plt.title("Successes histogram")
            plt.xlabel("Successes")
            plt.ylabel("Frequency")
            plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
            plt.clf()
def compute_poses_wrt_wrist(self, object_pose, palm_link_pose, goal_pose=None):
    """Re-express world-frame object (and optionally goal) poses in the wrist frame.

    Args:
        object_pose: (N, 7) tensor, rows are [pos(3), quat_xyzw(4)] in world frame.
        palm_link_pose: (N, 7) tensor, wrist (palm link) pose in world frame.
        goal_pose: optional (N, 7) tensor; when given it is also converted.

    Returns:
        (N, 7) object pose wrt the wrist, or a tuple
        ``(object_pose_wrt_wrist, goal_pose_wrt_wrist)`` when ``goal_pose``
        is provided.  Quaternion layout is xyzw, matching the inputs.
    """
    # Build homogeneous transforms via the shared helper (it handles the
    # xyzw -> wxyz reorder that quaternion_to_matrix expects).
    T_W_P = self.convert_pos_quat_to_mat(palm_link_pose)
    T_W_O = self.convert_pos_quat_to_mat(object_pose)
    # Invert the wrist transform once; it is reused for the goal below.
    T_P_W = torch.inverse(T_W_P)
    relative_pose = torch.matmul(T_P_W, T_W_O)
    relative_translation = relative_pose[:, 0:3, 3]
    relative_quat_wxyz = matrix_to_quaternion(relative_pose[:, 0:3, 0:3])
    # back to Isaac Gym's xyzw quaternion layout
    relative_quat_xyzw = relative_quat_wxyz[:, [1, 2, 3, 0]]
    object_pose_wrt_wrist = torch.cat((relative_translation, relative_quat_xyzw), dim=-1)
    # `is None`, not `== None`: equality on tensors is elementwise semantics
    if goal_pose is None:
        return object_pose_wrt_wrist
    T_W_G = self.convert_pos_quat_to_mat(goal_pose)
    relative_goal_pose = torch.matmul(T_P_W, T_W_G)
    relative_goal_translation = relative_goal_pose[:, 0:3, 3]
    relative_goal_quat_wxyz = matrix_to_quaternion(relative_goal_pose[:, 0:3, 0:3])
    relative_goal_quat_xyzw = relative_goal_quat_wxyz[:, [1, 2, 3, 0]]
    goal_pose_wrt_wrist = torch.cat((relative_goal_translation, relative_goal_quat_xyzw), dim=-1)
    return object_pose_wrt_wrist, goal_pose_wrt_wrist
def convert_pos_quat_to_mat(self, obj_pose_pos_quat):
    """Build (N, 4, 4) homogeneous transforms from rows of [pos(3), quat_xyzw(4)]."""
    translation = obj_pose_pos_quat[:, 0:3]
    # reorder xyzw -> wxyz, which is what quaternion_to_matrix expects
    quat_wxyz = obj_pose_pos_quat[:, 3:7][:, [3, 0, 1, 2]]
    rotation = quaternion_to_matrix(quat_wxyz)
    transform = torch.eye(4).repeat(rotation.shape[0], 1, 1).to(rotation.device)
    transform[:, 0:3, 0:3] = rotation
    transform[:, 0:3, 3] = translation
    return transform
def compute_observations(self):
    """Refresh simulator state tensors and populate ``self.obs_dict``.

    All cube/goal poses fed to the policy are expressed in the wrist
    (palm link) frame.  With manual DR (``randomize`` and not ``use_adr``)
    the observed cube pose is additionally rate-limited and delayed to
    emulate a real perception pipeline.
    """
    self.gym.refresh_dof_state_tensor(self.sim)
    self.gym.refresh_actor_root_state_tensor(self.sim)
    self.gym.refresh_rigid_body_state_tensor(self.sim)
    self.gym.refresh_force_sensor_tensor(self.sim)
    self.gym.refresh_dof_force_tensor(self.sim)
    # slice object root state: pose [pos, quat_xyzw] and velocities
    self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
    self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
    self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
    self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
    self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
    self.goal_pose = self.goal_states[:, 0:7]
    self.goal_pos = self.goal_states[:, 0:3]
    self.goal_rot = self.goal_states[:, 3:7]
    # Need to update the pose of the cube so that it is represented wrt wrist
    self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
    self.object_pose_wrt_wrist, self.goal_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose,
                                                                                        self.palm_link_pose,
                                                                                        self.goal_pose)
    self.goal_wrt_wrist_rot = self.goal_pose_wrt_wrist[:, 3:7]
    self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]
    self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]
    if not self.use_adr and self.randomize:
        # refresh the observed cube pose only on each env's own cadence
        update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rates) == 0
        self.obs_object_pose_freq[update_freq] = self.object_pose_wrt_wrist[update_freq]
        # simulate adding delay
        # NOTE(review): torch.randn samples a *normal* distribution, so the
        # comparison with cube_obs_delay_prob is not a uniform probability
        # test; torch.rand may have been intended -- confirm.
        update_delay = torch.randn(self.num_envs, device=self.device) > self.cube_obs_delay_prob
        self.obs_object_pose[update_delay] = self.obs_object_pose_freq[update_delay]
    # increment the frame counter both for manual DR and ADR
    self.frame += 1
    # NOTE(review): cube_scale is computed but never used below -- confirm intent
    cube_scale = self.cube_random_params[:, 0]
    cube_scale = cube_scale.reshape(-1, 1)
    # unscale is [low, upper] -> [-1, 1]
    self.obs_dict["dof_pos"][:] = unscale(self.dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
    self.obs_dict["dof_vel"][:] = self.dof_vel
    self.obs_dict["dof_force"][:] = self.force_torque_obs_scale * self.dof_force_tensor
    self.obs_dict["object_pose"][:] = self.object_pose_wrt_wrist
    self.obs_dict["object_vels"][:, 0:3] = self.object_linvel
    self.obs_dict["object_vels"][:, 3:6] = self.vel_obs_scale * self.object_angvel
    self.obs_dict["goal_pose"][:] = self.goal_pose_wrt_wrist
    self.obs_dict["goal_relative_rot"][:] = quat_mul(self.object_pose_wrt_wrist[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
    # This is only needed for manual DR experiments
    if not self.use_adr:
        self.obs_dict["object_pose_cam"][:] = self.obs_object_pose
        self.obs_dict["goal_relative_rot_cam"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
    self.obs_dict["ft_states"][:] = self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips)
    self.obs_dict["ft_force_torques"][:] = self.force_torque_obs_scale * self.vec_sensor_tensor  # wrenches
    self.obs_dict["rb_forces"] = self.rb_forces[:, self.object_rb_handles, :].view(-1, 3)
    self.obs_dict["last_actions"][:] = self.actions
    if self.randomize:
        self.obs_dict["cube_random_params"][:] = self.cube_random_params
        self.obs_dict["hand_random_params"][:] = self.hand_random_params
        self.obs_dict["gravity_vec"][:] = self.gravity_vec
    # angle of the shortest rotation between object and goal orientations
    quat_diff = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
    self.curr_rotation_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
    # -1 marks "not initialised yet" (set on reset)
    self.best_rotation_dist = torch.where(self.best_rotation_dist < 0.0, self.curr_rotation_dist, self.best_rotation_dist)
    # add rotation distances to the observations so that critic could predict the rewards better
    self.obs_dict["rot_dist"][:, 0] = self.curr_rotation_dist
    self.obs_dict["rot_dist"][:, 1] = self.best_rotation_dist
def get_random_quat(self, env_ids):
    """Sample uniformly distributed unit quaternions (xyzw), one per env id.

    Uses the Shoemake subgroup-algorithm construction, cf.
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261
    """
    uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
    angle_v = 2 * np.pi * uvw[:, 1]
    angle_w = 2 * np.pi * uvw[:, 2]
    radius_a = torch.sqrt(1.0 - uvw[:, 0])
    radius_b = torch.sqrt(uvw[:, 0])
    q_w = radius_a * torch.sin(angle_v)
    q_x = radius_a * torch.cos(angle_v)
    q_y = radius_b * torch.sin(angle_w)
    q_z = radius_b * torch.cos(angle_w)
    return torch.stack((q_x, q_y, q_z, q_w), dim=-1)
def reset_target_pose(self, env_ids, apply_reset=False):
    """Sample a new goal orientation for the given envs.

    Args:
        env_ids: indices of envs whose goal should be re-sampled.
        apply_reset: when True, push the new goal root states to the sim
            immediately; otherwise the caller is expected to issue a single
            combined ``set_actor_root_state_tensor_indexed`` call later.
    """
    rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
    if self.apply_random_quat:
        # uniformly sampled unit quaternion
        new_rot = self.get_random_quat(env_ids)
    else:
        new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
    self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
    self.goal_states[env_ids, 3:7] = new_rot
    # the goal actor is rendered offset from the logical goal position
    self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
    self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
    # zero the goal actor's velocities
    self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])
    if apply_reset:
        goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state_tensor),
                                                     gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
    self.reset_goal_buf[env_ids] = 0
    # change back to non-initialized state
    self.best_rotation_dist[env_ids] = -1
def get_relative_rot(self, obj_rot, goal_rot):
    """Return the rotation that takes ``goal_rot`` to ``obj_rot`` (quat xyzw)."""
    relative_rotation = quat_mul(obj_rot, quat_conjugate(goal_rot))
    return relative_rotation
def get_random_cube_observation(self, current_cube_pose):
    '''
    Replace the cube pose in a random subset of environments with a random
    pose, to simulate noisy perception estimates in the real world.
    It is also called random cube pose injection.

    Args:
        current_cube_pose: (num_envs, 7) cube poses [pos(3), quat_xyzw(4)].

    Returns:
        (num_envs, 7) tensor where each row is either the original pose or,
        with probability ``random_cube_pose_prob``, the injected random pose.
    '''
    env_ids = np.arange(0, self.num_envs)
    rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 5), device=self.device)
    if self.apply_random_quat:
        new_object_rot = self.get_random_quat(env_ids)
    else:
        new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4],
                                            self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
    # random position within +/-0.5 of each env's initial object position
    self.random_cube_poses[:, 0:2] = self.object_init_state[env_ids, 0:2] +\
        0.5 * rand_floats[:, 0:2]
    self.random_cube_poses[:, 2] = self.object_init_state[env_ids, 2] + \
        0.5 * rand_floats[:, 2]
    self.random_cube_poses[:, 3:7] = new_object_rot
    # per-env Bernoulli mask choosing which envs get the injected pose
    random_cube_pose_mask = torch.rand(len(env_ids), 1, device=self.device) < self.random_cube_pose_prob
    current_cube_pose = current_cube_pose * ~random_cube_pose_mask + self.random_cube_poses * random_cube_pose_mask
    return current_cube_pose
def reset_idx(self, env_ids, goal_env_ids):
    """Reset the given environments to randomized start states.

    Args:
        env_ids: envs to fully reset (object pose, hand dofs, counters).
        goal_env_ids: envs whose goal was already re-sampled by the caller;
            their goal actor indices are folded into the combined
            root-state write below.
    """
    # generate random values
    rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device)
    # re-sample goal poses for the envs being reset (deferred sim write)
    self.reset_target_pose(env_ids)
    # reset rigid body forces
    self.rb_forces[env_ids, :, :] = 0.0
    # reset object: init state plus bounded position noise in xy and z
    self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
    self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
        self.reset_position_noise * rand_floats[:, 0:2]
    self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
        self.reset_position_noise_z * rand_floats[:, self.up_axis_idx]
    if self.apply_random_quat:
        new_object_rot = self.get_random_quat(env_ids)
    else:
        new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
    self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
    # zero object velocities
    self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])
    # single combined root-state write covering objects and both goal sets
    object_indices = torch.unique(torch.cat([self.object_indices[env_ids],
                                             self.goal_object_indices[env_ids],
                                             self.goal_object_indices[goal_env_ids]]).to(torch.int32))
    self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                 gymtorch.unwrap_tensor(self.root_state_tensor),
                                                 gymtorch.unwrap_tensor(object_indices), len(object_indices))
    # reset random force probabilities (log-uniform over force_prob_range)
    self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
                                                * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))
    # reset allegro hand: default dof positions plus bounded noise
    delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos
    delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos
    rand_floats_dof_pos = (rand_floats[:, 5:5+self.num_hand_dofs] + 1) / 2
    rand_delta = delta_min + (delta_max - delta_min) * rand_floats_dof_pos
    # NOTE(review): `hand_default_dof_pos` here vs `hand_dof_default_pos`
    # above -- confirm both attributes exist and are intentionally distinct.
    pos = self.hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
    self.dof_pos[env_ids, :] = pos
    self.dof_vel[env_ids, :] = self.hand_dof_default_vel + \
        self.reset_dof_vel_noise * rand_floats[:, 5+self.num_hand_dofs:5+self.num_hand_dofs*2]
    self.prev_targets[env_ids, :self.num_hand_dofs] = pos
    self.cur_targets[env_ids, :self.num_hand_dofs] = pos
    self.prev_prev_targets[env_ids, :self.num_hand_dofs] = pos
    hand_indices = self.hand_indices[env_ids].to(torch.int32)
    self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                    gymtorch.unwrap_tensor(self.prev_targets),
                                                    gymtorch.unwrap_tensor(hand_indices), len(env_ids))
    self.gym.set_dof_state_tensor_indexed(self.sim,
                                          gymtorch.unwrap_tensor(self.dof_state),
                                          gymtorch.unwrap_tensor(hand_indices), len(env_ids))
    # Need to update the pose of the cube so that it is represented wrt wrist
    self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
    self.object_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose,
                                                              self.palm_link_pose)
    # object pose is represented with respect to the wrist
    self.obs_object_pose[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()
    self.obs_object_pose_freq[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()
    if self.use_adr and len(env_ids) == self.num_envs:
        # full reset under ADR: desynchronise episodes with random progress
        self.progress_buf = torch.randint(0, self.max_episode_length, size=(self.num_envs,), dtype=torch.long, device=self.device)
    else:
        self.progress_buf[env_ids] = 0
    self.reset_buf[env_ids] = 0
    if self.use_adr:
        self.apply_reset_buf[env_ids] = 0
    self.successes[env_ids] = 0
    # -1 marks best_rotation_dist as uninitialised for the new episode
    self.best_rotation_dist[env_ids] = -1
    self.hold_count_buf[env_ids] = 0
def get_rna_alpha(self):
    """Return the blending weight for Random Network Adversary actions.

    The base class defines no schedule; subclasses must override this.
    """
    raise NotImplementedError
def get_random_network_adversary_action(self, canonical_action):
    """Perturb the policy action with the Random Network Adversary (RNA).

    See OpenAI et al. 2019 (Appendix B.3) https://arxiv.org/abs/1910.07113
    and DeXtreme 2022 (Section 2.6.2) https://arxiv.org/abs/2210.13702.
    Returns ``canonical_action`` unchanged when RNA is disabled.
    """
    if self.enable_rna:
        # periodically re-draw the adversary network's random weights
        if self.last_step > 0 and self.last_step % self.random_adversary_weight_sample_freq == 0:
            self.rna_network._refresh()
        rand_action_softmax = self.rna_network(torch.cat([self.dof_pos, self.object_pose_wrt_wrist], axis=-1))
        rand_action_inds = torch.argmax(rand_action_softmax, axis=-1)
        rand_action_inds = torch.permute(rand_action_inds, (1, 0))
        # map softmax bin indices back to joint angles via the dof table
        rand_perturbation = torch.gather(self.discretised_dofs, 1, rand_action_inds)
        rand_perturbation = torch.permute(rand_perturbation, (1, 0))
        # unscale it first (normalise it to [-1, 1])
        rand_perturbation = unscale(rand_perturbation,
                                    self.hand_dof_lower_limits[self.actuated_dof_indices],
                                    self.hand_dof_upper_limits[self.actuated_dof_indices])
        if not self.use_adr:
            # manual DR: hard-switch a random subset of envs to the adversary
            action_perturb_mask = torch.rand(self.num_envs, 1, device=self.device) < self.action_perturb_prob
            rand_perturbation = ~action_perturb_mask * canonical_action + action_perturb_mask * rand_perturbation
        # blend adversary and canonical actions by the (scheduled) alpha
        rna_alpha = self.get_rna_alpha()
        rand_perturbation = rna_alpha * rand_perturbation + (1 - rna_alpha) * canonical_action
        return rand_perturbation
    else:
        return canonical_action
def update_action_moving_average(self):
    """Anneal the action EMA coefficient from its upper to its lower bound.

    Runs every ``act_moving_average_scheduled_freq`` steps and records the
    current value in ``self.extras`` for logging.
    """
    step = self.last_step
    if step > 0 and step % self.act_moving_average_scheduled_freq == 0:
        # fraction of the schedule completed, capped at 1.0
        sched_scaling = min(step, self.act_moving_average_scheduled_steps) / self.act_moving_average_scheduled_steps
        span = self.act_moving_average_lower - self.act_moving_average_upper
        self.act_moving_average = self.act_moving_average_upper + span * sched_scaling
        print('action moving average: {}'.format(self.act_moving_average))
        print('last_step: {}'.format(step), ' scheduled steps: {}'.format(self.act_moving_average_scheduled_steps))
    self.extras['annealing/action_moving_average_scalar'] = self.act_moving_average
def pre_physics_step(self, actions):
    """Handle randomisation/resets, then apply actions and random forces.

    Ordering matters: DR/ADR bookkeeping first, then goal-only resets,
    then full env resets, and finally action application.
    """
    # Anneal action moving average
    self.update_action_moving_average()
    env_ids_reset = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
    if self.randomize and not self.use_adr:
        self.apply_randomizations(dr_params=self.randomization_params, randomisation_callback=self.randomisation_callback)
    elif self.randomize and self.use_adr:
        # NB - when we are doing ADR, we must calculate the ADR or new DR vals one step BEFORE applying randomisations
        # this is because reset needs to be applied on the next step for it to take effect
        env_mask_randomize = (self.reset_buf & ~self.apply_reset_buf).bool()
        env_ids_reset = self.apply_reset_buf.nonzero(as_tuple=False).flatten()
        if len(env_mask_randomize.nonzero(as_tuple=False).flatten()) > 0:
            self.apply_randomizations(dr_params=self.randomization_params,
                                      randomize_buf=env_mask_randomize,
                                      adr_objective=self.successes,
                                      randomisation_callback=self.randomisation_callback)
            # mark these envs so the actual reset happens on the next step
            self.apply_reset_buf[env_mask_randomize] = 1
    # if only goals need reset, then call set API
    if len(goal_env_ids) > 0 and len(env_ids_reset) == 0:
        self.reset_target_pose(goal_env_ids, apply_reset=True)
    # if goals need reset in addition to other envs, call set API in reset()
    elif len(goal_env_ids) > 0:
        self.reset_target_pose(goal_env_ids)
    if len(env_ids_reset) > 0:
        self.reset_idx(env_ids_reset, goal_env_ids)
    self.apply_actions(actions)
    self.apply_random_forces()
def apply_action_noise_latency(self):
    """Hook for injecting action latency/noise; the base class is a pass-through."""
    unmodified_actions = self.actions
    return unmodified_actions
def apply_actions(self, actions):
    """Turn policy actions into dof position targets and send them to the sim.

    Supports three control modes: relative (delta) targets, velocity-capped
    absolute targets, and plain absolute targets smoothed by an exponential
    moving average.  Actions first pass through the latency hook and the
    Random Network Adversary.
    """
    self.actions = actions.clone().to(self.device)
    # Envs at progress 0 were just reset: seed the action history with the
    # current (normalised) dof positions rather than stale/zero actions.
    refreshed = self.progress_buf == 0
    self.prev_actions_queue[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits,
                                                 self.hand_dof_upper_limits).view(-1, 1, self.num_actions)
    # Needed for the first step and every refresh
    # you don't want to mix with zeros
    self.prev_actions[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits,
                                           self.hand_dof_upper_limits).view(-1, self.num_actions)
    # update the actions queue (index 0 holds the newest action)
    self.prev_actions_queue[:, 1:] = self.prev_actions_queue[:, :-1].detach()
    self.prev_actions_queue[:, 0, :] = self.actions
    # apply action delay
    actions_delayed = self.apply_action_noise_latency()
    # apply random network adversary
    actions_delayed = self.get_random_network_adversary_action(actions_delayed)
    if self.use_relative_control:
        targets = self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * actions_delayed
        self.cur_targets[:, self.actuated_dof_indices] = targets
    elif self.use_capped_dof_control:
        # This is capping the maximum dof velocity
        targets = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices],
                        self.hand_dof_upper_limits[self.actuated_dof_indices])
        # NOTE(review): `targets` already spans only the actuated dofs, so
        # indexing it with actuated_dof_indices again looks suspicious --
        # confirm the action/dof index layout makes this correct.
        delta = targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices]
        max_dof_delta = self.max_dof_radians_per_second * self.dt * self.control_freq_inv
        delta = torch.clamp_(delta, -max_dof_delta, max_dof_delta)
        self.cur_targets[:, self.actuated_dof_indices] = self.prev_targets[:, self.actuated_dof_indices] + delta
    else:
        self.cur_targets[:, self.actuated_dof_indices] = scale(actions_delayed,
                                                               self.hand_dof_lower_limits[self.actuated_dof_indices],
                                                               self.hand_dof_upper_limits[self.actuated_dof_indices])
        # exponential moving average between the new and previous targets
        self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,self.actuated_dof_indices] + \
            (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
    # clamp the final targets into the dof limits regardless of control mode
    self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices],
                                                                  self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices])
    # dof_delta is consumed by the reward/logging code
    self.dof_delta = self.cur_targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices]
    self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))
    self.prev_actions[:] = self.actions.clone()
def apply_random_forces(self):
    """Randomly perturb the object with exponentially decaying body-frame forces.

    Mirrors the force perturbations of OpenAI et al.,
    https://arxiv.org/abs/1808.00177
    """
    if self.force_scale <= 0.0:
        return
    # decay any forces still active from earlier steps
    decay = torch.pow(self.force_decay, self.dt / self.force_decay_interval)
    self.rb_forces *= decay
    # pick which envs receive a fresh random force this step
    picked = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
    sampled = torch.randn(
        self.rb_forces[picked, self.object_rb_handles, :].shape, device=self.device)
    self.rb_forces[picked, self.object_rb_handles, :] = sampled * self.object_rb_masses * self.force_scale
    self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)
def post_physics_step(self):
    """Per-step bookkeeping after physics: observations, rewards, debug viz."""
    self.progress_buf += 1
    # This is for manual DR so ADR has to be OFF
    if self.randomize and not self.use_adr:
        # This buffer is needed for manual DR randomisation
        self.randomize_buf += 1
    self.compute_observations()
    self.compute_reward(self.actions)
    # update the previous targets
    self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
    # save and viz dr params changing on the fly
    self.track_dr_params()
    if self.viewer and self.debug_viz:
        # draw axes on target object
        self.gym.clear_lines(self.viewer)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        for i in range(self.num_envs):
            # goal frame axes (red/green/blue = x/y/z), drawn at the displaced goal visual
            targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
            p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])
            # object frame axes
            objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
            objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
            objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()
            p0 = self.object_pos[i].cpu().numpy()
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
            self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])
def track_dr_params(self):
    """Hook for logging/visualising DR parameters on the fly; no-op by default."""
    return None
def _read_cfg(self):
'''
reads various variables from the config file
'''
self.randomize = self.cfg["task"]["randomize"]
self.randomization_params = self.cfg["task"]["randomization_params"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
self.action_delta_penalty_scale = self.cfg["env"]["actionDeltaPenaltyScale"]
self.success_tolerance = self.cfg["env"]["successTolerance"]
self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["fallPenalty"]
self.rot_eps = self.cfg["env"]["rotEps"]
self.vel_obs_scale = 0.2 # scale factor of velocity based observations
self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations
if "max_effort" in self.cfg["env"]:
self.max_effort = self.cfg["env"]["max_effort"]
else:
self.max_effort = 0.35
self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.start_object_pose_dy = self.cfg["env"]["startObjectPoseDY"]
self.start_object_pose_dz = self.cfg["env"]["startObjectPoseDZ"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
self.use_relative_control = self.cfg["env"]["useRelativeControl"]
self.use_capped_dof_control = self.cfg["env"]["use_capped_dof_control"]
self.max_dof_radians_per_second = self.cfg["env"]["max_dof_radians_per_second"]
self.num_success_hold_steps = self.cfg["env"].get("num_success_hold_steps", 1)
# Moving average related
self.act_moving_average_range = self.cfg["env"]["actionsMovingAverage"]["range"]
self.act_moving_average_scheduled_steps = self.cfg["env"]["actionsMovingAverage"]["schedule_steps"]
self.act_moving_average_scheduled_freq = self.cfg["env"]["actionsMovingAverage"]["schedule_freq"]
self.act_moving_average_lower = self.act_moving_average_range[0]
self.act_moving_average_upper = self.act_moving_average_range[1]
self.act_moving_average = self.act_moving_average_upper
# Random cube observation
has_random_cube_obs = 'random_cube_observation' in self.cfg["env"]
if has_random_cube_obs:
self.enable_random_obs = self.cfg["env"]["random_cube_observation"]["enable"]
self.random_cube_pose_prob = self.cfg["env"]["random_cube_observation"]["prob"]
else:
self.enable_random_obs = False
# We have two ways to sample quaternions where one of the samplings is biased
# If this flag is enabled, the sampling will be UNBIASED
self.apply_random_quat = self.cfg['env'].get("apply_random_quat", True)
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
self.eval_stats_name = self.cfg["env"].get("evalStatsName", '')
self.num_eval_frames = self.cfg["env"].get("numEvalFrames", None)
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self.cfg["env"].get("averFactor", 0.1)
self.cube_obs_delay_prob = self.cfg["env"].get("cubeObsDelayProb", 0.0)
# Action delay
self.action_delay_prob_max = self.cfg["env"]["actionDelayProbMax"]
self.action_latency_max = self.cfg["env"]["actionLatencyMax"]
self.action_latency_scheduled_steps = self.cfg["env"]["actionLatencyScheduledSteps"]
self.frame = 0
self.max_skip_obs = self.cfg["env"].get("maxObjectSkipObs", 1)
self.object_type = self.cfg["env"]["objectType"]
assert self.object_type in ["block", "egg"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf",
# "block": "urdf/objects/cube_multicolor_sdf.urdf",
"egg": "mjcf/open_ai_assets/hand/egg.xml",
}
if "asset" in self.cfg["env"]:
self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
# Random Network Adversary
self.enable_rna = "random_network_adversary" in self.cfg["env"] and self.cfg["env"]["random_network_adversary"]["enable"]
if self.enable_rna:
if "prob" in self.cfg["env"]["random_network_adversary"]:
self.action_perturb_prob = self.cfg["env"]["random_network_adversary"]["prob"]
# how often we want to resample the weights of the random neural network
self.random_adversary_weight_sample_freq = self.cfg["env"]["random_network_adversary"]["weight_sample_freq"]
def _init_pre_sim_buffers(self):
"""Initialise buffers that must be initialised before sim startup."""
# 0 - scale, 1 - mass, 2 - friction
self.cube_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device)
# 0 - scale
self.hand_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 1), dtype=torch.float, device=self.sim_device)
self.gravity_vec = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device)
    def _init_post_sim_buffers(self):
        """Initialise buffers that must be initialised after sim startup.

        Acquires the simulator state tensors (root states, DOF states, rigid
        bodies, force sensors), wraps them as torch views, and allocates the
        bookkeeping buffers for targets, goals, successes, random forces and
        delayed/noisy object-pose observations.
        """
        self.dt = self.sim_params.dt
        control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
        # A positive resetTime overrides the configured episode length.
        if self.reset_time > 0.0:
            self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
            print("Reset time: ", self.reset_time)
            print("New episode length: ", self.max_episode_length)
        if self.viewer != None:
            cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
            cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
        # get gym GPU state tensors
        actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
        sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
        # 6 force/torque values per fingertip sensor.
        self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
        dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
        self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_hand_dofs)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        # create some wrapper tensors for different slices
        self.hand_default_dof_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        # Keep only the hand DOFs; the last dim holds (position, velocity).
        self.dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_hand_dofs]
        self.dof_pos = self.dof_state[..., 0]
        self.dof_vel = self.dof_state[..., 1]
        self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
        self.num_bodies = self.rigid_body_states.shape[1]
        self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
        self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
        print("Num dofs: ", self.num_dofs)
        # PD targets for the current, previous and previous-previous steps.
        self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
        self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
        self.prev_prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
        # NOTE(review): flat actor indices assume 3 actors per env — confirm
        # against the env-creation code.
        self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
        self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.reset_goal_buf = self.reset_buf.clone()
        self.hold_count_buf = self.progress_buf.clone()
        self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
        self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
        self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
        self.total_successes = 0
        self.total_resets = 0
        # object apply random forces parameters
        self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
        self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
        # Log-uniform sample of the per-env force probability within the range.
        self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
                                           * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))
        self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
        # object observations parameters
        self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
        self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
        self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
        self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
        self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]
        # buffer storing object poses which are only refreshed every n steps
        self.obs_object_pose_freq = self.object_pose.clone()
        # buffer storing object poses with added delay which are only refreshed every n steps
        self.obs_object_pose = self.object_pose.clone()
        self.current_object_pose = self.object_pose.clone()
        # Identity quaternion (w component at index 6) for the wrist-relative pose.
        self.object_pose_wrt_wrist = torch.zeros_like(self.object_pose)
        self.object_pose_wrt_wrist[:, 6] = 1.0
        self.prev_object_pose = self.object_pose.clone()
        # inverse refresh rate for each environment
        self.cube_pose_refresh_rates = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device)
        # offset so not all the environments have it each time
        self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device)
        self.prev_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device)
        # Related to action delay
        self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], \
            self.action_latency_max+1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        # We have action latency MIN and MAX (declared in _read_cfg() function reading from a config file)
        self.action_latency_min = 1
        self.action_latency = torch.randint(0, self.action_latency_min + 1, \
            size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device)
        # tensors for rotation approach reward (-1 stands for not initialized)
        self.curr_rotation_dist = None
        self.best_rotation_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
        # All 24 axis-aligned cube orientations, converted to quaternions.
        self.unique_cube_rotations = torch.tensor(unique_cube_rotations_3d(), dtype=torch.float, device=self.device)
        self.unique_cube_rotations = matrix_to_quaternion(self.unique_cube_rotations)
        self.num_unique_cube_rotations = self.unique_cube_rotations.shape[0]
def randomisation_callback(self, param_name, param_val, env_id=None, actor=None):
if param_name == "gravity":
self.gravity_vec[:, 0] = param_val.x
self.gravity_vec[:, 1] = param_val.y
self.gravity_vec[:, 2] = param_val.z
elif param_name == "scale" and actor == "object":
self.cube_random_params[env_id, 0] = param_val.mean()
elif param_name == "mass" and actor == "object":
self.cube_random_params[env_id, 1] = np.mean(param_val)
elif param_name == "friction" and actor == "object":
self.cube_random_params[env_id, 2] = np.mean(param_val)
elif param_name == "scale" and actor == "hand":
self.hand_random_params[env_id, 0] = param_val.mean()
class AllegroHandDextremeADR(AllegroHandDextreme):
    """Dextreme variant driven by Automatic Domain Randomisation (ADR).

    Action latency, cube-pose refresh rate/offset and affine observation and
    action noise are sampled per-environment from ADR-controlled
    distributions rather than fixed config values.
    """
    def _init_pre_sim_buffers(self):
        super()._init_pre_sim_buffers()
        # NOTE(review): the string below sits after the super() call, so it is
        # a no-op expression rather than a real docstring.
        """Initialise buffers that must be initialised before sim startup."""
        # Per-env inverse refresh rate of the cube-pose observation.
        self.cube_pose_refresh_rate = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)
        # offset so not all the environments have it each time
        self.cube_pose_refresh_offset = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)
        # stores previous actions
        self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], self.action_latency_max + 1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        # tensors to store random affine transforms
        self.affine_actions_scaling = torch.ones(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        self.affine_actions_additive = torch.zeros(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        self.affine_cube_pose_scaling = torch.ones(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)
        self.affine_cube_pose_additive = torch.zeros(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)
        self.affine_dof_pos_scaling = torch.ones(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)
        self.affine_dof_pos_additive = torch.zeros(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)
        self.action_latency = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=self.sim_device)
    def sample_discrete_adr(self, param_name, env_ids):
        """Samples a discrete value from ADR continuous distribution.
        Eg, for a continuous ADR value of 0.4, adding uniform noise in
        (-0.5, 0.5] and rounding yields 0 with 60% probability and
        1 with 40% probability.
        """
        adr_value = self.get_adr_tensor(param_name, env_ids=env_ids)
        # Dither with U(-0.5, 0.5] so rounding splits between the two
        # neighbouring integers in proportion to the fractional part.
        continuous_fuzzed = adr_value + (- (torch.rand_like(adr_value) - 0.5))
        return continuous_fuzzed.round().long()
    def sample_gaussian_adr(self, param_name, env_ids, trailing_dim=1):
        """Draw zero-mean Gaussian noise whose stdev grows with the ADR value.

        stdev is exp(value^2) - 1 for positive ADR values and 0 otherwise, so
        the noise switches on smoothly as ADR expands the parameter range.
        """
        adr_value = self.get_adr_tensor(param_name, env_ids=env_ids).view(-1, 1)
        nonlinearity = torch.exp(torch.pow(adr_value, 2.)) - 1.
        stdev = torch.where(adr_value > 0, nonlinearity, torch.zeros_like(adr_value))
        return torch.randn(len(env_ids), trailing_dim, device=self.device, dtype=torch.float) * stdev
    def get_rna_alpha(self):
        """Mixing weight for the random-network-adversary blend, from ADR."""
        return self.get_adr_tensor('rna_alpha').view(-1, 1)
    def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None):
        """Apply base randomisations, then resample the ADR-driven latency,
        refresh-rate and affine-noise tensors for the selected environments.
        """
        super().apply_randomizations(dr_params, randomize_buf, adr_objective, randomisation_callback=self.randomisation_callback)
        randomize_env_ids = randomize_buf.nonzero(as_tuple=False).squeeze(-1)
        self.action_latency[randomize_env_ids] = self.sample_discrete_adr("action_latency", randomize_env_ids)
        self.cube_pose_refresh_rate[randomize_env_ids] = self.sample_discrete_adr("cube_pose_refresh_rate", randomize_env_ids)
        # Nb - code is to generate uniform from 1 to max_skip_obs (inclusive), but cant use
        # torch.uniform as it doesn't support a different max/min value on each
        # NOTE(review): this line indexes with `randomize_buf` while the others
        # use `randomize_env_ids`; shapes only match if randomize_buf is a
        # boolean mask — confirm.
        self.cube_pose_refresh_offset[randomize_buf] = \
            (torch.rand(randomize_env_ids.shape, device=self.device, dtype=torch.float) \
                * (self.cube_pose_refresh_rate[randomize_env_ids].view(-1).float()) - 0.5).round().long() # offset range shifted back by one
        # Resample the per-env affine action/observation noise parameters.
        self.affine_actions_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_action_scaling", randomize_env_ids, trailing_dim=self.num_actions)
        self.affine_actions_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_action_additive", randomize_env_ids, trailing_dim=self.num_actions)
        self.affine_cube_pose_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_cube_pose_scaling", randomize_env_ids, trailing_dim=7)
        self.affine_cube_pose_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_cube_pose_additive", randomize_env_ids, trailing_dim=7)
        self.affine_dof_pos_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_dof_pos_scaling", randomize_env_ids, trailing_dim=16)
        self.affine_dof_pos_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_dof_pos_additive", randomize_env_ids, trailing_dim=16)
    def create_sim(self):
        super().create_sim()
        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize and self.use_adr:
            adr_objective = torch.zeros(self.num_envs, dtype=float, device=self.device) if self.use_adr else None
            apply_rand_ones = torch.ones(self.num_envs, dtype=bool, device=self.device)
            self.apply_randomizations(self.randomization_params, apply_rand_ones, adr_objective=adr_objective,
                                      randomisation_callback=self.randomisation_callback)
    def apply_action_noise_latency(self):
        """Return actions with ADR-sampled delay plus affine/white noise."""
        # Per-env probability of repeating last step's action outright.
        action_delay_mask = (torch.rand(self.num_envs, device=self.device) < self.get_adr_tensor("action_delay_prob")).view(-1, 1)
        actions = \
            self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * ~action_delay_mask \
            + self.prev_actions * action_delay_mask
        white_noise = self.sample_gaussian_adr("affine_action_white", self.all_env_ids, trailing_dim=self.num_actions)
        actions = self.affine_actions_scaling * actions + self.affine_actions_additive + white_noise
        return actions
    def compute_observations(self):
        """Extend base observations with noisy/delayed randomised variants."""
        super().compute_observations()
        # Envs whose cube-pose observation is due for a refresh this frame.
        update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rate) == 0
        # get white noise
        white_noise_pose = self.sample_gaussian_adr("affine_cube_pose_white", self.all_env_ids, trailing_dim=7)
        # compute noisy object pose as a stochastic affine transform of actual
        noisy_object_pose = self.get_random_cube_observation(
            self.affine_cube_pose_scaling * self.object_pose_wrt_wrist + self.affine_cube_pose_additive + white_noise_pose
        )
        self.obs_object_pose_freq[update_freq] = noisy_object_pose[update_freq]
        # simulate adding delay
        cube_obs_delay_prob = self.get_adr_tensor("cube_obs_delay_prob", self.all_env_ids).view(self.num_envs,)
        update_delay = torch.rand(self.num_envs, device=self.device) < cube_obs_delay_prob
        # update environments that are NOT delayed
        self.obs_object_pose[~update_delay] = self.obs_object_pose_freq[~update_delay]
        white_noise_dof_pos = self.sample_gaussian_adr("affine_dof_pos_white", self.all_env_ids, trailing_dim=16)
        self.dof_pos_randomized = self.affine_dof_pos_scaling * self.dof_pos + self.affine_dof_pos_additive + white_noise_dof_pos
        # NOTE(review): cube_scale is computed but not used below.
        cube_scale = self.cube_random_params[:, 0]
        cube_scale = cube_scale.reshape(-1, 1)
        self.obs_dict["dof_pos_randomized"][:] = unscale(self.dof_pos_randomized, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
        self.obs_dict["object_pose_cam_randomized"][:] = self.obs_object_pose
        self.obs_dict["goal_relative_rot_cam_randomized"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))
        # Expose the sampled delay parameters (normalised) to the policy.
        self.obs_dict["stochastic_delay_params"][:] = torch.stack([
            self.get_adr_tensor("cube_obs_delay_prob"),
            self.cube_pose_refresh_rate.float() / 6.0,
            self.get_adr_tensor("action_delay_prob"),
            self.action_latency.float() / 60.0,
        ], dim=1)
        # Expose the affine-noise parameters to the policy.
        self.obs_dict["affine_params"][:] = torch.cat([
            self.affine_actions_scaling,
            self.affine_actions_additive,
            self.affine_cube_pose_scaling,
            self.affine_cube_pose_additive,
            self.affine_dof_pos_scaling,
            self.affine_dof_pos_additive
        ],
        dim=-1)
    def _read_cfg(self):
        super()._read_cfg()
        # The ADR variant does not pre-scale these observations.
        self.vel_obs_scale = 1.0 # scale factor of velocity based observations
        self.force_torque_obs_scale = 1.0 # scale factor of force/torque based observations
        return
class AllegroHandDextremeManualDR(AllegroHandDextreme):
    """Dextreme variant using manually-scheduled domain randomisation."""
    def _init_post_sim_buffers(self):
        super()._init_post_sim_buffers()
        # We could potentially update this regularly
        self.action_delay_prob = self.action_delay_prob_max * \
            torch.rand(self.cfg["env"]["numEnvs"], dtype=torch.float, device=self.device)
        # inverse refresh rate for each environment
        self.cube_pose_refresh_rate = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device)
        # offset so not all the environments have it each time
        self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device)
    def get_num_obs_dict(self, num_dofs=16):
        """Map observation names to their per-env sizes for this variant."""
        return {"dof_pos": num_dofs,
                "dof_vel": num_dofs,
                "dof_force": num_dofs, # generalised forces
                "object_pose": 7,
                "object_vels": 6,
                "goal_pose": 7,
                "goal_relative_rot": 4,
                "object_pose_cam": 7,
                "goal_relative_rot_cam": 4,
                "last_actions": num_dofs,
                "cube_random_params": 3,
                "hand_random_params": 1,
                "gravity_vec": 3,
                "rot_dist": 2,
                "ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip
                "ft_force_torques": 6 * self.num_fingertips, # wrenches
                }
    def get_rna_alpha(self):
        """RNA mixing weight: uniform in [0, 1) when randomising, else 0."""
        if self.randomize:
            return torch.rand(self.num_envs, 1, device=self.device)
        else:
            return torch.zeros(self.num_envs, 1, device=self.device)
    def create_sim(self):
        super().create_sim()
        # If randomizing, apply once immediately on startup before the fist sim step
        # ADR has its own create_sim and randomisation is called there with appropriate
        # inputs
        if self.randomize and not self.use_adr:
            self.apply_randomizations(self.randomization_params, randomisation_callback=self.randomisation_callback)
    def apply_randomizations(self, dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=None):
        # NOTE(review): the randomize_buf/adr_objective arguments are dropped
        # and None is forwarded instead — confirm this is intended for the
        # manual-DR path.
        super().apply_randomizations(dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=self.randomisation_callback)
    def apply_action_noise_latency(self):
        """Return actions with annealed latency and random per-env delays."""
        # anneal action latency
        if self.randomize:
            self.cur_action_latency = 1.0 / self.action_latency_scheduled_steps \
                * min(self.last_step, self.action_latency_scheduled_steps)
            self.cur_action_latency = min(max(int(self.cur_action_latency), self.action_latency_min), self.action_latency_max)
            self.extras['annealing/cur_action_latency_max'] = self.cur_action_latency
            self.action_latency = torch.randint(0, self.cur_action_latency + 1, \
                size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device)
        # probability of not updating the action this step (on top of the delay)
        action_delay_mask = (torch.rand(self.num_envs, device=self.device) > self.action_delay_prob).view(-1, 1)
        actions_delayed = \
            self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * action_delay_mask \
            + self.prev_actions * ~action_delay_mask
        return actions_delayed
    def compute_observations(self):
        # No extra observation noise beyond the base class in manual-DR mode.
        super().compute_observations()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_hand_reward(
    rew_buf, reset_buf, reset_goal_buf, progress_buf, hold_count_buf, cur_targets, prev_targets, hand_dof_vel, successes, consecutive_successes,
    max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
    dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
    actions, action_penalty_scale: float, action_delta_penalty_scale: float, #max_velocity: float,
    success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
    fall_penalty: float, max_consecutive_successes: int, av_factor: float, num_success_hold_steps: int
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Compute per-env reward, reset/goal-reset flags and success statistics.

    Returns the total reward plus every individual reward term so they can
    be logged/visualised. NOTE(review): penalty terms are *added* with their
    configured scales, so those scales are presumably negative — confirm in
    the task YAML.
    """
    # Distance from the hand to the object
    goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
    # Orientation alignment for the cube in hand and goal cube
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    # Rotation distance: angle of the relative quaternion (clamp guards asin).
    rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))
    dist_rew = goal_dist * dist_reward_scale
    rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale
    action_penalty = action_penalty_scale * torch.sum(actions ** 2, dim=-1)
    action_delta_penalty = action_delta_penalty_scale * torch.sum((cur_targets - prev_targets) ** 2, dim=-1)
    max_velocity = 5.0 #rad/s
    vel_tolerance = 1.0
    velocity_penalty_coef = -0.05
    # todo add actions regularization
    velocity_penalty = velocity_penalty_coef * torch.sum((hand_dof_vel/(max_velocity - vel_tolerance)) ** 2, dim=-1)
    # Find out which envs hit the goal and update successes count
    goal_reached = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
    # Count consecutive steps the goal has been held; reset the count otherwise.
    hold_count_buf = torch.where(goal_reached, hold_count_buf + 1, torch.zeros_like(goal_reached))
    # Only register a success once the goal has been held long enough.
    goal_resets = torch.where(hold_count_buf > num_success_hold_steps, torch.ones_like(reset_goal_buf), reset_goal_buf)
    successes = successes + goal_resets
    # Success bonus: orientation is within `success_tolerance` of goal orientation
    reach_goal_rew = (goal_resets == 1) * reach_goal_bonus
    # Fall penalty: distance to the goal is larger than a threshold
    fall_rew = (goal_dist >= fall_dist) * fall_penalty
    # Check env termination conditions, including maximum success number
    resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
    if max_consecutive_successes > 0:
        # Reset progress buffer on goal envs if max_consecutive_successes > 0
        progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
        resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
    timed_out = progress_buf >= max_episode_length - 1
    resets = torch.where(timed_out, torch.ones_like(resets), resets)
    # Apply penalty for not reaching the goal
    timeout_rew = timed_out * 0.5 * fall_penalty
    # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
    reward = dist_rew + rot_rew + action_penalty + action_delta_penalty + velocity_penalty + reach_goal_rew + fall_rew + timeout_rew
    num_resets = torch.sum(resets)
    finished_cons_successes = torch.sum(successes * resets.float())
    # Exponential moving average of successes per finished episode.
    cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)
    return reward, resets, goal_resets, progress_buf, hold_count_buf, successes, cons_successes, \
        dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew # return individual rewards for visualization
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
    """Compose a random rotation from rotations about the x and y axes.

    Each of ``rand0``/``rand1`` is scaled by pi before being converted to an
    axis-angle quaternion; the two quaternions are then multiplied.
    """
    rot_about_x = quat_from_angle_axis(rand0 * np.pi, x_unit_tensor)
    rot_about_y = quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
    return quat_mul(rot_about_x, rot_about_y)
def unique_cube_rotations_3d() -> List[np.ndarray]:
    """
    Enumerate all 90-degree cube rotations in 3D as 3x3 matrices.

    Every candidate is a signed permutation matrix; keeping only those with
    determinant +1 discards reflections, leaving the 24 proper rotations.
    Based on https://stackoverflow.com/a/70413438/1645784
    """
    rotations = []
    for axes in permutations([0, 1, 2]):
        for signs in itertools.product([-1, 1], repeat=3):
            candidate = np.zeros((3, 3))
            for row, (axis, sign) in enumerate(zip(axes, signs)):
                candidate[row, axis] = sign
            # Determinant is exactly +/-1 for signed permutation matrices,
            # so the equality test is float-safe here.
            if np.linalg.det(candidate) == 1:
                rotations.append(candidate)
    return rotations
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/adr_vec_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
from typing import Dict, Any, Tuple, List, Set
import gym
from gym import spaces
from isaacgym import gymtorch, gymapi
from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \
get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples
import torch
import numpy as np
import operator, random
from copy import deepcopy
from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr
from collections import deque
from enum import Enum
import sys
import abc
from abc import ABC
from omegaconf import ListConfig
class RolloutWorkerModes:
    """Roles a rollout worker can take when collecting data for ADR."""
    ADR_ROLLOUT = 0 # rollout with current ADR params
    ADR_BOUNDARY = 1 # rollout with params on boundaries of ADR, used to decide whether to expand ranges
    TEST_ENV = 2 # rollout with default DR params, used to measure overall success rate. (currently unused)
from isaacgymenvs.tasks.base.vec_task import Env, VecTask
class EnvDextreme(Env):
    """Env base for Dextreme tasks, optionally using dictionary observations."""

    def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool, use_dict_obs: bool):
        Env.__init__(self, config, rl_device, sim_device, graphics_device_id, headless)

        self.use_dict_obs = use_dict_obs
        if self.use_dict_obs:
            # One unbounded Box per named observation tensor.
            self.obs_dims = config["env"]["obsDims"]
            box_per_obs = {}
            for name, dims in self.obs_dims.items():
                unbounded = np.ones(shape=dims) * np.inf
                box_per_obs[name] = spaces.Box(-unbounded, unbounded)
            self.obs_space = spaces.Dict(box_per_obs)
        else:
            # Flat observation/state vectors, as in the stock VecTask.
            self.num_observations = config["env"]["numObservations"]
            self.num_states = config["env"].get("numStates", 0)

            self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.inf, np.ones(self.num_obs) * np.inf)
            self.state_space = spaces.Box(np.ones(self.num_states) * -np.inf, np.ones(self.num_states) * np.inf)

    def get_env_state(self):
        """
        Return serializable environment state to be saved to checkpoint.
        Can be used for stateful training sessions, i.e. with adaptive curriculums.
        """
        return None

    def set_env_state(self, env_state):
        """No-op: this base class keeps no restorable environment state."""
        return
class VecTaskDextreme(EnvDextreme, VecTask):
    def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False):
        """Initialise the `VecTask`.
        Args:
            config: config dictionary for the environment.
            sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'
            graphics_device_id: the device ID to render with.
            headless: Set to False to disable viewer rendering.
            use_dict_obs: if True, observations are exposed as a dict of named
                tensors instead of one flat buffer.
        """
        EnvDextreme.__init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs)

        # Call VecTask's name-mangled private parser directly, since this
        # class does not go through VecTask.__init__.
        self.sim_params = self._VecTask__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"])
        if self.cfg["physics_engine"] == "physx":
            self.physics_engine = gymapi.SIM_PHYSX
        elif self.cfg["physics_engine"] == "flex":
            self.physics_engine = gymapi.SIM_FLEX
        else:
            msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}"
            raise ValueError(msg)

        self.virtual_display = None

        # optimization flags for pytorch JIT
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_set_profiling_executor(False)

        self.gym = gymapi.acquire_gym()

        self.first_randomization = True
        self.randomize = self.cfg["task"]["randomize"]
        # Built-in DR of observations/actions is active only if the matching
        # section exists in the randomisation config.
        self.randomize_obs_builtin = "observations" in self.cfg["task"].get("randomization_params", {})
        self.randomize_act_builtin = "actions" in self.cfg["task"].get("randomization_params", {})

        self.randomized_suffix = "randomized"

        if self.use_dict_obs and self.randomize and self.randomize_obs_builtin:
            # Mirror each randomised observation under "<name>_randomized".
            self.randomisation_obs = set(self.obs_space.keys()).intersection(set(self.randomization_params['observations'].keys()))
            for obs_name in self.randomisation_obs:
                self.obs_space[f"{obs_name}_{self.randomized_suffix}"] = self.obs_space[obs_name]
                self.obs_dims[f"{obs_name}_{self.randomized_suffix}"] = self.obs_dims[obs_name]

            self.obs_randomizations = {}
        elif self.randomize_obs_builtin:
            self.obs_randomizations = None

        self.action_randomizations = None

        self.original_props = {}
        self.actor_params_generator = None
        self.extern_actor_params = {}
        self.last_step = -1
        self.last_rand_step = -1
        for env_id in range(self.num_envs):
            self.extern_actor_params[env_id] = None

        # create envs, sim and viewer
        self.sim_initialized = False
        self.create_sim()
        self.gym.prepare_sim(self.sim)
        self.sim_initialized = True

        self.set_viewer()
        self.allocate_buffers()
def allocate_buffers(self):
    """Allocate the observation, state, reward, and episode-bookkeeping buffers.

    These tensors are written by the task subclasses (which set observations,
    rewards, resets) and read by `step` and related functions.

    Dict-observation mode (`self.use_dict_obs`):
        obs_dict / obs_dict_repeat: one zero tensor per observation key,
        shaped (num_envs, *obs_dims[key]).
    Flat mode:
        obs_buf (num_envs, num_obs) and states_buf (num_envs, num_states).

    In both modes: rew_buf, reset_buf (initialised to ones so every env resets
    on the first step), timeout_buf, progress_buf, randomize_buf, extras.
    """
    if self.use_dict_obs:
        # One buffer per observation key.
        self.obs_dict = {
            k: torch.zeros(
                (self.num_envs, *dims), device=self.device, dtype=torch.float
            )
            for k, dims in self.obs_dims.items()
        }
        # Second buffer with identical layout; used to hold repeated /
        # re-randomised copies of the observations.
        self.obs_dict_repeat = {
            k: torch.zeros(
                (self.num_envs, *dims), device=self.device, dtype=torch.float
            )
            for k, dims in self.obs_dims.items()
        }
    else:
        self.obs_dict = {}
        self.obs_buf = torch.zeros(
            (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)
        self.states_buf = torch.zeros(
            (self.num_envs, self.num_states), device=self.device, dtype=torch.float)
    self.rew_buf = torch.zeros(
        self.num_envs, device=self.device, dtype=torch.float)
    # Ones so that every environment is reset on the very first step.
    self.reset_buf = torch.ones(
        self.num_envs, device=self.device, dtype=torch.long)
    self.timeout_buf = torch.zeros(
        self.num_envs, device=self.device, dtype=torch.long)
    self.progress_buf = torch.zeros(
        self.num_envs, device=self.device, dtype=torch.long)
    self.randomize_buf = torch.zeros(
        self.num_envs, device=self.device, dtype=torch.long)
    self.extras = {}
def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):
    """Create and return an Isaac Gym sim object.

    Args:
        compute_device: ID of the compute device to use.
        graphics_device: ID of the graphics device to use.
        physics_engine: physics backend (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`).
        sim_params: simulation parameters to use.

    Returns:
        The Isaac Gym sim handle. Terminates the process if creation fails.
    """
    sim_handle = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params)
    if sim_handle is not None:
        return sim_handle
    # Creation failed: report and exit the process.
    print("*** Failed to create sim")
    quit()
def get_state(self):
    """Return the state buffer (privileged observations for asymmetric training).

    Raises:
        NotImplementedError: dict-observation mode keeps no separate state buffer.
    """
    if self.use_dict_obs:
        raise NotImplementedError("No states in vec task when `use_dict_obs=True`")
    clamped_states = torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs)
    return clamped_states.to(self.rl_device)
@abc.abstractmethod
def pre_physics_step(self, actions: torch.Tensor):
    """Apply the actions to the environment (e.g. by setting torques, position targets).

    Called once per `step`, before the physics substeps run.

    Args:
        actions: the (already clamped) actions to apply, shape (num_envs, num_actions).
    """
@abc.abstractmethod
def post_physics_step(self):
    """Compute rewards and observations, and reset any environments that require it.

    Called once per `step`, after all physics substeps have completed.
    """
def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
    """Step the physics of the environment.

    Pipeline: randomize actions -> clamp -> pre_physics_step -> simulate
    `control_freq_inv` substeps -> post_physics_step -> timeout bookkeeping
    -> observation randomization -> clamp observations and move to rl_device.

    Args:
        actions: actions to apply, shape (num_envs, num_actions).

    Returns:
        Observations (dict of tensors; in flat mode a dict with key 'obs' and
        optionally 'states'), rewards, resets, info dict (`extras`).
    """
    # randomize actions (built-in action-noise DR, applied before clamping)
    if self.action_randomizations is not None and self.randomize_act_builtin:
        actions = self.action_randomizations['noise_lambda'](actions)
    action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)
    # apply actions
    self.pre_physics_step(action_tensor)
    # step physics and render each frame
    for i in range(self.control_freq_inv):
        self.render()
        self.gym.simulate(self.sim)
    # CPU PhysX needs an explicit blocking fetch to sync results back
    if self.device == 'cpu':
        self.gym.fetch_results(self.sim, True)
    # compute observations, rewards, resets, ...
    self.post_physics_step()
    # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1.
    # Timeout == 1 makes sense only if the reset buffer is 1.
    self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0)
    # randomize observations
    # cannot randomise in the env because of missing suffix in the observation dict;
    # randomized copies are written under "<name>_<randomized_suffix>" keys
    if self.randomize and self.randomize_obs_builtin and self.use_dict_obs and len(self.obs_randomizations) > 0:
        for obs_name, v in self.obs_randomizations.items():
            self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] = v['noise_lambda'](self.obs_dict[obs_name])
            # Random cube pose: optionally swap in a synthetic camera-pose observation
            if hasattr(self, 'enable_random_obs') and self.enable_random_obs and obs_name == 'object_pose_cam':
                self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] \
                    = self.get_random_cube_observation(self.obs_dict[f"{obs_name}_{self.randomized_suffix}"])
        # derive the goal-relative rotation from the already-randomized camera pose
        # (quaternion components assumed at columns 3:7 of the pose -- matches use above)
        if hasattr(self, 'enable_random_obs') and self.enable_random_obs:
            relative_rot = self.get_relative_rot(self.obs_dict['object_pose_cam_'+ self.randomized_suffix][:, 3:7],
                                                 self.obs_dict['goal_pose'][:, 3:7])
            v = self.obs_randomizations['goal_relative_rot_cam']
            self.obs_dict["goal_relative_rot_cam_" + self.randomized_suffix] = v['noise_lambda'](relative_rot)
    elif self.randomize and self.randomize_obs_builtin and not self.use_dict_obs and self.obs_randomizations is not None:
        self.obs_buf = self.obs_randomizations['noise_lambda'](self.obs_buf)
    self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)
    if self.use_dict_obs:
        # clone so the returned tensors are decoupled from the internal buffers
        obs_dict_ret = {
            k: torch.clone(torch.clamp(t, -self.clip_obs, self.clip_obs)).to(
                self.rl_device
            )
            for k, t in self.obs_dict.items()
        }
        return obs_dict_ret, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
    else:
        self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
        # asymmetric actor-critic
        if self.num_states > 0:
            self.obs_dict["states"] = self.get_state()
        return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
def reset(self) -> torch.Tensor:
    """Reset the environment by stepping once with zero actions.

    Returns:
        Observation dictionary, clamped to +/- clip_obs and moved to rl_device.
    """
    # A single zero-action step triggers the per-env reset logic in post_physics_step.
    self.step(self.zero_actions())
    if not self.use_dict_obs:
        self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
        # asymmetric actor-critic: expose privileged states when present
        if self.num_states > 0:
            self.obs_dict["states"] = self.get_state()
        return self.obs_dict
    # dict mode: return decoupled copies of every observation tensor
    return {
        key: torch.clone(
            torch.clamp(tensor, -self.clip_obs, self.clip_obs).to(self.rl_device)
        )
        for key, tensor in self.obs_dict.items()
    }
"""
Domain Randomization methods
"""
def get_env_state(self):
    """Return a serializable snapshot of environment state for checkpointing.

    Enables stateful training sessions (e.g. adaptive curriculums): with ADR
    enabled the current ADR parameter ranges are saved; otherwise nothing is.
    """
    return dict(adr_params=self.adr_params) if self.use_adr else {}
def set_env_state(self, env_state):
    """Restore environment state previously produced by `get_env_state`.

    Only keys that `get_env_state` currently reports are considered; ADR
    parameters are skipped when `adr_load_from_checkpoint` is disabled.
    `None` (no checkpoint state) is a silent no-op.
    """
    if env_state is None:
        return
    for key in self.get_env_state():
        # honour the opt-out for restoring ADR ranges from a checkpoint
        if key == "adr_params" and self.use_adr and not self.adr_load_from_checkpoint:
            print("Skipping loading ADR params from checkpoint...")
            continue
        value = env_state.get(key, None)
        if value is None:
            continue
        self.__dict__[key] = value
        print(f'Loaded env state value {key}:{value}')
    if self.use_adr:
        print(f'ADR Params after loading from checkpoint: {self.adr_params}')
def get_randomization_dict(self, dr_params, obs_shape):
    """Build a noise-generation closure for one observation randomisation spec.

    Args:
        dr_params: randomisation spec with keys "distribution" ('gaussian' or
            'uniform'), "operation" ('additive' or 'scaling'), "range", and
            optionally "range_correlated", "schedule", "schedule_steps",
            "apply_white_noise" (probability, non-ADR mode only).
        obs_shape: trailing (per-env) shape of the observation tensor.

    Returns:
        dict with:
            'noise_lambda': callable mapping an observation tensor to a noisy
                copy (correlated noise + per-call white noise).
            'corr_val': the correlated noise sample drawn here; it is fixed in
                the closure, so correlated noise stays constant between calls.

    Raises:
        NotImplementedError: for an unknown "distribution" value.
    """
    dist = dr_params["distribution"]
    op_type = dr_params["operation"]
    sched_type = dr_params["schedule"] if "schedule" in dr_params else None
    sched_step = dr_params["schedule_steps"] if "schedule" in dr_params else None
    op = operator.add if op_type == 'additive' else operator.mul
    if not self.use_adr:
        apply_white_noise_prob = dr_params.get("apply_white_noise", 0.5)
    # Schedule scales the noise magnitude in over training steps.
    if sched_type == 'linear':
        sched_scaling = 1.0 / sched_step * \
            min(self.last_step, sched_step)
    elif sched_type == 'constant':
        sched_scaling = 0 if self.last_step < sched_step else 1
    else:
        sched_scaling = 1
    if dist == 'gaussian':
        mu, var = dr_params["range"]
        mu_corr, var_corr = dr_params.get("range_correlated", [0., 0.])
        if op_type == 'additive':
            mu *= sched_scaling
            var *= sched_scaling
            mu_corr *= sched_scaling
            var_corr *= sched_scaling
        elif op_type == 'scaling':
            var = var * sched_scaling  # scale up var over time
            mu = mu * sched_scaling + 1.0 * \
                (1.0 - sched_scaling)  # linearly interpolate towards identity (1.0)
            var_corr = var_corr * sched_scaling  # scale up var over time
            mu_corr = mu_corr * sched_scaling + 1.0 * \
                (1.0 - sched_scaling)  # linearly interpolate
        local_params = {
            'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr,
            'corr': torch.randn(self.num_envs, *obs_shape, device=self.device)
        }
        if not self.use_adr:
            # per-env Bernoulli mask deciding which envs receive white noise at all
            local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float()
        def noise_lambda(tensor, params=local_params):
            # read via the bound default `params` (was `local_params['corr']`;
            # equivalent since params defaults to local_params, but now
            # consistent with the uniform branch below)
            corr = params['corr']
            corr = corr * params['var_corr'] + params['mu_corr']
            if self.use_adr:
                return op(
                    tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])
            else:
                # NOTE(review): .view(-1, 1) assumes a 2-D (num_envs, obs_dim)
                # tensor so the mask broadcasts per env -- confirm for new obs.
                return op(
                    tensor, corr + torch.randn_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * params['var'] + params['mu'])
    elif dist == 'uniform':
        lo, hi = dr_params["range"]
        lo_corr, hi_corr = dr_params.get("range_correlated", [0., 0.])
        if op_type == 'additive':
            lo *= sched_scaling
            hi *= sched_scaling
            lo_corr *= sched_scaling
            hi_corr *= sched_scaling
        elif op_type == 'scaling':
            lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)
            hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)
            lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
            hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
        local_params = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr,
                        'corr': torch.rand(self.num_envs, *obs_shape, device=self.device)
                        }
        if not self.use_adr:
            local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float()
        def noise_lambda(tensor, params=local_params):
            corr = params['corr']
            corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']
            if self.use_adr:
                return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])
            else:
                return op(tensor, corr + torch.rand_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * (params['hi'] - params['lo']) + params['lo'])
    else:
        raise NotImplementedError
    return {'noise_lambda': noise_lambda, 'corr_val': local_params['corr']}
class ADRVecTask(VecTaskDextreme):
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False):
    """Set up ADR (Automatic Domain Randomization) bookkeeping, then initialise the base task.

    NOTE(review): this reads `self.cfg` (and, inside the ADR branch,
    `self.randomize`) BEFORE `super().__init__()` runs at the bottom -- it
    assumes the concrete subclass assigns these attributes prior to calling
    this constructor. Confirm against the subclass before reusing.
    """
    self.adr_cfg = self.cfg["task"].get("adr", {})
    self.use_adr = self.adr_cfg.get("use_adr", False)
    self.all_env_ids = torch.tensor(list(range(self.cfg["env"]["numEnvs"])), dtype=torch.long, device=sim_device)
    if self.use_adr:
        # ADR hyper-parameters (see https://arxiv.org/pdf/1910.07113.pdf)
        self.worker_adr_boundary_fraction = self.adr_cfg["worker_adr_boundary_fraction"]
        self.adr_queue_threshold_length = self.adr_cfg["adr_queue_threshold_length"]
        self.adr_objective_threshold_low = self.adr_cfg["adr_objective_threshold_low"]
        self.adr_objective_threshold_high = self.adr_cfg["adr_objective_threshold_high"]
        self.adr_extended_boundary_sample = self.adr_cfg["adr_extended_boundary_sample"]
        self.adr_rollout_perf_alpha = self.adr_cfg["adr_rollout_perf_alpha"]
        self.update_adr_ranges = self.adr_cfg["update_adr_ranges"]
        self.adr_clear_other_queues = self.adr_cfg["clear_other_queues"]
        self.adr_rollout_perf_last = None
        self.adr_load_from_checkpoint = self.adr_cfg["adr_load_from_checkpoint"]
        assert self.randomize, "Worker mode currently only supported when Domain Randomization is turned on"
        # 0 = rollout worker
        # 1 = ADR worker (see https://arxiv.org/pdf/1910.07113.pdf Section 5)
        # 2 = eval worker
        # rollout type is selected when an environment gets randomized
        self.worker_types = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device)
        # per-parameter tensors of currently-sampled ADR values (tensorised params only)
        self.adr_tensor_values = {}
        self.adr_params = self.adr_cfg["params"]
        self.adr_params_keys = list(self.adr_params.keys())
        # list of params which rely on patching the built in domain randomisation
        self.adr_params_builtin_keys = []
        for k in self.adr_params:
            # every param starts at its configured initial range
            self.adr_params[k]["range"] = self.adr_params[k]["init_range"]
            if "limits" not in self.adr_params[k]:
                self.adr_params[k]["limits"] = [None, None]
            if "delta_style" in self.adr_params[k]:
                assert self.adr_params[k]["delta_style"] in ["additive", "multiplicative"]
            else:
                self.adr_params[k]["delta_style"] = "additive"
            if "range_path" in self.adr_params[k]:
                self.adr_params_builtin_keys.append(k)
            else:  # normal tensorised ADR param
                param_type = self.adr_params[k].get("type", "uniform")
                dtype = torch.long if param_type == "categorical" else torch.float
                self.adr_tensor_values[k] = torch.zeros(self.cfg["env"]["numEnvs"], device=sim_device, dtype=dtype)
        self.num_adr_params = len(self.adr_params)
        # modes for ADR workers.
        # there are 2n modes, where mode 2n is lower range and mode 2n+1 is upper range for DR parameter n
        self.adr_modes = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device)
        # one bounded success-history queue per (param, bound) pair
        self.adr_objective_queues = [deque(maxlen=self.adr_queue_threshold_length) for _ in range(2*self.num_adr_params)]
    super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs)
def get_current_adr_params(self, dr_params):
    """Return a deep copy of `dr_params` with the live ADR ranges patched in.

    Only parameters registered in `adr_params_builtin_keys` (those carrying a
    "range_path" into the built-in DR config) are patched.
    """
    patched_params = copy.deepcopy(dr_params)
    for name in self.adr_params_builtin_keys:
        spec = self.adr_params[name]
        nested_dict_set_attr(patched_params, spec["range_path"], spec["range"])
    return patched_params
def get_dr_params_by_env_id(self, env_id, default_dr_params, current_adr_params):
    """Returns the (dictionary) DR params for a particular env ID.

    (only applies to env randomisations; for tensor randomisations see `sample_adr_tensor`.)

    Params:
        env_id: which env ID to get the dict for.
        default_dr_params: environment default DR params.
        current_adr_params: current dictionary of DR params with current ADR ranges patched in.
    Returns:
        a patched dictionary with the env randomisations corresponding to the env ID.
    Raises:
        NotImplementedError: for an unrecognised worker type.
    """
    env_type = self.worker_types[env_id]
    if env_type == RolloutWorkerModes.ADR_ROLLOUT:  # rollout worker, uses current ADR params
        return current_adr_params
    elif env_type == RolloutWorkerModes.ADR_BOUNDARY:  # ADR worker, substitute upper or lower bound as entire range for this env
        adr_mode = int(self.adr_modes[env_id])
        env_adr_params = copy.deepcopy(current_adr_params)
        adr_id = adr_mode // 2  # which adr parameter
        adr_bound = adr_mode % 2  # 0 = lower, 1 = upper
        param_name = self.adr_params_keys[adr_id]
        # this DR parameter is randomised as a tensor, not through the normal DR api,
        # so there is nothing to patch into the dictionary for this env
        if not param_name in self.adr_params_builtin_keys:
            return env_adr_params
        # pin the whole range to the single boundary value being evaluated
        if self.adr_extended_boundary_sample:
            boundary_value = self.adr_params[param_name]["next_limits"][adr_bound]
        else:
            boundary_value = self.adr_params[param_name]["range"][adr_bound]
        new_range = [boundary_value, boundary_value]
        nested_dict_set_attr(env_adr_params, self.adr_params[param_name]["range_path"], new_range)
        return env_adr_params
    elif env_type == RolloutWorkerModes.TEST_ENV:  # eval worker, uses default fixed params
        return default_dr_params
    else:
        raise NotImplementedError
def modify_adr_param(self, param, direction, adr_param_dict, param_limit=None):
    """Move an ADR parameter one delta step up or down, clamped to a limit.

    Consolidates the previously duplicated 'up'/'down' branches; behaviour is
    unchanged.

    Args:
        param: current value of the param.
        direction: direction to move the ADR parameter ('up' or 'down').
        adr_param_dict: ADR parameter spec; "delta" is the step size and
            "delta_style" ('additive' or 'multiplicative') how it is applied.
        param_limit: bound on the result (upper bound for 'up', lower for 'down').

    Returns:
        (new_val, changed): the updated value and whether it actually moved
        (False when already clamped at the limit).

    Raises:
        NotImplementedError: for an unknown direction or delta style.
    """
    op = adr_param_dict["delta_style"]
    delta = adr_param_dict["delta"]
    if direction not in ('up', 'down'):
        raise NotImplementedError
    moving_up = direction == 'up'
    if op == "additive":
        new_val = param + delta if moving_up else param - delta
    elif op == "multiplicative":
        assert delta > 1.0, "Must have delta>1 for multiplicative ADR update."
        new_val = param * delta if moving_up else param / delta
    else:
        raise NotImplementedError
    if param_limit is not None:
        # 'up' may not exceed the limit; 'down' may not fall below it
        new_val = min(new_val, param_limit) if moving_up else max(new_val, param_limit)
    # tolerance guard: treat a sub-1e-9 move (e.g. clamped at the limit) as no change
    changed = abs(new_val - param) > 1e-9
    return new_val, changed
@staticmethod
def env_ids_from_mask(mask):
    """Convert a boolean env mask into a 1-D tensor of the indices that are True."""
    return mask.nonzero(as_tuple=False).squeeze(-1)
def sample_adr_tensor(self, param_name, env_ids=None):
    """Samples the values for a particular ADR parameter as a tensor.

    Sets the value as a side-effect in the dictionary of current adr tensors
    (`self.adr_tensor_values`).

    Rollout workers sample uniformly inside the current ADR range; boundary
    workers assigned to this parameter are pinned to the lower or upper bound
    (or to the extended "next_limits" when `adr_extended_boundary_sample`).

    Args:
        param_name: name of the parameter to sample.
        env_ids: env ids to sample (default: all envs).
    Returns:
        (len(env_ids),) tensor of sampled parameter values.
    Raises:
        NotImplementedError: for an unknown distribution type.
    """
    if env_ids is None:
        env_ids = self.all_env_ids
    # boolean mask over ALL envs marking which ones we are (re)sampling
    sample_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
    sample_mask[env_ids] = True
    params = self.adr_params[param_name]
    param_range = params["range"]
    next_limits = params.get("next_limits", None)
    param_type = params.get("type", "uniform")
    # mode 2n evaluates the lower bound of parameter n; mode 2n+1 the upper
    n = self.adr_params_keys.index(param_name)
    low_idx = 2*n
    high_idx = 2*n + 1
    adr_workers_low_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) & sample_mask
    adr_workers_high_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) & sample_mask
    # everything else being sampled is a plain rollout worker
    rollout_workers_mask = (~adr_workers_low_mask) & (~adr_workers_high_mask) & sample_mask
    rollout_workers_env_ids = self.env_ids_from_mask(rollout_workers_mask)
    if param_type == "uniform":
        result = torch.zeros((len(env_ids),), device=self.device, dtype=torch.float)
        uniform_noise_rollout_workers = \
            torch.rand((rollout_workers_env_ids.shape[0],), device=self.device, dtype=torch.float) \
            * (param_range[1] - param_range[0]) + param_range[0]
        # masks are over all envs; indexing them with env_ids realigns to `result`
        result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers
        if self.adr_extended_boundary_sample:
            result[adr_workers_low_mask[env_ids]] = next_limits[0]
            result[adr_workers_high_mask[env_ids]] = next_limits[1]
        else:
            result[adr_workers_low_mask[env_ids]] = param_range[0]
            result[adr_workers_high_mask[env_ids]] = param_range[1]
    elif param_type == "categorical":
        result = torch.zeros((len(env_ids), ), device=self.device, dtype=torch.long)
        # randint upper bound is exclusive, hence the +1 to include param_range[1]
        uniform_noise_rollout_workers = torch.randint(int(param_range[0]), int(param_range[1])+1, size=(rollout_workers_env_ids.shape[0], ), device=self.device)
        result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers
        result[adr_workers_low_mask[env_ids]] = int(next_limits[0] if self.adr_extended_boundary_sample else param_range[0])
        result[adr_workers_high_mask[env_ids]] = int(next_limits[1] if self.adr_extended_boundary_sample else param_range[1])
    else:
        raise NotImplementedError(f"Unknown distribution type {param_type}")
    self.adr_tensor_values[param_name][env_ids] = result
    return result
def get_adr_tensor(self, param_name, env_ids=None):
    """Return the current sampled values of a tensorised ADR parameter.

    With `env_ids` given, only those envs' values are returned.
    """
    values = self.adr_tensor_values[param_name]
    return values if env_ids is None else values[env_ids]
def recycle_envs(self, recycle_envs):
    """Recycle the workers that have finished their episodes or are to be reassigned.

    Each recycled env is reassigned to rollout or ADR-boundary duty with
    probability `worker_adr_boundary_fraction`, and boundary workers get a
    freshly drawn ADR mode (which parameter/bound they will evaluate).

    Args:
        recycle_envs: env_ids of environments to be recycled.
    """
    worker_types_rand = torch.rand(len(recycle_envs), device=self.device, dtype=torch.float)
    new_worker_types = torch.zeros(len(recycle_envs), device=self.device, dtype=torch.long)
    # Choose new types for workers
    new_worker_types[(worker_types_rand < self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_ROLLOUT
    new_worker_types[(worker_types_rand >= self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_BOUNDARY
    self.worker_types[recycle_envs] = new_worker_types
    # resample the ADR modes (which boundary values to sample) for the given environments (only applies to ADR_BOUNDARY mode)
    self.adr_modes[recycle_envs] = torch.randint(0, self.num_adr_params * 2, (len(recycle_envs),), dtype=torch.long, device=self.device)
def adr_update(self, rand_envs, adr_objective):
    """Performs ADR update step (implements algorithm 1 from https://arxiv.org/pdf/1910.07113.pdf).

    For each ADR parameter: collect the objective values of boundary workers
    that just finished an episode; once a bound's queue is full, widen the
    range if the mean objective is high and shrink it if low. Finished envs
    are then recycled into new worker roles and their ADR tensors resampled.

    Args:
        rand_envs: env ids that finished an episode this step.
        adr_objective: per-env scalar objective (e.g. consecutive successes).
    """
    rand_env_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
    rand_env_mask[rand_envs] = True
    total_nats = 0.0  # measuring entropy (sum of log range widths)
    if self.update_adr_ranges:
        # shuffle so no parameter is systematically favoured when queues clear early
        adr_params_iter = list(enumerate(self.adr_params))
        random.shuffle(adr_params_iter)
        # only recycle once
        already_recycled = False
        for n, adr_param_name in adr_params_iter:
            # mode index for environments evaluating lower ADR bound
            low_idx = 2*n
            # mode index for environments evaluating upper ADR bound
            high_idx = 2*n+1
            adr_workers_low = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx)
            adr_workers_high = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx)
            # environments which will be evaluated for ADR (finished the episode) and which are evaluating performance at the
            # lower and upper boundaries
            adr_done_low = rand_env_mask & adr_workers_low
            adr_done_high = rand_env_mask & adr_workers_high
            # objective value at environments which have been evaluating the lower bound of ADR param n
            objective_low_bounds = adr_objective[adr_done_low]
            # objective value at environments which have been evaluating the upper bound of ADR param n
            objective_high_bounds = adr_objective[adr_done_high]
            # add the success of objectives to queues
            self.adr_objective_queues[low_idx].extend(objective_low_bounds.cpu().numpy().tolist())
            self.adr_objective_queues[high_idx].extend(objective_high_bounds.cpu().numpy().tolist())
            low_queue = self.adr_objective_queues[low_idx]
            high_queue = self.adr_objective_queues[high_idx]
            mean_low = np.mean(low_queue) if len(low_queue) > 0 else 0.
            mean_high = np.mean(high_queue) if len(high_queue) > 0 else 0.
            current_range = self.adr_params[adr_param_name]["range"]
            range_lower = current_range[0]
            range_upper = current_range[1]
            range_limits = self.adr_params[adr_param_name]["limits"]
            init_range = self.adr_params[adr_param_name]["init_range"]
            # one step beyond the current ADR values
            [next_limit_lower, next_limit_upper] = self.adr_params[adr_param_name].get("next_limits", [None, None])
            changed_low, changed_high = False, False
            # -- lower bound update: only once enough boundary episodes are queued --
            if len(low_queue) >= self.adr_queue_threshold_length:
                changed_low = False
                if mean_low < self.adr_objective_threshold_low:
                    # performance too low at the lower bound -> make the task easier:
                    # increase lower bound (but never above the initial range)
                    range_lower, changed_low = self.modify_adr_param(
                        range_lower, 'up', self.adr_params[adr_param_name], param_limit=init_range[0]
                    )
                elif mean_low > self.adr_objective_threshold_high:
                    # performance high -> widen: reduce lower bound
                    range_lower, changed_low = self.modify_adr_param(
                        range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0]
                    )
                # if the ADR boundary is changed, workers working from the old parameters become invalid.
                # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary
                if changed_low:
                    print(f'Changing {adr_param_name} lower bound. Queue length {len(self.adr_objective_queues[low_idx])}. Mean perf: {mean_low}. Old val: {current_range[0]}. New val: {range_lower}')
                    self.adr_objective_queues[low_idx].clear()
                    self.worker_types[adr_workers_low] = RolloutWorkerModes.ADR_ROLLOUT
            # -- upper bound update, symmetric to the lower bound --
            if len(high_queue) >= self.adr_queue_threshold_length:
                if mean_high < self.adr_objective_threshold_low:
                    # reduce upper bound
                    range_upper, changed_high = self.modify_adr_param(
                        range_upper, 'down', self.adr_params[adr_param_name], param_limit=init_range[1]
                    )
                elif mean_high > self.adr_objective_threshold_high:
                    # increase upper bound
                    range_upper, changed_high = self.modify_adr_param(
                        range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1]
                    )
                # if the ADR boundary is changed, workers working from the old parameters become invalid.
                # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary
                if changed_high:
                    print(f'Changing upper bound {adr_param_name}. Queue length {len(self.adr_objective_queues[high_idx])}. Mean perf {mean_high}. Old val: {current_range[1]}. New val: {range_upper}')
                    self.adr_objective_queues[high_idx].clear()
                    self.worker_types[adr_workers_high] = RolloutWorkerModes.ADR_ROLLOUT
            # refresh the "one step beyond" probe limits whenever a bound moved
            if changed_low or next_limit_lower is None:
                next_limit_lower, _ = self.modify_adr_param(range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0])
            if changed_high or next_limit_upper is None:
                next_limit_upper, _ = self.modify_adr_param(range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1])
            self.adr_params[adr_param_name]["range"] = [range_lower, range_upper]
            # entropy bookkeeping (parameters with delta < 1e-9 are treated as disabled)
            if not self.adr_params[adr_param_name]["delta"] < 1e-9:  # disabled
                upper_lower_delta = range_upper - range_lower
                if upper_lower_delta < 1e-3:
                    upper_lower_delta = 1e-3
                nats = np.log(upper_lower_delta)
                total_nats += nats
            self.adr_params[adr_param_name]["next_limits"] = [next_limit_lower, next_limit_upper]
            if hasattr(self, 'extras') and ((changed_high or changed_low) or self.last_step % 100 == 0):  # only log so often to prevent huge log files with ADR vars
                self.extras[f'adr/params/{adr_param_name}/lower'] = range_lower
                self.extras[f'adr/params/{adr_param_name}/upper'] = range_upper
                self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/value'] = mean_low
                self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/queue_len'] = len(low_queue)
                self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/value'] = mean_high
                self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/queue_len'] = len(high_queue)
            # optionally invalidate ALL boundary evaluations after any change
            if self.adr_clear_other_queues and (changed_low or changed_high):
                for q in self.adr_objective_queues:
                    q.clear()
                recycle_envs = torch.nonzero((self.worker_types == RolloutWorkerModes.ADR_BOUNDARY), as_tuple=False).squeeze(-1)
                self.recycle_envs(recycle_envs)
                already_recycled = True
                break
        if hasattr(self, 'extras') and self.last_step % 100 == 0:  # only log so often to prevent huge log files with ADR vars
            # exponential moving average of rollout-worker performance
            mean_perf = adr_objective[rand_env_mask & (self.worker_types == RolloutWorkerModes.ADR_ROLLOUT)].mean()
            if self.adr_rollout_perf_last is None:
                self.adr_rollout_perf_last = mean_perf
            else:
                self.adr_rollout_perf_last = self.adr_rollout_perf_last * self.adr_rollout_perf_alpha + mean_perf * (1-self.adr_rollout_perf_alpha)
            self.extras[f'adr/objective_perf/rollouts'] = self.adr_rollout_perf_last
            self.extras[f'adr/npd'] = total_nats / len(self.adr_params)
        if not already_recycled:
            self.recycle_envs(rand_envs)
    else:
        # ADR range updates disabled: every finished env becomes a plain rollout worker
        self.worker_types[rand_envs] = RolloutWorkerModes.ADR_ROLLOUT
    # ensure tensors get re-sampled before new episode
    for k in self.adr_tensor_values:
        self.sample_adr_tensor(k, rand_envs)
def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None):
    """Apply domain randomizations to the environment.

    Note that currently we can only apply randomizations only on resets, due to current PhysX limitations.

    Supports two modes selected by ``self.use_adr``:
      * ADR: parameter ranges are adapted first via ``self.adr_update``, then
        per-env parameter dictionaries are built from the current ADR state.
      * Manual DR: fixed ranges from ``dr_params`` are applied to envs whose
        ``self.randomize_buf`` counter exceeded the randomization frequency.

    Args:
        dr_params: parameters for domain randomization to use.
        randomize_buf: selective randomisation of environments
        adr_objective: consecutive successes scalar
        randomisation_callback: callbacks we may want to use from the environment class
    """
    # If we don't have a randomization frequency, randomize every step
    rand_freq = dr_params.get("frequency", 1)

    # First, determine what to randomize:
    #   - non-environment parameters when > frequency steps have passed since the last non-environment
    #   - physical environments in the reset buffer, which have exceeded the randomization frequency threshold
    #   - on the first call, randomize everything
    self.last_step = self.gym.get_frame_count(self.sim)

    # for ADR: update boundaries first, then select the envs to randomize
    if self.use_adr:
        if self.first_randomization:
            adr_env_ids = list(range(self.num_envs))
        else:
            adr_env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist()
        self.adr_update(adr_env_ids, adr_objective)
        current_adr_params = self.get_current_adr_params(dr_params)

        if self.first_randomization:
            do_nonenv_randomize = True
            env_ids = list(range(self.num_envs))
        else:
            do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
            env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist()

        if do_nonenv_randomize:
            self.last_rand_step = self.last_step

    # For Manual DR
    if not self.use_adr:
        if self.first_randomization:
            do_nonenv_randomize = True
            env_ids = list(range(self.num_envs))
        else:
            # randomise if the number of steps since the last randomization is greater than the randomization frequency
            do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
            # envs are eligible only if they are both due (counter >= freq) and being reset
            rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))
            rand_envs = torch.logical_and(rand_envs, self.reset_buf)
            env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()
            self.randomize_buf[rand_envs] = 0

        if do_nonenv_randomize:
            self.last_rand_step = self.last_step

    # We don't use it for ADR(!)
    if self.randomize_act_builtin:
        self.action_randomizations = self.get_randomization_dict(dr_params['actions'], (self.num_actions,))

    # built-in observation noise: one dict per observation key for dict obs spaces
    if self.use_dict_obs and self.randomize_obs_builtin:
        for nonphysical_param in self.randomisation_obs:
            self.obs_randomizations[nonphysical_param] = self.get_randomization_dict(dr_params['observations'][nonphysical_param],
                                                                                     self.obs_space[nonphysical_param].shape)
    elif self.randomize_obs_builtin:
        self.observation_randomizations = self.get_randomization_dict(dr_params['observations'], self.obs_space.shape)

    param_setters_map = get_property_setter_map(self.gym)
    param_setter_defaults_map = get_default_setter_args(self.gym)
    param_getters_map = get_property_getter_map(self.gym)

    # On first iteration, check the number of buckets
    if self.first_randomization:
        check_buckets(self.gym, self.envs, dr_params)

    # Randomize non-environment parameters e.g. gravity, timestep, rest_offset etc.
    if "sim_params" in dr_params and do_nonenv_randomize:
        prop_attrs = dr_params["sim_params"]
        prop = self.gym.get_sim_params(self.sim)

        # Get the list of original paramters set in the yaml and we do add/scale
        # on these values
        if self.first_randomization:
            self.original_props["sim_params"] = {
                attr: getattr(prop, attr) for attr in dir(prop)}

        # Get prop attrs randomised by add/scale of the original_props values
        # attr is [gravity, reset_offset, ... ]
        # attr_randomization_params can be {'range': [0, 0.5], 'operation': 'additive', 'distribution': 'gaussian'}
        # therefore, prop.val = original_val <operator> random sample
        # where operator is add/mul
        for attr, attr_randomization_params in prop_attrs.items():
            apply_random_samples(
                prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step)
            if attr == "gravity":
                randomisation_callback('gravity', prop.gravity)

        # if self.last_step % 10 == 0 and self.last_step > 0:
        #     print('random rest offset = ', prop.physx.rest_offset)
        self.gym.set_sim_params(self.sim, prop)

    # If self.actor_params_generator is initialized: use it to
    # sample actor simulation params. This gives users the
    # freedom to generate samples from arbitrary distributions,
    # e.g. use full-covariance distributions instead of the DR's
    # default of treating each simulation parameter independently.
    extern_offsets = {}
    if self.actor_params_generator is not None:
        for env_id in env_ids:
            self.extern_actor_params[env_id] = \
                self.actor_params_generator.sample()
            extern_offsets[env_id] = 0

    # Randomize physical environments:
    # randomise all attributes of each actor (hand, cube etc..)
    # actor_properties are (stiffness, damping etc..)
    # Loop over envs, then loop over actors, then loop over their props
    # and lastly loop over the ranges of the params
    for i_, env_id in enumerate(env_ids):
        if self.use_adr:
            # need to generate a custom dictionary for ADR parameters
            env_dr_params = self.get_dr_params_by_env_id(env_id, dr_params, current_adr_params)
        else:
            env_dr_params = dr_params

        for actor, actor_properties in env_dr_params["actor_params"].items():
            if self.first_randomization and i_ % 1000 == 0:
                print(f'Initializing domain randomization for {actor} env={i_}')
            env = self.envs[env_id]
            handle = self.gym.find_actor_handle(env, actor)
            extern_sample = self.extern_actor_params[env_id]

            # randomise dof_props, rigid_body, rigid_shape properties
            # all obtained from the YAML file
            # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties
            #          prop_attrs:
            #               {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'}
            #               {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'}
            for prop_name, prop_attrs in actor_properties.items():
                # These properties are to do with whole obj mesh related
                if prop_name == 'color':
                    num_bodies = self.gym.get_actor_rigid_body_count(
                        env, handle)
                    for n in range(num_bodies):
                        self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,
                                                      gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))
                    continue

                if prop_name == 'scale':
                    # 'setup_only' attributes are randomised once before the sim is initialised
                    setup_only = prop_attrs.get('setup_only', False)
                    if (setup_only and not self.sim_initialized) or not setup_only:
                        attr_randomization_params = prop_attrs
                        sample = generate_random_samples(attr_randomization_params, 1,
                                                         self.last_step, None)
                        og_scale = 1
                        if attr_randomization_params['operation'] == 'scaling':
                            new_scale = og_scale * sample
                        elif attr_randomization_params['operation'] == 'additive':
                            new_scale = og_scale + sample
                        self.gym.set_actor_scale(env, handle, new_scale)
                        # record randomised scale for downstream consumers, when present
                        if hasattr(self, 'cube_random_params') and actor == 'object':
                            randomisation_callback('scale', new_scale, actor=actor, env_id=env_id)
                        if hasattr(self, 'hand_random_params') and actor == 'object':
                            self.hand_random_params[env_id, 0] = new_scale.mean()
                    continue

                # Get the properties from the sim API
                # prop_name is dof_properties, rigid_body_properties, rigid_shape_properties
                prop = param_getters_map[prop_name](env, handle)
                set_random_properties = True

                # if list it is likely to be
                #  - rigid_body_properties
                #  - rigid_shape_properties
                if isinstance(prop, list):
                    # Read the original values; remember that
                    # randomised_prop_val = original_prop_val <operator> random sample
                    if self.first_randomization:
                        self.original_props[prop_name] = [
                            {attr: getattr(p, attr) for attr in dir(p)} for p in prop]

                    # Loop over all the rigid bodies of the actor and then the corresponding
                    # attribute ranges
                    for attr, attr_randomization_params_cfg in prop_attrs.items():
                        for body_idx, (p, og_p) in enumerate(zip(prop, self.original_props[prop_name])):
                            curr_prop = p
                            if self.use_adr and isinstance(attr_randomization_params_cfg['range'], dict):
                                # we have custom ranges for different bodies in this actor
                                # first: let's find out which group of bodies this body belongs to
                                body_group_name = None
                                for group_name, list_of_bodies in self.custom_body_handles[actor].items():
                                    if body_idx in list_of_bodies:
                                        body_group_name = group_name
                                        break
                                if body_group_name is None:
                                    raise ValueError(
                                        f'Could not find body group for body {body_idx} in actor {actor}.\n'
                                        f'Body groups: {self.custom_body_handles}',
                                    )
                                # now: get the range for this body group
                                rand_range = attr_randomization_params_cfg['range'][body_group_name]
                                attr_randomization_params = copy.deepcopy(attr_randomization_params_cfg)
                                attr_randomization_params['range'] = rand_range
                                # we need to sore original params as ADR generated samples need to be bucketed
                                original_randomization_params = copy.deepcopy(dr_params['actor_params'][actor][prop_name][attr])
                                original_randomization_params['range'] = original_randomization_params['range'][body_group_name]
                            else:
                                attr_randomization_params = attr_randomization_params_cfg
                                # we need to sore original params as ADR generated samples need to be bucketed
                                original_randomization_params = dr_params['actor_params'][actor][prop_name][attr]

                            assert isinstance(attr_randomization_params['range'], (list, tuple, ListConfig)), \
                                f'range for {prop_name} must be a list or tuple, got {attr_randomization_params["range"]}'

                            # attrs:
                            # if rigid_body_properties, it is mass
                            # if rigid_shape_properties it is friction etc.
                            setup_only = attr_randomization_params.get('setup_only', False)
                            if (setup_only and not self.sim_initialized) or not setup_only:
                                smpl = None
                                if self.actor_params_generator is not None:
                                    smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                        extern_sample, extern_offsets[env_id], curr_prop, attr)
                                # generate the samples and add them to props
                                # e.g. curr_prop is rigid_body_properties
                                #      attr is 'mass' (string)
                                #      new_mass_val = getattr(curr_prop, 'mass') <operator> sample
                                apply_random_samples(
                                    curr_prop, og_p, attr, attr_randomization_params,
                                    self.last_step, smpl,
                                    bucketing_randomization_params=original_randomization_params)
                                # record the randomised cube params, when the env tracks them
                                if hasattr(self, 'cube_random_params') and actor == 'object':
                                    assert len(self.original_props[prop_name]) == 1
                                    if attr == 'mass':
                                        self.cube_random_params[env_id, 1] = p.mass
                                    elif attr == 'friction':
                                        self.cube_random_params[env_id, 2] = p.friction
                            else:
                                set_random_properties = False

                # if it is not a list, it is likely an array
                # which means it is for dof_properties
                else:
                    if self.first_randomization:
                        self.original_props[prop_name] = deepcopy(prop)

                    # attr is damping, stiffness etc.
                    # attr_randomization_params is range, distr, schedule
                    for attr, attr_randomization_params in prop_attrs.items():
                        setup_only = attr_randomization_params.get('setup_only', False)
                        if (setup_only and not self.sim_initialized) or not setup_only:
                            smpl = None
                            if self.actor_params_generator is not None:
                                smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                    extern_sample, extern_offsets[env_id], prop, attr)
                            # we need to sore original params as ADR generated samples need to be bucketed
                            original_randomization_params = dr_params['actor_params'][actor][prop_name][attr]
                            # generate random samples and add them to props;
                            # the props are written back into the sim below
                            apply_random_samples(
                                prop, self.original_props[prop_name], attr,
                                attr_randomization_params, self.last_step, smpl,
                                bucketing_randomization_params=original_randomization_params)
                        else:
                            set_random_properties = False

                if set_random_properties:
                    setter = param_setters_map[prop_name]
                    default_args = param_setter_defaults_map[prop_name]
                    setter(env, handle, prop, *default_args)

    if self.actor_params_generator is not None:
        for env_id in env_ids:  # check that we used all dims in sample
            if extern_offsets[env_id] > 0:
                extern_sample = self.extern_actor_params[env_id]
                if extern_offsets[env_id] != extern_sample.shape[0]:
                    print('env_id', env_id,
                          'extern_offset', extern_offsets[env_id],
                          'vs extern_sample.shape', extern_sample.shape)
                    raise Exception("Invalid extern_sample size")

    self.first_randomization = False
| 60,236 | Python | 47.151079 | 204 | 0.55671 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/base/vec_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import time
from datetime import datetime
from os.path import join
from typing import Dict, Any, Tuple, List, Set
import gym
from gym import spaces
from isaacgym import gymtorch, gymapi
from isaacgymenvs.utils.torch_jit_utils import to_torch
from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \
get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples
import torch
import numpy as np
import operator, random
from copy import deepcopy
from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr
from collections import deque
import sys
import abc
from abc import ABC
EXISTING_SIM = None
SCREEN_CAPTURE_RESOLUTION = (1027, 768)
def _create_sim_once(gym, *args, **kwargs):
global EXISTING_SIM
if EXISTING_SIM is not None:
return EXISTING_SIM
else:
EXISTING_SIM = gym.create_sim(*args, **kwargs)
return EXISTING_SIM
class Env(ABC):
    """Abstract base environment: device selection, gym spaces and the step/reset contract."""

    def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool):
        """Initialise the env.

        Args:
            config: the configuration dictionary.
            rl_device: device for RL tensors (observations/actions), eg. 'cuda:0' or 'cpu'
            sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'
            graphics_device_id: the device ID to render with.
            headless: Set to False to disable viewer rendering.
        """
        # Split e.g. "cuda:0" into device type and index; index defaults to 0.
        split_device = sim_device.split(":")
        self.device_type = split_device[0]
        self.device_id = int(split_device[1]) if len(split_device) > 1 else 0

        # GPU pipeline only makes sense with GPU simulation; otherwise force CPU.
        self.device = "cpu"
        if config["sim"]["use_gpu_pipeline"]:
            if self.device_type.lower() == "cuda" or self.device_type.lower() == "gpu":
                self.device = "cuda" + ":" + str(self.device_id)
            else:
                print("GPU Pipeline can only be used with GPU simulation. Forcing CPU Pipeline.")
                config["sim"]["use_gpu_pipeline"] = False

        self.rl_device = rl_device

        # Rendering
        # if training in a headless mode
        self.headless = headless

        enable_camera_sensors = config["env"].get("enableCameraSensors", False)
        self.graphics_device_id = graphics_device_id
        # No camera sensors and no viewer: disable graphics entirely.
        if enable_camera_sensors == False and self.headless == True:
            self.graphics_device_id = -1

        self.num_environments = config["env"]["numEnvs"]
        self.num_agents = config["env"].get("numAgents", 1)  # used for multi-agent environments
        self.num_observations = config["env"].get("numObservations", 0)
        self.num_states = config["env"].get("numStates", 0)

        # Unbounded Box spaces; clipping is applied separately via clip_obs/clip_actions.
        self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
        self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)

        self.num_actions = config["env"]["numActions"]
        self.control_freq_inv = config["env"].get("controlFrequencyInv", 1)

        self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)

        self.clip_obs = config["env"].get("clipObservations", np.Inf)
        self.clip_actions = config["env"].get("clipActions", np.Inf)

        # Total number of training frames since the beginning of the experiment.
        # We get this information from the learning algorithm rather than tracking ourselves.
        # The learning algorithm tracks the total number of frames since the beginning of training and accounts for
        # experiments restart/resumes. This means this number can be > 0 right after initialization if we resume the
        # experiment.
        self.total_train_env_frames: int = 0

        # number of control steps
        self.control_steps: int = 0

        self.render_fps: int = config["env"].get("renderFPS", -1)
        self.last_frame_time: float = 0.0

        self.record_frames: bool = False
        self.record_frames_dir = join("recorded_frames", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))

    @abc.abstractmethod
    def allocate_buffers(self):
        """Create torch buffers for observations, rewards, actions dones and any additional data."""

    @abc.abstractmethod
    def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
        """Step the physics of the environment.

        Args:
            actions: actions to apply
        Returns:
            Observations, rewards, resets, info
            Observations are dict of observations (currently only one member called 'obs')
        """

    @abc.abstractmethod
    def reset(self) -> Dict[str, torch.Tensor]:
        """Reset the environment.

        Returns:
            Observation dictionary
        """

    @abc.abstractmethod
    def reset_idx(self, env_ids: torch.Tensor):
        """Reset environments having the provided indices.

        Args:
            env_ids: environments to reset
        """

    @property
    def observation_space(self) -> gym.Space:
        """Get the environment's observation space."""
        return self.obs_space

    @property
    def action_space(self) -> gym.Space:
        """Get the environment's action space."""
        return self.act_space

    @property
    def num_envs(self) -> int:
        """Get the number of environments."""
        return self.num_environments

    @property
    def num_acts(self) -> int:
        """Get the number of actions in the environment."""
        return self.num_actions

    @property
    def num_obs(self) -> int:
        """Get the number of observations in the environment."""
        return self.num_observations

    def set_train_info(self, env_frames, *args, **kwargs):
        """
        Send the information in the direction algo->environment.
        Most common use case: tell the environment how far along we are in the training process. This is useful
        for implementing curriculums and things such as that.
        """
        self.total_train_env_frames = env_frames
        # print(f'env_frames updated to {self.total_train_env_frames}')

    def get_env_state(self):
        """
        Return serializable environment state to be saved to checkpoint.
        Can be used for stateful training sessions, i.e. with adaptive curriculums.
        """
        return None

    def set_env_state(self, env_state):
        # No-op by default; stateful subclasses may restore checkpointed state here.
        pass
class VecTask(Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 24}
def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture: bool = False, force_render: bool = False):
    """Initialise the `VecTask`.

    Args:
        config: config dictionary for the environment.
        sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'
        graphics_device_id: the device ID to render with.
        headless: Set to False to disable viewer rendering.
        virtual_screen_capture: Set to True to allow the users get captured screen in RGB array via `env.render(mode='rgb_array')`.
        force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest setting this arg to True)
    """
    super().__init__(config, rl_device, sim_device, graphics_device_id, headless)

    # Optional virtual display so render(mode='rgb_array') works without a real screen.
    self.virtual_screen_capture = virtual_screen_capture
    self.virtual_display = None
    if self.virtual_screen_capture:
        from pyvirtualdisplay.smartdisplay import SmartDisplay
        self.virtual_display = SmartDisplay(size=SCREEN_CAPTURE_RESOLUTION)
        self.virtual_display.start()
    self.force_render = force_render

    # NOTE(review): `self.cfg` is never assigned here or in `Env.__init__`;
    # subclasses appear to be expected to set it before calling this
    # constructor — confirm against the concrete task classes.
    self.sim_params = self.__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"])
    if self.cfg["physics_engine"] == "physx":
        self.physics_engine = gymapi.SIM_PHYSX
    elif self.cfg["physics_engine"] == "flex":
        self.physics_engine = gymapi.SIM_FLEX
    else:
        msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}"
        raise ValueError(msg)

    self.dt: float = self.sim_params.dt

    # optimization flags for pytorch JIT
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_set_profiling_executor(False)

    self.gym = gymapi.acquire_gym()

    # Domain-randomization bookkeeping.
    self.first_randomization = True
    self.original_props = {}
    self.dr_randomizations = {}
    self.actor_params_generator = None
    self.extern_actor_params = {}
    self.last_step = -1
    self.last_rand_step = -1
    for env_id in range(self.num_envs):
        self.extern_actor_params[env_id] = None

    # create envs, sim and viewer
    # NOTE(review): create_sim() is called without arguments here, so concrete
    # tasks are expected to override it with a no-arg version that sets self.sim.
    self.sim_initialized = False
    self.create_sim()
    self.gym.prepare_sim(self.sim)
    self.sim_initialized = True

    self.set_viewer()
    self.allocate_buffers()

    self.obs_dict = {}
def set_viewer(self):
    """Create the viewer and hook up keyboard shortcuts (no-op when headless)."""
    # todo: read from config
    self.enable_viewer_sync = True
    self.viewer = None

    if self.headless == False:
        self.viewer = self.gym.create_viewer(self.sim, gymapi.CameraProperties())

        # Keyboard shortcuts: quit, toggle render sync, toggle frame recording.
        for key, action in ((gymapi.KEY_ESCAPE, "QUIT"),
                            (gymapi.KEY_V, "toggle_viewer_sync"),
                            (gymapi.KEY_R, "record_frames")):
            self.gym.subscribe_viewer_keyboard_event(self.viewer, key, action)

        # Point the camera at the scene according to the sim's up axis.
        sim_params = self.gym.get_sim_params(self.sim)
        if sim_params.up_axis == gymapi.UP_AXIS_Z:
            cam_pos = gymapi.Vec3(20.0, 25.0, 3.0)
            cam_target = gymapi.Vec3(10.0, 15.0, 0.0)
        else:
            cam_pos = gymapi.Vec3(20.0, 3.0, 25.0)
            cam_target = gymapi.Vec3(10.0, 0.0, 15.0)
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
def allocate_buffers(self):
    """Allocate the observation, states, etc. buffers.

    These are what is used to set observations and states in the environment classes which
    inherit from this one, and are read in `step` and other related functions.
    """
    n = self.num_envs
    device = self.device

    # float-valued buffers
    self.obs_buf = torch.zeros((n, self.num_obs), device=device, dtype=torch.float)
    self.states_buf = torch.zeros((n, self.num_states), device=device, dtype=torch.float)
    self.rew_buf = torch.zeros(n, device=device, dtype=torch.float)

    # integer bookkeeping buffers; reset_buf starts at 1 so every env resets on the first step
    self.reset_buf = torch.ones(n, device=device, dtype=torch.long)
    self.timeout_buf = torch.zeros(n, device=device, dtype=torch.long)
    self.progress_buf = torch.zeros(n, device=device, dtype=torch.long)
    self.randomize_buf = torch.zeros(n, device=device, dtype=torch.long)

    # auxiliary info returned from step()
    self.extras = {}
def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):
    """Create an Isaac Gym sim object.

    Args:
        compute_device: ID of compute device to use.
        graphics_device: ID of graphics device to use.
        physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)
        sim_params: sim params to use.
    Returns:
        the Isaac Gym sim object.
    """
    # The process-wide sim is created at most once; later calls reuse the cached handle.
    sim = _create_sim_once(self.gym, compute_device, graphics_device, physics_engine, sim_params)
    if sim is not None:
        return sim
    # A None handle means Isaac Gym failed to initialise the simulation.
    print("*** Failed to create sim")
    quit()
def get_state(self):
    """Returns the state buffer of the environment (the privileged observations for asymmetric training)."""
    clamped = torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs)
    return clamped.to(self.rl_device)
@abc.abstractmethod
def pre_physics_step(self, actions: torch.Tensor):
    """Apply the actions to the environment (eg by setting torques, position targets).

    Args:
        actions: the actions to apply
    """

@abc.abstractmethod
def post_physics_step(self):
    """Compute reward and observations, reset any environments that require it."""
def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:
    """Step the physics of the environment.

    Args:
        actions: actions to apply
    Returns:
        Observations, rewards, resets, info
        Observations are dict of observations (currently only one member called 'obs')
    """
    # randomize actions
    if self.dr_randomizations.get('actions', None):
        actions = self.dr_randomizations['actions']['noise_lambda'](actions)

    action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)
    # apply actions
    self.pre_physics_step(action_tensor)

    # step physics and render each frame
    for i in range(self.control_freq_inv):
        if self.force_render:
            self.render()
        self.gym.simulate(self.sim)

    # to fix!
    if self.device == 'cpu':
        self.gym.fetch_results(self.sim, True)

    # compute observations, rewards, resets, ...
    self.post_physics_step()

    self.control_steps += 1

    # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1.
    # Timeout == 1 makes sense only if the reset buffer is 1.
    # NOTE(review): `self.max_episode_length` is not defined in this base class;
    # subclasses must provide it — confirm.
    self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0)

    # randomize observations
    if self.dr_randomizations.get('observations', None):
        self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)

    self.extras["time_outs"] = self.timeout_buf.to(self.rl_device)

    self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)

    # asymmetric actor-critic
    if self.num_states > 0:
        self.obs_dict["states"] = self.get_state()

    return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras
def zero_actions(self) -> torch.Tensor:
    """Returns a buffer with zero actions.

    Returns:
        A buffer of zero torch actions
    """
    return torch.zeros(
        [self.num_envs, self.num_actions], dtype=torch.float32, device=self.rl_device
    )
def reset_idx(self, env_idx):
    """Reset environments with indices in env_idx.

    Should be implemented in an environment class inherited from VecTask.
    """
    pass
def reset(self):
    """Is called only once when environment starts to provide the first observations.

    Doesn't calculate observations. Actual reset and observation calculation need to be implemented by user.
    Returns:
        Observation dictionary
    """
    clamped = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs)
    self.obs_dict["obs"] = clamped.to(self.rl_device)

    # asymmetric actor-critic: privileged state only when states are configured
    if self.num_states > 0:
        self.obs_dict["states"] = self.get_state()

    return self.obs_dict
def reset_done(self):
    """Reset the environment.

    Returns:
        Observation dictionary, indices of environments being reset
    """
    done_env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
    if len(done_env_ids) > 0:
        self.reset_idx(done_env_ids)

    clamped = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs)
    self.obs_dict["obs"] = clamped.to(self.rl_device)

    # asymmetric actor-critic: privileged state only when states are configured
    if self.num_states > 0:
        self.obs_dict["states"] = self.get_state()

    return self.obs_dict, done_env_ids
def render(self, mode="rgb_array"):
    """Draw the frame to the viewer, and check for keyboard events.

    When a virtual display is active and mode is "rgb_array", also returns the
    grabbed frame as a numpy array; otherwise returns None.
    """
    if self.viewer:
        # check for window closed
        if self.gym.query_viewer_has_closed(self.viewer):
            sys.exit()

        # check for keyboard events
        for evt in self.gym.query_viewer_action_events(self.viewer):
            if evt.action == "QUIT" and evt.value > 0:
                sys.exit()
            elif evt.action == "toggle_viewer_sync" and evt.value > 0:
                self.enable_viewer_sync = not self.enable_viewer_sync
            elif evt.action == "record_frames" and evt.value > 0:
                self.record_frames = not self.record_frames

        # fetch results
        if self.device != 'cpu':
            self.gym.fetch_results(self.sim, True)

        # step graphics
        if self.enable_viewer_sync:
            self.gym.step_graphics(self.sim)
            self.gym.draw_viewer(self.viewer, self.sim, True)

            # Wait for dt to elapse in real time.
            # This synchronizes the physics simulation with the rendering rate.
            self.gym.sync_frame_time(self.sim)

            # it seems like in some cases sync_frame_time still results in higher-than-realtime framerate
            # this code will slow down the rendering to real time
            now = time.time()
            delta = now - self.last_frame_time
            if self.render_fps < 0:
                # render at control frequency
                render_dt = self.dt * self.control_freq_inv  # render every control step
            else:
                render_dt = 1.0 / self.render_fps

            if delta < render_dt:
                time.sleep(render_dt - delta)

            self.last_frame_time = time.time()
        else:
            # rendering disabled: still drain viewer events so the window stays responsive
            self.gym.poll_viewer_events(self.viewer)

        if self.record_frames:
            if not os.path.isdir(self.record_frames_dir):
                os.makedirs(self.record_frames_dir, exist_ok=True)

            self.gym.write_viewer_image_to_file(self.viewer, join(self.record_frames_dir, f"frame_{self.control_steps}.png"))

    if self.virtual_display and mode == "rgb_array":
        img = self.virtual_display.grab()
        return np.array(img)
def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams:
    """Parse the config dictionary for physics stepping settings.

    Args:
        physics_engine: which physics engine to use. "physx" or "flex"
        config_sim: dict of sim configuration parameters
    Returns
        IsaacGym SimParams object with updated settings.
    """
    sim_params = gymapi.SimParams()

    # Only "z" and "y" are legal up-axes.
    if config_sim["up_axis"] not in ["z", "y"]:
        msg = f"Invalid physics up-axis: {config_sim['up_axis']}"
        print(msg)
        raise ValueError(msg)

    # General stepping parameters.
    sim_params.dt = config_sim["dt"]
    sim_params.num_client_threads = config_sim.get("num_client_threads", 0)
    sim_params.use_gpu_pipeline = config_sim["use_gpu_pipeline"]
    sim_params.substeps = config_sim.get("substeps", 2)

    # Up-axis and gravity vector.
    sim_params.up_axis = gymapi.UP_AXIS_Z if config_sim["up_axis"] == "z" else gymapi.UP_AXIS_Y
    sim_params.gravity = gymapi.Vec3(*config_sim["gravity"])

    # Engine-specific settings: copy every configured option onto the params
    # struct, converting PhysX's contact_collection value to its enum type.
    if physics_engine == "physx":
        for opt, value in config_sim.get("physx", {}).items():
            if opt == "contact_collection":
                setattr(sim_params.physx, opt, gymapi.ContactCollection(value))
            else:
                setattr(sim_params.physx, opt, value)
    else:
        for opt, value in config_sim.get("flex", {}).items():
            setattr(sim_params.flex, opt, value)

    # return the configured params
    return sim_params
"""
Domain Randomization methods
"""
def get_actor_params_info(self, dr_params: Dict[str, Any], env):
    """Generate a flat array of actor params, their names and ranges.

    Walks every actor/property/attribute combination configured for domain
    randomization and flattens them into parallel lists.

    Returns:
        (params, names, lows, highs) parallel lists, or None when dr_params
        has no "actor_params" section.
    """
    if "actor_params" not in dr_params:
        return None
    params = []
    names = []
    lows = []
    highs = []
    param_getters_map = get_property_getter_map(self.gym)
    for actor, actor_properties in dr_params["actor_params"].items():
        handle = self.gym.find_actor_handle(env, actor)
        for prop_name, prop_attrs in actor_properties.items():
            if prop_name == 'color':
                continue  # this is set randomly
            props = param_getters_map[prop_name](env, handle)
            # normalise scalar properties to a one-element list
            if not isinstance(props, list):
                props = [props]
            for prop_idx, prop in enumerate(props):
                for attr, attr_randomization_params in prop_attrs.items():
                    name = prop_name+'_' + str(prop_idx) + '_'+attr
                    lo_hi = attr_randomization_params['range']
                    distr = attr_randomization_params['distribution']
                    # non-uniform distributions are effectively unbounded
                    if 'uniform' not in distr:
                        lo_hi = (-1.0*float('Inf'), float('Inf'))
                    if isinstance(prop, np.ndarray):
                        # array-valued attribute (e.g. dof_properties): one entry per element
                        for attr_idx in range(prop[attr].shape[0]):
                            params.append(prop[attr][attr_idx])
                            names.append(name+'_'+str(attr_idx))
                            lows.append(lo_hi[0])
                            highs.append(lo_hi[1])
                    else:
                        params.append(getattr(prop, attr))
                        names.append(name)
                        lows.append(lo_hi[0])
                        highs.append(lo_hi[1])
    return params, names, lows, highs
def apply_randomizations(self, dr_params):
    """Apply domain randomizations to the environment.

    Note that currently we can only apply randomizations only on resets, due to current PhysX limitations

    Args:
        dr_params: parameters for domain randomization to use.

    Randomizes, in order: observation/action noise lambdas, sim params,
    then per-actor physical properties for the selected env_ids.
    NOTE(review): assumes ``dr_params`` contains an "actor_params" section;
    a config without one would raise KeyError below — confirm with callers.
    """
    # If we don't have a randomization frequency, randomize every step
    rand_freq = dr_params.get("frequency", 1)

    # First, determine what to randomize:
    #   - non-environment parameters when > frequency steps have passed since the last non-environment
    #   - physical environments in the reset buffer, which have exceeded the randomization frequency threshold
    #   - on the first call, randomize everything
    self.last_step = self.gym.get_frame_count(self.sim)
    if self.first_randomization:
        do_nonenv_randomize = True
        env_ids = list(range(self.num_envs))
    else:
        do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq
        rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))
        # Only envs that are both due for randomization AND being reset qualify.
        rand_envs = torch.logical_and(rand_envs, self.reset_buf)
        env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()
        self.randomize_buf[rand_envs] = 0

    if do_nonenv_randomize:
        self.last_rand_step = self.last_step

    param_setters_map = get_property_setter_map(self.gym)
    param_setter_defaults_map = get_default_setter_args(self.gym)
    param_getters_map = get_property_getter_map(self.gym)

    # On first iteration, check the number of buckets
    if self.first_randomization:
        check_buckets(self.gym, self.envs, dr_params)

    for nonphysical_param in ["observations", "actions"]:
        if nonphysical_param in dr_params and do_nonenv_randomize:
            dist = dr_params[nonphysical_param]["distribution"]
            op_type = dr_params[nonphysical_param]["operation"]
            sched_type = dr_params[nonphysical_param]["schedule"] if "schedule" in dr_params[nonphysical_param] else None
            sched_step = dr_params[nonphysical_param]["schedule_steps"] if "schedule" in dr_params[nonphysical_param] else None
            op = operator.add if op_type == 'additive' else operator.mul

            # Schedule scales the noise magnitude over training steps.
            if sched_type == 'linear':
                sched_scaling = 1.0 / sched_step * \
                    min(self.last_step, sched_step)
            elif sched_type == 'constant':
                sched_scaling = 0 if self.last_step < sched_step else 1
            else:
                sched_scaling = 1

            if dist == 'gaussian':
                mu, var = dr_params[nonphysical_param]["range"]
                mu_corr, var_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.])

                if op_type == 'additive':
                    mu *= sched_scaling
                    var *= sched_scaling
                    mu_corr *= sched_scaling
                    var_corr *= sched_scaling
                elif op_type == 'scaling':
                    var = var * sched_scaling  # scale up var over time
                    mu = mu * sched_scaling + 1.0 * \
                        (1.0 - sched_scaling)  # linearly interpolate
                    var_corr = var_corr * sched_scaling  # scale up var over time
                    mu_corr = mu_corr * sched_scaling + 1.0 * \
                        (1.0 - sched_scaling)  # linearly interpolate

                # NOTE: param_name is bound via the default argument on purpose
                # (late-binding closures would otherwise all see the last value).
                # 'corr' is sampled once and cached so correlated noise stays
                # fixed between randomizations.
                def noise_lambda(tensor, param_name=nonphysical_param):
                    params = self.dr_randomizations[param_name]
                    corr = params.get('corr', None)
                    if corr is None:
                        corr = torch.randn_like(tensor)
                        params['corr'] = corr
                    corr = corr * params['var_corr'] + params['mu_corr']
                    return op(
                        tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])

                self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'noise_lambda': noise_lambda}

            elif dist == 'uniform':
                lo, hi = dr_params[nonphysical_param]["range"]
                lo_corr, hi_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.])

                if op_type == 'additive':
                    lo *= sched_scaling
                    hi *= sched_scaling
                    lo_corr *= sched_scaling
                    hi_corr *= sched_scaling
                elif op_type == 'scaling':
                    lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)
                    hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)
                    lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)
                    hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)

                # Same default-argument binding / cached-corr pattern as above.
                def noise_lambda(tensor, param_name=nonphysical_param):
                    params = self.dr_randomizations[param_name]
                    corr = params.get('corr', None)
                    if corr is None:
                        corr = torch.randn_like(tensor)
                        params['corr'] = corr
                    corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']
                    return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])

                self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda}

    if "sim_params" in dr_params and do_nonenv_randomize:
        prop_attrs = dr_params["sim_params"]
        prop = self.gym.get_sim_params(self.sim)

        # Original values are snapshotted once so randomization is always
        # applied relative to the unrandomized baseline.
        if self.first_randomization:
            self.original_props["sim_params"] = {
                attr: getattr(prop, attr) for attr in dir(prop)}

        for attr, attr_randomization_params in prop_attrs.items():
            apply_random_samples(
                prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step)

        self.gym.set_sim_params(self.sim, prop)

    # If self.actor_params_generator is initialized: use it to
    # sample actor simulation params. This gives users the
    # freedom to generate samples from arbitrary distributions,
    # e.g. use full-covariance distributions instead of the DR's
    # default of treating each simulation parameter independently.
    extern_offsets = {}
    if self.actor_params_generator is not None:
        for env_id in env_ids:
            self.extern_actor_params[env_id] = \
                self.actor_params_generator.sample()
            extern_offsets[env_id] = 0

    # randomise all attributes of each actor (hand, cube etc..)
    # actor_properties are (stiffness, damping etc..)
    # Loop over actors, then loop over envs, then loop over their props
    # and lastly loop over the ranges of the params
    for actor, actor_properties in dr_params["actor_params"].items():

        # Loop over all envs as this part is not tensorised yet
        for env_id in env_ids:
            env = self.envs[env_id]
            handle = self.gym.find_actor_handle(env, actor)
            extern_sample = self.extern_actor_params[env_id]

            # randomise dof_props, rigid_body, rigid_shape properties
            # all obtained from the YAML file
            # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape properties
            #          prop_attrs:
            #               {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'}
            #               {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'}
            for prop_name, prop_attrs in actor_properties.items():
                if prop_name == 'color':
                    num_bodies = self.gym.get_actor_rigid_body_count(
                        env, handle)
                    for n in range(num_bodies):
                        self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,
                                                      gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))
                    continue

                if prop_name == 'scale':
                    setup_only = prop_attrs.get('setup_only', False)
                    if (setup_only and not self.sim_initialized) or not setup_only:
                        attr_randomization_params = prop_attrs
                        sample = generate_random_samples(attr_randomization_params, 1,
                                                         self.last_step, None)
                        og_scale = 1
                        if attr_randomization_params['operation'] == 'scaling':
                            new_scale = og_scale * sample
                        elif attr_randomization_params['operation'] == 'additive':
                            new_scale = og_scale + sample
                        self.gym.set_actor_scale(env, handle, new_scale)
                    continue

                prop = param_getters_map[prop_name](env, handle)
                set_random_properties = True

                if isinstance(prop, list):
                    # List of per-body/per-shape structs: snapshot originals once,
                    # then randomize each element pairwise against its baseline.
                    if self.first_randomization:
                        self.original_props[prop_name] = [
                            {attr: getattr(p, attr) for attr in dir(p)} for p in prop]
                    for p, og_p in zip(prop, self.original_props[prop_name]):
                        for attr, attr_randomization_params in prop_attrs.items():
                            setup_only = attr_randomization_params.get('setup_only', False)
                            if (setup_only and not self.sim_initialized) or not setup_only:
                                smpl = None
                                if self.actor_params_generator is not None:
                                    smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                        extern_sample, extern_offsets[env_id], p, attr)
                                apply_random_samples(
                                    p, og_p, attr, attr_randomization_params,
                                    self.last_step, smpl)
                            else:
                                set_random_properties = False
                else:
                    # Single struct/array property.
                    if self.first_randomization:
                        self.original_props[prop_name] = deepcopy(prop)
                    for attr, attr_randomization_params in prop_attrs.items():
                        setup_only = attr_randomization_params.get('setup_only', False)
                        if (setup_only and not self.sim_initialized) or not setup_only:
                            smpl = None
                            if self.actor_params_generator is not None:
                                smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                    extern_sample, extern_offsets[env_id], prop, attr)
                            apply_random_samples(
                                prop, self.original_props[prop_name], attr,
                                attr_randomization_params, self.last_step, smpl)
                        else:
                            set_random_properties = False

                if set_random_properties:
                    setter = param_setters_map[prop_name]
                    default_args = param_setter_defaults_map[prop_name]
                    setter(env, handle, prop, *default_args)

    if self.actor_params_generator is not None:
        for env_id in env_ids:  # check that we used all dims in sample
            if extern_offsets[env_id] > 0:
                extern_sample = self.extern_actor_params[env_id]
                if extern_offsets[env_id] != extern_sample.shape[0]:
                    print('env_id', env_id,
                          'extern_offset', extern_offsets[env_id],
                          'vs extern_sample.shape', extern_sample.shape)
                    raise Exception("Invalid extern_sample size")
self.first_randomization = False | 37,452 | Python | 43.586905 | 160 | 0.569476 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_base.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: base class.
Inherits Gym's VecTask class and abstract base class. Inherited by environment classes. Not directly executed.
Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml.
"""
import hydra
import math
import numpy as np
import os
import sys
import torch
from gym import logger
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
from isaacgymenvs.tasks.base.vec_task import VecTask
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from isaacgymenvs.tasks.factory.factory_schema_config_base import FactorySchemaConfigBase
class FactoryBase(VecTask, FactoryABCBase):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
    """Initialize instance variables. Initialize VecTask superclass.

    Args:
        cfg: task configuration dict (mutated: 'headless' is written into it).
        rl_device: device string for RL tensors.
        sim_device: simulation device string; forced to 'cpu' when exporting the scene.
        graphics_device_id: device ID used for rendering.
        headless: run without a viewer.
        virtual_screen_capture: allocate a virtual display for capture.
        force_render: render every step regardless of viewer state.
    """
    self.cfg = cfg
    self.cfg['headless'] = headless

    # Must run before VecTask.__init__, which calls create_sim() and thus
    # relies on self.cfg_base being populated.
    self._get_base_yaml_params()

    if self.cfg_base.mode.export_scene:
        # USD scene export requires the CPU pipeline.
        sim_device = 'cpu'

    super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)  # create_sim() is called here
def _get_base_yaml_params(self):
    """Initialize instance variables from YAML files.

    Populates ``self.cfg_base`` (FactoryBase.yaml) and
    ``self.asset_info_franka_table`` (franka/table asset info) via Hydra.
    """
    cs = hydra.core.config_store.ConfigStore.instance()
    cs.store(name='factory_schema_config_base', node=FactorySchemaConfigBase)

    config_path = 'task/FactoryBase.yaml'  # relative to Gym's Hydra search path (cfg dir)
    self.cfg_base = hydra.compose(config_name=config_path)
    self.cfg_base = self.cfg_base['task']  # strip superfluous nesting

    asset_info_path = '../../assets/factory/yaml/factory_asset_info_franka_table.yaml'  # relative to Gym's Hydra search path (cfg dir)
    self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
    # Hydra nests the composed config under one empty key per '../' path
    # component; index through them to reach the actual asset info.
    self.asset_info_franka_table = self.asset_info_franka_table['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting
def create_sim(self):
    """Set sim and PhysX params. Create sim object, ground plane, and envs.

    Called by VecTask.__init__; ``create_envs()`` must be provided by the
    task subclass.
    """
    if self.cfg_base.mode.export_scene:
        # USD export is only supported with the CPU pipeline.
        self.sim_params.use_gpu_pipeline = False

    self.sim = super().create_sim(compute_device=self.device_id,
                                  graphics_device=self.graphics_device_id,
                                  physics_engine=self.physics_engine,
                                  sim_params=self.sim_params)
    self._create_ground_plane()
    self.create_envs()  # defined in subclass
def _create_ground_plane(self):
    """Configure and add a z-up ground plane to the simulation."""
    plane_cfg = gymapi.PlaneParams()
    plane_cfg.normal = gymapi.Vec3(0.0, 0.0, 1.0)
    plane_cfg.distance = 0.0
    # Unit friction, no bounce (the Gym defaults, made explicit).
    plane_cfg.static_friction = 1.0
    plane_cfg.dynamic_friction = 1.0
    plane_cfg.restitution = 0.0
    self.gym.add_ground(self.sim, plane_cfg)
def import_franka_assets(self):
    """Set Franka and table asset options. Import assets.

    Returns:
        Tuple ``(franka_asset, table_asset)``.
    """
    urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
    franka_file = 'factory_franka.urdf'

    franka_opts = gymapi.AssetOptions()
    franka_opts.flip_visual_attachments = True
    franka_opts.fix_base_link = True
    franka_opts.collapse_fixed_joints = False
    franka_opts.thickness = 0.0  # default = 0.02
    franka_opts.density = 1000.0  # default = 1000.0
    franka_opts.armature = 0.01  # default = 0.0
    franka_opts.use_physx_armature = True
    if self.cfg_base.sim.add_damping:
        # Higher damping and tighter velocity caps than the Gym defaults:
        # improves stability and prevents CUDA errors.
        franka_opts.linear_damping = 1.0
        franka_opts.max_linear_velocity = 1.0
        franka_opts.angular_damping = 5.0
        franka_opts.max_angular_velocity = 2 * math.pi
    else:
        # Gym defaults.
        franka_opts.linear_damping = 0.0
        franka_opts.max_linear_velocity = 1000.0
        franka_opts.angular_damping = 0.5
        franka_opts.max_angular_velocity = 64.0
    franka_opts.disable_gravity = True
    franka_opts.enable_gyroscopic_forces = True
    franka_opts.default_dof_drive_mode = gymapi.DOF_MODE_NONE
    franka_opts.use_mesh_materials = True
    if self.cfg_base.mode.export_scene:
        franka_opts.mesh_normal_mode = gymapi.COMPUTE_PER_FACE

    table_opts = gymapi.AssetOptions()
    table_opts.flip_visual_attachments = False  # default = False
    table_opts.fix_base_link = True
    table_opts.thickness = 0.0  # default = 0.02
    table_opts.density = 1000.0  # default = 1000.0
    table_opts.armature = 0.0  # default = 0.0
    table_opts.use_physx_armature = True
    table_opts.linear_damping = 0.0  # default = 0.0
    table_opts.max_linear_velocity = 1000.0  # default = 1000.0
    table_opts.angular_damping = 0.0  # default = 0.5
    table_opts.max_angular_velocity = 64.0  # default = 64.0
    table_opts.disable_gravity = False
    table_opts.enable_gyroscopic_forces = True
    table_opts.default_dof_drive_mode = gymapi.DOF_MODE_NONE
    table_opts.use_mesh_materials = False
    if self.cfg_base.mode.export_scene:
        table_opts.mesh_normal_mode = gymapi.COMPUTE_PER_FACE

    franka_asset = self.gym.load_asset(self.sim, urdf_root, franka_file, franka_opts)
    table_asset = self.gym.create_box(self.sim,
                                      self.asset_info_franka_table.table_depth,
                                      self.asset_info_franka_table.table_width,
                                      self.cfg_base.env.table_height,
                                      table_opts)

    return franka_asset, table_asset
def acquire_base_tensors(self):
    """Acquire and wrap tensors. Create views.

    Wraps the raw Gym state buffers into torch tensors, carves per-env
    views out of them (root/body/DOF states, hand and finger states), and
    allocates the control-target buffers.
    """
    _root_state = self.gym.acquire_actor_root_state_tensor(self.sim)  # shape = (num_envs * num_actors, 13)
    _body_state = self.gym.acquire_rigid_body_state_tensor(self.sim)  # shape = (num_envs * num_bodies, 13)
    _dof_state = self.gym.acquire_dof_state_tensor(self.sim)  # shape = (num_envs * num_dofs, 2)
    _dof_force = self.gym.acquire_dof_force_tensor(self.sim)  # shape = (num_envs * num_dofs, 1)
    _contact_force = self.gym.acquire_net_contact_force_tensor(self.sim)  # shape = (num_envs * num_bodies, 3)
    _jacobian = self.gym.acquire_jacobian_tensor(self.sim, 'franka')  # shape = (num envs, num_bodies, 6, num_dofs)
    _mass_matrix = self.gym.acquire_mass_matrix_tensor(self.sim, 'franka')  # shape = (num_envs, num_dofs, num_dofs)

    # Wrapped tensors share memory with the simulator buffers; views below
    # update automatically after each refresh call.
    self.root_state = gymtorch.wrap_tensor(_root_state)
    self.body_state = gymtorch.wrap_tensor(_body_state)
    self.dof_state = gymtorch.wrap_tensor(_dof_state)
    self.dof_force = gymtorch.wrap_tensor(_dof_force)
    self.contact_force = gymtorch.wrap_tensor(_contact_force)
    self.jacobian = gymtorch.wrap_tensor(_jacobian)
    self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix)

    # State layout per actor/body: pos (3), quat (4), linvel (3), angvel (3).
    self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 0:3]
    self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 3:7]
    self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 7:10]
    self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 10:13]
    self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3]
    self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7]
    self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10]
    self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13]
    self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0]
    self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1]
    self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[..., 0]
    # NOTE: rebinds self.contact_force to the per-env view of itself.
    self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[..., 0:3]

    self.arm_dof_pos = self.dof_pos[:, 0:7]
    self.arm_mass_matrix = self.mass_matrix[:, 0:7, 0:7]  # for Franka arm (not gripper)

    self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3]
    self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4]
    self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3]
    self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3]
    self.hand_jacobian = self.jacobian[:, self.hand_body_id_env - 1, 0:6, 0:7]  # minus 1 because base is fixed

    self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3]
    self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4]
    self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3]
    self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3]
    self.left_finger_jacobian = self.jacobian[:, self.left_finger_body_id_env - 1, 0:6, 0:7]  # minus 1 because base is fixed

    self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3]
    self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4]
    self.right_finger_linvel = self.body_linvel[:, self.right_finger_body_id_env, 0:3]
    self.right_finger_angvel = self.body_angvel[:, self.right_finger_body_id_env, 0:3]
    self.right_finger_jacobian = self.jacobian[:, self.right_finger_body_id_env - 1, 0:6, 0:7]  # minus 1 because base is fixed

    self.left_finger_force = self.contact_force[:, self.left_finger_body_id_env, 0:3]
    self.right_finger_force = self.contact_force[:, self.right_finger_body_id_env, 0:3]

    self.gripper_dof_pos = self.dof_pos[:, 7:9]

    self.fingertip_centered_pos = self.body_pos[:, self.fingertip_centered_body_id_env, 0:3]
    self.fingertip_centered_quat = self.body_quat[:, self.fingertip_centered_body_id_env, 0:4]
    self.fingertip_centered_linvel = self.body_linvel[:, self.fingertip_centered_body_id_env, 0:3]
    self.fingertip_centered_angvel = self.body_angvel[:, self.fingertip_centered_body_id_env, 0:3]
    self.fingertip_centered_jacobian = self.jacobian[:, self.fingertip_centered_body_id_env - 1, 0:6, 0:7]  # minus 1 because base is fixed

    # Midpoint quantities are recomputed every step in refresh_base_tensors();
    # clones here are only initial values (quat/angvel alias the centered views).
    self.fingertip_midpoint_pos = self.fingertip_centered_pos.detach().clone()  # initial value
    self.fingertip_midpoint_quat = self.fingertip_centered_quat  # always equal
    self.fingertip_midpoint_linvel = self.fingertip_centered_linvel.detach().clone()  # initial value
    # From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
    # angular velocity of midpoint w.r.t. world is equal to sum of
    # angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
    # Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
    # Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
    self.fingertip_midpoint_angvel = self.fingertip_centered_angvel  # always equal
    self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5  # approximation

    # Control-target and bookkeeping buffers.
    self.dof_torque = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
    self.fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device)
    self.ctrl_target_fingertip_midpoint_pos = torch.zeros((self.num_envs, 3), device=self.device)
    self.ctrl_target_fingertip_midpoint_quat = torch.zeros((self.num_envs, 4), device=self.device)
    self.ctrl_target_dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
    self.ctrl_target_gripper_dof_pos = torch.zeros((self.num_envs, 2), device=self.device)
    self.ctrl_target_fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device)
    self.prev_actions = torch.zeros((self.num_envs, self.num_actions), device=self.device)
def refresh_base_tensors(self):
    """Refresh tensors.

    Pulls the latest simulator state into the wrapped tensors, then
    recomputes the derived fingertip-midpoint quantities.
    """
    # NOTE: Tensor refresh functions should be called once per step, before setters.
    self.gym.refresh_dof_state_tensor(self.sim)
    self.gym.refresh_actor_root_state_tensor(self.sim)
    self.gym.refresh_rigid_body_state_tensor(self.sim)
    self.gym.refresh_dof_force_tensor(self.sim)
    self.gym.refresh_net_contact_force_tensor(self.sim)
    self.gym.refresh_jacobian_tensors(self.sim)
    self.gym.refresh_mass_matrix_tensors(self.sim)

    # Midpoint of the two fingers, pushed along the hand's local z axis by
    # the finger length, gives the fingertip midpoint.
    self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) * 0.5
    self.fingertip_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos,
                                                             quat=self.hand_quat,
                                                             offset=self.asset_info_franka_table.franka_finger_length,
                                                             device=self.device)
    # TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf)
    # Rigid-body velocity transport: v_mid = v_centered + w x r.
    self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(self.fingertip_centered_angvel,
                                                                                 (self.fingertip_midpoint_pos - self.fingertip_centered_pos),
                                                                                 dim=1)
    self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5  # approximation
def parse_controller_spec(self):
    """Parse controller specification into lower-level controller configuration.

    Builds ``self.cfg_ctrl`` from the task YAML, then pushes the resulting
    drive mode (position PD vs. raw effort) into the actor DOF properties.
    """

    def tile_over_envs(values):
        # Tile a per-axis list from the config into a (num_envs, len(values)) tensor.
        return torch.tensor(values, device=self.device).repeat((self.num_envs, 1))

    cfg_ctrl_keys = {'num_envs', 'jacobian_type',
                     'gripper_prop_gains', 'gripper_deriv_gains',
                     'motor_ctrl_mode', 'gain_space', 'ik_method',
                     'joint_prop_gains', 'joint_deriv_gains',
                     'do_motion_ctrl', 'task_prop_gains', 'task_deriv_gains',
                     'do_inertial_comp', 'motion_ctrl_axes',
                     'do_force_ctrl', 'force_ctrl_method',
                     'wrench_prop_gains', 'force_ctrl_axes'}
    self.cfg_ctrl = dict.fromkeys(cfg_ctrl_keys)

    self.cfg_ctrl['num_envs'] = self.num_envs
    self.cfg_ctrl['jacobian_type'] = self.cfg_task.ctrl.all.jacobian_type
    self.cfg_ctrl['gripper_prop_gains'] = tile_over_envs(self.cfg_task.ctrl.all.gripper_prop_gains)
    self.cfg_ctrl['gripper_deriv_gains'] = tile_over_envs(self.cfg_task.ctrl.all.gripper_deriv_gains)

    ctrl_type = self.cfg_task.ctrl.ctrl_type
    if ctrl_type == 'gym_default':
        spec = self.cfg_task.ctrl.gym_default
        self.cfg_ctrl['motor_ctrl_mode'] = 'gym'
        self.cfg_ctrl['gain_space'] = 'joint'
        self.cfg_ctrl['ik_method'] = spec.ik_method
        self.cfg_ctrl['joint_prop_gains'] = tile_over_envs(spec.joint_prop_gains)
        self.cfg_ctrl['joint_deriv_gains'] = tile_over_envs(spec.joint_deriv_gains)
        # gym_default overrides the gripper gains taken from ctrl.all above.
        self.cfg_ctrl['gripper_prop_gains'] = tile_over_envs(spec.gripper_prop_gains)
        self.cfg_ctrl['gripper_deriv_gains'] = tile_over_envs(spec.gripper_deriv_gains)
    elif ctrl_type == 'joint_space_ik':
        spec = self.cfg_task.ctrl.joint_space_ik
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'joint'
        self.cfg_ctrl['ik_method'] = spec.ik_method
        self.cfg_ctrl['joint_prop_gains'] = tile_over_envs(spec.joint_prop_gains)
        self.cfg_ctrl['joint_deriv_gains'] = tile_over_envs(spec.joint_deriv_gains)
        self.cfg_ctrl['do_inertial_comp'] = False
    elif ctrl_type == 'joint_space_id':
        spec = self.cfg_task.ctrl.joint_space_id
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'joint'
        self.cfg_ctrl['ik_method'] = spec.ik_method
        self.cfg_ctrl['joint_prop_gains'] = tile_over_envs(spec.joint_prop_gains)
        self.cfg_ctrl['joint_deriv_gains'] = tile_over_envs(spec.joint_deriv_gains)
        self.cfg_ctrl['do_inertial_comp'] = True
    elif ctrl_type == 'task_space_impedance':
        spec = self.cfg_task.ctrl.task_space_impedance
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'task'
        self.cfg_ctrl['do_motion_ctrl'] = True
        self.cfg_ctrl['task_prop_gains'] = tile_over_envs(spec.task_prop_gains)
        self.cfg_ctrl['task_deriv_gains'] = tile_over_envs(spec.task_deriv_gains)
        self.cfg_ctrl['do_inertial_comp'] = False
        self.cfg_ctrl['motion_ctrl_axes'] = tile_over_envs(spec.motion_ctrl_axes)
        self.cfg_ctrl['do_force_ctrl'] = False
    elif ctrl_type == 'operational_space_motion':
        spec = self.cfg_task.ctrl.operational_space_motion
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'task'
        self.cfg_ctrl['do_motion_ctrl'] = True
        self.cfg_ctrl['task_prop_gains'] = tile_over_envs(spec.task_prop_gains)
        self.cfg_ctrl['task_deriv_gains'] = tile_over_envs(spec.task_deriv_gains)
        self.cfg_ctrl['do_inertial_comp'] = True
        self.cfg_ctrl['motion_ctrl_axes'] = tile_over_envs(spec.motion_ctrl_axes)
        self.cfg_ctrl['do_force_ctrl'] = False
    elif ctrl_type == 'open_loop_force':
        spec = self.cfg_task.ctrl.open_loop_force
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'task'
        self.cfg_ctrl['do_motion_ctrl'] = False
        self.cfg_ctrl['do_force_ctrl'] = True
        self.cfg_ctrl['force_ctrl_method'] = 'open'
        self.cfg_ctrl['force_ctrl_axes'] = tile_over_envs(spec.force_ctrl_axes)
    elif ctrl_type == 'closed_loop_force':
        spec = self.cfg_task.ctrl.closed_loop_force
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'task'
        self.cfg_ctrl['do_motion_ctrl'] = False
        self.cfg_ctrl['do_force_ctrl'] = True
        self.cfg_ctrl['force_ctrl_method'] = 'closed'
        self.cfg_ctrl['wrench_prop_gains'] = tile_over_envs(spec.wrench_prop_gains)
        self.cfg_ctrl['force_ctrl_axes'] = tile_over_envs(spec.force_ctrl_axes)
    elif ctrl_type == 'hybrid_force_motion':
        spec = self.cfg_task.ctrl.hybrid_force_motion
        self.cfg_ctrl['motor_ctrl_mode'] = 'manual'
        self.cfg_ctrl['gain_space'] = 'task'
        self.cfg_ctrl['do_motion_ctrl'] = True
        self.cfg_ctrl['task_prop_gains'] = tile_over_envs(spec.task_prop_gains)
        self.cfg_ctrl['task_deriv_gains'] = tile_over_envs(spec.task_deriv_gains)
        self.cfg_ctrl['do_inertial_comp'] = True
        self.cfg_ctrl['motion_ctrl_axes'] = tile_over_envs(spec.motion_ctrl_axes)
        self.cfg_ctrl['do_force_ctrl'] = True
        self.cfg_ctrl['force_ctrl_method'] = 'closed'
        self.cfg_ctrl['wrench_prop_gains'] = tile_over_envs(spec.wrench_prop_gains)
        self.cfg_ctrl['force_ctrl_axes'] = tile_over_envs(spec.force_ctrl_axes)

    if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
        # Gym drives the joints itself: install the PD gains into the DOF props.
        prop_gains = torch.cat((self.cfg_ctrl['joint_prop_gains'],
                                self.cfg_ctrl['gripper_prop_gains']), dim=-1).to('cpu')
        deriv_gains = torch.cat((self.cfg_ctrl['joint_deriv_gains'],
                                 self.cfg_ctrl['gripper_deriv_gains']), dim=-1).to('cpu')
        # No tensor API for getting/setting actor DOF props; thus, loop required
        for env_ptr, franka_handle, prop_gain, deriv_gain in zip(self.env_ptrs, self.franka_handles,
                                                                 prop_gains, deriv_gains):
            dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
            dof_props['driveMode'][:] = gymapi.DOF_MODE_POS
            dof_props['stiffness'] = prop_gain
            dof_props['damping'] = deriv_gain
            self.gym.set_actor_dof_properties(env_ptr, franka_handle, dof_props)
    elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
        # We compute torques ourselves: switch to effort mode with no passive PD.
        # No tensor API for getting/setting actor DOF props; thus, loop required
        for env_ptr, franka_handle in zip(self.env_ptrs, self.franka_handles):
            dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
            dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT
            dof_props['stiffness'][:] = 0.0  # zero passive stiffness
            dof_props['damping'][:] = 0.0  # zero passive damping
            self.gym.set_actor_dof_properties(env_ptr, franka_handle, dof_props)
def generate_ctrl_signals(self):
    """Get Jacobian. Set Franka DOF position targets or DOF torques.

    Selects the Jacobian per ``cfg_ctrl['jacobian_type']``, then dispatches
    to PD position targets ('gym' mode) or direct torques ('manual' mode).
    """
    # Get desired Jacobian
    if self.cfg_ctrl['jacobian_type'] == 'geometric':
        self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
    elif self.cfg_ctrl['jacobian_type'] == 'analytic':
        # Bug fix: was self.fingertip_quat, which is never defined
        # (acquire_base_tensors() only creates fingertip_midpoint_quat and
        # fingertip_centered_quat), so this branch raised AttributeError.
        self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
            fingertip_quat=self.fingertip_midpoint_quat,
            fingertip_jacobian=self.fingertip_midpoint_jacobian,
            num_envs=self.num_envs,
            device=self.device)

    # Set PD joint pos target or joint torque
    if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
        self._set_dof_pos_target()
    elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
        self._set_dof_torque()
def _set_dof_pos_target(self):
    """Set Franka DOF position target to move fingertips towards target pose.

    Used in 'gym' motor-control mode: the sim's PD controller tracks the
    computed joint-position targets.
    """
    self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
        cfg_ctrl=self.cfg_ctrl,
        arm_dof_pos=self.arm_dof_pos,
        fingertip_midpoint_pos=self.fingertip_midpoint_pos,
        fingertip_midpoint_quat=self.fingertip_midpoint_quat,
        jacobian=self.fingertip_midpoint_jacobian_tf,
        ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
        ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
        ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
        device=self.device)

    # Indexed setter: only the Franka actors are updated.
    self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                    gymtorch.unwrap_tensor(self.ctrl_target_dof_pos),
                                                    gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
                                                    len(self.franka_actor_ids_sim))
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device)
self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_torque),
gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
len(self.franka_actor_ids_sim))
    def print_sdf_warning(self):
        """Warn the user that signed-distance-field (SDF) generation may be slow."""
        # NOTE(review): `logger` is defined outside this view; if it is a stdlib
        # logging.Logger, `.warn` is a deprecated alias of `.warning` — confirm.
        logger.warn('Please be patient: SDFs may be generating, which may take a few minutes. Terminating prematurely may result in a corrupted SDF cache.')
def enable_gravity(self, gravity_mag):
"""Enable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity.z = -gravity_mag
self.gym.set_sim_params(self.sim, sim_params)
def disable_gravity(self):
"""Disable gravity."""
sim_params = self.gym.get_sim_params(self.sim)
sim_params.gravity.z = 0.0
self.gym.set_sim_params(self.sim, sim_params)
def export_scene(self, label):
"""Export scene to USD."""
usd_export_options = gymapi.UsdExportOptions()
usd_export_options.export_physics = False
usd_exporter = self.gym.create_usd_exporter(usd_export_options)
self.gym.export_usd_sim(usd_exporter, self.sim, label)
sys.exit()
def extract_poses(self):
"""Extract poses of all bodies."""
if not hasattr(self, 'export_pos'):
self.export_pos = []
self.export_rot = []
self.frame_count = 0
pos = self.body_pos
rot = self.body_quat
self.export_pos.append(pos.cpu().numpy().copy())
self.export_rot.append(rot.cpu().numpy().copy())
self.frame_count += 1
if len(self.export_pos) == self.max_episode_length:
output_dir = self.__class__.__name__
save_dir = os.path.join('usd', output_dir)
os.makedirs(output_dir, exist_ok=True)
print(f'Exporting poses to {output_dir}...')
np.save(os.path.join(save_dir, 'body_position.npy'), np.array(self.export_pos))
np.save(os.path.join(save_dir, 'body_rotation.npy'), np.array(self.export_rot))
print('Export completed.')
sys.exit()
| 32,041 | Python | 58.668529 | 156 | 0.601635 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_gears.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for gears env.
Inherits base class and abstract environment class. Inherited by gear task class. Not directly executed.
Configuration defined in FactoryEnvGears.yaml. Asset info defined in factory_asset_info_gears.yaml.
"""
import hydra
import numpy as np
import os
import torch
from isaacgym import gymapi
from isaacgymenvs.tasks.factory.factory_base import FactoryBase
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv
class FactoryEnvGears(FactoryBase, FactoryABCEnv):
    """Gears environment: Franka arm, table, three gears, and a gear base per env.

    Inherits the factory base class and the abstract env class; inherited by the
    gear task class and not directly executed. Env config comes from
    FactoryEnvGears.yaml; asset dimensions from factory_asset_info_gears.yaml.
    """
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize instance variables. Initialize environment superclass. Acquire tensors."""
        # Env YAML params must be loaded before the superclass builds the sim/envs.
        self._get_env_yaml_params()
        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
        self.acquire_base_tensors()  # defined in superclass
        self._acquire_env_tensors()
        self.refresh_base_tensors()  # defined in superclass
        self.refresh_env_tensors()
    def _get_env_yaml_params(self):
        """Initialize instance variables from YAML files (env config and gear asset info)."""
        cs = hydra.core.config_store.ConfigStore.instance()
        cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv)
        config_path = 'task/FactoryEnvGears.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.cfg_env = hydra.compose(config_name=config_path)
        self.cfg_env = self.cfg_env['task']  # strip superfluous nesting
        asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml'  # relative to Hydra search path (cfg dir)
        self.asset_info_gears = hydra.compose(config_name=asset_info_path)
        # Hydra wraps configs composed via relative paths in empty-string keys; unwrap them.
        self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting
    def create_envs(self):
        """Set env options. Import assets. Create actors."""
        # Env bounds; z starts at 0 so actors sit on/above the ground plane.
        lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0)
        upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing)
        num_per_row = int(np.sqrt(self.num_envs))  # lay envs out in a square grid
        self.print_sdf_warning()
        franka_asset, table_asset = self.import_franka_assets()
        gear_small_asset, gear_medium_asset, gear_large_asset, base_asset = self._import_env_assets()
        self._create_actors(lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset,
                            gear_large_asset, base_asset, table_asset)
    def _import_env_assets(self):
        """Set gear and base asset options. Import assets.

        Returns:
            Tuple of (gear_small_asset, gear_medium_asset, gear_large_asset, base_asset).
        """
        urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf')
        gear_small_file = 'factory_gear_small.urdf'
        gear_medium_file = 'factory_gear_medium.urdf'
        gear_large_file = 'factory_gear_large.urdf'
        if self.cfg_env.env.tight_or_loose == 'tight':
            base_file = 'factory_gear_base_tight.urdf'
        elif self.cfg_env.env.tight_or_loose == 'loose':
            base_file = 'factory_gear_base_loose.urdf'
        # NOTE(review): base_file is unbound if tight_or_loose is neither value;
        # presumably the config schema restricts it to {'tight', 'loose'} — confirm.
        gear_options = gymapi.AssetOptions()
        gear_options.flip_visual_attachments = False
        gear_options.fix_base_link = False
        gear_options.thickness = 0.0  # default = 0.02
        gear_options.density = self.cfg_env.env.gears_density  # default = 1000.0
        gear_options.armature = 0.0  # default = 0.0
        gear_options.use_physx_armature = True
        gear_options.linear_damping = 0.0  # default = 0.0
        gear_options.max_linear_velocity = 1000.0  # default = 1000.0
        gear_options.angular_damping = 0.0  # default = 0.5
        gear_options.max_angular_velocity = 64.0  # default = 64.0
        gear_options.disable_gravity = False
        gear_options.enable_gyroscopic_forces = True
        gear_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
        gear_options.use_mesh_materials = False
        if self.cfg_base.mode.export_scene:
            gear_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
        base_options = gymapi.AssetOptions()
        base_options.flip_visual_attachments = False
        base_options.fix_base_link = True  # base is fixed in place (unlike the gears)
        base_options.thickness = 0.0  # default = 0.02
        base_options.density = self.cfg_env.env.base_density  # default = 1000.0
        base_options.armature = 0.0  # default = 0.0
        base_options.use_physx_armature = True
        base_options.linear_damping = 0.0  # default = 0.0
        base_options.max_linear_velocity = 1000.0  # default = 1000.0
        base_options.angular_damping = 0.0  # default = 0.5
        base_options.max_angular_velocity = 64.0  # default = 64.0
        base_options.disable_gravity = False
        base_options.enable_gyroscopic_forces = True
        base_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
        base_options.use_mesh_materials = False
        if self.cfg_base.mode.export_scene:
            base_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE
        gear_small_asset = self.gym.load_asset(self.sim, urdf_root, gear_small_file, gear_options)
        gear_medium_asset = self.gym.load_asset(self.sim, urdf_root, gear_medium_file, gear_options)
        gear_large_asset = self.gym.load_asset(self.sim, urdf_root, gear_large_file, gear_options)
        base_asset = self.gym.load_asset(self.sim, urdf_root, base_file, base_options)
        return gear_small_asset, gear_medium_asset, gear_large_asset, base_asset
    def _create_actors(self, lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset,
                       gear_large_asset, base_asset, table_asset):
        """Set initial actor poses. Create actors. Set shape and DOF properties."""
        # Franka spawns at the configured depth, rotated 180 deg about z (quat (0,0,1,0)).
        franka_pose = gymapi.Transform()
        franka_pose.p.x = self.cfg_base.env.franka_depth
        franka_pose.p.y = 0.0
        franka_pose.p.z = 0.0
        franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0)
        # All three gears spawn at the same lateral offset on the tabletop.
        gear_pose = gymapi.Transform()
        gear_pose.p.x = 0.0
        gear_pose.p.y = self.cfg_env.env.gears_lateral_offset
        gear_pose.p.z = self.cfg_base.env.table_height
        gear_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        base_pose = gymapi.Transform()
        base_pose.p.x = 0.0
        base_pose.p.y = 0.0
        base_pose.p.z = self.cfg_base.env.table_height
        base_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        # Table origin is at its half-height so the top surface sits at table_height.
        table_pose = gymapi.Transform()
        table_pose.p.x = 0.0
        table_pose.p.y = 0.0
        table_pose.p.z = self.cfg_base.env.table_height * 0.5
        table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
        self.env_ptrs = []
        self.franka_handles = []
        self.gear_small_handles = []
        self.gear_medium_handles = []
        self.gear_large_handles = []
        self.base_handles = []
        self.table_handles = []
        self.shape_ids = []
        self.franka_actor_ids_sim = []  # within-sim indices
        self.gear_small_actor_ids_sim = []  # within-sim indices
        self.gear_medium_actor_ids_sim = []  # within-sim indices
        self.gear_large_actor_ids_sim = []  # within-sim indices
        self.base_actor_ids_sim = []  # within-sim indices
        self.table_actor_ids_sim = []  # within-sim indices
        actor_count = 0
        for i in range(self.num_envs):
            env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
            if self.cfg_env.sim.disable_franka_collisions:
                # Collision group i + num_envs is shared with no other actor, so the
                # Franka collides with nothing in its env.
                franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0)
            else:
                franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0)
            self.franka_actor_ids_sim.append(actor_count)
            actor_count += 1
            gear_small_handle = self.gym.create_actor(env_ptr, gear_small_asset, gear_pose, 'gear_small', i, 0, 0)
            self.gear_small_actor_ids_sim.append(actor_count)
            actor_count += 1
            gear_medium_handle = self.gym.create_actor(env_ptr, gear_medium_asset, gear_pose, 'gear_medium', i, 0, 0)
            self.gear_medium_actor_ids_sim.append(actor_count)
            actor_count += 1
            gear_large_handle = self.gym.create_actor(env_ptr, gear_large_asset, gear_pose, 'gear_large', i, 0, 0)
            self.gear_large_actor_ids_sim.append(actor_count)
            actor_count += 1
            base_handle = self.gym.create_actor(env_ptr, base_asset, base_pose, 'base', i, 0, 0)
            self.base_actor_ids_sim.append(actor_count)
            actor_count += 1
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0)
            self.table_actor_ids_sim.append(actor_count)
            actor_count += 1
            # Shape properties are overridden only on these Franka links (hand + fingers).
            link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR)
            hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR)
            left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
                                                                  gymapi.DOMAIN_ACTOR)
            right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger',
                                                                   gymapi.DOMAIN_ACTOR)
            self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id]
            franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle)
            for shape_id in self.shape_ids:
                franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction
                franka_shape_props[shape_id].rolling_friction = 0.0  # default = 0.0
                franka_shape_props[shape_id].torsion_friction = 0.0  # default = 0.0
                franka_shape_props[shape_id].restitution = 0.0  # default = 0.0
                franka_shape_props[shape_id].compliance = 0.0  # default = 0.0
                franka_shape_props[shape_id].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props)
            # All gears share the same friction config; the base and table have their own.
            gear_small_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_small_handle)
            gear_small_shape_props[0].friction = self.cfg_env.env.gears_friction
            gear_small_shape_props[0].rolling_friction = 0.0  # default = 0.0
            gear_small_shape_props[0].torsion_friction = 0.0  # default = 0.0
            gear_small_shape_props[0].restitution = 0.0  # default = 0.0
            gear_small_shape_props[0].compliance = 0.0  # default = 0.0
            gear_small_shape_props[0].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, gear_small_handle, gear_small_shape_props)
            gear_medium_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_medium_handle)
            gear_medium_shape_props[0].friction = self.cfg_env.env.gears_friction
            gear_medium_shape_props[0].rolling_friction = 0.0  # default = 0.0
            gear_medium_shape_props[0].torsion_friction = 0.0  # default = 0.0
            gear_medium_shape_props[0].restitution = 0.0  # default = 0.0
            gear_medium_shape_props[0].compliance = 0.0  # default = 0.0
            gear_medium_shape_props[0].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, gear_medium_handle, gear_medium_shape_props)
            gear_large_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_large_handle)
            gear_large_shape_props[0].friction = self.cfg_env.env.gears_friction
            gear_large_shape_props[0].rolling_friction = 0.0  # default = 0.0
            gear_large_shape_props[0].torsion_friction = 0.0  # default = 0.0
            gear_large_shape_props[0].restitution = 0.0  # default = 0.0
            gear_large_shape_props[0].compliance = 0.0  # default = 0.0
            gear_large_shape_props[0].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, gear_large_handle, gear_large_shape_props)
            base_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, base_handle)
            base_shape_props[0].friction = self.cfg_env.env.base_friction
            base_shape_props[0].rolling_friction = 0.0  # default = 0.0
            base_shape_props[0].torsion_friction = 0.0  # default = 0.0
            base_shape_props[0].restitution = 0.0  # default = 0.0
            base_shape_props[0].compliance = 0.0  # default = 0.0
            base_shape_props[0].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, base_handle, base_shape_props)
            table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle)
            table_shape_props[0].friction = self.cfg_base.env.table_friction
            table_shape_props[0].rolling_friction = 0.0  # default = 0.0
            table_shape_props[0].torsion_friction = 0.0  # default = 0.0
            table_shape_props[0].restitution = 0.0  # default = 0.0
            table_shape_props[0].compliance = 0.0  # default = 0.0
            table_shape_props[0].thickness = 0.0  # default = 0.0
            self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props)
            self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle)
            self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle)
            self.env_ptrs.append(env_ptr)
            self.franka_handles.append(franka_handle)
            self.gear_small_handles.append(gear_small_handle)
            self.gear_medium_handles.append(gear_medium_handle)
            self.gear_large_handles.append(gear_large_handle)
            self.base_handles.append(base_handle)
            self.table_handles.append(table_handle)
        self.num_actors = int(actor_count / self.num_envs)  # per env
        self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr)  # per env
        self.num_dofs = self.gym.get_env_dof_count(env_ptr)  # per env
        # For setting targets
        self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device)
        self.gear_small_actor_ids_sim = torch.tensor(self.gear_small_actor_ids_sim, dtype=torch.int32,
                                                     device=self.device)
        self.gear_medium_actor_ids_sim = torch.tensor(self.gear_medium_actor_ids_sim, dtype=torch.int32,
                                                      device=self.device)
        self.gear_large_actor_ids_sim = torch.tensor(self.gear_large_actor_ids_sim, dtype=torch.int32,
                                                     device=self.device)
        self.base_actor_ids_sim = torch.tensor(self.base_actor_ids_sim, dtype=torch.int32, device=self.device)
        # For extracting root pos/quat
        # (env_ptr here is the last env created; per-env indices are identical across envs.)
        self.gear_small_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_small', gymapi.DOMAIN_ENV)
        self.gear_medium_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_medium', gymapi.DOMAIN_ENV)
        self.gear_large_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_large', gymapi.DOMAIN_ENV)
        self.base_actor_id_env = self.gym.find_actor_index(env_ptr, 'base', gymapi.DOMAIN_ENV)
        # For extracting body pos/quat, force, and Jacobian
        # NOTE(review): the medium/large lookups pass body name 'gear_small' (and
        # 'gear_mediums_body_id_env' looks like a typo for 'gear_medium_...'); this is
        # correct only if all three gear URDFs name their link 'gear_small' — confirm.
        self.gear_small_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_small_handle, 'gear_small',
                                                                           gymapi.DOMAIN_ENV)
        self.gear_mediums_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_medium_handle, 'gear_small',
                                                                             gymapi.DOMAIN_ENV)
        self.gear_large_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_large_handle, 'gear_small',
                                                                           gymapi.DOMAIN_ENV)
        self.base_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, base_handle, 'base', gymapi.DOMAIN_ENV)
        self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
                                                                     gymapi.DOMAIN_ENV)
        self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
                                                                            gymapi.DOMAIN_ENV)
        self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
                                                                             'panda_rightfinger', gymapi.DOMAIN_ENV)
        self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
                                                                                   'panda_fingertip_centered',
                                                                                   gymapi.DOMAIN_ENV)
    def _acquire_env_tensors(self):
        """Acquire and wrap tensors. Create views."""
        # Slices of the root-state tensor are views: they track simulator state
        # without copies once the base tensors are refreshed.
        self.gear_small_pos = self.root_pos[:, self.gear_small_actor_id_env, 0:3]
        self.gear_small_quat = self.root_quat[:, self.gear_small_actor_id_env, 0:4]
        self.gear_small_linvel = self.root_linvel[:, self.gear_small_actor_id_env, 0:3]
        self.gear_small_angvel = self.root_angvel[:, self.gear_small_actor_id_env, 0:3]
        self.gear_medium_pos = self.root_pos[:, self.gear_medium_actor_id_env, 0:3]
        self.gear_medium_quat = self.root_quat[:, self.gear_medium_actor_id_env, 0:4]
        self.gear_medium_linvel = self.root_linvel[:, self.gear_medium_actor_id_env, 0:3]
        self.gear_medium_angvel = self.root_angvel[:, self.gear_medium_actor_id_env, 0:3]
        self.gear_large_pos = self.root_pos[:, self.gear_large_actor_id_env, 0:3]
        self.gear_large_quat = self.root_quat[:, self.gear_large_actor_id_env, 0:4]
        self.gear_large_linvel = self.root_linvel[:, self.gear_large_actor_id_env, 0:3]
        self.gear_large_angvel = self.root_angvel[:, self.gear_large_actor_id_env, 0:3]
        self.base_pos = self.root_pos[:, self.base_actor_id_env, 0:3]
        self.base_quat = self.root_quat[:, self.base_actor_id_env, 0:4]
        # Gear center-of-mass proxies sit gear_base_height + gear_height/2 above the
        # root origin along the gear's local z axis.
        self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos,
                                                             quat=self.gear_small_quat,
                                                             offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                             device=self.device)
        self.gear_small_com_quat = self.gear_small_quat  # always equal
        # COM linear velocity = root linvel + angvel x (COM offset), rigid-body kinematics.
        self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel,
                                                                          (self.gear_small_com_pos - self.gear_small_pos),
                                                                          dim=1)
        self.gear_small_com_angvel = self.gear_small_angvel  # always equal
        self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos,
                                                              quat=self.gear_medium_quat,
                                                              offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                              device=self.device)
        self.gear_medium_com_quat = self.gear_medium_quat  # always equal
        self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel,
                                                                            (self.gear_medium_com_pos - self.gear_medium_pos),
                                                                            dim=1)
        self.gear_medium_com_angvel = self.gear_medium_angvel  # always equal
        self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos,
                                                             quat=self.gear_large_quat,
                                                             offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                             device=self.device)
        self.gear_large_com_quat = self.gear_large_quat  # always equal
        self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel,
                                                                          (self.gear_large_com_pos - self.gear_large_pos),
                                                                          dim=1)
        self.gear_large_com_angvel = self.gear_large_angvel  # always equal
    def refresh_env_tensors(self):
        """Refresh tensors."""
        # NOTE: Tensor refresh functions should be called once per step, before setters.
        # Only COM positions and linear velocities are recomputed here; the COM
        # quats/angvels are aliases of the root views and need no refresh.
        self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos,
                                                             quat=self.gear_small_quat,
                                                             offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                             device=self.device)
        self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel,
                                                                          (self.gear_small_com_pos - self.gear_small_pos),
                                                                          dim=1)
        self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos,
                                                              quat=self.gear_medium_quat,
                                                              offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                              device=self.device)
        self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel,
                                                                            (self.gear_medium_com_pos - self.gear_medium_pos),
                                                                            dim=1)
        self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos,
                                                             quat=self.gear_large_quat,
                                                             offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5,
                                                             device=self.device)
        self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel,
                                                                          (self.gear_large_com_pos - self.gear_large_pos),
                                                                          dim=1)
| 25,262 | Python | 60.617073 | 150 | 0.586731 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt place task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskNutBoltPlace
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
from isaacgymenvs.utils import torch_jit_utils
class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize instance variables. Initialize environment superclass.

        The superclass constructor builds the sim/envs; the calls below must run
        after it because they read tensors and config that it creates.
        """
        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)
        self.cfg = cfg  # keep a handle to the raw task config
        self._get_task_yaml_params()
        self._acquire_task_tensors()
        self.parse_controller_spec()  # defined in base class; presumably populates self.cfg_ctrl — confirm
        if self.cfg_task.sim.disable_gravity:
            self.disable_gravity()
        if self.viewer is not None:  # viewer is None when running headless
            self._set_viewer_params()
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask
asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting
ppo_path = 'train/FactoryTaskNutBoltPlacePPO.yaml' # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting
def _acquire_task_tensors(self):
"""Acquire tensors."""
# Nut-bolt tensors
self.nut_base_pos_local = \
self.bolt_head_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))
bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths
self.bolt_tip_pos_local = \
bolt_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))
# Keypoint tensors
self.keypoint_offsets = \
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale
self.keypoints_nut = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device)
self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device)
self.identity_quat = \
torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
self.actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pos of keypoints on gripper, nut, and bolt in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_quat,
self.nut_pos,
self.identity_quat,
(keypoint_offset + self.nut_base_pos_local))[1]
self.keypoints_bolt[:, idx] = torch_jit_utils.tf_combine(self.bolt_quat,
self.bolt_pos,
self.identity_quat,
(keypoint_offset + self.bolt_tip_pos_local))[1]
def pre_physics_step(self, actions):
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True)
    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward. Reset environments.

        Order matters: tensors must be refreshed before observations/rewards read them.
        """
        self.progress_buf[:] += 1
        self.refresh_base_tensors()  # defined in base class
        self.refresh_env_tensors()  # defined in env class
        self._refresh_task_tensors()  # keypoints used by observations/reward
        self.compute_observations()
        self.compute_reward()
def compute_observations(self):
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_pos,
self.nut_quat,
self.bolt_pos,
self.bolt_quat]
if self.cfg_task.rl.add_obs_bolt_tip_pos:
obs_tensors += [self.bolt_tip_pos_local]
self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations)
return self.obs_buf
    def compute_reward(self):
        """Update the reset buffer, then the reward buffer, for the current timestep."""
        self._update_reset_buf()
        self._update_rew_buf()
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf)
def _update_rew_buf(self):
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
action_penalty = torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale
self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
- action_penalty * self.cfg_task.rl.action_penalty_scale
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)
if is_last_step:
# Check if nut is close enough to bolt
is_nut_close_to_bolt = self._check_nut_close_to_bolt()
self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
self.extras['successes'] = torch.mean(is_nut_close_to_bolt.float())
    def reset_idx(self, env_ids):
        """Reset specified environments.

        Sequence is order-critical: reset Franka and objects, close the gripper on
        the nut with gravity off, restore gravity, then randomize the gripper pose.

        Args:
            env_ids: 1-D tensor of environment indices to reset.
        """
        self._reset_franka(env_ids)
        self._reset_object(env_ids)
        # Close gripper onto nut
        self.disable_gravity()  # to prevent nut from falling
        for _ in range(self.cfg_task.env.num_gripper_close_sim_steps):
            self.ctrl_target_dof_pos[env_ids, 7:9] = 0.0  # DOFs 7-8 are the gripper fingers
            delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
                                          device=self.device)  # no arm motion
            self._apply_actions_as_ctrl_targets(actions=delta_hand_pose,
                                                ctrl_target_gripper_dof_pos=0.0,
                                                do_scale=False)
            self.gym.simulate(self.sim)
            self.render()
        self.enable_gravity(gravity_mag=abs(self.cfg_base.sim.gravity[2]))
        self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)
        self._reset_buffers(env_ids)
def _reset_franka(self, env_ids):
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = \
torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device).repeat((len(env_ids), 1)),
(self.nut_widths_max * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact
dim=-1) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(multi_env_ids_int32),
len(multi_env_ids_int32))
def _reset_object(self, env_ids):
    """Reset root states of nut and bolt.

    The nut is placed between the gripper fingers (with positional/rotational
    noise); the bolt is placed on the table (with XY noise). The new root
    states are then pushed to the simulator for both actors.

    NOTE(review): several noise tensors below are sampled with num_envs rows
    while other tensors are sized by len(env_ids) (e.g. nut_rot_euler vs.
    nut_noise_rot_in_gripper, and nut_base_pos_local). This only lines up when
    env_ids covers every env — true here because episode length is constant —
    confirm before introducing partial resets.
    """
    # shape of root_pos = (num_envs, num_actors, 3)
    # shape of root_quat = (num_envs, num_actors, 4)
    # shape of root_linvel = (num_envs, num_actors, 3)
    # shape of root_angvel = (num_envs, num_actors, 3)

    # Randomize root state of nut within gripper
    self.root_pos[env_ids, self.nut_actor_id_env, 0] = 0.0
    self.root_pos[env_ids, self.nut_actor_id_env, 1] = 0.0
    fingertip_midpoint_pos_reset = 0.58781  # self.fingertip_midpoint_pos at reset
    nut_base_pos_local = self.bolt_head_heights.squeeze(-1)
    self.root_pos[env_ids, self.nut_actor_id_env, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local

    nut_noise_pos_in_gripper = \
        2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
    nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag(
        torch.tensor(self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device))
    self.root_pos[env_ids, self.nut_actor_id_env, :] += nut_noise_pos_in_gripper[env_ids]

    # Nut starts rotated 90 deg about z, plus yaw noise
    nut_rot_euler = torch.tensor([0.0, 0.0, math.pi * 0.5], device=self.device).repeat(len(env_ids), 1)
    nut_noise_rot_in_gripper = \
        2 * (torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
    nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper
    nut_rot_euler[:, 2] += nut_noise_rot_in_gripper
    nut_rot_quat = torch_utils.quat_from_euler_xyz(nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2])
    self.root_quat[env_ids, self.nut_actor_id_env] = nut_rot_quat

    # Randomize root state of bolt
    bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
    bolt_noise_xy = bolt_noise_xy @ torch.diag(
        torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, dtype=torch.float32, device=self.device))
    self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \
                                                        bolt_noise_xy[env_ids, 0]
    self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \
                                                        bolt_noise_xy[env_ids, 1]
    self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height
    self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32,
                                                                   device=self.device).repeat(len(env_ids), 1)

    self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0
    self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0

    # Push new root states to the simulator for both actors at once
    nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids],
                                        self.bolt_actor_ids_sim[env_ids]),
                                       dim=0)
    self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                 gymtorch.unwrap_tensor(self.root_state),
                                                 gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim),
                                                 len(nut_bolt_actor_ids_sim))
def _reset_buffers(self, env_ids):
"""Reset buffers. """
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _set_viewer_params(self):
    """Point the viewer camera at the workspace from a fixed vantage point."""
    # Camera sits behind and above the table corner, looking toward its center.
    eye = gymapi.Vec3(-1.0, -1.0, 1.0)
    focus = gymapi.Vec3(0.0, 0.0, 0.5)
    self.gym.viewer_camera_look_at(self.viewer, None, eye, focus)
def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):
    """Apply actions from policy as position/rotation targets.

    Args:
        actions: (num_envs, num_actions) tensor in [-1, 1]. Columns 0:3 are a
            fingertip position delta, 3:6 an axis-angle rotation delta, and —
            when force control is enabled — 6:9 a force target and 9:12 a
            torque target.
        ctrl_target_gripper_dof_pos: DOF position target applied to the gripper fingers.
        do_scale: if True, scale each action slice by its configured scale vector.
    """
    # Interpret actions as target pos displacements and set pos target
    pos_actions = actions[:, 0:3]
    if do_scale:
        pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))
    self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions

    # Interpret actions as target rot (axis-angle) displacements
    rot_actions = actions[:, 3:6]
    if do_scale:
        rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))

    # Convert to quat and set rot target
    angle = torch.norm(rot_actions, p=2, dim=-1)
    # NOTE(review): this division yields NaN rows when angle == 0; the
    # clamp_rot where() below replaces those rows with the identity quat —
    # confirm clamp_rot stays enabled in config.
    axis = rot_actions / angle.unsqueeze(-1)
    rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
    if self.cfg_task.rl.clamp_rot:
        # Rotations below the threshold are snapped to the identity quaternion (x, y, z, w) = (0, 0, 0, 1)
        rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
                                       rot_actions_quat,
                                       torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,
                                                                                                     1))
    self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)

    if self.cfg_ctrl['do_force_ctrl']:
        # Interpret actions as target forces and target torques
        force_actions = actions[:, 6:9]
        if do_scale:
            force_actions = force_actions @ torch.diag(
                torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))

        torque_actions = actions[:, 9:12]
        if do_scale:
            torque_actions = torque_actions @ torch.diag(
                torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))

        self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)

    self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos

    self.generate_ctrl_signals()
def _open_gripper(self, sim_steps=20):
    """Fully open gripper using controller. Called outside RL loop (i.e., after last step of episode).

    Args:
        sim_steps: number of simulation steps to run while the fingers open.
    """
    # 0.1 is the fully-open finger DOF position target
    self._move_gripper_to_dof_pos(gripper_dof_pos=0.1, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20):
    """Drive the gripper fingers toward *gripper_dof_pos* and let the sim settle.

    The arm is held still (zero pose delta); only the gripper DOF target changes.
    """
    no_arm_motion = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
                                device=self.device)
    self._apply_actions_as_ctrl_targets(no_arm_motion, gripper_dof_pos, do_scale=False)

    # Advance the simulation so the fingers actually reach the target.
    step = 0
    while step < sim_steps:
        self.render()
        self.gym.simulate(self.sim)
        step += 1
def _lift_gripper(self, gripper_dof_pos=0.0, lift_distance=0.3, sim_steps=20):
    """Raise the gripper straight up by *lift_distance*. Post-episode helper (outside the RL loop)."""
    lift_pose = torch.zeros([self.num_envs, 6], device=self.device)
    lift_pose[:, 2] = lift_distance  # translate along +z only; no rotation
    # Re-issue the same control target every step while the sim advances.
    for _step in range(sim_steps):
        self._apply_actions_as_ctrl_targets(lift_pose, gripper_dof_pos, do_scale=False)
        self.render()
        self.gym.simulate(self.sim)
def _get_keypoint_offsets(self, num_keypoints):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
return keypoint_offsets
def _get_keypoint_dist(self):
"""Get keypoint distances."""
keypoint_dist = torch.sum(torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1)
return keypoint_dist
def _check_nut_close_to_bolt(self):
"""Check if nut is close to bolt."""
keypoint_dist = torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1)
is_nut_close_to_bolt = torch.where(torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,
torch.ones_like(self.progress_buf),
torch.zeros_like(self.progress_buf))
return is_nut_close_to_bolt
def _randomize_gripper_pose(self, env_ids, sim_steps):
    """Move gripper to a random pose above the table.

    Picks a noisy position/rotation target, then closed-loop drives the
    fingertip toward it for *sim_steps* simulation steps; finally zeroes DOF
    velocities and pushes the DOF state to the simulator.

    Args:
        env_ids: 1-D tensor of environment indices whose DOF state is written back.
        sim_steps: number of closed-loop control/simulation steps to run.
    """

    # Set target pos above table
    self.ctrl_target_fingertip_midpoint_pos = \
        torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \
        + torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device)
    self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
        self.num_envs, 1)

    fingertip_midpoint_pos_noise = \
        2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
    fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
        torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device))
    self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise

    # Set target rot
    ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial,
                                                        device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
    fingertip_midpoint_rot_noise = \
        2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
    fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
        torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device))
    ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
    self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
        ctrl_target_fingertip_midpoint_euler[:, 0],
        ctrl_target_fingertip_midpoint_euler[:, 1],
        ctrl_target_fingertip_midpoint_euler[:, 2])

    # Step sim and render: closed-loop pose servoing toward the target
    for _ in range(sim_steps):
        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()

        pos_error, axis_angle_error = fc.get_pose_error(
            fingertip_midpoint_pos=self.fingertip_midpoint_pos,
            fingertip_midpoint_quat=self.fingertip_midpoint_quat,
            ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
            ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
            jacobian_type=self.cfg_ctrl['jacobian_type'],
            rot_error_type='axis_angle')

        delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
        actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)
        actions[:, :6] = delta_hand_pose

        self._apply_actions_as_ctrl_targets(actions=actions,
                                            ctrl_target_gripper_dof_pos=0.0,
                                            do_scale=False)

        self.gym.simulate(self.sim)
        self.render()

    # Zero residual DOF velocities left over from the servoing phase
    self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])

    # Set DOF state
    multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
    self.gym.set_dof_state_tensor_indexed(self.sim,
                                          gymtorch.unwrap_tensor(self.dof_state),
                                          gymtorch.unwrap_tensor(multi_env_ids_int32),
                                          len(multi_env_ids_int32))
| 23,304 | Python | 49.226293 | 141 | 0.596421 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt screw task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskNutBoltScrew
Initial Franka/nut states are ideal for M16 nut-and-bolt.
In this example, initial state randomization is not applied; thus, policy should succeed almost instantly.
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.utils import torch_jit_utils as torch_utils
import isaacgymenvs.tasks.factory.factory_control as fc
from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask):
    """Nut-bolt screwing task: the policy twists an already-engaged nut down the bolt shank."""

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize instance variables. Initialize environment superclass."""
        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

        self.cfg = cfg
        self._get_task_yaml_params()
        self._acquire_task_tensors()
        self.parse_controller_spec()

        if self.cfg_task.sim.disable_gravity:
            self.disable_gravity()

        if self.viewer != None:
            self._set_viewer_params()

    def _get_task_yaml_params(self):
        """Initialize instance variables from YAML files (task config, asset info, PPO config)."""
        cs = hydra.core.config_store.ConfigStore.instance()
        cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)

        self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
        self.max_episode_length = self.cfg_task.rl.max_episode_length  # required instance var for VecTask

        asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
        self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting

        ppo_path = 'train/FactoryTaskNutBoltScrewPPO.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.cfg_ppo = hydra.compose(config_name=ppo_path)
        self.cfg_ppo = self.cfg_ppo['train']  # strip superfluous nesting

    def _acquire_task_tensors(self):
        """Acquire tensors: per-env target position for the nut (top of the bolt shank)."""
        # Target is directly above the table at bolt-head height plus half the nut height
        target_heights = self.cfg_base.env.table_height + self.bolt_head_heights + self.nut_heights * 0.5
        self.target_pos = target_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))

    def _refresh_task_tensors(self):
        """Refresh tensors: fingerpad midpoint, keypoint distances, and nut distances."""
        self.fingerpad_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos,
                                                                 quat=self.hand_quat,
                                                                 offset=self.asset_info_franka_table.franka_finger_length - self.asset_info_franka_table.franka_fingerpad_length * 0.5,
                                                                 device=self.device)
        self.finger_nut_keypoint_dist = self._get_keypoint_dist(body='finger_nut')
        self.nut_keypoint_dist = self._get_keypoint_dist(body='nut')
        self.nut_dist_to_target = torch.norm(self.target_pos - self.nut_com_pos, p=2,
                                             dim=-1)  # distance between nut COM and target
        self.nut_dist_to_fingerpads = torch.norm(self.fingerpad_midpoint_pos - self.nut_com_pos, p=2,
                                                 dim=-1)  # distance between nut COM and midpoint between centers of fingerpads

    def pre_physics_step(self, actions):
        """Reset environments. Apply actions from policy. Simulation step called after this method."""
        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.actions = actions.clone().to(self.device)  # shape = (num_envs, num_actions); values = [-1, 1]

        self._apply_actions_as_ctrl_targets(actions=self.actions,
                                            ctrl_target_gripper_dof_pos=0.0,
                                            do_scale=True)

    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
        self.progress_buf[:] += 1

        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()
        self.compute_observations()
        self.compute_reward()

    def compute_observations(self):
        """Compute observations: fingertip and nut COM pose/velocity (plus finger forces if configured)."""
        # Shallow copies of tensors
        obs_tensors = [self.fingertip_midpoint_pos,
                       self.fingertip_midpoint_quat,
                       self.fingertip_midpoint_linvel,
                       self.fingertip_midpoint_angvel,
                       self.nut_com_pos,
                       self.nut_com_quat,
                       self.nut_com_linvel,
                       self.nut_com_angvel]

        if self.cfg_task.rl.add_obs_finger_force:
            obs_tensors += [self.left_finger_force, self.right_finger_force]

        obs_tensors = torch.cat(obs_tensors, dim=-1)
        self.obs_buf[:, :obs_tensors.shape[-1]] = obs_tensors  # shape = (num_envs, num_observations)

        return self.obs_buf

    def compute_reward(self):
        """Detect successes and failures. Update reward and reset buffers."""
        # Get successful and failed envs at current timestep
        curr_successes = self._get_curr_successes()
        curr_failures = self._get_curr_failures(curr_successes)

        self._update_reset_buf(curr_successes, curr_failures)
        self._update_rew_buf(curr_successes)

    def _update_reset_buf(self, curr_successes, curr_failures):
        """Assign environments for reset if successful or failed."""
        # NOTE(review): writes a bool mask into reset_buf; downstream code
        # (nonzero() in pre_physics_step) handles either dtype — confirm
        # reset_buf's declared dtype if this is ever compared numerically.
        self.reset_buf[:] = torch.logical_or(curr_successes, curr_failures)

    def _update_rew_buf(self, curr_successes):
        """Compute reward at current timestep: keypoint closeness - action penalty + success bonus."""
        keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist)
        action_penalty = torch.norm(self.actions, p=2, dim=-1)
        self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
                          - action_penalty * self.cfg_task.rl.action_penalty_scale \
                          + curr_successes * self.cfg_task.rl.success_bonus

    def reset_idx(self, env_ids):
        """Reset specified environments. Zero buffers."""
        self._reset_franka(env_ids)
        self._reset_object(env_ids)

        self._reset_buffers(env_ids)

    def _reset_franka(self, env_ids):
        """Reset DOF states and DOF targets of Franka."""
        # Gripper DOFs open to half the nut width plus a 10% buffer
        self.dof_pos[env_ids] = torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos,
                                                        device=self.device).repeat((len(env_ids), 1)),
                                           (self.nut_widths_max[env_ids] * 0.5) * 1.1,  # buffer on gripper DOF pos to prevent initial contact
                                           (self.nut_widths_max[env_ids] * 0.5) * 1.1),  # buffer on gripper DOF pos to prevent initial contact
                                          dim=-1)  # shape = (num_envs, num_dofs)
        self.dof_vel[env_ids] = 0.0  # shape = (num_envs, num_dofs)
        self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]

        multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(multi_env_ids_int32),
                                              len(multi_env_ids_int32))

    def _reset_object(self, env_ids):
        """Reset root state of nut: placed on top of the bolt shank with a configurable initial yaw."""
        # shape of root_pos = (num_envs, num_actors, 3)
        # shape of root_quat = (num_envs, num_actors, 4)
        # shape of root_linvel = (num_envs, num_actors, 3)
        # shape of root_angvel = (num_envs, num_actors, 3)
        nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids]
        self.root_pos[env_ids, self.nut_actor_id_env] = \
            nut_pos * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat(len(env_ids), 1)

        # Initial yaw (degrees in config) encoded directly as a z-axis quaternion
        nut_rot = self.cfg_task.randomize.nut_rot_initial * torch.ones((len(env_ids), 1), device=self.device) * math.pi / 180.0
        self.root_quat[env_ids, self.nut_actor_id_env] = torch.cat((torch.zeros((len(env_ids), 1), device=self.device),
                                                                    torch.zeros((len(env_ids), 1), device=self.device),
                                                                    torch.sin(nut_rot * 0.5),
                                                                    torch.cos(nut_rot * 0.5)),
                                                                   dim=-1)

        self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0
        self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0

        # NOTE(review): pushes root state for ALL nut actors (not just env_ids);
        # harmless while every env resets at once — confirm before partial resets.
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state),
                                                     gymtorch.unwrap_tensor(self.nut_actor_ids_sim),
                                                     len(self.nut_actor_ids_sim))

    def _reset_buffers(self, env_ids):
        """Reset buffers."""
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def _set_viewer_params(self):
        """Set viewer parameters."""
        cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
        cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

    def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):
        """Apply actions from policy as position/rotation targets or force/torque targets.

        Columns of *actions*: 0:3 position delta, 3:6 axis-angle rotation delta,
        and (when force control is enabled) 6:9 force target, 9:12 torque target.
        """
        # Interpret actions as target pos displacements and set pos target
        pos_actions = actions[:, 0:3]
        if do_scale:
            pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))
        self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions

        # Interpret actions as target rot (axis-angle) displacements
        rot_actions = actions[:, 3:6]
        if self.cfg_task.rl.unidirectional_rot:
            # Restrict yaw action to the screwing direction by remapping [-1, 1] -> [-1, 0]
            rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5  # [-1, 0]
        if do_scale:
            rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))

        # Convert to quat and set rot target
        angle = torch.norm(rot_actions, p=2, dim=-1)
        # NOTE(review): division yields NaN rows when angle == 0; the clamp_rot
        # where() below replaces those rows with the identity quat — confirm
        # clamp_rot stays enabled in config.
        axis = rot_actions / angle.unsqueeze(-1)
        rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
        if self.cfg_task.rl.clamp_rot:
            rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
                                           rot_actions_quat,
                                           torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,
                                                                                                         1))
        self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)

        if self.cfg_ctrl['do_force_ctrl']:
            # Interpret actions as target forces and target torques
            force_actions = actions[:, 6:9]
            if self.cfg_task.rl.unidirectional_force:
                # Restrict z-force to pressing downward by remapping [-1, 1] -> [-1, 0]
                force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5  # [-1, 0]
            if do_scale:
                force_actions = force_actions @ torch.diag(
                    torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))

            torque_actions = actions[:, 9:12]
            if do_scale:
                torque_actions = torque_actions @ torch.diag(
                    torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))

            self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)

        self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos

        self.generate_ctrl_signals()

    def _get_keypoint_dist(self, body):
        """Get keypoint distances.

        Lays four keypoints along an axis through the named body ('finger',
        'nut', or 'finger_nut') and along a matching target axis, then returns
        the per-env sum of the four pairwise distances.
        """
        axis_length = self.asset_info_franka_table.franka_hand_length + self.asset_info_franka_table.franka_finger_length

        if body == 'finger' or body == 'nut':
            # Keypoint distance between finger/nut and target
            if body == 'finger':
                self.keypoint1 = self.fingertip_midpoint_pos
                self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1,
                                                            quat=self.fingertip_midpoint_quat,
                                                            offset=-axis_length,
                                                            device=self.device)

            elif body == 'nut':
                self.keypoint1 = self.nut_com_pos
                self.keypoint2 = fc.translate_along_local_z(pos=self.nut_com_pos,
                                                            quat=self.nut_com_quat,
                                                            offset=axis_length,
                                                            device=self.device)

            self.keypoint1_targ = self.target_pos
            self.keypoint2_targ = self.keypoint1_targ + torch.tensor([0.0, 0.0, axis_length], device=self.device)

        elif body == 'finger_nut':
            # Keypoint distance between finger and nut
            self.keypoint1 = self.fingerpad_midpoint_pos
            self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1,
                                                        quat=self.fingertip_midpoint_quat,
                                                        offset=-axis_length,
                                                        device=self.device)

            self.keypoint1_targ = self.nut_com_pos
            self.keypoint2_targ = fc.translate_along_local_z(pos=self.nut_com_pos,
                                                             quat=self.nut_com_quat,
                                                             offset=axis_length,
                                                             device=self.device)

        # Interior keypoints at 1/3 and 2/3 along each axis
        self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0
        self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0
        self.keypoint3_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0
        self.keypoint4_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0

        keypoint_dist = torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1) \
                        + torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1) \
                        + torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1) \
                        + torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1)

        return keypoint_dist

    def _get_curr_successes(self):
        """Get success mask at current timestep (nut within one thread pitch of the target)."""
        curr_successes = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device)

        # If nut is close enough to target pos
        is_close = torch.where(self.nut_dist_to_target < self.thread_pitches.squeeze(-1),
                               torch.ones_like(curr_successes),
                               torch.zeros_like(curr_successes))
        curr_successes = torch.logical_or(curr_successes, is_close)

        return curr_successes

    def _get_curr_failures(self, curr_successes):
        """Get failure mask at current timestep (episode timeout, nut too far, slipped, or fallen)."""
        curr_failures = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device)

        # If max episode length has been reached
        self.is_expired = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length,
                                      torch.ones_like(curr_failures),
                                      curr_failures)

        # If nut is too far from target pos
        self.is_far = torch.where(self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh,
                                  torch.ones_like(curr_failures),
                                  curr_failures)

        # If nut has slipped (distance-based definition)
        self.is_slipped = \
            torch.where(
                self.nut_dist_to_fingerpads > self.asset_info_franka_table.franka_fingerpad_length * 0.5 + self.nut_heights.squeeze(-1) * 0.5,
                torch.ones_like(curr_failures),
                curr_failures)
        self.is_slipped = torch.logical_and(self.is_slipped, torch.logical_not(curr_successes))  # ignore slip if successful

        # If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt)
        self.is_fallen = torch.logical_and(
            torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1) > self.bolt_widths.squeeze(-1) * 0.5,
            self.nut_com_pos[:, 2] < self.cfg_base.env.table_height + self.bolt_head_heights.squeeze(
                -1) + self.bolt_shank_lengths.squeeze(-1) + self.nut_heights.squeeze(-1) * 0.5)

        curr_failures = torch.logical_or(curr_failures, self.is_expired)
        curr_failures = torch.logical_or(curr_failures, self.is_far)
        curr_failures = torch.logical_or(curr_failures, self.is_slipped)
        curr_failures = torch.logical_or(curr_failures, self.is_fallen)

        return curr_failures
| 19,807 | Python | 50.183462 | 183 | 0.584238 |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_insertion.py | # Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for insertion task.
Inherits insertion environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskInsertion
Only the environment is provided; training a successful RL policy is an open research problem left to the user.
"""
import hydra
import math
import omegaconf
import os
import torch
from isaacgym import gymapi, gymtorch
from isaacgymenvs.tasks.factory.factory_env_insertion import FactoryEnvInsertion
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask
class FactoryTaskInsertion(FactoryEnvInsertion, FactoryABCTask):
    """Peg/plug insertion task scaffold: environment only; reward and reset logic are intentionally stubbed."""

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize instance variables. Initialize task superclass."""
        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

        self.cfg = cfg
        self._get_task_yaml_params()

        if self.viewer != None:
            self._set_viewer_params()

        if self.cfg_base.mode.export_scene:
            self.export_scene(label='franka_task_insertion')

    def _get_task_yaml_params(self):
        """Initialize instance variables from YAML files (task config, asset info, PPO config)."""
        cs = hydra.core.config_store.ConfigStore.instance()
        cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)

        self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
        self.max_episode_length = self.cfg_task.rl.max_episode_length  # required instance var for VecTask

        asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.asset_info_insertion = hydra.compose(config_name=asset_info_path)
        self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting

        ppo_path = 'train/FactoryTaskInsertionPPO.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.cfg_ppo = hydra.compose(config_name=ppo_path)
        self.cfg_ppo = self.cfg_ppo['train']  # strip superfluous nesting

    def _acquire_task_tensors(self):
        """Acquire tensors."""
        pass

    def _refresh_task_tensors(self):
        """Refresh tensors."""
        pass

    def pre_physics_step(self, actions):
        """Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""
        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self._actions = actions.clone().to(self.device)  # shape = (num_envs, num_actions); values = [-1, 1]

    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward."""
        self.progress_buf[:] += 1

        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()
        self.compute_observations()
        self.compute_reward()

    def compute_observations(self):
        """Compute observations."""
        return self.obs_buf  # shape = (num_envs, num_observations)

    def compute_reward(self):
        """Detect successes and failures. Update reward and reset buffers."""
        self._update_rew_buf()
        self._update_reset_buf()

    def _update_rew_buf(self):
        """Compute reward at current timestep (stub — left to the user)."""
        pass

    def _update_reset_buf(self):
        """Assign environments for reset if successful or failed (stub — left to the user)."""
        pass

    def reset_idx(self, env_ids):
        """Reset specified environments."""
        self._reset_franka(env_ids)
        self._reset_object(env_ids)

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def _reset_franka(self, env_ids):
        """Reset DOF states and DOF targets of Franka.

        NOTE(review): dof_pos is written for ALL envs (`[:, ...]`), not just
        env_ids, and the joint noise is sampled per env with a single scalar
        per env (broadcast over all DOFs); both are harmless while every env
        resets together — confirm before partial resets.
        """
        # shape of dof_pos = (num_envs, num_dofs)
        # shape of dof_vel = (num_envs, num_dofs)

        # Initialize Franka to middle of joint limits, plus joint noise
        franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0],
                                                             self.franka_handles[0])  # same across all envs
        lower_lims = franka_dof_props['lower']
        upper_lims = franka_dof_props['upper']
        self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \
                                                  + (torch.rand((self.num_envs, 1),
                                                                device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180

        self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0

        franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids]
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(franka_actor_ids_sim_int32),
                                              len(franka_actor_ids_sim_int32))

        self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs]
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos))

    def _reset_object(self, env_ids):
        """Reset root state of plug (randomized above the table, or exactly at the goal pose)."""
        # shape of root_pos = (num_envs, num_actors, 3)
        # shape of root_quat = (num_envs, num_actors, 4)
        # shape of root_linvel = (num_envs, num_actors, 3)
        # shape of root_angvel = (num_envs, num_actors, 3)

        if self.cfg_task.randomize.initial_state == 'random':
            self.root_pos[env_ids, self.plug_actor_id_env] = \
                torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy,
                           self.cfg_task.randomize.plug_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy,
                           torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.plug_bias_z)), dim=1)
        elif self.cfg_task.randomize.initial_state == 'goal':
            self.root_pos[env_ids, self.plug_actor_id_env] = torch.tensor([0.0, 0.0, self.cfg_base.env.table_height],
                                                                          device=self.device)

        self.root_linvel[env_ids, self.plug_actor_id_env] = 0.0
        self.root_angvel[env_ids, self.plug_actor_id_env] = 0.0

        plug_actor_ids_sim_int32 = self.plug_actor_ids_sim.to(dtype=torch.int32, device=self.device)
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state),
                                                     gymtorch.unwrap_tensor(plug_actor_ids_sim_int32[env_ids]),
                                                     len(plug_actor_ids_sim_int32[env_ids]))

    def _reset_buffers(self, env_ids):
        """Reset buffers."""
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def _set_viewer_params(self):
        """Set viewer parameters."""
        cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
        cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
| 9,283 | Python | 45.42 | 170 | 0.636971 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.