NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/__init__.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/core.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import abc
import typing
import uuid


class ChangeEvent(object):

    def __init__(self, added: typing.Tuple[str], modified: typing.Tuple[str], removed: typing.Tuple[str]):
        super(ChangeEvent, self).__init__()
        self.__added: typing.Tuple[str] = added
        self.__modified: typing.Tuple[str] = modified
        self.__removed: typing.Tuple[str] = removed

    def __str__(self):
        o = 'omni.universalmaterialmap.core.service.core.ChangeEvent('
        o += '\n\tadded: '
        o += ', '.join(self.__added)
        o += '\n\tmodified: '
        o += ', '.join(self.__modified)
        o += '\n\tremoved: '
        o += ', '.join(self.__removed)
        o += '\n)'
        return o

    @property
    def added(self) -> typing.Tuple[str]:
        return self.__added

    @property
    def modified(self) -> typing.Tuple[str]:
        return self.__modified

    @property
    def removed(self) -> typing.Tuple[str]:
        return self.__removed


class IDelegate(metaclass=abc.ABCMeta):
    """ Interface for an online library database table. """

    @abc.abstractmethod
    def get_ids(self) -> typing.List[str]:
        """ Returns a list of identifiers. """
        raise NotImplementedError

    @abc.abstractmethod
    def read(self, identifier: str) -> typing.Dict:
        """ Returns a JSON dictionary if an item by the given identifier exists - otherwise None. """
        raise NotImplementedError

    @abc.abstractmethod
    def write(self, identifier: str, contents: typing.Dict) -> str:
        """ Creates or updates an item by using the JSON contents data. """
        raise NotImplementedError

    @abc.abstractmethod
    def delete(self, identifier: str) -> None:
        """ Deletes an item by the given identifier if it exists. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_show_in_store(self, identifier: str) -> bool:
        """ States if the item by the given identifier can be revealed in the store. """
        raise NotImplementedError

    @abc.abstractmethod
    def show_in_store(self, identifier: str) -> None:
        """ Reveals the item by the given identifier in the store. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_poll(self) -> bool:
        """ States if delegate is able to poll file changes and provide subscription to those changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def start_polling(self) -> None:
        """ Starts monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def stop_polling(self) -> None:
        """ Stops monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def add_change_subscription(self, callback: typing.Callable[[ChangeEvent], typing.NoReturn]) -> uuid.UUID:
        """ Creates a subscription for file changes in the location managed by the delegate. """
        raise NotImplementedError

    @abc.abstractmethod
    def remove_change_subscription(self, subscription_id: uuid.UUID) -> None:
        """ Removes the subscription for file changes in the location managed by the delegate. """
        raise NotImplementedError
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/resources/__init__.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import os
import shutil
import json
import inspect

from ...data import FileUtility, Target, ConversionGraph, ConversionManifest


def __copy(source_path: str, destination_path: str) -> None:
    try:
        shutil.copy(source_path, destination_path)
    except Exception as error:
        print('Error installing UMM data. Unable to copy source "{0}" to destination "{1}".\nDetails: {2}'.format(source_path, destination_path, error))
        raise error


def __install_library(source_root: str, destination_root: str) -> None:
    source_root = source_root.replace('\\', '/')
    destination_root = destination_root.replace('\\', '/')
    for directory, sub_directories, filenames in os.walk(source_root):
        directory = directory.replace('\\', '/')
        destination_directory = directory.replace(source_root, destination_root)
        destination_directory_created = os.path.exists(destination_directory)
        for filename in filenames:
            if not filename.lower().endswith('.json'):
                continue
            source_path = '{0}/{1}'.format(directory, filename)
            destination_path = '{0}/{1}'.format(destination_directory, filename)
            if not destination_directory_created:
                try:
                    os.makedirs(destination_directory)
                    destination_directory_created = True
                except Exception as error:
                    print('Universal Material Map error installing data. Unable to create directory "{0}".\nDetails: {1}'.format(destination_directory, error))
                    raise error
            if not os.path.exists(destination_path):
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed "{0}".'.format(destination_path))
                continue
            try:
                with open(source_path, 'r') as fp:
                    source = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                print('Universal Material Map error installing data. Unable to read source "{0}".\nDetails: {1}'.format(source_path, error))
                raise error
            try:
                with open(destination_path, 'r') as fp:
                    destination = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                print('Warning: Universal Material Map error installing data. Unable to read destination "{0}". It is assumed that the installed version is more recent than the one attempted to be installed.\nDetails: {1}'.format(destination_path, error))
                continue
            if isinstance(source, Target) and isinstance(destination, Target):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionGraph) and isinstance(destination, ConversionGraph):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionManifest) and isinstance(destination, ConversionManifest):
                # Install only if the source manifest version is strictly newer.
                if source.version_major < destination.version_major:
                    continue
                if source.version_major == destination.version_major and source.version_minor <= destination.version_minor:
                    continue
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed the more recent revision #{0}.{1} of "{2}".'.format(source.version_major, source.version_minor, destination_path))
                continue


def install() -> None:
    current_path = inspect.getfile(inspect.currentframe()).replace('\\', '/')
    current_path = current_path[:current_path.rfind('/')]
    library_names = []
    for o in os.listdir(current_path):
        path = '{0}/{1}'.format(current_path, o)
        if os.path.isdir(path) and not o == '__pycache__':
            library_names.append(o)
    libraries_directory = os.path.expanduser('~').replace('\\', '/')
    if not libraries_directory.endswith('/Documents'):
        # os.path.expanduser() has different behaviour between 2.7 and 3
        libraries_directory = '{0}/Documents'.format(libraries_directory)
    libraries_directory = '{0}/Omniverse'.format(libraries_directory)
    for library_name in library_names:
        source_root = '{0}/{1}/UMMLibrary'.format(current_path, library_name)
        destination_root = '{0}/{1}/UMMLibrary'.format(libraries_directory, library_name)
        __install_library(source_root=source_root, destination_root=destination_root)
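A short usage sketch: `install()` is typically invoked once when the add-on loads, so the bundled UMMLibrary JSON documents are copied (or upgraded by revision) under `~/Documents/Omniverse/<library>/UMMLibrary` before the store reads them. This assumes the `omni` package is importable, i.e. the add-on is installed in Blender.

from omni.universalmaterialmap.core.service import resources

# Copies or upgrades the bundled library JSON files into the user's
# Documents/Omniverse folder; safe to call repeatedly.
resources.install()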
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/converter.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import sys import traceback import os import re import json import math import bpy import bpy_types from . import get_library, get_value, CORE_MATERIAL_PROPERTIES, create_template, developer_mode, get_template_data_by_shader_node, get_template_data_by_class_name, create_from_template from ..core.converter.core import ICoreConverter, IObjectConverter, IDataConverter from ..core.converter import util from ..core.service import store from ..core.data import Plug, ConversionManifest, DagNode, ConversionGraph, TargetInstance from ..core.util import get_extension_from_image_file_format __initialized: bool = False __manifest: ConversionManifest = None def _get_manifest() -> ConversionManifest: if not getattr(sys.modules[__name__], '__manifest'): setattr(sys.modules[__name__], '__manifest', store.get_conversion_manifest(library=get_library())) if developer_mode: manifest: ConversionManifest = getattr(sys.modules[__name__], '__manifest') print('UMM DEBUG: blender.converter._get_manifest(): num entries = "{0}"'.format(len(manifest.conversion_maps))) for conversion_map in manifest.conversion_maps: print('UMM DEBUG: blender.converter._get_manifest(): Entry: graph_id = "{0}", render_context = "{1}"'.format(conversion_map.conversion_graph_id, conversion_map.render_context)) return getattr(sys.modules[__name__], '__manifest') def _get_conversion_graph_impl(source_class: str, render_context: str) -> typing.Union[ConversionGraph, typing.NoReturn]: if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl(source_class="{0}", render_context="{1}")'.format(source_class, render_context)) for conversion_map in _get_manifest().conversion_maps: if not conversion_map.render_context == render_context: if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.render_context "{0}" != "{1}")'.format(conversion_map.render_context, render_context)) continue if not conversion_map.conversion_graph: if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph "{0}")'.format(conversion_map.conversion_graph)) continue if not conversion_map.conversion_graph.source_node: if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.source_node "{0}")'.format(conversion_map.conversion_graph.source_node)) continue if not conversion_map.conversion_graph.source_node.target.root_node.class_name == source_class: if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph.source_node.target.root_node.class_name "{0}" != 
"{1}")'.format(conversion_map.conversion_graph.source_node.target.root_node.class_name, source_class)) continue if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found match "{0}")'.format(conversion_map.conversion_graph.filename)) return conversion_map.conversion_graph if developer_mode: print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found no match!)') return None def _instance_to_output_entity(graph: ConversionGraph, instance: object) -> TargetInstance: if developer_mode: print('_instance_to_output_entity') for output in graph.source_node.outputs: if output.name == 'node_id_output': continue if util.can_set_plug_value(instance=instance, plug=output): util.set_plug_value(instance=instance, plug=output) else: print('UMM Warning: Unable to set output plug "{0}"... using default value of "{1}"'.format(output.name, output.default_value)) output.value = output.default_value return graph.get_output_entity() def _data_to_output_entity(graph: ConversionGraph, data: typing.List[typing.Tuple[str, typing.Any]]) -> TargetInstance: for output in graph.source_node.outputs: if output.name == 'node_id_output': continue o = [o for o in data if o[0] == output.name] if len(o): output.value = o[0][1] else: output.value = output.default_value return graph.get_output_entity() def _instance_to_data(instance: object, graph: ConversionGraph) -> typing.List[typing.Tuple[str, typing.Any]]: target_instance = _instance_to_output_entity(graph=graph, instance=instance) if developer_mode: print('_instance_to_data') print('\ttarget_instance.target.store_id', target_instance.target.store_id) # Compute target attribute values attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)] for plug in target_instance.inputs: if not plug.input: continue if developer_mode: print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid)) if plug.is_invalid and isinstance(plug.parent, DagNode): plug.parent.compute() if developer_mode: print('\t{} computed value = {}'.format(plug.name, plug.computed_value)) attribute_data.append((plug.name, plug.computed_value)) return attribute_data def _to_convertible_instance(instance: object, material: bpy.types.Material = None) -> object: if developer_mode: print('_to_convertible_instance', type(instance)) if material is None: if isinstance(instance, bpy.types.Material): material = instance else: for m in bpy.data.materials: if not m.use_nodes: continue if not len([o for o in m.node_tree.nodes if o == instance]): continue material = m break if material is None: return instance if not material.use_nodes: return material if instance == material: # Find the Surface Shader. 
for link in material.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial): continue if not link.to_socket.name == 'Surface': continue result = _to_convertible_instance(instance=link.from_node, material=material) if result is not None: return result # No surface shader found - return instance return instance if isinstance(instance, bpy.types.ShaderNodeAddShader): for link in material.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.to_node == instance: continue # if not link.to_socket.name == 'Shader': # continue result = _to_convertible_instance(instance=link.from_node, material=material) if result is not None: return result # if isinstance(instance, bpy.types.ShaderNodeBsdfGlass): # return instance # if isinstance(instance, bpy.types.ShaderNodeBsdfGlossy): # return instance if isinstance(instance, bpy.types.ShaderNodeBsdfPrincipled): return instance # if isinstance(instance, bpy.types.ShaderNodeBsdfRefraction): # return instance # if isinstance(instance, bpy.types.ShaderNodeBsdfTranslucent): # return instance # if isinstance(instance, bpy.types.ShaderNodeBsdfTransparent): # return instance # if isinstance(instance, bpy.types.ShaderNodeEeveeSpecular): # return instance # if isinstance(instance, bpy.types.ShaderNodeEmission): # return instance # if isinstance(instance, bpy.types.ShaderNodeSubsurfaceScattering): # return instance return None class CoreConverter(ICoreConverter): def __init__(self): super(CoreConverter, self).__init__() def get_conversion_manifest(self) -> typing.List[typing.Tuple[str, str]]: """ Returns data indicating what source class can be converted to a render context. Example: [('lambert', 'MDL'), ('blinn', 'MDL'),] """ output = [] for conversion_map in _get_manifest().conversion_maps: if not conversion_map.render_context: continue if not conversion_map.conversion_graph: continue if not conversion_map.conversion_graph.source_node: continue output.append((conversion_map.conversion_graph.source_node.target.root_node.class_name, conversion_map.render_context)) return output class ObjectConverter(CoreConverter, IObjectConverter): """ """ MATERIAL_CLASS = 'bpy.types.Material' SHADER_NODES = [ 'bpy.types.ShaderNodeBsdfGlass', 'bpy.types.ShaderNodeBsdfGlossy', 'bpy.types.ShaderNodeBsdfPrincipled', 'bpy.types.ShaderNodeBsdfRefraction', 'bpy.types.ShaderNodeBsdfTranslucent', 'bpy.types.ShaderNodeBsdfTransparent', 'bpy.types.ShaderNodeEeveeSpecular', 'bpy.types.ShaderNodeEmission', 'bpy.types.ShaderNodeSubsurfaceScattering', ] def can_create_instance(self, class_name: str) -> bool: """ Returns true if worker can generate an object of the given class name. """ if class_name == ObjectConverter.MATERIAL_CLASS: return True return class_name in ObjectConverter.SHADER_NODES def create_instance(self, class_name: str, name: str = 'material') -> object: """ Creates an object of the given class name. """ material = bpy.data.materials.new(name=name) if class_name in ObjectConverter.SHADER_NODES: material.use_nodes = True return material def can_set_plug_value(self, instance: object, plug: Plug) -> bool: """ Returns true if worker can set the plug's value given the instance and its attributes. 
""" if plug.input: return False if isinstance(instance, bpy.types.Material): for o in CORE_MATERIAL_PROPERTIES: if o[0] == plug.name: return hasattr(instance, plug.name) return False if isinstance(instance, bpy_types.ShaderNode): return len([o for o in instance.inputs if o.name == plug.name]) == 1 return False def set_plug_value(self, instance: object, plug: Plug) -> typing.NoReturn: """ Sets the plug's value given the value of the instance's attribute named the same as the plug. """ if isinstance(instance, bpy.types.Material): plug.value = getattr(instance, plug.name) if developer_mode: print('set_plug_value') print('\tinstance', type(instance)) print('\tname', plug.name) print('\tvalue', plug.value) return inputs = [o for o in instance.inputs if o.name == plug.name] if not len(inputs) == 1: return plug.value = get_value(socket=inputs[0]) if developer_mode: # print('set_plug_value') # print('\tinstance', type(instance)) # print('\tname', plug.name) # print('\tvalue', plug.value) print('\tset_plug_value: {} = {}'.format(plug.name, plug.value)) def can_set_instance_attribute(self, instance: object, name: str): """ Resolves if worker can set an attribute by the given name on the instance. """ return False def set_instance_attribute(self, instance: object, name: str, value: typing.Any) -> typing.NoReturn: """ Sets the named attribute on the instance to the value. """ raise NotImplementedError() def can_convert_instance(self, instance: object, render_context: str) -> bool: """ Resolves if worker can convert the instance to another object given the render_context. """ return False def convert_instance_to_instance(self, instance: object, render_context: str) -> typing.Any: """ Converts the instance to another object given the render_context. """ raise NotImplementedError() def can_convert_instance_to_data(self, instance: object, render_context: str) -> bool: """ Resolves if worker can convert the instance to another object given the render_context. """ node = _to_convertible_instance(instance=instance) if node is not None and not node == instance: if developer_mode: print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node))) instance = node template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance) if template is None: class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__) conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context) if not conversion_graph: return False try: destination_target_instance = _instance_to_output_entity(graph=conversion_graph, instance=instance) except Exception as error: print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error)) return False return destination_target_instance is not None else: conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context) return conversion_graph is not None def convert_instance_to_data(self, instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]: """ Returns a list of key value pairs in tuples. The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class. 
""" node = _to_convertible_instance(instance=instance) if node is not None and not node == instance: if developer_mode: print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node))) instance = node template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance) if template is None: class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__) conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context) return _instance_to_data(instance=instance, graph=conversion_graph) else: conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context) if developer_mode: print('conversion_graph', conversion_graph.filename) # set plug values on conversion_graph.source_node.outputs for output in conversion_graph.source_node.outputs: if output.name == 'node_id_output': continue if developer_mode: print('output', output.name) internal_node = None for a in conversion_graph.source_node.target.nodes: for b in a.outputs: if output.id == b.id: internal_node = a break if internal_node is not None: break if internal_node is None: raise NotImplementedError(f"No internal node found for {output.name}") map_definition = None for o in template_map['maps']: if o['blender_node'] == internal_node.id and o['blender_socket'] == output.name: map_definition = o break if map_definition is None: raise NotImplementedError(f"No map definition found for {output.name}") if developer_mode: print('map_definition', map_definition['blender_node']) if map_definition['blender_node'] == '': output.value = output.default_value if developer_mode: print('output.value', output.value) continue for shader_node in material.node_tree.nodes: if not shader_node.name == map_definition['blender_node']: continue if isinstance(shader_node, bpy.types.ShaderNodeTexImage): if map_definition['blender_socket'] == 'image': if shader_node.image and (shader_node.image.source == 'FILE' or shader_node.image.source == 'TILED'): print(f'UMM: image.filepath: "{shader_node.image.filepath}"') print(f'UMM: image.source: "{shader_node.image.source}"') print(f'UMM: image.file_format: "{shader_node.image.file_format}"') value = shader_node.image.filepath if (shader_node.image.source == 'TILED'): # Find all numbers in the path. numbers = re.findall('[0-9]+', value) if (len(numbers) > 0): # Get the string representation of the last number. num_str = str(numbers[-1]) # Replace the number substring with '<UDIM>'. split_items = value.rsplit(num_str, 1) if (len(split_items) == 2): value = split_items[0] + '<UDIM>' + split_items[1] try: if value is None or value == '': file_format = shader_node.image.file_format file_format = get_extension_from_image_file_format(file_format, shader_node.image.name) if not shader_node.image.name.endswith(file_format): value = f'{shader_node.image.name}.{file_format}' else: value = shader_node.image.name output.value = [value, shader_node.image.colorspace_settings.name] else: output.value = [os.path.abspath(bpy.path.abspath(value)), shader_node.image.colorspace_settings.name] except Exception as error: print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". 
Detail: {1}'.format(shader_node.image.filepath, error)) output.value = ['', 'raw'] print(f'UMM: output.value: "{output.value}"') else: if developer_mode: print('setting default value for output.value') if not shader_node.image: print('\tshader_node.image == None') else: print('\tshader_node.image.source == {}'.format(shader_node.image.source)) output.value = ['', 'raw'] if developer_mode: print('output.value', output.value) break raise NotImplementedError(f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}") if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled): socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']] output.value = socket.default_value if developer_mode: print('output.value', output.value) break if isinstance(shader_node, bpy.types.ShaderNodeGroup): if map_definition['blender_socket'] not in shader_node.inputs.keys(): if developer_mode: print(f'{map_definition["blender_socket"]} not in shader_node.inputs.keys()') break socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']] output.value = socket.default_value if developer_mode: print('output.value', output.value) break if isinstance(shader_node, bpy.types.ShaderNodeMapping): socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']] value = socket.default_value if output.name == 'Rotation': value = [ math.degrees(value[0]), math.degrees(value[1]), math.degrees(value[2]) ] output.value = value if developer_mode: print('output.value', output.value) break # compute to target_instance for output target_instance = conversion_graph.get_output_entity() if developer_mode: print('_instance_to_data') print('\ttarget_instance.target.store_id', target_instance.target.store_id) # Compute target attribute values attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)] for plug in target_instance.inputs: if not plug.input: continue if developer_mode: print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid)) if plug.is_invalid and isinstance(plug.parent, DagNode): plug.parent.compute() if developer_mode: print('\t{} computed value = {}'.format(plug.name, plug.computed_value)) value = plug.computed_value if plug.internal_value_type == 'bool': value = True if value else False attribute_data.append((plug.name, value)) return attribute_data def can_convert_attribute_values(self, instance: object, render_context: str, destination: object) -> bool: """ Resolves if the instance's attribute values can be converted and set on the destination object's attributes. """ raise NotImplementedError() def convert_attribute_values(self, instance: object, render_context: str, destination: object) -> typing.NoReturn: """ Attribute values are converted and set on the destination object's attributes. """ raise NotImplementedError() def can_apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool: """ Resolves if worker can convert the instance to another object given the render_context. 
""" if developer_mode: print('can_apply_data_to_instance()') if not isinstance(instance, bpy.types.Material): if developer_mode: print('can_apply_data_to_instance: FALSE - instance not bpy.types.Material') return False if not render_context == 'Blender': if developer_mode: print('can_apply_data_to_instance: FALSE - render_context not "Blender"') return False conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context) if not conversion_graph: if developer_mode: print('can_apply_data_to_instance: FALSE - conversion_graph is None') return False if developer_mode: print(f'conversion_graph {conversion_graph.filename}') try: destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data) except Exception as error: print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error)) return False if developer_mode: if destination_target_instance is None: print('destination_target_instance is None') elif destination_target_instance is None: print('destination_target_instance.target is None') else: print('destination_target_instance.target is not None') if destination_target_instance is None or destination_target_instance.target is None: return False if developer_mode: print(f'num destination_target_instance.target.nodes: {len(destination_target_instance.target.nodes)}') if len(destination_target_instance.target.nodes) < 2: return True template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name) if developer_mode: print(f'return {template is not None}') return template is not None def apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> None: """ Implementation requires that `instance` is type `bpy.types.Material`. """ if developer_mode: print('apply_data_to_instance()') if not isinstance(instance, bpy.types.Material): raise Exception('instance type not supported', type(instance)) if not render_context == 'Blender': raise Exception('render_context not supported', render_context) conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context) # This only works for Blender import of MDL/USDPreview. Blender export would need to use convert_instance_to_data(). 
destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data) material: bpy.types.Material = instance # Make sure we're using nodes material.use_nodes = True # Remove existing nodes - we're starting from scratch - assuming Blender import to_delete = [o for o in material.node_tree.nodes] while len(to_delete): material.node_tree.nodes.remove(to_delete.pop()) if len(destination_target_instance.target.nodes) < 2: # Create base graph output_node = material.node_tree.nodes.new('ShaderNodeOutputMaterial') output_node.location = [300.0, 300.0] bsdf_node = material.node_tree.nodes.new('ShaderNodeBsdfPrincipled') bsdf_node.location = [0.0, 300.0] material.node_tree.links.new(bsdf_node.outputs[0], output_node.inputs[0]) node_cache = dict() node_location = [-500, 300] # Create graph if texture value for plug in destination_target_instance.inputs: if not plug.input: continue if isinstance(plug.computed_value, list) or isinstance(plug.computed_value, tuple): if len(plug.computed_value) == 2 and isinstance(plug.computed_value[0], str) and isinstance(plug.computed_value[1], str): key = '{0}|{1}'.format(plug.computed_value[0], plug.computed_value[1]) if key in node_cache.keys(): node = node_cache[key] else: try: path = plug.computed_value[0] if not path == '': node = material.node_tree.nodes.new('ShaderNodeTexImage') path = plug.computed_value[0] if '<UDIM>' in path: pattern = path.replace('\\', '/') pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]') directory = pattern[:pattern.rfind('/') + 1] pattern = pattern.replace(directory, '') image_set = False for item in os.listdir(directory): if re.match(pattern, item): tile_path = '{}{}'.format(directory, item) if not os.path.isfile(tile_path): continue if not image_set: node.image = bpy.data.images.load(tile_path) node.image.source = 'TILED' image_set = True continue tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item) node.image.tiles.new(int(tile_indexes[-1])) else: node.image = bpy.data.images.load(path) node.image.colorspace_settings.name = plug.computed_value[1] else: continue except Exception as error: print('Warning: UMM failed to properly setup a ShaderNodeTexImage. 
Details: {0}\n{1}'.format(error, traceback.format_exc())) continue node_cache[key] = node node.location = node_location node_location[1] -= 300 bsdf_input = [o for o in bsdf_node.inputs if o.name == plug.name][0] if plug.name == 'Metallic': separate_node = None for link in material.node_tree.links: if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB': separate_node = link.to_node break if separate_node is None: separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB') separate_node.location = [node.location[0] + 250, node.location[1]] material.node_tree.links.new(node.outputs[0], separate_node.inputs[0]) material.node_tree.links.new(separate_node.outputs[2], bsdf_input) elif plug.name == 'Roughness': separate_node = None for link in material.node_tree.links: if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB': separate_node = link.to_node break if separate_node is None: separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB') separate_node.location = [node.location[0] + 250, node.location[1]] material.node_tree.links.new(node.outputs[0], separate_node.inputs[0]) material.node_tree.links.new(separate_node.outputs[1], bsdf_input) elif plug.name == 'Normal': normal_node = None for link in material.node_tree.links: if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeNormalMap': normal_node = link.to_node break if normal_node is None: normal_node = material.node_tree.nodes.new('ShaderNodeNormalMap') normal_node.location = [node.location[0] + 250, node.location[1]] material.node_tree.links.new(node.outputs[0], normal_node.inputs[1]) material.node_tree.links.new(normal_node.outputs[0], bsdf_input) else: material.node_tree.links.new(node.outputs[0], bsdf_input) continue # Set Value blender_inputs = [o for o in bsdf_node.inputs if o.name == plug.name] if len(blender_inputs) == 0: for property_name, property_object in bsdf_node.rna_type.properties.items(): if not property_name == plug.name: continue if property_object.is_readonly: break try: setattr(bsdf_node, property_name, plug.computed_value) except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, plug.computed_value, error)) else: if isinstance(blender_inputs[0], bpy.types.NodeSocketShader): continue try: blender_inputs[0].default_value = plug.computed_value except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(plug.name, plug.computed_value, error)) return if developer_mode: print(f'TEMPLATE CREATION BASED ON {destination_target_instance.target.root_node.class_name}') # find template to use template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name) if developer_mode: print(f"TEMPLATE NAME {template['name']}") # create graph create_from_template(material=material, template=template) # set attributes use_albedo_map = False use_normal_map = False use_detail_normal_map = False use_emission_map = False for input_plug in destination_target_instance.inputs: # if developer_mode: # print('input_plug', input_plug.name) internal_node = None for a in destination_target_instance.target.nodes: for b in a.inputs: if input_plug.id == b.id: internal_node = a break if internal_node is not None: break if internal_node is None: raise NotImplementedError(f"No internal node found for 
{input_plug.name}") map_definition = None for o in template_map['maps']: if o['blender_node'] == internal_node.id and o['blender_socket'] == input_plug.name: map_definition = o break if map_definition is None: raise NotImplementedError(f"No map definition found for {internal_node.id} {input_plug.name}") for shader_node in material.node_tree.nodes: if not shader_node.name == map_definition['blender_node']: continue # if developer_mode: # print(f'node: {shader_node.name}') if isinstance(shader_node, bpy.types.ShaderNodeTexImage): if map_definition['blender_socket'] == 'image': # if developer_mode: # print(f'\tbpy.types.ShaderNodeTexImage: path: {input_plug.computed_value[0]}') # print(f'\tbpy.types.ShaderNodeTexImage: colorspace: {input_plug.computed_value[1]}') path = input_plug.computed_value[0] if not path == '': if '<UDIM>' in path: pattern = path.replace('\\', '/') pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]') directory = pattern[:pattern.rfind('/') + 1] pattern = pattern.replace(directory, '') image_set = False for item in os.listdir(directory): if re.match(pattern, item): tile_path = '{}{}'.format(directory, item) if not os.path.isfile(tile_path): continue if not image_set: shader_node.image = bpy.data.images.load(tile_path) shader_node.image.source = 'TILED' image_set = True continue tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item) shader_node.image.tiles.new(int(tile_indexes[-1])) else: shader_node.image = bpy.data.images.load(path) if map_definition['blender_node'] == 'Albedo Map': use_albedo_map = True if map_definition['blender_node'] == 'Normal Map': use_normal_map = True if map_definition['blender_node'] == 'Detail Normal Map': use_detail_normal_map = True if map_definition['blender_node'] == 'Emissive Map': use_emission_map = True shader_node.image.colorspace_settings.name = input_plug.computed_value[1] continue raise NotImplementedError( f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}") if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled): blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name] if len(blender_inputs) == 0: for property_name, property_object in shader_node.rna_type.properties.items(): if not property_name == input_plug.name: continue if property_object.is_readonly: break try: setattr(shader_node, property_name, input_plug.computed_value) except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error)) else: if isinstance(blender_inputs[0], bpy.types.NodeSocketShader): continue try: blender_inputs[0].default_value = input_plug.computed_value except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error)) continue if isinstance(shader_node, bpy.types.ShaderNodeGroup): blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name] if len(blender_inputs) == 0: for property_name, property_object in shader_node.rna_type.properties.items(): if not property_name == input_plug.name: continue if property_object.is_readonly: break try: setattr(shader_node, property_name, input_plug.computed_value) except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error)) else: if 
isinstance(blender_inputs[0], bpy.types.NodeSocketShader): continue try: blender_inputs[0].default_value = input_plug.computed_value except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error)) continue if isinstance(shader_node, bpy.types.ShaderNodeMapping): blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name] value = input_plug.computed_value if input_plug.name == 'Rotation': value[0] = math.radians(value[0]) value[1] = math.radians(value[1]) value[2] = math.radians(value[2]) if len(blender_inputs) == 0: for property_name, property_object in shader_node.rna_type.properties.items(): if not property_name == input_plug.name: continue if property_object.is_readonly: break try: setattr(shader_node, property_name, value) except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error)) else: if isinstance(blender_inputs[0], bpy.types.NodeSocketShader): continue try: blender_inputs[0].default_value = value except Exception as error: print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error)) continue # UX assist with special attributes for shader_node in material.node_tree.nodes: if shader_node.name == 'OmniPBR Compute' and isinstance(shader_node, bpy.types.ShaderNodeGroup): shader_node.inputs['Use Albedo Map'].default_value = 1 if use_albedo_map else 0 shader_node.inputs['Use Normal Map'].default_value = 1 if use_normal_map else 0 shader_node.inputs['Use Detail Normal Map'].default_value = 1 if use_detail_normal_map else 0 shader_node.inputs['Use Emission Map'].default_value = 1 if use_emission_map else 0 break class DataConverter(CoreConverter, IDataConverter): """ """ def can_convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool: """ Resolves if worker can convert the given class and source_data to another class and target data. """ conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context) if not conversion_graph: return False try: destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data) except Exception as error: print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error)) return False return destination_target_instance is not None def convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]: """ Returns a list of key value pairs in tuples. The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class. 
""" if developer_mode: print('UMM DEBUG: DataConverter.convert_data_to_data()') print('\tclass_name="{0}"'.format(class_name)) print('\trender_context="{0}"'.format(render_context)) print('\tsource_data=[') for o in source_data: if o[1] == '': print('\t\t("{0}", ""),'.format(o[0])) continue print('\t\t("{0}", {1}),'.format(o[0], o[1])) print('\t]') conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context) destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data) attribute_data = [(util.TARGET_CLASS_IDENTIFIER, destination_target_instance.target.root_node.class_name)] for plug in destination_target_instance.inputs: if not plug.input: continue if plug.is_invalid and isinstance(plug.parent, DagNode): plug.parent.compute() attribute_data.append((plug.name, plug.computed_value)) return attribute_data class OT_InstanceToDataConverter(bpy.types.Operator): bl_idname = 'universalmaterialmap.instance_to_data_converter' bl_label = 'Universal Material Map Converter Operator' bl_description = 'Universal Material Map Converter' def execute(self, context): print('Conversion Operator: execute') # Get object by name: bpy.data.objects['Cube'] # Get material by name: bpy.data.materials['MyMaterial'] # node = [o for o in bpy.context.active_object.active_material.node_tree.nodes if o.select][0] print('selected_node', bpy.context.active_object, type(bpy.context.active_object)) # print('\n'.join(dir(bpy.context.active_object))) material_slot: bpy.types.MaterialSlot # https://docs.blender.org/api/current/bpy.types.MaterialSlot.html?highlight=materialslot#bpy.types.MaterialSlot for material_slot in bpy.context.active_object.material_slots: material: bpy.types.Material = material_slot.material if material.node_tree: for node in material.node_tree.nodes: if isinstance(node, bpy.types.ShaderNodeOutputMaterial): for input in node.inputs: if not input.type == 'SHADER': continue if not input.is_linked: continue for link in input.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.is_valid: continue instance = link.from_node for render_context in ['MDL', 'USDPreview']: if util.can_convert_instance_to_data(instance=instance, render_context=render_context): util.convert_instance_to_data(instance=instance, render_context=render_context) else: print('Information: Universal Material Map: Not able to convert instance "{0}" to data with render context "{1}"'.format(instance, render_context)) else: instance = material for render_context in ['MDL', 'USDPreview']: if util.can_convert_instance_to_data(instance=instance, render_context=render_context): util.convert_instance_to_data(instance=instance, render_context=render_context) else: print('Information: Universal Material Map: Not able to convert instance "{0}" to data with render context "{1}"'.format(instance, render_context)) return {'FINISHED'} class OT_DataToInstanceConverter(bpy.types.Operator): bl_idname = 'universalmaterialmap.data_to_instance_converter' bl_label = 'Universal Material Map Converter Operator' bl_description = 'Universal Material Map Converter' def execute(self, context): render_context = 'Blender' source_class = 'OmniPBR.mdl|OmniPBR' sample_data = [ ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)), ('diffuse_texture', ''), ('reflection_roughness_constant', 0.4000000059604645), ('reflectionroughness_texture', ''), ('metallic_constant', 0.0), ('metallic_texture', ''), ('specular_level', 0.5), ('enable_emission', True), 
('emissive_color', (0.0, 0.0, 0.0)), ('emissive_color_texture', ''), ('emissive_intensity', 1.0), ('normalmap_texture', ''), ('enable_opacity', True), ('opacity_constant', 1.0), ] if util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data): converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data) destination_class = converted_data[0][1] if util.can_create_instance(class_name=destination_class): instance = util.create_instance(class_name=destination_class) print('instance "{0}".'.format(instance)) temp = converted_data[:] while len(temp): item = temp.pop(0) property_name = item[0] property_value = item[1] if util.can_set_instance_attribute(instance=instance, name=property_name): util.set_instance_attribute(instance=instance, name=property_name, value=property_value) else: print('Cannot create instance from "{0}".'.format(source_class)) return {'FINISHED'} class OT_DataToDataConverter(bpy.types.Operator): bl_idname = 'universalmaterialmap.data_to_data_converter' bl_label = 'Universal Material Map Converter Operator' bl_description = 'Universal Material Map Converter' def execute(self, context): render_context = 'Blender' source_class = 'OmniPBR.mdl|OmniPBR' sample_data = [ ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)), ('diffuse_texture', ''), ('reflection_roughness_constant', 0.4000000059604645), ('reflectionroughness_texture', ''), ('metallic_constant', 0.0), ('metallic_texture', ''), ('specular_level', 0.5), ('enable_emission', True), ('emissive_color', (0.0, 0.0, 0.0)), ('emissive_color_texture', ''), ('emissive_intensity', 1.0), ('normalmap_texture', ''), ('enable_opacity', True), ('opacity_constant', 1.0), ] if util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data): converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data) print('converted_data:', converted_data) else: print('UMM Failed to convert data. 
util.can_convert_data_to_data() returned False') return {'FINISHED'} class OT_ApplyDataToInstance(bpy.types.Operator): bl_idname = 'universalmaterialmap.apply_data_to_instance' bl_label = 'Universal Material Map Apply Data To Instance Operator' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} instance = bpy.context.active_object.active_material render_context = 'Blender' source_class = 'OmniPBR.mdl|OmniPBR' sample_data = [ ('albedo_add', 0.02), # Adds a constant value to the diffuse color ('albedo_desaturation', 0.19999999), # Desaturates the diffuse color ('ao_texture', ('', 'raw')), ('ao_to_diffuse', 1), # Controls the amount of ambient occlusion multiplied into the diffuse color channel ('bump_factor', 10), # Strength of normal map ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)), ('diffuse_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_albedo.png', 'sRGB')), ('diffuse_tint', (0.96202534, 0.8118357, 0.8118357)), # When enabled, this color value is multiplied over the final albedo color ('enable_emission', 0), ('enable_ORM_texture', 1), ('metallic_constant', 1), ('metallic_texture', ('', 'raw')), ('metallic_texture_influence', 1), ('normalmap_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_normal.png', 'raw')), ('ORM_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_orm.png', 'raw')), ('reflection_roughness_constant', 1), # Higher roughness values lead to more blurry reflections ('reflection_roughness_texture_influence', 1), # Blends between the constant value and the lookup of the roughness texture ('reflectionroughness_texture', ('', 'raw')), ('texture_rotate', 45), ('texture_scale', (2, 2)), ('texture_translate', (0.1, 0.9)), ] if util.can_apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance): util.apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance) else: print('UMM Failed to convert data. 
util.can_convert_data_to_data() returned False') return {'FINISHED'} class OT_CreateTemplateOmniPBR(bpy.types.Operator): bl_idname = 'universalmaterialmap.create_template_omnipbr' bl_label = 'Convert to OmniPBR Graph' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} create_template(source_class='OmniPBR', material=bpy.context.active_object.active_material) return {'FINISHED'} class OT_CreateTemplateOmniGlass(bpy.types.Operator): bl_idname = 'universalmaterialmap.create_template_omniglass' bl_label = 'Convert to OmniGlass Graph' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} create_template(source_class='OmniGlass', material=bpy.context.active_object.active_material) return {'FINISHED'} class OT_DescribeShaderGraph(bpy.types.Operator): bl_idname = 'universalmaterialmap.describe_shader_graph' bl_label = 'Universal Material Map Describe Shader Graph Operator' bl_description = 'Universal Material Map' @staticmethod def describe_node(node) -> dict: node_definition = dict() node_definition['name'] = node.name node_definition['label'] = node.label node_definition['location'] = [node.location[0], node.location[1]] node_definition['width'] = node.width node_definition['height'] = node.height node_definition['parent'] = node.parent.name if node.parent else None node_definition['class'] = type(node).__name__ node_definition['inputs'] = [] node_definition['outputs'] = [] node_definition['nodes'] = [] node_definition['links'] = [] node_definition['properties'] = [] node_definition['texts'] = [] if node_definition['class'] == 'NodeFrame': node_definition['properties'].append( { 'name': 'use_custom_color', 'value': node.use_custom_color, } ) node_definition['properties'].append( { 'name': 'color', 'value': [node.color[0], node.color[1], node.color[2]], } ) node_definition['properties'].append( { 'name': 'shrink', 'value': node.shrink, } ) if node.text is not None: text_definition = dict() text_definition['name'] = node.text.name text_definition['contents'] = node.text.as_string() node_definition['texts'].append(text_definition) elif node_definition['class'] == 'ShaderNodeRGB': for index, output in enumerate(node.outputs): definition = dict() definition['index'] = index definition['name'] = output.name definition['class'] = type(output).__name__ if definition['class'] == 'NodeSocketColor': default_value = output.default_value definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['outputs'].append(definition) elif node_definition['class'] == 'ShaderNodeMixRGB': node_definition['properties'].append( { 'name': 'blend_type', 'value': node.blend_type, } ) node_definition['properties'].append( { 'name': 'use_clamp', 'value': node.use_clamp, } ) for index, input in enumerate(node.inputs): definition = dict() definition['index'] = index definition['name'] = input.name definition['class'] = type(input).__name__ if definition['class'] == 'NodeSocketFloatFactor': definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketColor': default_value = node.inputs[input.name].default_value 
definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['inputs'].append(definition) elif node_definition['class'] == 'ShaderNodeGroup': for index, input in enumerate(node.inputs): definition = dict() definition['index'] = index definition['name'] = input.name definition['class'] = type(input).__name__ if definition['class'] == 'NodeSocketFloatFactor': definition['min_value'] = node.node_tree.inputs[input.name].min_value definition['max_value'] = node.node_tree.inputs[input.name].max_value definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketIntFactor': definition['min_value'] = node.node_tree.inputs[input.name].min_value definition['max_value'] = node.node_tree.inputs[input.name].max_value definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketColor': default_value = node.inputs[input.name].default_value definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['inputs'].append(definition) for index, output in enumerate(node.outputs): definition = dict() definition['index'] = index definition['name'] = output.name definition['class'] = type(output).__name__ node_definition['outputs'].append(definition) for child in node.node_tree.nodes: node_definition['nodes'].append(OT_DescribeShaderGraph.describe_node(child)) for link in node.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.is_valid: continue link_definition = dict() link_definition['from_node'] = link.from_node.name link_definition['from_socket'] = link.from_socket.name link_definition['to_node'] = link.to_node.name link_definition['to_socket'] = link.to_socket.name node_definition['links'].append(link_definition) elif node_definition['class'] == 'ShaderNodeUVMap': pass elif node_definition['class'] == 'ShaderNodeTexImage': pass elif node_definition['class'] == 'ShaderNodeOutputMaterial': pass elif node_definition['class'] == 'ShaderNodeBsdfPrincipled': pass elif node_definition['class'] == 'ShaderNodeMapping': pass elif node_definition['class'] == 'ShaderNodeNormalMap': pass elif node_definition['class'] == 'ShaderNodeHueSaturation': pass elif node_definition['class'] == 'ShaderNodeSeparateRGB': pass elif node_definition['class'] == 'NodeGroupInput': pass elif node_definition['class'] == 'NodeGroupOutput': pass elif node_definition['class'] == 'ShaderNodeMath': node_definition['properties'].append( { 'name': 'operation', 'value': node.operation, } ) node_definition['properties'].append( { 'name': 'use_clamp', 'value': node.use_clamp, } ) elif node_definition['class'] == 'ShaderNodeVectorMath': node_definition['properties'].append( { 'name': 'operation', 'value': node.operation, } ) else: raise NotImplementedError(node_definition['class']) return node_definition def execute(self, context): material = bpy.context.active_object.active_material output = dict() output['name'] = 'Principled Omni Glass' output['nodes'] = [] output['links'] = [] for node in material.node_tree.nodes: output['nodes'].append(OT_DescribeShaderGraph.describe_node(node)) for link in material.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.is_valid: continue link_definition = dict() link_definition['from_node'] = link.from_node.name link_definition['from_socket'] = link.from_socket.name 
link_definition['to_node'] = link.to_node.name link_definition['to_socket'] = link.to_socket.name output['links'].append(link_definition) print(json.dumps(output, indent=4)) return {'FINISHED'} def initialize(): if getattr(sys.modules[__name__], '__initialized'): return setattr(sys.modules[__name__], '__initialized', True) util.register(converter=DataConverter()) util.register(converter=ObjectConverter()) print('Universal Material Map: Registered Converter classes.') initialize()
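# --- Hedged usage sketch (editor's illustration, not part of the original
# module): how the three operator classes visible in this excerpt could be
# registered with Blender. Everything here is standard bpy API; the actual
# add-on may perform registration elsewhere in the package. ---
_umm_operator_classes = (
    OT_CreateTemplateOmniPBR,
    OT_CreateTemplateOmniGlass,
    OT_DescribeShaderGraph,
)

def register_operators():
    # Register each operator so bpy.ops.universalmaterialmap.* becomes callable.
    for cls in _umm_operator_classes:
        bpy.utils.register_class(cls)

def unregister_operators():
    # Unregister in reverse order, mirroring registration.
    for cls in reversed(_umm_operator_classes):
        bpy.utils.unregister_class(cls)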
67,817
Python
49.724009
263
0.552177
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/material.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import traceback import bpy from ..core.converter import util def apply_data_to_instance(instance_name: str, source_class: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> dict: ## bugfix: Extract class correctly from exporters that name the class like a Python function call. real_source_class = source_class.partition("(")[0] try: for material in bpy.data.materials: if not isinstance(material, bpy.types.Material): continue if material.name == instance_name: if util.can_apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material): return util.apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material) print(f'Omniverse UMM: Unable to apply data at import for material "{instance_name}". This is not an error - just means that conversion data does not support the material.') result = dict() result['umm_notification'] = 'incomplete_process' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there is no Conversion Graph for that scenario. No changes were applied to "{2}".'.format(real_source_class, render_context, instance_name) return result except Exception as error: print('Warning: Universal Material Map: function "apply_data_to_instance": Unexpected error:') print('\targument "instance_name" = "{0}"'.format(instance_name)) print('\targument "source_class" = "{0}"'.format(real_source_class)) print('\targument "render_context" = "{0}"'.format(render_context)) print('\targument "source_data" = "{0}"'.format(source_data)) print('\terror: {0}'.format(error)) print('\tcallstack: {0}'.format(traceback.format_exc())) result = dict() result['umm_notification'] = 'unexpected_error' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there was an unexpected error. Some changes may have been applied to "{2}". 
Details: {3}'.format(real_source_class, render_context, instance_name, error)
        return result


def convert_instance_to_data(instance_name: str, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
    try:
        for material in bpy.data.materials:
            if not isinstance(material, bpy.types.Material):
                continue
            if material.name == instance_name:
                if util.can_convert_instance_to_data(instance=material, render_context=render_context):
                    return util.convert_instance_to_data(instance=material, render_context=render_context)
        result = dict()
        result['umm_notification'] = 'incomplete_process'
        result['message'] = 'Not able to convert material "{0}" for render context "{1}" because there is no Conversion Graph for that scenario.'.format(instance_name, render_context)
        return result
    except Exception as error:
        print('Warning: Universal Material Map: function "convert_instance_to_data": Unexpected error:')
        print('\targument "instance_name" = "{0}"'.format(instance_name))
        print('\targument "render_context" = "{0}"'.format(render_context))
        print('\terror: {0}'.format(error))
        print('\tcallstack: {0}'.format(traceback.format_exc()))
        result = dict()
        result['umm_notification'] = 'unexpected_error'
        result['message'] = 'Not able to convert material "{0}" for render context "{1}" because there was an unexpected error. Details: {2}'.format(instance_name, render_context, error)
        return result
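# --- Illustrative sketch (editor's addition, not part of the original module).
# Shows how the two functions above are typically paired; the material name
# "Material" and the "MDL" render context are assumptions for the example. ---
def _example_roundtrip() -> None:
    data = convert_instance_to_data(instance_name='Material', render_context='MDL')
    if isinstance(data, dict) and 'umm_notification' in data:
        # No conversion graph for this scenario, or an unexpected error -
        # 'message' explains why.
        print(data['message'])
        return
    # On success: a list of (attribute_name, value) tuples that could be fed
    # back through apply_data_to_instance() on another material.
    for attribute_name, value in data:
        print(attribute_name, value)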
5,004
Python
57.197674
246
0.670464
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/__init__.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import os import re import sys import json import bpy from ..core.data import Library from ..core.feature import POLLING from ..core.service import store from ..core.service import delegate from ..core.util import get_extension_from_image_file_format LIBRARY_ID = '195c69e1-7765-4a16-bb3a-ecaa222876d9' __initialized = False developer_mode: bool = False CORE_MATERIAL_PROPERTIES = [ ('diffuse_color', 'RGBA'), ('metallic', 'VALUE'), ('specular_color', 'STRING'), ('roughness', 'VALUE'), ('use_backface_culling', 'BOOLEAN'), ('blend_method', 'STRING'), ('shadow_method', 'STRING'), ('alpha_threshold', 'VALUE'), ('use_screen_refraction', 'BOOLEAN'), ('refraction_depth', 'VALUE'), ('use_sss_translucency', 'BOOLEAN'), ('pass_index', 'INT'), ] def show_message(message: str = '', title: str = 'Message Box', icon: str = 'INFO'): try: def draw(self, context): self.layout.label(text=message) bpy.context.window_manager.popup_menu(draw, title=title, icon=icon) except: print('{0}\n{1}'.format(title, message)) def initialize(): if getattr(sys.modules[__name__], '__initialized'): return setattr(sys.modules[__name__], '__initialized', True) directory = os.path.expanduser('~').replace('\\', '/') if not directory.endswith('/Documents'): directory = '{0}/Documents'.format(directory) directory = '{0}/Omniverse/Blender/UMMLibrary'.format(directory) library = Library.Create( library_id=LIBRARY_ID, name='Blender', manifest=delegate.FilesystemManifest(root_directory='{0}'.format(directory)), conversion_graph=delegate.Filesystem(root_directory='{0}/ConversionGraph'.format(directory)), target=delegate.Filesystem(root_directory='{0}/Target'.format(directory)), ) store.register_library(library=library) from ..blender import converter converter.initialize() from ..blender import generator generator.initialize() if POLLING: # TODO: On application exit > un_initialize() pass def un_initialize(): if POLLING: store.on_shutdown() def get_library(): """ :return: omni.universalmaterialmap.core.data.Library """ initialize() return store.get_library(library_id=LIBRARY_ID) def __get_value_impl(socket: bpy.types.NodeSocketStandard, depth=0, max_depth=100) -> typing.Any: # Local utility function which returns a file extension # corresponding to the given image file format string. # This mimics similar logic used in the Blender USD IO # C++ implementation. debug = False if debug: print('__get_value_impl: depth={0}'.format(depth)) if depth > max_depth: if debug: print('\t reached max_depth ({0}). 
terminating recursion'.format(max_depth)) return None if debug: print('\tsocket.is_linked'.format(socket.is_linked)) if socket.is_linked: for link in socket.links: if not isinstance(link, bpy.types.NodeLink): if debug: print('\t\tlink is not bpy.types.NodeLink: {0}'.format(type(link))) continue if not link.is_valid: if debug: print('\t\tlink is not valid') continue instance = link.from_node if debug: print('\t\tlink.from_node: {0}'.format(type(instance))) if isinstance(instance, bpy.types.ShaderNodeTexImage): print(f'UMM: image.filepath: "{instance.image.filepath}"') print(f'UMM: image.source: "{instance.image.source}"') print(f'UMM: image.file_format: "{instance.image.file_format}"') if debug: print('\t\tinstance.image: {0}'.format(instance.image)) if instance.image: print('\t\tinstance.image.source: {0}'.format(instance.image.source)) if instance.image and (instance.image.source == 'FILE' or instance.image.source == 'TILED'): value = instance.image.filepath if (instance.image.source == 'TILED'): # Find all numbers in the path. numbers = re.findall('[0-9]+', value) if (len(numbers) > 0): # Get the string representation of the last number. num_str = str(numbers[-1]) # Replace the number substring with '<UDIM>'. split_items = value.rsplit(num_str, 1) if (len(split_items)==2): value = split_items[0] + '<UDIM>' + split_items[1] if debug: print('\t\tinstance.image.filepath: {0}'.format(value)) try: if value and instance.image.packed_file: # The image is packed, so ignore the filepath, which is likely # invalid, and return just the base name. value = bpy.path.basename(value) # Make sure the file has a valid extension for # the expected format. file_format = instance.image.file_format file_format = get_extension_from_image_file_format(file_format, base_name=value) value = bpy.path.ensure_ext(value, '.' + file_format) print(f'UMM: packed image data: "{[value, instance.image.colorspace_settings.name]}"') return [value, instance.image.colorspace_settings.name] if value is None or value == '': file_format = instance.image.file_format file_format = get_extension_from_image_file_format(file_format) value = f'{instance.image.name}.{file_format}' if debug: print(f'\t\tvalue: {value}') print(f'UMM: image data: "{[value, instance.image.colorspace_settings.name]}"') return [value, instance.image.colorspace_settings.name] return [os.path.abspath(bpy.path.abspath(value)), instance.image.colorspace_settings.name] except Exception as error: print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". 
Detail: {1}'.format(instance.image.filepath, error)) return None if isinstance(instance, bpy.types.ShaderNodeNormalMap): for o in instance.inputs: if o.name == 'Color': value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth) if value: return value for o in instance.inputs: value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth) if debug: print('\t\tre-entrant: input="{0}", value="{1}"'.format(o.name, value)) if value: return value return None def get_value(socket: bpy.types.NodeSocketStandard) -> typing.Any: debug = False value = __get_value_impl(socket=socket) if debug: print('get_value', value, socket.default_value) return socket.default_value if not value else value def _create_node_from_template(node_tree: bpy.types.NodeTree, node_definition: dict, parent: object = None) -> object: node = node_tree.nodes.new(node_definition['class']) if parent: node.parent = parent node.name = node_definition['name'] node.label = node_definition['label'] node.location = node_definition['location'] if node_definition['class'] == 'NodeFrame': node.width = node_definition['width'] node.height = node_definition['height'] for o in node_definition['properties']: setattr(node, o['name'], o['value']) if node_definition['class'] == 'NodeFrame': for text_definition in node_definition['texts']: existing = None for o in bpy.data.texts: if o.name == text_definition['name']: existing = o break if existing is None: existing = bpy.data.texts.new(text_definition['name']) existing.write(text_definition['contents']) node.text = existing node.location = node_definition['location'] elif node_definition['class'] == 'ShaderNodeGroup': node.node_tree = bpy.data.node_groups.new('node tree', 'ShaderNodeTree') child_cache = dict() for child_definition in node_definition['nodes']: child_cache[child_definition['name']] = _create_node_from_template(node_tree=node.node_tree, node_definition=child_definition) for input_definition in node_definition['inputs']: node.node_tree.inputs.new(input_definition['class'], input_definition['name']) if input_definition['class'] == 'NodeSocketFloatFactor': node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value'] node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value'] node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketIntFactor': node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value'] node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value'] node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketColor': node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] for output_definition in node_definition['outputs']: node.node_tree.outputs.new(output_definition['class'], output_definition['name']) for link_definition in node_definition['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in 
to_node.inputs if o.name == link_definition['to_socket']][0] node.node_tree.links.new(from_socket, to_socket) node.width = node_definition['width'] node.height = node_definition['height'] node.location = node_definition['location'] elif node_definition['class'] == 'ShaderNodeMixRGB': for input_definition in node_definition['inputs']: if input_definition['class'] == 'NodeSocketFloatFactor': node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketColor': node.inputs[input_definition['name']].default_value = input_definition['default_value'] elif node_definition['class'] == 'ShaderNodeRGB': for output_definition in node_definition['outputs']: if output_definition['class'] == 'NodeSocketColor': node.outputs[output_definition['name']].default_value = output_definition['default_value'] return node def create_template(source_class: str, material: bpy.types.Material) -> None: template_filepath = '{}'.format(__file__).replace('\\', '/') template_filepath = template_filepath[:template_filepath.rfind('/')] template_filepath = '{}/template/{}.json'.format(template_filepath, source_class.lower()) if not os.path.exists(template_filepath): return with open(template_filepath, 'r') as template_file: template = json.load(template_file) # Make sure we're using nodes. material.use_nodes = True # Remove existing nodes - we're starting from scratch. to_delete = [o for o in material.node_tree.nodes] while len(to_delete): material.node_tree.nodes.remove(to_delete.pop()) # Create nodes according to template. child_cache = dict() for node_definition in template['nodes']: if node_definition['parent'] is None: node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition) child_cache[node_definition['name']] = node for node_definition in template['nodes']: if node_definition['parent'] is not None: parent = child_cache[node_definition['parent']] node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition, parent=parent) child_cache[node_definition['name']] = node for link_definition in template['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0] material.node_tree.links.new(from_socket, to_socket) def create_from_template(material: bpy.types.Material, template: dict) -> None: # Make sure we're using nodes. material.use_nodes = True # Create nodes according to template. 
child_cache = dict() for node_definition in template['nodes']: if node_definition['parent'] is None: node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition) child_cache[node_definition['name']] = node for node_definition in template['nodes']: if node_definition['parent'] is not None: parent = child_cache[node_definition['parent']] node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition, parent=parent) child_cache[node_definition['name']] = node for link_definition in template['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0] material.node_tree.links.new(from_socket, to_socket) def get_parent_material(shader_node: object) -> bpy.types.Material: for material in bpy.data.materials: if shader_node == material: return material if not material.use_nodes: continue if not material.node_tree or not material.node_tree.nodes: continue for node in material.node_tree.nodes: if shader_node == node: return material return None def get_template_data_by_shader_node(shader_node: object) -> typing.Tuple[typing.Dict, typing.Dict, str, bpy.types.Material]: material: bpy.types.Material = get_parent_material(shader_node=shader_node) if material and material.use_nodes and material.node_tree and material.node_tree.nodes: template_directory = '{}'.format(__file__).replace('\\', '/') template_directory = template_directory[:template_directory.rfind('/')] template_directory = f'{template_directory}/template' for item in os.listdir(template_directory): if item.lower().endswith('_map.json'): continue if not item.lower().endswith('.json'): continue template_filepath = f'{template_directory}/{item}' with open(template_filepath, 'r') as template_file: template = json.load(template_file) material_has_all_template_nodes = True for node_definition in template['nodes']: found_node = False for node in material.node_tree.nodes: if node.name == node_definition['name']: found_node = True break if not found_node: material_has_all_template_nodes = False break if not material_has_all_template_nodes: continue template_has_all_material_nodes = True for node in material.node_tree.nodes: found_template = False for node_definition in template['nodes']: if node.name == node_definition['name']: found_template = True break if not found_template: template_has_all_material_nodes = False break if not template_has_all_material_nodes: continue template_shader_name = template['name'] map_filename = '{}_map.json'.format(item[:item.rfind('.')]) template_map_filepath = f'{template_directory}/{map_filename}' with open(template_map_filepath, 'r') as template_map_file: template_map = json.load(template_map_file) return template, template_map, template_shader_name, material return None, None, None, None def get_template_data_by_class_name(class_name: str) -> typing.Tuple[typing.Dict, typing.Dict]: template_directory = '{}'.format(__file__).replace('\\', '/') template_directory = template_directory[:template_directory.rfind('/')] template_directory = f'{template_directory}/template' for item in os.listdir(template_directory): if item.lower().endswith('_map.json'): continue if not item.lower().endswith('.json'): continue template_filepath = f'{template_directory}/{item}' with open(template_filepath, 'r') as template_file: template = 
json.load(template_file) if not template['name'] == class_name: continue map_filename = '{}_map.json'.format(item[:item.rfind('.')]) template_map_filepath = f'{template_directory}/{map_filename}' with open(template_map_filepath, 'r') as template_map_file: template_map = json.load(template_map_file) return template, template_map return None, None
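# --- Self-contained sketch (editor's addition) of the '<UDIM>' substitution
# performed inside __get_value_impl above. It assumes, as the implementation
# does, that the UDIM tile index is the last number in the file path. ---
def _udimify(path: str) -> str:
    numbers = re.findall('[0-9]+', path)
    if numbers:
        # Split on the last occurrence of the final number and splice in the token.
        head, _, tail = path.rpartition(numbers[-1])
        return head + '<UDIM>' + tail
    return path

# Example: _udimify('textures/wood.1001.png') -> 'textures/wood.<UDIM>.png'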
19,919
Python
43.663677
172
0.599377
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/menu.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import bpy

from . import developer_mode


class UniversalMaterialMapMenu(bpy.types.Menu):
    bl_label = "Omniverse"
    bl_idname = "OBJECT_MT_umm_node_menu"

    def draw(self, context):
        layout = self.layout

        layout.operator('universalmaterialmap.create_template_omnipbr', text='Replace with OmniPBR graph template')
        layout.operator('universalmaterialmap.create_template_omniglass', text='Replace with OmniGlass graph template')

        if developer_mode:
            layout.operator('universalmaterialmap.generator', text='DEV: Generate Targets')
            layout.operator('universalmaterialmap.instance_to_data_converter', text='DEV: Convert Instance to Data')
            layout.operator('universalmaterialmap.data_to_instance_converter', text='DEV: Convert Data to Instance')
            layout.operator('universalmaterialmap.data_to_data_converter', text='DEV: Convert Data to Data')
            layout.operator('universalmaterialmap.apply_data_to_instance', text='DEV: Apply Data to Instance')
            layout.operator('universalmaterialmap.describe_shader_graph', text='DEV: Describe Shader Graph')
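# --- Hedged registration sketch (editor's addition): one plausible way to
# expose this menu in the shader node editor. bpy.utils.register_class and
# bpy.types.NODE_MT_context_menu are standard Blender API; the add-on's real
# registration code lives elsewhere in the package. ---
def _draw_umm_menu(self, context):
    # Draw callback appended to the node editor's context menu.
    self.layout.menu(UniversalMaterialMapMenu.bl_idname)

def _register_menu_sketch():
    bpy.utils.register_class(UniversalMaterialMapMenu)
    bpy.types.NODE_MT_context_menu.append(_draw_umm_menu)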
1,999
Python
45.511627
119
0.724362
NVIDIA-Omniverse/kit-app-template/repo.toml
########################################################################################################################
# Repo tool base settings
########################################################################################################################

[repo]
# Use the Kit Template repo configuration as a base. Only override things specific to the repo.
import_configs = [
    "${root}/_repo/deps/repo_kit_tools/kit-template/repo.toml",
    "${root}/_repo/deps/repo_kit_tools/kit-template/repo-external-app.toml",
]

# Repository Name
name = "kit-app-template"

########################################################################################################################
# Extensions precacher
########################################################################################################################

[repo_precache_exts]
# Apps to run and precache
apps = [
    "${root}/source/apps/omni.usd_explorer.kit",
    "${root}/source/apps/my_name.my_app.kit",
]

registries = [
    { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/shared" },
    { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" },
]
1,265
TOML
37.363635
136
0.422925
NVIDIA-Omniverse/kit-app-template/README.md
# Omniverse Kit App Template

[Omniverse Kit App Template](https://github.com/NVIDIA-Omniverse/kit-app-template) is the place to start learning about developing Omniverse Apps. This project contains everything necessary to develop and package an Omniverse App.

## Links

* Recommended: [Tutorial](https://docs.omniverse.nvidia.com/kit/docs/kit-app-template) for getting started with application development.
* [Developer Guide](https://docs.omniverse.nvidia.com/dev-guide/latest/index.html).

## Build

1. Clone [this repo](https://github.com/NVIDIA-Omniverse/kit-app-template) to your local machine.
2. Open a command prompt and navigate to the root of your cloned repo.
3. Run `build.bat` to bootstrap your dev environment and build an example app.
4. Run `_build\windows-x86_64\release\my_name.my_app.bat` (or another app's launcher script) to open an example Kit application.

You have now launched your first Kit-based application!

## Contributing

The source code for this repository is provided as-is and we are not accepting outside contributions.
1,048
Markdown
44.608694
148
0.781489
NVIDIA-Omniverse/kit-app-template/tools/VERSION.md
2023.2.1
9
Markdown
3.999998
8
0.666667
NVIDIA-Omniverse/kit-app-template/tools/deps/repo-deps.packman.xml
<project toolsVersion="5.0">
  <dependency name="repo_man" linkPath="../../_repo/deps/repo_man">
    <package name="repo_man" version="1.50.6"/>
  </dependency>
  <dependency name="repo_build" linkPath="../../_repo/deps/repo_build">
    <package name="repo_build" version="0.60.1"/>
  </dependency>
  <dependency name="repo_ci" linkPath="../../_repo/deps/repo_ci">
    <package name="repo_ci" version="0.6.0"/>
  </dependency>
  <dependency name="repo_changelog" linkPath="../../_repo/deps/repo_changelog">
    <package name="repo_changelog" version="0.3.13"/>
  </dependency>
  <dependency name="repo_docs" linkPath="../../_repo/deps/repo_docs">
    <package name="repo_docs" version="0.39.2"/>
  </dependency>
  <dependency name="repo_kit_tools" linkPath="../../_repo/deps/repo_kit_tools">
    <package name="repo_kit_tools" version="0.14.17"/>
  </dependency>
  <dependency name="repo_test" linkPath="../../_repo/deps/repo_test">
    <package name="repo_test" version="2.16.1"/>
  </dependency>
  <dependency name="repo_source" linkPath="../../_repo/deps/repo_source">
    <package name="repo_source" version="0.4.3"/>
  </dependency>
  <dependency name="repo_package" linkPath="../../_repo/deps/repo_package">
    <package name="repo_package" version="5.9.3"/>
  </dependency>
  <dependency name="repo_format" linkPath="../../_repo/deps/repo_format">
    <package name="repo_format" version="2.8.0"/>
  </dependency>
  <dependency name="repo_kit_template" linkPath="../../_repo/deps/repo_kit_template">
    <package name="repo_kit_template" version="0.1.9"/>
  </dependency>
</project>
1,593
XML
43.277777
85
0.648462
NVIDIA-Omniverse/kit-app-template/tools/deps/kit-sdk.packman.xml
<project toolsVersion="5.0"> <dependency name="kit_sdk_${config}" linkPath="../../_build/${platform}/${config}/kit" tags="${config} non-redist"> <package name="kit-kernel" version="105.1.2+release.134727.de96b556.tc.${platform}.${config}"/> </dependency> </project>
274
XML
44.833326
117
0.664234
NVIDIA-Omniverse/kit-app-template/tools/deps/user.toml
[exts."omni.kit.registry.nucleus"] registries = [ { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/shared" }, { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, ]
296
TOML
48.499992
136
0.675676
NVIDIA-Omniverse/kit-app-template/tools/deps/kit-sdk-deps.packman.xml
<project toolsVersion="5.0">
  <!-- Only edit this file to pull kit dependencies. -->
  <!-- Put all extension-specific dependencies in `ext-deps.packman.xml`. -->
  <!-- This file contains shared Kit SDK dependencies used by most kit extensions. -->

  <!-- Import Kit SDK all-deps xml file to steal some deps from it: -->
  <import path="../../_build/${platform}/${config}/kit/dev/all-deps.packman.xml">
    <filter include="pybind11" />
    <filter include="fmt" />
    <filter include="python" />
    <filter include="carb_sdk_plugins" />
    <filter include="winsdk" />
  </import>

  <!-- Pull those deps at the same version as in the Kit SDK. Override linkPath to point correctly;
       other properties can also be overridden, including version. -->
  <dependency name="carb_sdk_plugins" linkPath="../../_build/target-deps/carb_sdk_plugins" tags="non-redist" />
  <dependency name="pybind11" linkPath="../../_build/target-deps/pybind11" />
  <dependency name="fmt" linkPath="../../_build/target-deps/fmt" />
  <dependency name="python" linkPath="../../_build/target-deps/python" />

  <!-- Import host deps from Kit SDK to keep in sync -->
  <import path="../../_build/${platform}/${config}/kit/dev/deps/host-deps.packman.xml">
    <filter include="premake" />
    <filter include="msvc" />
    <filter include="linbuild" />
  </import>
  <dependency name="premake" linkPath="../../_build/host-deps/premake" />
  <dependency name="msvc" linkPath="../../_build/host-deps/msvc" />
  <dependency name="winsdk" linkPath="../../_build/host-deps/winsdk" />
  <dependency name="linbuild" linkPath="../../_build/host-deps/linbuild" tags="non-redist"/>
</project>
1,651
XML
49.060605
157
0.660206
NVIDIA-Omniverse/kit-app-template/tools/repoman/repoman.py
import os
import sys

import packmanapi

REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "tools/deps/repo-deps.packman.xml")


def bootstrap():
    """
    Bootstrap all omni.repo modules.

    Pull them with packman from repo-deps.packman.xml and add them all to python sys.path to enable importing.
    """
    deps = packmanapi.pull(REPO_DEPS_FILE)
    for dep_path in deps.values():
        if dep_path not in sys.path:
            sys.path.append(dep_path)


if __name__ == "__main__":
    bootstrap()
    import omni.repo.man

    omni.repo.man.main(REPO_ROOT)
709
Python
23.482758
100
0.662905
NVIDIA-Omniverse/kit-app-template/tools/packman/packmanconf.py
# Use this file to bootstrap packman into your Python environment (3.10.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)

import os
import platform
import sys


def init():
    """Call this function to initialize the packman configuration.

    Calls to the packman API will work after successfully calling this function.

    Note:
        This function only needs to be called once during the execution of your
        program. Calling it repeatedly is harmless but wasteful.
        Compatibility with your Python interpreter is checked and upon failure
        the function will report what is required.

    Example:
        >>> import packmanconf
        >>> packmanconf.init()
        >>> import packmanapi
        >>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
    """
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if major != 3 or minor != 10:
        raise RuntimeError(
            f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
        )
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    os.environ["PM_INSTALL_PATH"] = conf_dir
    packages_root = get_packages_root(conf_dir)
    version = get_version(conf_dir)
    module_dir = get_module_dir(conf_dir, packages_root, version)
    sys.path.insert(1, module_dir)


def get_packages_root(conf_dir: str) -> str:
    root = os.getenv("PM_PACKAGES_ROOT")
    if not root:
        platform_name = platform.system()
        if platform_name == "Windows":
            drive, _ = os.path.splitdrive(conf_dir)
            root = os.path.join(drive, "packman-repo")
        elif platform_name == "Darwin":
            # macOS
            root = os.path.join(
                os.path.expanduser("~"), "Library/Application Support/packman-cache"
            )
        elif platform_name == "Linux":
            try:
                cache_root = os.environ["XDG_CACHE_HOME"]
            except KeyError:
                cache_root = os.path.join(os.path.expanduser("~"), ".cache")
            return os.path.join(cache_root, "packman")
        else:
            raise RuntimeError(f"Unsupported platform '{platform_name}'")
    # make sure the path exists:
    os.makedirs(root, exist_ok=True)
    return root


def get_module_dir(conf_dir: str, packages_root: str, version: str) -> str:
    module_dir = os.path.join(packages_root, "packman-common", version)
    if not os.path.exists(module_dir):
        import tempfile

        tf = tempfile.NamedTemporaryFile(delete=False)
        target_name = tf.name
        tf.close()
        url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
        print(f"Downloading '{url}' ...")
        import urllib.request

        urllib.request.urlretrieve(url, target_name)
        from importlib.machinery import SourceFileLoader

        # import module from path provided
        script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
        ip = SourceFileLoader("install_package", script_path).load_module()
        print("Unpacking ...")
        ip.install_package(target_name, module_dir)
        os.unlink(tf.name)
    return module_dir


def get_version(conf_dir: str) -> str:
    path = os.path.join(conf_dir, "packman")
    if not os.path.exists(path):  # in dev repo fallback
        path += ".sh"
    with open(path, "rt", encoding="utf8") as launch_file:
        for line in launch_file.readlines():
            if line.startswith("PM_PACKMAN_VERSION"):
                _, value = line.split("=")
                return value.strip()
    raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
3,931
Python
35.407407
95
0.632663
NVIDIA-Omniverse/kit-app-template/tools/packman/config.packman.xml
<config remotes="cloudfront">
    <remote2 name="cloudfront">
        <transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
    </remote2>
</config>
211
XML
34.333328
123
0.691943
NVIDIA-Omniverse/kit-app-template/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import zipfile import tempfile import sys import os import stat import time from typing import Any, Callable RENAME_RETRY_COUNT = 100 RENAME_RETRY_DELAY = 0.1 logging.basicConfig(level=logging.WARNING, format="%(message)s") logger = logging.getLogger("install_package") def remove_directory_item(path): if os.path.islink(path) or os.path.isfile(path): try: os.remove(path) except PermissionError: # make sure we have access and try again: os.chmod(path, stat.S_IRWXU) os.remove(path) else: # try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction! clean_out_folder = False try: # make sure we have access preemptively - this is necessary because recursing into a directory without permissions # will only lead to heart ache os.chmod(path, stat.S_IRWXU) os.rmdir(path) except OSError: clean_out_folder = True if clean_out_folder: # we should make sure the directory is empty names = os.listdir(path) for name in names: fullname = os.path.join(path, name) remove_directory_item(fullname) # now try to again get rid of the folder - and not catch if it raises: os.rmdir(path) class StagingDirectory: def __init__(self, staging_path): self.staging_path = staging_path self.temp_folder_path = None os.makedirs(staging_path, exist_ok=True) def __enter__(self): self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path) return self def get_temp_folder_path(self): return self.temp_folder_path # this function renames the temp staging folder to folder_name, it is required that the parent path exists! def promote_and_rename(self, folder_name): abs_dst_folder_name = os.path.join(self.staging_path, folder_name) os.rename(self.temp_folder_path, abs_dst_folder_name) def __exit__(self, type, value, traceback): # Remove temp staging folder if it's still there (something went wrong): path = self.temp_folder_path if os.path.isdir(path): remove_directory_item(path) def rename_folder(staging_dir: StagingDirectory, folder_name: str): try: staging_dir.promote_and_rename(folder_name) except OSError as exc: # if we failed to rename because the folder now exists we can assume that another packman process # has managed to update the package before us - in all other cases we re-raise the exception abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name) if os.path.exists(abs_dst_folder_name): logger.warning( f"Directory {abs_dst_folder_name} already present, package installation already completed" ) else: raise def call_with_retry( op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20 ) -> Any: retries_left = retry_count while True: try: return func() except (OSError, IOError) as exc: logger.warning(f"Failure while executing {op_name} [{str(exc)}]") if retries_left: retry_str = "retry" if retries_left == 1 else "retries" logger.warning( f"Retrying after {retry_delay} seconds" f" ({retries_left} {retry_str} left) ..." 
) time.sleep(retry_delay) else: logger.error("Maximum retries exceeded, giving up") raise retries_left -= 1 def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name): dst_path = os.path.join(staging_dir.staging_path, folder_name) call_with_retry( f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}", lambda: rename_folder(staging_dir, folder_name), RENAME_RETRY_COUNT, RENAME_RETRY_DELAY, ) def install_package(package_path, install_path): staging_path, version = os.path.split(install_path) with StagingDirectory(staging_path) as staging_dir: output_folder = staging_dir.get_temp_folder_path() with zipfile.ZipFile(package_path, allowZip64=True) as zip_file: zip_file.extractall(output_folder) # attempt the rename operation rename_folder_with_retry(staging_dir, version) print(f"Package successfully installed to {install_path}") if __name__ == "__main__": executable_paths = os.getenv("PATH") paths_list = executable_paths.split(os.path.pathsep) if executable_paths else [] target_path_np = os.path.normpath(sys.argv[2]) target_path_np_nc = os.path.normcase(target_path_np) for exec_path in paths_list: if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc: raise RuntimeError(f"packman will not install to executable path '{exec_path}'") install_package(sys.argv[1], target_path_np)
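# --- Illustrative sketch (editor's addition): call_with_retry() above is a
# generic helper, so it can wrap any flaky filesystem operation. The copy
# example below is hypothetical and not used by packman itself. ---
def _copy_with_retry(src: str, dst: str) -> None:
    import shutil

    call_with_retry(
        op_name=f"copy {src} -> {dst}",
        func=lambda: shutil.copy2(src, dst),
        retry_count=3,
        retry_delay=1.0,
    )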
5,776
Python
36.270968
145
0.645083
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.32"

# The title and description fields are primarily for displaying extension info in the UI
title = "Setup Extension for USD Explorer"
description = "An extension that sets up the USD Explorer app."

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# URL of the extension source repository.
repository = "https://gitlab-master.nvidia.com/omniverse/usd_explorer"

# One of the categories for UI.
category = "setup"

# Keywords for the extension
keywords = ["kit", "app", "setup"]

# Icon to show in the extension manager
icon = "data/icon.png"

# Preview to show in the extension manager
preview_image = "data/preview.png"

[dependencies]
"omni.kit.quicklayout" = {}
"omni.kit.window.title" = {}
"omni.kit.browser.asset" = {}
"omni.kit.window.console" = {}
"omni.kit.window.content_browser" = {}
"omni.kit.window.material" = {}
"omni.kit.window.toolbar" = {version = "1.5.4", exact = true}
"omni.kit.property.bundle" = {}
"omni.kit.property.layer" = {}
"omni.kit.viewport.navigation.usd_explorer.bundle" = {}
"omni.kit.window.preferences" = {}

# from omni.view.app.setup
"omni.kit.viewport.menubar.camera" = { optional=true }
"omni.kit.widget.layers" = { optional=true }
"omni.kit.widgets.custom" = {}
"omni.kit.window.file" = {}

# Main python module this extension provides; it will be publicly available as "import omni.usd_explorer.setup".
[[python.module]]
name = "omni.usd_explorer.setup"

[settings]
app.layout.name = "viewport_only"
app.application_mode = "review"
exts."omni.kit.viewport.menubar.camera".expand = true # Expand the extra-camera settings by default
exts."omni.kit.window.file".useNewFilePicker = true
exts."omni.kit.tool.asset_importer".useNewFilePicker = true
exts."omni.kit.tool.collect".useNewFilePicker = true
exts."omni.kit.widget.layers".useNewFilePicker = true
exts."omni.kit.renderer.core".imgui.enableMips = true
exts."omni.kit.browser.material".enabled = false
exts."omni.kit.window.material".load_after_startup = true
exts."omni.kit.widget.cloud_share".require_access_code = false
exts."omni.kit.mesh.raycast".bvhBuildOnFirstRequired = true # Avoids mesh raycast initialization during stage open
app.content.emptyStageOnStart = true
app.viewport.createCameraModelRep = false # Disable creation of camera meshes in USD

# USDRT
app.usdrt.scene_delegate.enableProxyCubes = false
app.usdrt.scene_delegate.geometryStreaming.enabled = true
app.usdrt.scene_delegate.numFramesBetweenLoadBatches = 2
app.usdrt.scene_delegate.geometryStreaming.numberOfVerticesToLoadPerChunk = 600000

exts."omni.kit.viewport.navigation.camera_manipulator".defaultOperation = ""

[[test]]
dependencies = [
    "omni.kit.core.tests",
    "omni.kit.ui_test",
    "omni.kit.mainwindow",
    "omni.kit.viewport.window",
    "omni.kit.viewport.utility",
]

args = [
    "--/app/file/ignoreUnsavedOnExit=true",
    # "--/renderer/enabled=pxr",
    # "--/renderer/active=pxr",
    "--/app/window/width=1280",
    "--/app/window/height=720",
    "--/app/window/dpiScaleOverride=1.0",
    "--/app/window/scaleToMonitor=false",
    "--/exts/omni.kit.viewport.window/startup/windowName=Viewport",
    "--reset-user",
    "--no-window",
    "--/app/fastShutdown=1"
]
3,294
TOML
33.322916
113
0.728597
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menubar_helper.py
from pathlib import Path import carb import carb.settings import carb.tokens import omni.ui as ui from omni.ui import color as cl ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons") VIEW_MENUBAR_STYLE = { "MenuBar.Window": {"background_color": 0xA0000000}, "MenuBar.Item.Background": { "background_color": 0, }, "Menu.Item.Background": { "background_color": 0, } } VIEWPORT_CAMERA_STYLE = { "Menu.Item.Icon::Expand": {"image_url": f"{ICON_PATH}/caret_s2_right_dark.svg", "color": cl.viewport_menubar_light}, "Menu.Item.Icon::Expand:checked": {"image_url": f"{ICON_PATH}/caret_s2_left_dark.svg"}, } class MenubarHelper: def __init__(self) -> None: self._settings = carb.settings.get_settings() # Set menubar background and style try: from omni.kit.viewport.menubar.core import DEFAULT_MENUBAR_NAME from omni.kit.viewport.menubar.core import get_instance as get_menubar_instance instance = get_menubar_instance() if not instance: # pragma: no cover return default_menubar = instance.get_menubar(DEFAULT_MENUBAR_NAME) default_menubar.background_visible = True default_menubar.style.update(VIEW_MENUBAR_STYLE) default_menubar.show_separator = True except ImportError: # pragma: no cover carb.log_warn("Viewport menubar not found!") try: import omni.kit.viewport.menubar.camera self._camera_menubar_instance = omni.kit.viewport.menubar.camera.get_instance() if not self._camera_menubar_instance: # pragma: no cover return # Change expand button icon self._camera_menubar_instance._camera_menu._style.update(VIEWPORT_CAMERA_STYLE) # New menu item for camera speed self._camera_menubar_instance.register_menu_item(self._create_camera_speed, order=100) # OM-76591 - Removing "Create from view" item - Bob self._camera_menubar_instance.deregister_menu_item(self._camera_menubar_instance._camera_menu._build_create_camera) except ImportError: carb.log_warn("Viewport menubar not found!") self._camera_menubar_instance = None except AttributeError: # pragma: no cover self._camera_menubar_instance = None # Hide default render and settings menubar self._settings.set("/persistent/exts/omni.kit.viewport.menubar.render/visible", False) self._settings.set("/persistent/exts/omni.kit.viewport.menubar.settings/visible", False) def destroy(self) -> None: if self._camera_menubar_instance: self._camera_menubar_instance.deregister_menu_item(self._create_camera_speed) def _create_camera_speed(self, _vc, _r: ui.Menu) -> None: from omni.kit.viewport.menubar.core import SettingModel, SliderMenuDelegate ui.MenuItem( "Speed", hide_on_click=False, delegate=SliderMenuDelegate( model=SettingModel("/persistent/app/viewport/camMoveVelocity", draggable=True), min=self._settings.get_as_float("/persistent/app/viewport/camVelocityMin") or 0.01, max=self._settings.get_as_float("/persistent/app/viewport/camVelocityMax"), tooltip="Set the Fly Mode navigation speed", width=0, reserve_status=True, ), )
3,517
Python
42.974999
127
0.642593
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/__init__.py
from .setup import *
21
Python
9.999995
20
0.714286
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/setup.py
import asyncio
import weakref
from functools import partial
import os
from pathlib import Path
from typing import cast, Optional

import omni.client
import omni.ext
import omni.kit.menu.utils
import omni.kit.app
import omni.kit.context_menu
import omni.kit.ui
import omni.usd

from omni.kit.quicklayout import QuickLayout
from omni.kit.menu.utils import MenuLayout
from omni.kit.window.title import get_main_window_title
from omni.kit.usd.layers import LayerUtils
from omni.kit.viewport.menubar.core import get_instance as get_mb_inst, DEFAULT_MENUBAR_NAME
from omni.kit.viewport.menubar.core.viewport_menu_model import ViewportMenuModel
from omni.kit.viewport.utility import get_active_viewport, get_active_viewport_window, disable_selection

import carb
import carb.settings
import carb.dictionary
import carb.events
import carb.tokens
import carb.input

import omni.kit.imgui as _imgui
from pxr import Sdf, Usd

from .navigation import Navigation
from .menu_helper import MenuHelper
from .menubar_helper import MenubarHelper
from .stage_template import SunnySkyStage
from .ui_state_manager import UIStateManager

SETTINGS_PATH_FOCUSED = "/app/workspace/currentFocused"
APPLICATION_MODE_PATH = "/app/application_mode"
MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active"
CURRENT_TOOL_PATH = "/app/viewport/currentTool"
ROOT_WINDOW_NAME = "DockSpace"
ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons")
SETTINGS_STARTUP_EXPAND_VIEWPORT = "/app/startup/expandViewport"
VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu"
TELEPORT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/visible"


async def _load_layout_startup(layout_file: str, keep_windows_open: bool = False) -> None:
    try:
        # a few frames of delay to avoid conflicts with the layout of omni.kit.mainwindow
        for i in range(3):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)

        # workaround: some layouts don't apply correctly the first time, so load a second time
        await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)
    except Exception as exc:  # pragma: no cover (a non-existing layout file logs an error in QuickLayout rather than raising)
        carb.log_warn(f"Failed to load layout {layout_file}: {exc}")


async def _load_layout(layout_file: str, keep_windows_open: bool = False) -> None:
    try:
        # a few frames of delay to avoid conflicts with the layout of omni.kit.mainwindow
        for i in range(3):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        QuickLayout.load_file(layout_file, keep_windows_open)
    except Exception as exc:  # pragma: no cover (a non-existing layout file logs an error in QuickLayout rather than raising)
        carb.log_warn(f"Failed to load layout {layout_file}: {exc}")


async def _clear_startup_scene_edits() -> None:
    try:
        # This could possibly be a smaller value; it must be long enough to run after RTX startup.
        for i in range(50):
            await omni.kit.app.get_app().next_update_async()  # type: ignore
        omni.usd.get_context().set_pending_edit(False)
    except Exception as exc:  # pragma: no cover
        carb.log_warn(f"Failed to clear stage edits on startup: {exc}")


# This extension mostly loads the layout and updates the menus.
class SetupExtension(omni.ext.IExt):
    # ext_id is the current extension id. It can be used with the extension manager to query additional
    # information, like where this extension is located on the filesystem.
@property def _app(self): return omni.kit.app.get_app() @property def _settings(self): return carb.settings.get_settings() def on_startup(self, ext_id: str) -> None: self._ext_id = ext_id self._menubar_helper = MenubarHelper() self._menu_helper = MenuHelper() # using imgui directly to adjust some color and Variable imgui = _imgui.acquire_imgui() # match Create overides imgui.push_style_color(_imgui.StyleColor.ScrollbarGrab, carb.Float4(0.4, 0.4, 0.4, 1)) imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabHovered, carb.Float4(0.6, 0.6, 0.6, 1)) imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabActive, carb.Float4(0.8, 0.8, 0.8, 1)) # DockSplitterSize is the variable that drive the size of the Dock Split connection imgui.push_style_var_float(_imgui.StyleVar.DockSplitterSize, 2) # setup the Layout for your app self._layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts") layout_file = Path(self._layouts_path).joinpath(f"{self._settings.get('/app/layout/name')}.json") self.__setup_window_task = asyncio.ensure_future(_load_layout_startup(f"{layout_file}", True)) self.review_layout_path = str(Path(self._layouts_path) / "comment_layout.json") self.default_layout_path = str(Path(self._layouts_path) / "default.json") self.layout_user_path = str(Path(self._layouts_path) / "layout_user.json") # remove the user defined layout so that we always load the default layout when startup if os.path.exists(self.layout_user_path): os.remove(self.layout_user_path) # setup the menu and their layout self._current_layout_priority = 0 self._layout_menu_items = [] self._layout_file_menu() self._menu_layout = [] if self._settings.get_as_bool('/app/view/debug/menus'): self._layout_menu() # setup the Application Title window_title = get_main_window_title() if window_title: window_title.set_app_version(self._settings.get_as_string("/app/titleVersion")) # self._context_menu() self._register_my_menu() self._navigation = Navigation() self._navigation.on_startup(ext_id) self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events( APPLICATION_MODE_PATH, weakref.proxy(self)._on_application_mode_changed ) self._set_viewport_menubar_visibility(False) self._test = asyncio.ensure_future(_clear_startup_scene_edits()) # OM-95865: Ensure teleport on by default. 
self._usd_context = omni.usd.get_context() self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop( self._on_stage_open_event, name="TeleportDefaultOn" ) if self._settings.get_as_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT): self._set_viewport_fill_on() self._stage_templates = [SunnySkyStage()] disable_selection(get_active_viewport()) self._ui_state_manager = UIStateManager() self._setup_ui_state_changes() omni.kit.menu.utils.add_layout([ MenuLayout.Menu("Window", [ MenuLayout.Item("Viewport", source="Window/Viewport/Viewport 1"), MenuLayout.Item("Playlist", remove=True), MenuLayout.Item("Layout", remove=True), MenuLayout.Item("" if any(v in self._app.get_app_version() for v in ("alpha", "beta")) else "Extensions", remove=True), MenuLayout.Sort(exclude_items=["Extensions"], sort_submenus=True), ]) ]) def show_documentation(*x): import webbrowser webbrowser.open("http://docs.omniverse.nvidia.com/explorer") self._help_menu_items = [ omni.kit.menu.utils.MenuItemDescription(name="Documentation", onclick_fn=show_documentation, appear_after=[omni.kit.menu.utils.MenuItemOrder.FIRST]) ] omni.kit.menu.utils.add_menu_items(self._help_menu_items, name="Help") def _on_stage_open_event(self, event: carb.events.IEvent) -> None: if event.type == int(omni.usd.StageEventType.OPENED): app_mode = self._settings.get_as_string(APPLICATION_MODE_PATH).lower() # exit all tools self._settings.set(CURRENT_TOOL_PATH, "none") # OM-95865, OMFP-1993: Activate Teleport upon scene load ... # OMFP-2743: ... but only when in Review mode. if app_mode == "review": asyncio.ensure_future(self._stage_post_open_teleport_toggle()) # toggle RMB viewport context menu based on application mode value = False if app_mode == "review" else True self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, value) # teleport is activated after loading a stage and app is in Review mode async def _stage_post_open_teleport_toggle(self) -> None: await self._app.next_update_async() if hasattr(self, "_usd_context") and self._usd_context is not None and not self._usd_context.is_new_stage(): self._settings.set("/exts/omni.kit.viewport.navigation.core/activeOperation", "teleport") def _set_viewport_fill_on(self) -> None: vp_window = get_active_viewport_window() vp_widget = vp_window.viewport_widget if vp_window else None if vp_widget: vp_widget.expand_viewport = True def _set_viewport_menubar_visibility(self, show: bool) -> None: mb_inst = get_mb_inst() if mb_inst and hasattr(mb_inst, "get_menubar"): main_menubar = mb_inst.get_menubar(DEFAULT_MENUBAR_NAME) if main_menubar.visible_model.as_bool != show: main_menubar.visible_model.set_value(show) ViewportMenuModel()._item_changed(None) # type: ignore def _on_application_mode_changed(self, item: carb.dictionary.Item, _typ: carb.settings.ChangeEventType) -> None: if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "review": omni.usd.get_context().get_selection().clear_selected_prim_paths() disable_selection(get_active_viewport()) current_mode: str = cast(str, item.get_dict()) asyncio.ensure_future(self.defer_load_layout(current_mode)) async def defer_load_layout(self, current_mode: str) -> None: keep_windows = True # Focus Mode Toolbar self._settings.set_bool(SETTINGS_PATH_FOCUSED, True) # current_mode not in ("review", "layout")) # Turn off all tools and modal self._settings.set_string(CURRENT_TOOL_PATH, "none") self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False) if current_mode == "review": # save the current layout for restoring later if switch back 
QuickLayout.save_file(self.layout_user_path) # we don't want to keep any windows except the ones which are visible in self.review_layout_path await _load_layout(self.review_layout_path, False) else: # current_mode == "layout": # check if there is any user modified layout, if yes use that one layout_filename = self.layout_user_path if os.path.exists(self.layout_user_path) else self.default_layout_path await _load_layout(layout_filename, keep_windows) self._set_viewport_menubar_visibility(current_mode == "layout") def _setup_ui_state_changes(self) -> None: windows_to_hide_on_modal = ["Measure", "Section", "Waypoints"] self._ui_state_manager.add_hide_on_modal(window_names=windows_to_hide_on_modal, restore=True) window_titles = ["Markups", "Waypoints"] for window in window_titles: setting_name = f'/exts/omni.usd_explorer.setup/{window}/visible' self._ui_state_manager.add_window_visibility_setting(window, setting_name) # toggle icon visibilites based on window visibility self._ui_state_manager.add_settings_copy_dependency( source_path="/exts/omni.usd_explorer.setup/Markups/visible", target_path="/exts/omni.kit.markup.core/show_icons", ) self._ui_state_manager.add_settings_copy_dependency( source_path="/exts/omni.usd_explorer.setup/Waypoints/visible", target_path="/exts/omni.kit.waypoint.core/show_icons", ) def _custom_quicklayout_menu(self) -> None: # we setup a simple ways to Load custom layout from the exts def add_layout_menu_entry(name, parameter, key): import inspect editor_menu = omni.kit.ui.get_editor_menu() layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts") menu_path = f"Layout/{name}" menu = editor_menu.add_item(menu_path, None, False, self._current_layout_priority) # type: ignore self._current_layout_priority = self._current_layout_priority + 1 if inspect.isfunction(parameter): # pragma: no cover (Never used, see commented out section below regarding quick save/load) menu_action = omni.kit.menu.utils.add_action_to_menu( menu_path, lambda *_: asyncio.ensure_future(parameter()), name, (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key), ) else: menu_action = omni.kit.menu.utils.add_action_to_menu( menu_path, lambda *_: asyncio.ensure_future(_load_layout(f"{layouts_path}/{parameter}.json")), name, (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key), ) self._layout_menu_items.append((menu, menu_action)) add_layout_menu_entry("Reset Layout", "default", carb.input.KeyboardInput.KEY_1) add_layout_menu_entry("Viewport Only", "viewport_only", carb.input.KeyboardInput.KEY_2) add_layout_menu_entry("Markup Editor", "markup_editor", carb.input.KeyboardInput.KEY_3) # add_layout_menu_entry("Waypoint Viewer", "waypoint_viewer", carb.input.KeyboardInput.KEY_4) # # you can enable Quick Save and Quick Load here # if False: # # create Quick Load & Quick Save # from omni.kit.quicklayout import QuickLayout # async def quick_save(): # QuickLayout.quick_save(None, None) # async def quick_load(): # QuickLayout.quick_load(None, None) # add_layout_menu_entry("Quick Save", quick_save, carb.input.KeyboardInput.KEY_7) # add_layout_menu_entry("Quick Load", quick_load, carb.input.KeyboardInput.KEY_8) def _register_my_menu(self) -> None: context_menu: Optional[omni.kit.context_menu.ContextMenuExtension] = omni.kit.context_menu.get_instance() if not context_menu: # pragma: no cover return def _layout_file_menu(self) -> None: self._menu_file_layout = [ MenuLayout.Menu( "File", [ MenuLayout.Item("New"), MenuLayout.Item("New From Stage Template"), MenuLayout.Item("Open"), 
MenuLayout.Item("Open Recent"), MenuLayout.Seperator(), MenuLayout.Item("Re-open with New Edit Layer"), MenuLayout.Seperator(), MenuLayout.Item("Share"), MenuLayout.Seperator(), MenuLayout.Item("Save"), MenuLayout.Item("Save As..."), MenuLayout.Item("Save With Options"), MenuLayout.Item("Save Selected"), MenuLayout.Item("Save Flattened As...", remove=True), MenuLayout.Seperator(), MenuLayout.Item("Collect As..."), MenuLayout.Item("Export"), MenuLayout.Seperator(), MenuLayout.Item("Import"), MenuLayout.Item("Add Reference"), MenuLayout.Item("Add Payload"), MenuLayout.Seperator(), MenuLayout.Item("Exit"), ] ) ] omni.kit.menu.utils.add_layout(self._menu_file_layout) def _layout_menu(self) -> None: self._menu_layout = [ MenuLayout.Menu( "Window", [ MenuLayout.SubMenu( "Animation", [ MenuLayout.Item("Timeline"), MenuLayout.Item("Sequencer"), MenuLayout.Item("Curve Editor"), MenuLayout.Item("Retargeting"), MenuLayout.Item("Animation Graph"), MenuLayout.Item("Animation Graph Samples"), ], ), MenuLayout.SubMenu( "Layout", [ MenuLayout.Item("Quick Save", remove=True), MenuLayout.Item("Quick Load", remove=True), ], ), MenuLayout.SubMenu( "Browsers", [ MenuLayout.Item("Content", source="Window/Content"), MenuLayout.Item("Materials"), MenuLayout.Item("Skies"), ], ), MenuLayout.SubMenu( "Rendering", [ MenuLayout.Item("Render Settings"), MenuLayout.Item("Movie Capture"), MenuLayout.Item("MDL Material Graph"), MenuLayout.Item("Tablet XR"), ], ), MenuLayout.SubMenu( "Simulation", [ MenuLayout.Group( "Flow", [ MenuLayout.Item("Presets", source="Window/Flow/Presets"), MenuLayout.Item("Monitor", source="Window/Flow/Monitor"), ], ), MenuLayout.Group( "Blast", [ MenuLayout.Item("Settings", source="Window/Blast/Settings"), MenuLayout.SubMenu( "Documentation", [ MenuLayout.Item("Kit UI", source="Window/Blast/Documentation/Kit UI"), MenuLayout.Item( "Programming", source="Window/Blast/Documentation/Programming" ), MenuLayout.Item( "USD Schemas", source="Window/Blast/Documentation/USD Schemas" ), ], ), ], ), MenuLayout.Item("Debug"), # MenuLayout.Item("Performance"), MenuLayout.Group( "Physics", [ MenuLayout.Item("Demo Scenes"), MenuLayout.Item("Settings", source="Window/Physics/Settings"), MenuLayout.Item("Debug"), MenuLayout.Item("Test Runner"), MenuLayout.Item("Character Controller"), MenuLayout.Item("OmniPVD"), MenuLayout.Item("Physics Helpers"), ], ), ], ), MenuLayout.SubMenu( "Utilities", [ MenuLayout.Item("Console"), MenuLayout.Item("Profiler"), MenuLayout.Item("USD Paths"), MenuLayout.Item("Statistics"), MenuLayout.Item("Activity Monitor"), ], ), # Remove 'Viewport 2' entry MenuLayout.SubMenu( "Viewport", [ MenuLayout.Item("Viewport 2", remove=True), ], ), MenuLayout.Sort(exclude_items=["Extensions"]), MenuLayout.Item("New Viewport Window", remove=True), ], ), # that is you enable the Quick Layout Menu MenuLayout.Menu( "Layout", [ MenuLayout.Item("Default", source="Reset Layout"), MenuLayout.Item("Viewport Only"), MenuLayout.Item("Markup Editor"), MenuLayout.Item("Waypoint Viewer"), MenuLayout.Seperator(), MenuLayout.Item("UI Toggle Visibility", source="Window/UI Toggle Visibility"), MenuLayout.Item("Fullscreen Mode", source="Window/Fullscreen Mode"), MenuLayout.Seperator(), MenuLayout.Item("Save Layout", source="Window/Layout/Save Layout..."), MenuLayout.Item("Load Layout", source="Window/Layout/Load Layout..."), # MenuLayout.Seperator(), # MenuLayout.Item("Quick Save", source="Window/Layout/Quick Save"), # MenuLayout.Item("Quick Load", source="Window/Layout/Quick Load"), ], ), MenuLayout.Menu("Tools", 
[MenuLayout.SubMenu("Animation", remove=True)]), ] omni.kit.menu.utils.add_layout(self._menu_layout) # type: ignore # if you want to support the Quick Layout Menu self._custom_quicklayout_menu() def on_shutdown(self): if self._menu_layout: omni.kit.menu.utils.remove_layout(self._menu_layout) # type: ignore self._menu_layout.clear() self._layout_menu_items.clear() self._navigation.on_shutdown() del self._navigation self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub) del self._application_mode_changed_sub self._stage_event_sub = None # From View setup self._menubar_helper.destroy() if self._menu_helper and hasattr(self._menu_helper, "destroy"): self._menu_helper.destroy() self._menu_helper = None self._stage_templates = []
23,462
Python
45.005882
167
0.557753
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/navigation.py
import asyncio import carb import carb.settings import carb.tokens import carb.dictionary import omni.kit.app import omni.ext import omni.ui as ui import omni.kit.actions.core from omni.kit.viewport.navigation.core import ( NAVIGATION_TOOL_OPERATION_ACTIVE, ViewportNavigationTooltip, get_navigation_bar, ) __all__ = ["Navigation"] CURRENT_TOOL_PATH = "/app/viewport/currentTool" SETTING_NAVIGATION_ROOT = "/exts/omni.kit.tool.navigation/" NAVIGATION_BAR_VISIBLE_PATH = "/exts/omni.kit.viewport.navigation.core/isVisible" APPLICATION_MODE_PATH = "/app/application_mode" WALK_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.walk/visible" CAPTURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.capture/visible" MARKUP_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.markup/visible" MEASURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.measure/visible" SECTION_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.section/visible" TELEPORT_SEPARATOR_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/spvisible" WAYPOINT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.waypoint/visible" VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu" MENUBAR_APP_MODES_PATH = "/exts/omni.kit.usd_presenter.main.menubar/include_modify_mode" WELCOME_WINDOW_VISIBLE_PATH = "/exts/omni.kit.usd_presenter.window.welcome/visible" ACTIVE_OPERATION_PATH = "/exts/omni.kit.viewport.navigation.core/activeOperation" class Navigation: NAVIGATION_BAR_NAME = None # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. def on_startup(self, ext_id: str) -> None: sections = ext_id.split("-") self._ext_name = sections[0] self._settings = carb.settings.get_settings() self._navigation_bar = get_navigation_bar() self._tool_bar_button = None self._dict = carb.dictionary.get_dictionary() self._panel_visible = True self._navigation_bar.show() self._settings.set(CURRENT_TOOL_PATH, "navigation") self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport") self._viewport_welcome_window_visibility_changed_sub = self._settings.subscribe_to_node_change_events( WELCOME_WINDOW_VISIBLE_PATH, self._on_welcome_window_visibility_change ) # OMFP-1799 Set nav bar visibility defaults. These should remain fixed now. 
self._settings.set(WALK_VISIBLE_PATH, False) self._settings.set(MARKUP_VISIBLE_PATH, True) self._settings.set(WAYPOINT_VISIBLE_PATH, True) self._settings.set(TELEPORT_SEPARATOR_VISIBLE_PATH, True) self._settings.set(CAPTURE_VISIBLE_PATH, True) self._settings.set(MEASURE_VISIBLE_PATH, True) self._settings.set(SECTION_VISIBLE_PATH, True) self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events( APPLICATION_MODE_PATH, self._on_application_mode_changed ) self._show_tooltips = False self._nav_bar_visibility_sub = self._settings.subscribe_to_node_change_events( NAVIGATION_BAR_VISIBLE_PATH, self._delay_reset_tooltip) _prev_navbar_vis = None _prev_tool = None _prev_operation = None def _on_welcome_window_visibility_change(self, item: carb.dictionary.Item, *_) -> None: if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)): return welcome_window_vis = self._dict.get(item) # preserve the state of the navbar upon closing the Welcome window if the app is in Layout mode if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "layout": # preserve the state of the navbar visibility if welcome_window_vis: self._prev_navbar_vis = self._settings.get_as_bool(NAVIGATION_BAR_VISIBLE_PATH) self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis)) self._prev_tool = self._settings.get(CURRENT_TOOL_PATH) self._prev_operation = self._settings.get(ACTIVE_OPERATION_PATH) else: # restore the state of the navbar visibility if self._prev_navbar_vis is not None: self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, self._prev_navbar_vis) self._prev_navbar_vis = None if self._prev_tool is not None: self._settings.set(CURRENT_TOOL_PATH, self._prev_tool) if self._prev_operation is not None: self._settings.set(ACTIVE_OPERATION_PATH, self._prev_operation) return else: if welcome_window_vis: self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "none") else: self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport") self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis)) def _on_application_mode_changed(self, item: carb.dictionary.Item, *_) -> None: if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)): return current_mode = self._dict.get(item) self._test = asyncio.ensure_future(self._switch_by_mode(current_mode)) async def _switch_by_mode(self, current_mode: str) -> None: await omni.kit.app.get_app().next_update_async() state = True if current_mode == "review" else False self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, state) self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, not(state)) # toggle RMB viewport context menu self._delay_reset_tooltip(None) # OM-92161: Need to reset the tooltip when change the mode def _delay_reset_tooltip(self, *_) -> None: async def delay_set_tooltip() -> None: for _i in range(4): await omni.kit.app.get_app().next_update_async() # type: ignore ViewportNavigationTooltip.set_visible(self._show_tooltips) asyncio.ensure_future(delay_set_tooltip()) def _on_showtips_click(self, *_) -> None: self._show_tooltips = not self._show_tooltips ViewportNavigationTooltip.set_visible(self._show_tooltips) def on_shutdown(self) -> None: self._navigation_bar = None self._viewport_welcome_window_visibility_changed_sub = None self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub) # type:ignore self._application_mode_changed_sub = None self._dict = None
6,679
Python
45.713286
119
0.676898
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/ui_state_manager.py
import carb.dictionary
import carb.settings
import omni.ui as ui

from functools import partial
from typing import Any, Dict, List, Tuple, Union

MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active"


class UIStateManager:
    def __init__(self) -> None:
        self._settings = carb.settings.acquire_settings_interface()
        self._modal_changed_sub = self._settings.subscribe_to_node_change_events(
            MODAL_TOOL_ACTIVE_PATH, self._on_modal_setting_changed
        )
        self._hide_on_modal: List[Tuple[str, bool]] = []
        self._modal_restore_window_states: Dict[str, bool] = {}
        self._settings_dependencies: Dict[Tuple[str, str], Dict[Any, Any]] = {}
        self._settings_changed_subs = {}
        self._window_settings = {}
        self._window_vis_changed_id = ui.Workspace.set_window_visibility_changed_callback(self._on_window_vis_changed)

    def destroy(self) -> None:
        if self._settings:
            if self._modal_changed_sub:
                self._settings.unsubscribe_to_change_events(self._modal_changed_sub)
            self._settings = None
        self._hide_on_modal = []
        self._modal_restore_window_states = {}
        self._settings_dependencies = {}
        self._window_settings = {}
        if self._window_vis_changed_id:
            ui.Workspace.remove_window_visibility_changed_callback(self._window_vis_changed_id)
            self._window_vis_changed_id = None

    def __del__(self) -> None:
        self.destroy()

    def add_hide_on_modal(self, window_names: Union[str, List[str]], restore: bool) -> None:
        if isinstance(window_names, str):
            window_names = [window_names]
        for window_name in window_names:
            if window_name not in self._hide_on_modal:
                self._hide_on_modal.append((window_name, restore))

    def remove_hide_on_modal(self, window_names: Union[str, List[str]]) -> None:
        if isinstance(window_names, str):
            window_names = [window_names]
        self._hide_on_modal = [item for item in self._hide_on_modal if item[0] not in window_names]

    def add_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        window = ui.Workspace.get_window(window_name)
        if window is not None:
            self._settings.set(setting_path, window.visible)
        else:
            # handle the case when the window is created later
            self._settings.set(setting_path, False)

        if window_name not in self._window_settings.keys():
            self._window_settings[window_name] = []
        self._window_settings[window_name].append(setting_path)

    def remove_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        if window_name in self._window_settings.keys():
            setting_list = self._window_settings[window_name]
            if setting_path in setting_list:
                setting_list.remove(setting_path)
                if len(setting_list) == 0:
                    del self._window_settings[window_name]

    def remove_all_window_visibility_settings(self, window_name: str) -> None:
        if window_name in self._window_settings.keys():
            del self._window_settings[window_name]

    def add_settings_dependency(self, source_path: str, target_path: str, value_map: Dict[Any, Any]) -> None:
        key = (source_path, target_path)
        if key in self._settings_dependencies.keys():
            carb.log_error(f'Settings dependency {source_path} -> {target_path} already exists. 
Ignoring.') return self._settings_dependencies[key] = value_map self._settings_changed_subs[key] = self._settings.subscribe_to_node_change_events( source_path, partial(self._on_settings_dependency_changed, source_path) ) def add_settings_copy_dependency(self, source_path: str, target_path: str) -> None: self.add_settings_dependency(source_path, target_path, None) def remove_settings_dependency(self, source_path: str, target_path: str) -> None: key = (source_path, target_path) if key in self._settings_dependencies.keys(): del self._settings_dependencies[key] if key in self._settings_changed_subs.keys(): sub = self._settings_changed_subs.pop(key) self._settings.unsubscribe_to_change_events(sub) def _on_settings_dependency_changed(self, path: str, item, event_type) -> None: value = self._settings.get(path) # setting does not exist if value is None: return target_settings = [source_target[1] for source_target in self._settings_dependencies.keys() if source_target[0] == path] for target_setting in target_settings: value_map = self._settings_dependencies[(path, target_setting)] # None means copy everything if value_map is None: self._settings.set(target_setting, value) elif value in value_map.keys(): self._settings.set(target_setting, value_map[value]) def _on_modal_setting_changed(self, item, event_type) -> None: modal = self._settings.get_as_bool(MODAL_TOOL_ACTIVE_PATH) if modal: self._hide_windows() else: self._restore_windows() def _hide_windows(self) -> None: for window_info in self._hide_on_modal: window_name, restore_later = window_info[0], window_info[1] window = ui.Workspace.get_window(window_name) if window is not None: if restore_later: self._modal_restore_window_states[window_name] = window.visible window.visible = False def _restore_windows(self) -> None: for window_info in self._hide_on_modal: window_name, restore_later = window_info[0], window_info[1] if restore_later: if window_name in self._modal_restore_window_states.keys(): old_visibility = self._modal_restore_window_states[window_name] if old_visibility is not None: window = ui.Workspace.get_window(window_name) if window is not None: window.visible = old_visibility self._modal_restore_window_states[window_name] = None def _on_window_vis_changed(self, title: str, state: bool) -> None: if title in self._window_settings.keys(): for setting in self._window_settings[title]: self._settings.set_bool(setting, state)
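
# A short usage sketch (illustrative, not part of this module) mirroring how
# setup.py wires this manager up; the "/exts/my.ext/..." setting path and the
# window names below are placeholders, not real paths.
manager = UIStateManager()
manager.add_hide_on_modal(window_names=["Measure", "Section"], restore=True)  # hide these while a modal tool is active
manager.add_window_visibility_setting("Markups", "/exts/my.ext/Markups/visible")  # mirror window visibility into a setting
manager.add_settings_copy_dependency(  # copy a setting's value to another path whenever it changes
    source_path="/exts/my.ext/Markups/visible",
    target_path="/exts/omni.kit.markup.core/show_icons",
)
manager.destroy()  # release subscriptions and callbacks when done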
6,634
Python
44.136054
128
0.611999
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/stage_template.py
import carb import omni.ext import omni.kit.commands from omni.kit.stage_templates import register_template, unregister_template from pxr import Gf, Sdf, Usd, UsdGeom, UsdLux class SunnySkyStage: def __init__(self): register_template("SunnySky", self.new_stage) def __del__(self): unregister_template("SunnySky") def new_stage(self, rootname, usd_context_name): # Create basic DistantLight usd_context = omni.usd.get_context(usd_context_name) stage = usd_context.get_stage() # get up axis up_axis = UsdGeom.GetStageUpAxis(stage) with Usd.EditContext(stage, stage.GetRootLayer()): # create Environment omni.kit.commands.execute( "CreatePrim", prim_path="/Environment", prim_type="Xform", select_new_prim=False, create_default_xform=True, context_name=usd_context_name ) texture_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/light_rigs/HDR/partly_cloudy.hdr") # create Sky omni.kit.commands.execute( "CreatePrim", prim_path="/Environment/Sky", prim_type="DomeLight", select_new_prim=False, attributes={ UsdLux.Tokens.inputsIntensity: 1000, UsdLux.Tokens.inputsTextureFile: texture_path, UsdLux.Tokens.inputsTextureFormat: UsdLux.Tokens.latlong, UsdLux.Tokens.inputsSpecular: 1, UsdGeom.Tokens.visibility: "inherited", } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \ { UsdLux.Tokens.intensity: 1000, UsdLux.Tokens.textureFile: texture_path, UsdLux.Tokens.textureFormat: UsdLux.Tokens.latlong, UsdGeom.Tokens.visibility: "inherited", }, create_default_xform=True, context_name=usd_context_name ) prim = stage.GetPrimAtPath("/Environment/Sky") prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1)) prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0)) if up_axis == "Y": prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(270, 0, 0)) else: prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 90)) prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"]) # create DistantLight omni.kit.commands.execute( "CreatePrim", prim_path="/Environment/DistantLight", prim_type="DistantLight", select_new_prim=False, attributes={ UsdLux.Tokens.inputsAngle: 4.3, UsdLux.Tokens.inputsIntensity: 3000, UsdGeom.Tokens.visibility: "inherited", } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \ { UsdLux.Tokens.angle: 4.3, UsdLux.Tokens.intensity: 3000, UsdGeom.Tokens.visibility: "inherited", }, create_default_xform=True, context_name=usd_context_name ) prim = stage.GetPrimAtPath("/Environment/DistantLight") prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1)) prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0)) if up_axis == "Y": prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(310.6366313590111, -125.93251524567805, 0.8821359067542289)) else: prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(41.35092544555664, 0.517652153968811, -35.92928695678711)) prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"])
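
# A minimal usage sketch (illustrative, not part of this module): once an
# instance of the class above has registered the template, a stage can be
# created from it by name; the extension's tests create a stage the same way.
import omni.kit.stage_templates

stage_template = SunnySkyStage()  # registers the "SunnySky" template
omni.kit.stage_templates.new_stage(template="SunnySky")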
4,590
Python
48.902173
166
0.56732
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menu_helper.py
import asyncio

import carb.settings
import omni.kit.app
import omni.kit.commands
import omni.kit.menu.utils
import omni.renderer_capture
from omni.kit.menu.utils import MenuLayout

SETTINGS_APPLICATION_MODE_PATH = "/app/application_mode"


class MenuHelper:
    def __init__(self) -> None:
        self._settings = carb.settings.get_settings()
        self._current_layout = None
        self._pending_layout = None
        self._changing_layout_task: asyncio.Task = None
        self._menu_layout_empty = []
        self._menu_layout_modify = []

        omni.kit.menu.utils.add_hook(self._menu_hook)
        self._app_mode_sub = self._settings.subscribe_to_node_change_events(
            SETTINGS_APPLICATION_MODE_PATH, self._on_application_mode_changed
        )
        self._menu_hook()

    def destroy(self) -> None:
        omni.kit.menu.utils.remove_hook(self._menu_hook)
        if self._changing_layout_task and not self._changing_layout_task.done():
            self._changing_layout_task.cancel()
        self._changing_layout_task = None
        if self._app_mode_sub:
            self._settings.unsubscribe_to_change_events(self._app_mode_sub)
            self._app_mode_sub = None
        self._app_ready_sub = None
        if self._current_layout:
            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None

    def _menu_hook(self, *args, **kwargs) -> None:
        if self._settings.get_as_bool("/app/view/debug/menus"):
            return

        LAYOUT_EMPTY_ALLOWED_MENUS = set()
        LAYOUT_MODIFY_ALLOWED_MENUS = {"File", "Edit", "Window", "Tools", "Help"}

        # make a NEW list object instead of clearing the original;
        # the original list may be held by self._current_layout and omni.kit.menu.utils
        self._menu_layout_empty = []
        self._menu_layout_modify = []

        menu_instance = omni.kit.menu.utils.get_instance()
        if not menu_instance:  # pragma: no cover
            return

        # Build new layouts using allowlists
        for key in menu_instance._menu_defs:
            if key.lower().endswith("widget"):
                continue
            if key not in LAYOUT_EMPTY_ALLOWED_MENUS:
                self._menu_layout_empty.append(MenuLayout.Menu(key, remove=True))
            if key not in LAYOUT_MODIFY_ALLOWED_MENUS:
                self._menu_layout_modify.append(MenuLayout.Menu(key, remove=True))

            # Remove 'Viewport 2' entry
            if key == "Window":
                for menu_item_1 in menu_instance._menu_defs[key]:
                    for menu_item_2 in menu_item_1:
                        if menu_item_2.name == "Viewport":
                            menu_item_2.sub_menu = [mi for mi in menu_item_2.sub_menu if mi.name != "Viewport 2"]

        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    def _on_application_mode_changed(self, *args) -> None:
        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    async def _delayed_change_layout(self):
        mode = self._settings.get_as_string(SETTINGS_APPLICATION_MODE_PATH)
        if mode in ["present", "review"]:
            pending_layout = self._menu_layout_empty
        else:
            pending_layout = self._menu_layout_modify

        # Don't change the layout inside the _on_application_mode_changed menu callback;
        # omni.ui throws an error.
        if self._current_layout:
            # OMFP-2737: Do not rebuild the menu (change the menu layout) if the layout is the same.
            # Only the number of layout menu items and the name of each item are compared here.
            same_layout = len(self._current_layout) == len(pending_layout)
            if same_layout:
                for index, item in enumerate(self._current_layout):
                    if item.name != pending_layout[index].name:
                        same_layout = False

            if same_layout:
                return

            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None

        omni.kit.menu.utils.add_layout(pending_layout)  # type: ignore
        self._current_layout = pending_layout.copy()
self._changing_layout_task = None
4,434
Python
37.565217
113
0.608029
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_release_config.py
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test


class TestConfig(omni.kit.test.AsyncTestCase):
    async def test_l1_public_release_configuration(self):
        settings = carb.settings.get_settings()
        app_version = settings.get("/app/version")

        # This test covers a moment in time when we switch the version to RC.
        # The following test cases must be satisfied.
        is_rc = "-rc." in app_version

        # title_format_string = settings.get("exts/omni.kit.window.modifier.titlebar/titleFormatString")

        # if is_rc:
        #     # Make sure the title format string doesn't use app version if app version contains rc
        #     title_using_app_version = "/app/version" in title_format_string
        #     self.assertFalse(is_rc and title_using_app_version, "check failed: title format string contains app version which contains 'rc'")

        #     # Make sure the title format string has "Beta" in it
        #     title_has_beta = "Beta" in title_format_string
        #     self.assertTrue(title_has_beta, "check failed: title format string does not have 'Beta ' in it")

        # Make sure we set build to external when going into RC release mode
        # external = settings.get("/privacy/externalBuild") or False
        # self.assertEqual(
        #     external,
        #     is_rc,
        #     "check failed: is this an RC build? %s Is /privacy/externalBuild set to true? %s" % (is_rc, external),
        # )

        # if is_rc:
        #     # Make sure we remove some extensions from public release
        #     EXTENSIONS = [
        #         # "omni.kit.profiler.tracy",
        #         "omni.kit.window.jira",
        #         "omni.kit.testing.services",
        #         "omni.kit.tests.usd_stress",
        #         "omni.kit.tests.basic_validation",
        #         # "omni.kit.extension.reports",
        #     ]

        #     manager = omni.kit.app.get_app().get_extension_manager()
        #     ext_names = {e["name"] for e in manager.get_extensions()}

        #     for ext in EXTENSIONS:
        #         self.assertEqual(
        #             ext in ext_names,
        #             False,
        #             f"looks like {ext} was not removed from public build",
        #         )

    async def test_l1_usd_explorer_and_usd_explorer_full_have_same_version(self):
        manager = omni.kit.app.get_app().get_extension_manager()
        EXTENSIONS = [
            "omni.usd_explorer",
            "omni.usd_explorer.full",
        ]

        # need to find both extensions and they need the same version id
        usd_explorer_exts = [e for e in manager.get_extensions() if e.get("name", "") in EXTENSIONS]
        self.assertEqual(len(usd_explorer_exts), 2)
        self.assertEqual(
            usd_explorer_exts[0]["version"],
            usd_explorer_exts[1]["version"],
            "omni.usd_explorer.kit and omni.usd_explorer.full.kit have different versions",
        )
3,572
Python
43.662499
143
0.594905
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_state_manager.py
## Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## import carb.settings import omni.kit.app import omni.ui as ui from omni.kit.test import AsyncTestCase from ..ui_state_manager import UIStateManager, MODAL_TOOL_ACTIVE_PATH class TestUIStateManager(AsyncTestCase): async def setUp(self): self._sm = UIStateManager() self._settings = carb.settings.get_settings() async def tearDown(self): self._sm = None async def test_destroy(self): self._sm.add_hide_on_modal('dummy', False) self._sm.add_settings_copy_dependency('a', 'b') self._sm.add_settings_dependency('c', 'd', {1: 2}) self._sm.add_window_visibility_setting('my_window', 'my_setting') self._sm.destroy() async def test_hide_on_modal(self): self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False) self._sm.add_hide_on_modal('NO_RESTORE', False) self._sm.add_hide_on_modal(['A_RESTORE', 'B_RESTORE'], True) window_no_restore = ui.Window('NO_RESTORE') window_restore_1 = ui.Window('A_RESTORE') window_restore_2 = ui.Window('B_RESTORE') window_no_restore.visible = True window_restore_1.visible = True window_restore_2.visible = False await self._wait() self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True) await self._wait() self.assertFalse(window_no_restore.visible) self.assertFalse(window_restore_1.visible) self.assertFalse(window_restore_2.visible) self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False) await self._wait() self.assertFalse(window_no_restore.visible) self.assertTrue(window_restore_1.visible) self.assertFalse(window_restore_2.visible) self._sm.remove_hide_on_modal(window_restore_1.title) self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True) await self._wait() self.assertTrue(window_restore_1.visible) self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False) async def test_window_visibility_setting(self): window_name = 'Dummy' setting_path = '/apps/dummy' setting_path2 = '/apps/dummy2' window = ui.Window(window_name) window.visible = True await self._wait() self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path) self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path2) self.assertIsNotNone(self._settings.get(setting_path)) self.assertTrue(self._settings.get(setting_path)) self.assertTrue(self._settings.get(setting_path2)) window.visible = False self.assertFalse(self._settings.get(setting_path)) self.assertFalse(self._settings.get(setting_path2)) window.visible = True self.assertTrue(self._settings.get(setting_path)) self.assertTrue(self._settings.get(setting_path2)) self._sm.remove_window_visibility_setting(window_name=window_name, setting_path=setting_path) window.visible = False self.assertTrue(self._settings.get(setting_path)) self.assertFalse(self._settings.get(setting_path2)) self._sm.remove_all_window_visibility_settings(window_name=window_name) window.visible = True self.assertFalse(self._settings.get(setting_path2)) async def test_setting_dependency(self): setting_path_copy_from = '/app/copy_from' setting_path_copy_to = '/ext/copy_to' setting_path_map_from = '/ext/map_from' setting_path_map_to = '/something/map_to' self._sm.add_settings_copy_dependency(setting_path_copy_from, 
setting_path_copy_to) self._settings.set_string(setting_path_copy_from, 'hello_world') self.assertEqual(self._settings.get(setting_path_copy_from), self._settings.get(setting_path_copy_to)) # doesn't work the other way around self._settings.set_string(setting_path_copy_to, 'no_copy_back') self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world') self._sm.add_settings_dependency(setting_path_map_from, setting_path_map_to, {1: 2, 3: 4}) self._settings.set_int(setting_path_map_from, 1) self.assertEqual(self._settings.get(setting_path_map_to), 2) self._settings.set_int(setting_path_map_from, 3) self.assertEqual(self._settings.get(setting_path_map_to), 4) # not in the map self._settings.set_int(setting_path_map_from, 42) self.assertEqual(self._settings.get(setting_path_map_to), 4) self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world') self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back') self._sm.remove_settings_dependency(setting_path_copy_from, setting_path_copy_to) self._settings.set_string(setting_path_copy_from, 'this_is_not_copied') self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back') async def _wait(self, frames: int = 5): for _ in range(frames): await omni.kit.app.get_app().next_update_async()
5,552
Python
42.046511
110
0.67219
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/__init__.py
# run startup tests first from .test_app_startup import * # run all other tests after from .test_extensions import * from .test_release_config import * from .test import * from .test_state_manager import *
206
Python
24.874997
34
0.757282
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test.py
import omni.kit.app from omni.ui.tests.test_base import OmniUiTest from omni.kit import ui_test ext_id = 'omni.usd_explorer.setup' class TestSetupToolExtension(OmniUiTest): async def test_extension(self): manager = omni.kit.app.get_app().get_extension_manager() self.assertTrue(ext_id) self.assertTrue(manager.is_extension_enabled(ext_id)) app = omni.kit.app.get_app() for _ in range(500): await app.next_update_async() manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertTrue(not manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) async def test_menubar_helper_camera_dependency(self): manager = omni.kit.app.get_app().get_extension_manager() manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled('omni.kit.viewport.menubar.camera', True) await ui_test.human_delay() manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) async def test_menu_helper(self): from ..menu_helper import MenuHelper menu_helper = MenuHelper() menu_helper.destroy() async def test_menubar_helper_menu(self): from ..menubar_helper import MenubarHelper menubar_helper = MenubarHelper() menubar_helper._create_camera_speed(None, None) menubar_helper.destroy() async def test_menu_helper_debug_setting(self): SETTINGS_VIEW_DEBUG_MENUS = '/app/view/debug/menus' import carb.settings settings = carb.settings.get_settings() manager = omni.kit.app.get_app().get_extension_manager() manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) orig_value = settings.get(SETTINGS_VIEW_DEBUG_MENUS) settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, True) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, orig_value) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) async def test_menu_helper_application_mode_change(self): from ..menu_helper import SETTINGS_APPLICATION_MODE_PATH import carb.settings settings = carb.settings.get_settings() settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify') await ui_test.human_delay() settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'welcome') await ui_test.human_delay() settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify') await ui_test.human_delay() settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'comment') await ui_test.human_delay() settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify') await ui_test.human_delay() async def test_menu_helper_widget_menu(self): import omni.kit.menu.utils omni.kit.menu.utils.add_menu_items([], name='test widget') from ..menu_helper import MenuHelper menu_helper = MenuHelper() menu_helper.destroy() async def test_startup_expand_viewport(self): from ..setup import SETTINGS_STARTUP_EXPAND_VIEWPORT import 
carb.settings settings = carb.settings.get_settings() orig_value = settings.get(SETTINGS_STARTUP_EXPAND_VIEWPORT) settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, True) manager = omni.kit.app.get_app().get_extension_manager() manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, orig_value) manager.set_extension_enabled(ext_id, False) await ui_test.human_delay() self.assertFalse(manager.is_extension_enabled(ext_id)) manager.set_extension_enabled(ext_id, True) await ui_test.human_delay() self.assertTrue(manager.is_extension_enabled(ext_id)) async def test_navigation_invalid_dict(self): from ..navigation import Navigation navigation = Navigation() navigation._show_tooltips = False navigation._dict = 42 navigation._on_application_mode_changed(None, None) navigation._on_showtips_click() async def test_navigation_current_tool_mode_change(self): from ..navigation import CURRENT_TOOL_PATH, APPLICATION_MODE_PATH import carb.settings settings = carb.settings.get_settings() settings.set_string(APPLICATION_MODE_PATH, 'modify') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'markup') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'navigation') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'markup') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'welcome') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'navigation') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'markup') await ui_test.human_delay() settings.set_string(CURRENT_TOOL_PATH, 'navigation') await ui_test.human_delay() async def test_setup_clear_startup_scene_edits(self): from ..setup import _clear_startup_scene_edits await _clear_startup_scene_edits() import omni.usd self.assertFalse(omni.usd.get_context().has_pending_edit()) async def test_stage_template(self): import omni.kit.stage_templates omni.kit.stage_templates.new_stage(template='SunnySky')
6,826
Python
34.190721
79
0.665397
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_app_startup.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## import omni.kit.app from omni.kit.test import AsyncTestCase class TestAppStartup(AsyncTestCase): async def test_l1_app_startup_time(self): """Get startup time - send to nvdf""" for _ in range(60): await omni.kit.app.get_app().next_update_async() try: from omni.kit.core.tests import app_startup_time app_startup_time(self.id()) except: # noqa pass self.assertTrue(True) async def test_l1_app_startup_warning_count(self): """Get the count of warnings during startup - send to nvdf""" for _ in range(60): await omni.kit.app.get_app().next_update_async() try: from omni.kit.core.tests import app_startup_warning_count app_startup_warning_count(self.id()) except: # noqa pass self.assertTrue(True)
1,323
Python
32.948717
77
0.657596
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_extensions.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## import sys import carb.settings import omni.kit.app import omni.kit.actions.core from omni.kit.core.tests import validate_extensions_load, validate_extensions_tests from omni.kit.test import AsyncTestCase from pxr import Usd, UsdGeom, Gf class TestUSDExplorerExtensions(AsyncTestCase): async def test_l1_extensions_have_tests(self): """Loop all enabled extensions to see if they have at least one (1) unittest""" await omni.kit.app.get_app().next_update_async() await omni.kit.app.get_app().next_update_async() # This list should be empty or near empty ideally EXCLUSION_LIST = [ # extensions from Kit "omni.mdl", "omni.ansel.init", # extensions from USD Explorer ] # These extensions only run tests on win32 for now if sys.platform != "win32": EXCLUSION_LIST.append("omni.hydra.scene_api") EXCLUSION_LIST.append("omni.rtx.tests") self.assertEqual(validate_extensions_tests(EXCLUSION_LIST), 0) async def test_l1_extensions_load(self): """Loop all enabled extensions to see if they loaded correctly""" self.assertEqual(validate_extensions_load(), 0) async def test_regression_omfp_2304(self): """Regression test for OMFP-2304""" loaded_omni_kit_collaboration_selection_outline = False manager = omni.kit.app.get_app().get_extension_manager() for ext in manager.get_extensions(): if ext["name"] == "omni.kit.collaboration.selection_outline": loaded_omni_kit_collaboration_selection_outline = True break self.assertTrue(loaded_omni_kit_collaboration_selection_outline) async def _wait(self, frames: int = 10): for _ in range(frames): await omni.kit.app.get_app().next_update_async() async def wait_stage_loading(self): while True: _, files_loaded, total_files = omni.usd.get_context().get_stage_loading_status() if files_loaded or total_files: await self._wait() continue break await self._wait(100) async def _get_1_1_1_rotation(self) -> Gf.Vec3d: """Loads a stage and returns the transformation of the (1,1,1) vector by the directional light's rotation""" await self._wait() omni.kit.actions.core.execute_action("omni.kit.window.file", "new") await self.wait_stage_loading() context = omni.usd.get_context() self.assertIsNotNone(context) stage = context.get_stage() self.assertIsNotNone(stage) prim_path = '/Environment/DistantLight' prim = stage.GetPrimAtPath(prim_path) self.assertTrue(prim.IsValid()) # Extract the prim's transformation matrix in world space xformAPI = UsdGeom.XformCache() transform_matrix_world = xformAPI.GetLocalToWorldTransform(prim) unit_point = Gf.Vec3d(1, 1, 1) transformed_point = transform_matrix_world.Transform(unit_point) return transformed_point async def test_regression_omfp_OMFP_3314(self): """Regression test for OMFP-3314""" settings = carb.settings.get_settings() UP_AXIS_PATH = "/persistent/app/stage/upAxis" settings.set("/persistent/app/newStage/defaultTemplate", "SunnySky") settings.set_string(UP_AXIS_PATH, "Z") point_z_up = await self._get_1_1_1_rotation() settings.set_string(UP_AXIS_PATH, "Y") point_y_up = await self._get_1_1_1_rotation() # with the default camera position: # in y-up: z points bottom left, x points bottom right, y points up # in z-up: x points bottom 
left, y points bottom right, z points up places = 4 self.assertAlmostEqual(point_y_up[2], point_z_up[0], places=places) self.assertAlmostEqual(point_y_up[0], point_z_up[1], places=places) self.assertAlmostEqual(point_y_up[1], point_z_up[2], places=places)
4,461
Python
40.314814
116
0.656355
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/docs/CHANGELOG.md
# Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [1.0.32] - 2023-11-02 ### Changed - OMFP-3224: Added regression test - Added unit tests for state manager ## [1.0.31] - 2023-10-25 ### Changed - OMFP-3094: Restored Window/Viewport menu ## [1.0.30] - 2023-10-26 ### Changed - OMFP-2904: Show "Examples" by default in Layout mode ## [1.0.29] - 2023-10-25 ### Changed - OMFP-3224: Fix stage template light directions. ## [1.0.28] - 2023-10-23 ### Changed - OMFP-2654: Upgraded carb.imgui with omni.kit.imgui ## [1.0.27] - 2023-10-20 ### Changed - OMFP-2649: Missed the Layout item, it is now hidden as requested. ## [1.0.26] - 2023-10-20 ### Changed - Update embedded light rigs and textures ## [1.0.25] - 2023-10-19 ### Changed - Added regression test for OMFP-2304 ## [1.0.24] - 2023-10-19 ### Changed - OMFP-1981: always load the default layout when startup the app ## [1.0.23] - 2023-10-18 ### Changed - OMFP-2649: Hiding menu entries. ## [1.0.22] - 2023-10-18 ### Changed - Updated About dialog PNG to match the new application icon. ## [1.0.21] - 2023-10-18 ### Changed - OMFP-2737: Do no rebuild menu (change menu layout) if layout is same ## [1.0.20] - 2023-10-18 ### Changed - make windows invisible which are not desired to be in Review mode, OMFP-2252 activity progress window and OMFP-1981 scene optimizer window. - OMFP-1981: when user switch between modes, make sure the user defined layout in Layout mode is kept. ## [1.0.19] - 2023-10-17 ### Changed - OMFP-2547 - remove markup from modal list, markup window visibility is now handled in omni.kit.markup.core ## [1.0.18] - 2023-10-17 ### Changed - Fixed test ## [1.0.17] - 2023-10-16 ### Changed - Navigation bar visibility fixes ## [1.0.16] - 2023-10-13 ### Changed - Waypoint and markup visibilities are bound to their list windows ## [1.0.15] - 2023-10-12 ### Changed - OMFP-2417 - Rename 'comment' -> 'review' and 'modify' -> 'layout' ## [1.0.14] - 2023-10-12 ### Changed - Added more unit tests. ## [1.0.13] - 2023-10-11 ### Changed - OMFP-2328: Fix "Sunnysky" oriented incorrectly ## [1.0.12] - 2023-10-10 ### Changed - OMFP-2226 - Remove second Viewport menu item from layouts. ## [1.0.11] - 2023-10-11 ### Changed - Added UI state manager. ## [1.0.10] - 2023-10-10 ### Changed - Deactivate tools when app mode is changed. ## [1.0.9] - 2023-10-09 ### Changed - OMFP-2200 - Disabling the viewport expansion, this should keep us locked to a 16:9 aspect ratio. ## [1.0.8] - 2023-10-06 ### Changed - Added a new stage template and made it default ## [1.0.7] - 2023-10-06 ### Changed - Enable UI aware "expand_viewport" mode rather than lower-level fill_viewport mode ## [1.0.6] - 2023-10-05 ### Changed - Used allowlists for building main menu entries to guard against unexpected menus. ## [1.0.5] - 2023-10-05 ### Fixed - Regression in hiding viewport toolbar. ## [1.0.4] - 2023-10-04 ### Changed - Modify mode now shows selected menus on main menubar. ## [1.0.3] - 2023-10-04 - Hide Viewport top toolbar in Comment Mode ## [1.0.2] - 2023-10-03 - Navigation Toolbar hidden by default in Modify Mode ## [1.0.1] - 2023-09-27 - Renamed to omni.usd_explorer.setup ## [1.0.0] - 2021-04-26 - Initial version of extension UI template with a window
3,289
Markdown
23.37037
141
0.672545
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/docs/README.md
# omni.usd_explorer.setup
25
Markdown
24.999975
25
0.8
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"

# The title and description fields are primarily for displaying extension info in UI
title = "Simple UI Extension Template"
description = "The simplest python extension example. Use it as a starting point for your extensions."

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# Path (relative to the root) of changelog
changelog = "docs/CHANGELOG.md"

# URL of the extension source repository.
repository = "https://github.com/NVIDIA-Omniverse/kit-app-template"

# One of the categories for UI.
category = "Example"

# Keywords for the extension
keywords = ["kit", "example"]

# Icon to show in the extension manager
icon = "data/icon.png"

# Preview to show in the extension manager
preview_image = "data/preview.png"

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}

# Main python module this extension provides; it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "omni.hello.world"

[[test]]
# Extra dependencies only to be used during test run
dependencies = [
    "omni.kit.ui_test"  # UI testing extension
]
1,200
TOML
26.295454
105
0.738333
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/extension.py
# Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import omni.ext
import omni.ui as ui


# Functions and vars are available to other extensions as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    print(f"[omni.hello.world] some_public_function was called with {x}")
    return x ** x


# Any class derived from `omni.ext.IExt` in the top-level module (defined in `python.modules` of `extension.toml`)
# will be instantiated when the extension is enabled, and `on_startup(ext_id)` will be called. Later, when the
# extension is disabled, `on_shutdown()` is called.
class MyExtension(omni.ext.IExt):
    # ext_id is the current extension id. It can be used with the extension manager to query additional information,
    # like where this extension is located on the filesystem.
    def on_startup(self, ext_id):
        print("[omni.hello.world] MyExtension startup")

        self._count = 0

        self._window = ui.Window("My Window", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                label = ui.Label("")

                def on_click():
                    self._count += 1
                    label.text = f"count: {self._count}"

                def on_reset():
                    self._count = 0
                    label.text = "empty"

                on_reset()

                with ui.HStack():
                    ui.Button("Add", clicked_fn=on_click)
                    ui.Button("Reset", clicked_fn=on_reset)

    def on_shutdown(self):
        print("[omni.hello.world] MyExtension shutdown")
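
# A small usage sketch (illustrative, not part of this module): calling the
# public API from another extension, as the comment above describes; the
# bundled test asserts the same result.
import omni.hello.world

assert omni.hello.world.some_public_function(4) == 256  # 4 ** 4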
2,141
Python
36.578947
119
0.64269
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/__init__.py
# Copyright 2019-2023 NVIDIA CORPORATION # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .extension import *
609
Python
39.666664
74
0.770115
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/tests/__init__.py
# Copyright 2019-2023 NVIDIA CORPORATION # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .test_hello_world import *
617
Python
37.624998
74
0.768233
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/tests/test_hello_world.py
# Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test

# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test

# Import the extension python module we are testing with an absolute import path, as if we were an external user (other extension)
import omni.hello.world


# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    # Before running each test
    async def setUp(self):
        pass

    # After running each test
    async def tearDown(self):
        pass

    # Actual test, notice it is an "async" function, so "await" can be used if needed
    async def test_hello_public_function(self):
        result = omni.hello.world.some_public_function(4)
        self.assertEqual(result, 256)

    async def test_window_button(self):
        # Find a label in our window
        label = ui_test.find("My Window//Frame/**/Label[*]")

        # Find buttons in our window
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")

        # Click reset button
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")

        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")

        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
2,253
Python
35.950819
142
0.70395
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/docs/CHANGELOG.md
# Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [1.0.0] - 2021-04-26 - Initial version of extension UI template with a window
178
Markdown
18.888887
80
0.702247
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/docs/README.md
# Simple UI Extension Template The simplest python extension example. Use it as a starting point for your extensions.
119
Markdown
28.999993
86
0.806723
NVIDIA-Omniverse/kit-app-template/source/launcher/description.toml
name = "USD Explorer" # displayed application name shortName = "USD Explorer" # displayed application name in smaller card and library view version = "${version}" # version must be semantic kind = "app" # enum of "app", "connector", and "experience" for now latest = true # boolean for if this version is the latest version slug = "my_company.usd_explorer" # unique identifier for component, all lower case, persists between versions productArea = "My Company" # displayed before application name in launcher category = "Apps" # category of content channel = "beta" # 3 filter types [ "alpha", "beta", "release "] enterpriseStatus = false # set true if you want this package to show in enterprise launcher #values for filtering content, not implemented yet tags = [ "Manufacturing", "Product Design", "Scene Composition", "Visualization", "Rendering" ] #string array, each line is a new line, keep lines under 256 char and keep lines under 4 description = [ "My Company USD Explorer is an Omniverse app for Reviewing and Constructing large facilities such as factories, warehouses and more. It is built using NVIDIA Omniverse™ Kit. The Scene Description and in-memory model is based on Pixar's USD. Omniverse USD Composer takes advantage of the advanced workflows of USD like Layers, Variants, Instancing and much more.", "When connected to a Omniverse Nucleus server, worlds can be authored LIVE across multiple Omniverse applications, machines and users for advanced collaborative workflows." ] #array of links for more info on product [[links]] title = "Tutorials" url = "http://omniverse.nvidia.com/tutorials" [[links]] title = "Forums" url = "https://forums.developer.nvidia.com/c/omniverse/300" [developer] #name of developer name = 'My Company' # hyperlink on developer name (can be left as empty string) url = 'https://www.my-company.com/' [publisher] #name of publisher name = 'My Company' # hyperlink on publisher name (can be left as empty string) url = 'https://www.my-company.com/' [url] windows-x86_64 = 'windows-x86_64/package.zip' linux-x86_64 = 'linux-x86_64/package.zip'
2,246
TOML
43.939999
363
0.704809
NVIDIA-Omniverse/kit-app-template/source/launcher/requirements.toml
# Optional note that will be shown below system requirements.
# Supports markdown.
note = "Note: Omniverse is built to run on any RTX-powered machine. For ideal performance, we recommend using GeForce RTX™ 2080, Quadro RTX™ 5000, or higher. For latest drivers, visit [NVIDIA Driver Downloads](https://www.nvidia.com/Download/index.aspx). For Quadro, select 'Quadro New Feature Driver (QNF)'."

# System requirements specs.
# Supports line breaks.
[minimum]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "4"
ram = "16 GB"
storage = "512 GB SSD"
vram = "6 GB"
gpu = "Any RTX GPU"

[recommended]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "8"
ram = "32 GB"
storage = "512 GB M.2 SSD"
vram = "8 GB"
gpu = "GeForce RTX 2080\nQuadro RTX 5000"
734
TOML
33.999998
308
0.723433
NVIDIA-Omniverse/kit-app-template/source/launcher/launcher.toml
## install and launch instructions by environment [defaults.windows-x86_64] url = "" entrypoint = "${productRoot}/omni.usd_explorer.bat" args = ["--/app/environment/name='launcher'"] [defaults.windows-x86_64.open] command = "${productRoot}/omni.usd_explorer.bat" args = ['--exec "open_stage.py ${file}"', "--/app/environment/name='launcher'"] [defaults.windows-x86_64.environment] [defaults.windows-x86_64.install] pre-install = "" pre-install-args = [] install = "${productRoot}/pull_kit_sdk.bat" install-args = [] post-install = "" # "${productRoot}/omni.usd_explorer.warmup.bat" post-install-args = ["--/app/environment/name='launcher_warmup'"] [defaults.windows-x86_64.uninstall] pre-uninstall = "" pre-uninstall-args = [] uninstall = "" uninstall-args = [] post-uninstall = "" post-uninstall-args = [] [defaults.linux-x86_64] url = "" entrypoint = "${productRoot}/omni.usd_explorer.sh" args = ["--/app/environment/name='launcher'"] [defaults.linux-x86_64.environment] [defaults.linux-x86_64.install] pre-install = "" pre-install-args = [] install = "${productRoot}/pull_kit_sdk.sh" install-args = [] post-install = "" # "${productRoot}/omni.usd_explorer.warmup.sh" post-install-args = ["--/app/environment/name='launcher_warmup'"] [defaults.linux-x86_64.uninstall] pre-uninstall = "" pre-uninstall-args = [] uninstall = "" uninstall-args = [] post-uninstall = "" post-uninstall-args = []
1,400
TOML
27.019999
79
0.696429
NVIDIA-Omniverse/IsaacGymEnvs/setup.py
"""Installation script for the 'isaacgymenvs' python package.""" from __future__ import absolute_import from __future__ import print_function from __future__ import division from setuptools import setup, find_packages import os root_dir = os.path.dirname(os.path.realpath(__file__)) # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ # RL "gym==0.23.1", "torch", "omegaconf", "termcolor", "jinja2", "hydra-core>=1.2", "rl-games>=1.6.0", "pyvirtualdisplay", "urdfpy==0.0.22", "pysdf==0.1.9", "warp-lang==0.10.1", "trimesh==3.23.5", ] # Installation operation setup( name="isaacgymenvs", author="NVIDIA", version="1.5.1", description="Benchmark environments for high-speed robot learning in NVIDIA IsaacGym.", keywords=["robotics", "rl"], include_package_data=True, python_requires=">=3.6", install_requires=INSTALL_REQUIRES, packages=find_packages("."), classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.6, 3.7, 3.8"], zip_safe=False, ) # EOF
1,107
Python
21.612244
99
0.644986
NVIDIA-Omniverse/IsaacGymEnvs/README.md
# Isaac Gym Benchmark Environments

[Website](https://developer.nvidia.com/isaac-gym) | [Technical Paper](https://arxiv.org/abs/2108.10470) | [Videos](https://sites.google.com/view/isaacgym-nvidia)

### About this repository

This repository contains example RL environments for NVIDIA Isaac Gym, the high-performance simulator described [in our NeurIPS 2021 Datasets and Benchmarks paper](https://openreview.net/forum?id=fgFBtYgJQX_).

### Installation

Download the Isaac Gym Preview 4 release from the [website](https://developer.nvidia.com/isaac-gym), then follow the installation instructions in the documentation. We highly recommend using a conda environment to simplify setup.

Ensure that Isaac Gym works on your system by running one of the examples from the `python/examples` directory, like `joint_monkey.py`. Follow the troubleshooting steps described in the Isaac Gym Preview 4 install instructions if you have any trouble running the samples.

Once Isaac Gym is installed and the samples work within your current python environment, install this repo:

```bash
pip install -e .
```

### Creating an environment

We offer an easy-to-use API for creating preset vectorized environments. For more info on what a vectorized environment is and its usage, please refer to the Gym library [documentation](https://www.gymlibrary.dev/content/vectorising/#vectorized-environments).

```python
import isaacgym
import isaacgymenvs
import torch

num_envs = 2000

envs = isaacgymenvs.make(
    seed=0,
    task="Ant",
    num_envs=num_envs,
    sim_device="cuda:0",
    rl_device="cuda:0",
)
print("Observation space is", envs.observation_space)
print("Action space is", envs.action_space)
obs = envs.reset()
for _ in range(20):
    random_actions = 2.0 * torch.rand((num_envs,) + envs.action_space.shape, device="cuda:0") - 1.0
    envs.step(random_actions)
```

### Running the benchmarks

To train your first policy, run this line:

```bash
python train.py task=Cartpole
```

Cartpole should train to the point that the pole stays upright within a few seconds of starting.

Here's another example - Ant locomotion:

```bash
python train.py task=Ant
```

Note that by default we show a preview window, which will usually slow down training. You can use the `v` key while running to disable viewer updates and allow training to proceed faster. Hit the `v` key again to resume viewing after a few seconds of training, once the ants have learned to run a bit better. Use the `esc` key or close the viewer window to stop training early.

Alternatively, you can train headlessly, as follows:

```bash
python train.py task=Ant headless=True
```

Ant may take a minute or two to train a policy you can run. When running headlessly, you can stop it early using Control-C in the command line window.

### Loading trained models // Checkpoints

Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn`, where `EXPERIMENT_NAME` defaults to the task name but can also be overridden via the `experiment` argument.

To load a trained checkpoint and continue training, use the `checkpoint` argument:

```bash
python train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth
```

To load a trained checkpoint and only perform inference (no training), pass `test=True` as an argument, along with the checkpoint name.
To avoid rendering overhead, you may also want to run with fewer environments using `num_envs=64`:

```bash
python train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64
```

Note that if there are special characters such as `[` or `=` in the checkpoint names, you will need to escape them and put quotes around the string. For example, `checkpoint="./runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`.

### Configuration and command line arguments

We use [Hydra](https://hydra.cc/docs/intro/) to manage the config. Note that this has some differences from previous incarnations in older versions of Isaac Gym.

Key arguments to the `train.py` script are:

* `task=TASK` - selects which task to use. Any of `AllegroHand`, `AllegroHandDextremeADR`, `AllegroHandDextremeManualDR`, `AllegroKukaLSTM`, `AllegroKukaTwoArmsLSTM`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `FrankaCabinet`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM`, and `Trifinger` (these correspond to the config for each environment in the folder `isaacgymenvs/config/task`)
* `train=TRAIN` - selects which training config to use. Will automatically default to the correct config for the environment (i.e. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - sets a seed value for randomizations, and overrides the default seed set up in the task config.
* `sim_device=SIM_DEVICE_TYPE` - device used for physics simulation. Set to `cuda:0` (default) to use GPU and to `cpu` for CPU. Follows PyTorch-like device syntax.
* `rl_device=RL_DEVICE` - which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and also follows PyTorch-like device syntax.
* `graphics_device_id=GRAPHICS_DEVICE_ID` - which Vulkan graphics device ID to use for rendering. Defaults to 0. **Note** - this may be different from the CUDA device ID, and does **not** follow PyTorch-like device syntax.
* `pipeline=PIPELINE` - which API pipeline to use. Defaults to `gpu`, can also be set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU and everything runs as fast as possible. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `test=TEST` - if set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - whether to run in headless mode.
* `experiment=EXPERIMENT` - sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - sets how many iterations to run for. Reasonable defaults are provided for the provided environments.

Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the discount rate for an rl_games training run, you can use `train.params.config.gamma=0.999`. Similarly, variables in task configs can also be set. For example, `task.env.enableDebugVis=True`.

#### Hydra Notes

Default values for each of these are found in the `isaacgymenvs/config/config.yaml` file.

The way that the `task` and `train` portions of the config work is through the use of config groups, as sketched below.
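For illustration, here is a minimal sketch (not part of the training scripts) of how Hydra composes `config.yaml` with the `task` and `train` config groups and applies the same dotted overrides you would pass on the command line. It assumes you run it from the `isaacgymenvs/` directory so the relative `./cfg` path resolves; the override values are arbitrary examples.

```python
# Minimal sketch: compose the same config hierarchy train.py uses.
from hydra import compose, initialize

with initialize(config_path="./cfg"):
    cfg = compose(
        config_name="config",
        overrides=[
            "task=Ant",                         # selects cfg/task/Ant.yaml
            "train=AntPPO",                     # selects cfg/train/AntPPO.yaml
            "train.params.config.gamma=0.999",  # dotted override into the train group
            "task.env.enableDebugVis=True",     # dotted override into the task group
        ],
    )

print(cfg.task.env.numEnvs)           # value resolved from the task config
print(cfg.train.params.config.gamma)  # 0.999 after the override
```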
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/).

The actual configs for `task` are in `isaacgymenvs/config/task/<TASK>.yaml` and for `train` in `isaacgymenvs/config/train/<TASK>PPO.yaml`.

In some places in the config you will find other variables referenced (for example, `num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy. This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).

## Tasks

Source code for tasks can be found in `isaacgymenvs/tasks`.

Each task subclasses the `VecEnv` base class in `isaacgymenvs/base/vec_task.py`. Refer to [docs/framework.md](docs/framework.md) for how to create your own tasks.

Full details on each of the tasks available can be found in the [RL examples documentation](docs/rl_examples.md).

## Domain Randomization

IsaacGymEnvs includes a framework for Domain Randomization to improve Sim-to-Real transfer of trained RL policies. You can read more about it [here](docs/domain_randomization.md).

## Reproducibility and Determinism

If deterministic training of RL policies is important for your work, you may wish to review our [Reproducibility and Determinism Documentation](docs/reproducibility.md).

## Multi-GPU Training

You can run multi-GPU training using `torchrun` (i.e., `torch.distributed`) with this repository. Here is an example command for how to run in this way - `torchrun --standalone --nnodes=1 --nproc_per_node=2 train.py multi_gpu=True task=Ant <OTHER_ARGS>`, where the `--nproc_per_node=` flag specifies how many processes to run. Note that the `multi_gpu=True` flag must be set on the train script in order for multi-GPU training to run.

## Population Based Training

You can run population based training to help find good hyperparameters or to train on very difficult environments which would otherwise be hard to learn anything on without it. See [the readme](docs/pbt.md) for details.

## WandB support

You can run [WandB](https://wandb.ai/) with Isaac Gym Envs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run via the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed with `pip install wandb` before activating.

## Capture videos

We implement the standard `env.render(mode='rgb_array')` `gym` API to provide an image of the simulator viewer. Additionally, we can leverage `gym.wrappers.RecordVideo` to help record videos that show the agent's gameplay. Consider running the following file, which should produce a video in the `videos` folder.
```python
import gym
import isaacgym
import isaacgymenvs
import torch

num_envs = 64

envs = isaacgymenvs.make(
    seed=0,
    task="Ant",
    num_envs=num_envs,
    sim_device="cuda:0",
    rl_device="cuda:0",
    graphics_device_id=0,
    headless=False,
    multi_gpu=False,
    virtual_screen_capture=True,
    force_render=False,
)
envs.is_vector_env = True
envs = gym.wrappers.RecordVideo(
    envs,
    "./videos",
    step_trigger=lambda step: step % 10000 == 0,  # record a video every 10000 steps
    video_length=100,  # for each video record up to 100 steps
)
envs.reset()
print("the image of Isaac Gym viewer is an array of shape", envs.render(mode="rgb_array").shape)
for _ in range(100):
    actions = 2.0 * torch.rand((num_envs,) + envs.action_space.shape, device="cuda:0") - 1.0
    envs.step(actions)
```

## Capture videos during training

You can automatically capture videos of the agents' gameplay by toggling the `capture_video=True` flag, tune the capture frequency via `capture_video_freq=1500`, and set the video length via `capture_video_len=100`. You can set `force_render=False` to disable rendering when videos are not being captured.

```bash
python train.py capture_video=True capture_video_freq=1500 capture_video_len=100 force_render=False
```

You can also automatically upload the videos to Weights and Biases:

```bash
python train.py task=Ant wandb_activate=True wandb_entity=nvidia wandb_project=rl_games capture_video=True force_render=False
```

## Pre-commit

We use [pre-commit](https://pre-commit.com/) to help us automate short tasks that improve code quality. Before making a commit to the repository, please ensure `pre-commit run --all-files` runs without error.

## Troubleshooting

Please review the Isaac Gym installation instructions first if you run into any issues.

You can either submit issues through GitHub or through the [Isaac Gym forum here](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/isaac-gym/322).
## Citing

Please cite this work as:

```
@misc{makoviychuk2021isaac,
      title={Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning},
      author={Viktor Makoviychuk and Lukasz Wawrzyniak and Yunrong Guo and Michelle Lu and Kier Storey and Miles Macklin and David Hoeller and Nikita Rudin and Arthur Allshire and Ankur Handa and Gavriel State},
      year={2021},
      journal={arXiv preprint arXiv:2108.10470}
}
```

**Note** if you use the DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training work or the code related to Population Based Training, please cite the following paper:

```
@inproceedings{petrenko2023dexpbt,
    author = {Aleksei Petrenko, Arthur Allshire, Gavriel State, Ankur Handa, Viktor Makoviychuk},
    title = {DexPBT: Scaling up Dexterous Manipulation for Hand-Arm Systems with Population Based Training},
    booktitle = {RSS},
    year = {2023}
}
```

**Note** if you use the DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality work or the code related to Automatic Domain Randomisation, please cite the following paper:

```
@inproceedings{handa2023dextreme,
    author = {Ankur Handa, Arthur Allshire, Viktor Makoviychuk, Aleksei Petrenko, Ritvik Singh, Jingzhou Liu, Denys Makoviichuk, Karl Van Wyk, Alexander Zhurkevich, Balakumar Sundaralingam, Yashraj Narang, Jean-Francois Lafleche, Dieter Fox, Gavriel State},
    title = {DeXtreme: Transfer of Agile In-hand Manipulation from Simulation to Reality},
    booktitle = {ICRA},
    year = {2023}
}
```

**Note** if you use the ANYmal rough terrain environment in your work, please ensure you cite the following work:

```
@misc{rudin2021learning,
      title={Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning},
      author={Nikita Rudin and David Hoeller and Philipp Reist and Marco Hutter},
      year={2021},
      journal = {arXiv preprint arXiv:2109.11978}
}
```

**Note** if you use the Trifinger environment in your work, please ensure you cite the following work:

```
@misc{isaacgym-trifinger,
      title = {{Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger}},
      author = {Allshire, Arthur and Mittal, Mayank and Lodaya, Varun and Makoviychuk, Viktor and Makoviichuk, Denys and Widmaier, Felix and Wuthrich, Manuel and Bauer, Stefan and Handa, Ankur and Garg, Animesh},
      year = {2021},
      journal = {arXiv preprint arXiv:2108.09779}
}
```

**Note** if you use the AMP: Adversarial Motion Priors environment in your work, please ensure you cite the following work:

```
@article{2021-TOG-AMP,
    author = {Peng, Xue Bin and Ma, Ze and Abbeel, Pieter and Levine, Sergey and Kanazawa, Angjoo},
    title = {AMP: Adversarial Motion Priors for Stylized Physics-Based Character Control},
    journal = {ACM Trans. Graph.},
    issue_date = {August 2021},
    volume = {40},
    number = {4},
    month = jul,
    year = {2021},
    articleno = {1},
    numpages = {15},
    url = {http://doi.acm.org/10.1145/3450626.3459670},
    doi = {10.1145/3450626.3459670},
    publisher = {ACM},
    address = {New York, NY, USA},
    keywords = {motion control, physics-based character animation, reinforcement learning},
}
```

**Note** if you use the Factory simulation methods (e.g., SDF collisions, contact reduction) or Factory learning tools (e.g., assets, environments, or controllers) in your work, please cite the following paper:

```
@inproceedings{narang2022factory,
    author = {Yashraj Narang and Kier Storey and Iretiayo Akinola and Miles Macklin and Philipp Reist and Lukasz Wawrzyniak and Yunrong Guo and Adam Moravanszky and Gavriel State and Michelle Lu and Ankur Handa and Dieter Fox},
    title = {Factory: Fast contact for robotic assembly},
    booktitle = {Robotics: Science and Systems},
    year = {2022}
}
```

**Note** if you use the IndustReal training environments or algorithms in your work, please cite the following paper:

```
@inproceedings{tang2023industreal,
    author = {Bingjie Tang and Michael A Lin and Iretiayo Akinola and Ankur Handa and Gaurav S Sukhatme and Fabio Ramos and Dieter Fox and Yashraj Narang},
    title = {IndustReal: Transferring contact-rich assembly tasks from simulation to reality},
    booktitle = {Robotics: Science and Systems},
    year = {2023}
}
```
15,616
Markdown
44.135838
455
0.75698
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/__init__.py
import hydra
from hydra import compose, initialize
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf

from isaacgymenvs.utils.reformat import omegaconf_to_dict

OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)


def make(
    seed: int,
    task: str,
    num_envs: int,
    sim_device: str,
    rl_device: str,
    graphics_device_id: int = -1,
    headless: bool = False,
    multi_gpu: bool = False,
    virtual_screen_capture: bool = False,
    force_render: bool = True,
    cfg: DictConfig = None
):
    from isaacgymenvs.utils.rlgames_utils import get_rlgames_env_creator
    # create hydra config if no config passed in
    if cfg is None:
        # reset current hydra config if already parsed (but not passed in here)
        if HydraConfig.initialized():
            task = HydraConfig.get().runtime.choices['task']
            hydra.core.global_hydra.GlobalHydra.instance().clear()
        with initialize(config_path="./cfg"):
            cfg = compose(config_name="config", overrides=[f"task={task}"])
            cfg_dict = omegaconf_to_dict(cfg.task)
            cfg_dict['env']['numEnvs'] = num_envs
    # reuse existing config
    else:
        cfg_dict = omegaconf_to_dict(cfg.task)

    create_rlgpu_env = get_rlgames_env_creator(
        seed=seed,
        task_config=cfg_dict,
        task_name=cfg_dict["name"],
        sim_device=sim_device,
        rl_device=rl_device,
        graphics_device_id=graphics_device_id,
        headless=headless,
        multi_gpu=multi_gpu,
        virtual_screen_capture=virtual_screen_capture,
        force_render=force_render,
    )
    return create_rlgpu_env()
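
if __name__ == "__main__":
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file). The `cfg` argument lets a caller reuse an already-composed Hydra
    # config instead of having `make` compose one; the task choice and device
    # strings below are arbitrary, and running this requires Isaac Gym and a
    # CUDA-capable device.
    from hydra import compose as _compose, initialize as _initialize

    with _initialize(config_path="./cfg"):
        _cfg = _compose(config_name="config", overrides=["task=Cartpole"])

    # `make` skips Hydra composition here and reads env settings from _cfg.task
    _envs = make(
        seed=0,
        task="Cartpole",
        num_envs=int(_cfg.task.env.numEnvs),
        sim_device="cuda:0",
        rl_device="cuda:0",
        cfg=_cfg,
    )
    print(_envs.observation_space, _envs.action_space)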
1,953
Python
33.892857
100
0.656938
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/train.py
# train.py
# Script to train policies in Isaac Gym
#
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import hydra
from omegaconf import DictConfig, OmegaConf


def preprocess_train_config(cfg, config_dict):
    """
    Adding common configuration parameters to the rl_games train config.
    An alternative to this is inferring them in task-specific .yaml files, but that requires
    repeating the same variable interpolations in each config.
    """

    train_cfg = config_dict['params']['config']

    train_cfg['device'] = cfg.rl_device

    train_cfg['population_based_training'] = cfg.pbt.enabled
    train_cfg['pbt_idx'] = cfg.pbt.policy_idx if cfg.pbt.enabled else None

    train_cfg['full_experiment_name'] = cfg.get('full_experiment_name')

    print(f'Using rl_device: {cfg.rl_device}')
    print(f'Using sim_device: {cfg.sim_device}')
    print(train_cfg)

    try:
        model_size_multiplier = config_dict['params']['network']['mlp']['model_size_multiplier']
        if model_size_multiplier != 1:
            units = config_dict['params']['network']['mlp']['units']
            for i, u in enumerate(units):
                units[i] = u * model_size_multiplier
            print(f'Modified MLP units by x{model_size_multiplier} to {config_dict["params"]["network"]["mlp"]["units"]}')
    except KeyError:
        pass

    return config_dict


@hydra.main(version_base="1.1", config_name="config", config_path="./cfg")
def launch_rlg_hydra(cfg: DictConfig):
    import logging
    import os
    from datetime import datetime

    # noinspection PyUnresolvedReferences
    import isaacgym
    from isaacgymenvs.pbt.pbt import PbtAlgoObserver, initial_pbt_check
    from isaacgymenvs.utils.rlgames_utils import multi_gpu_get_rank
    from hydra.utils import to_absolute_path
    from isaacgymenvs.tasks import isaacgym_task_map
    import gym
    from isaacgymenvs.utils.reformat import omegaconf_to_dict, print_dict
    from isaacgymenvs.utils.utils import set_np_formatting, set_seed

    if cfg.pbt.enabled:
        initial_pbt_check(cfg)

    from isaacgymenvs.utils.rlgames_utils import RLGPUEnv, RLGPUAlgoObserver, MultiObserver, ComplexObsRLGPUEnv
    from isaacgymenvs.utils.wandb_utils import WandbAlgoObserver
    from rl_games.common import env_configurations, vecenv
    from rl_games.torch_runner import Runner
    from rl_games.algos_torch import model_builder
    from isaacgymenvs.learning import amp_continuous
    from isaacgymenvs.learning import amp_players
    from isaacgymenvs.learning import amp_models
    from isaacgymenvs.learning import amp_network_builder
    import isaacgymenvs

    time_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    run_name = f"{cfg.wandb_name}_{time_str}"

    # ensure checkpoints can be specified as relative paths
    if cfg.checkpoint:
        cfg.checkpoint = to_absolute_path(cfg.checkpoint)

    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)

    # set numpy formatting for printing only
    set_np_formatting()

    # global rank of the GPU
    global_rank = int(os.getenv("RANK", "0"))

    # sets seed. if seed is -1 will pick a random one
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic, rank=global_rank)

    def create_isaacgym_env(**kwargs):
        envs = isaacgymenvs.make(
            cfg.seed,
            cfg.task_name,
            cfg.task.env.numEnvs,
            cfg.sim_device,
            cfg.rl_device,
            cfg.graphics_device_id,
            cfg.headless,
            cfg.multi_gpu,
            cfg.capture_video,
            cfg.force_render,
            cfg,
            **kwargs,
        )
        if cfg.capture_video:
            envs.is_vector_env = True
            envs = gym.wrappers.RecordVideo(
                envs,
                f"videos/{run_name}",
                step_trigger=lambda step: step % cfg.capture_video_freq == 0,
                video_length=cfg.capture_video_len,
            )
        return envs

    env_configurations.register('rlgpu', {
        'vecenv_type': 'RLGPU',
        'env_creator': lambda **kwargs: create_isaacgym_env(**kwargs),
    })

    ige_env_cls = isaacgym_task_map[cfg.task_name]
    dict_cls = ige_env_cls.dict_obs_cls if hasattr(ige_env_cls, 'dict_obs_cls') and ige_env_cls.dict_obs_cls else False

    if dict_cls:
        obs_spec = {}
        actor_net_cfg = cfg.train.params.network
        obs_spec['obs'] = {'names': list(actor_net_cfg.inputs.keys()), 'concat': not actor_net_cfg.name == "complex_net", 'space_name': 'observation_space'}
        if "central_value_config" in cfg.train.params.config:
            critic_net_cfg = cfg.train.params.config.central_value_config.network
            obs_spec['states'] = {'names': list(critic_net_cfg.inputs.keys()), 'concat': not critic_net_cfg.name == "complex_net", 'space_name': 'state_space'}

        vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: ComplexObsRLGPUEnv(config_name, num_actors, obs_spec, **kwargs))
    else:
        vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))

    rlg_config_dict = omegaconf_to_dict(cfg.train)
    rlg_config_dict = preprocess_train_config(cfg, rlg_config_dict)

    observers = [RLGPUAlgoObserver()]

    if cfg.pbt.enabled:
        pbt_observer = PbtAlgoObserver(cfg)
        observers.append(pbt_observer)

    if cfg.wandb_activate:
        cfg.seed += global_rank
        if global_rank == 0:
            # initialize wandb only once per multi-gpu run
            wandb_observer = WandbAlgoObserver(cfg)
            observers.append(wandb_observer)

    # register new AMP network builder and agent
    def build_runner(algo_observer):
        runner = Runner(algo_observer)
        runner.algo_factory.register_builder('amp_continuous', lambda **kwargs: amp_continuous.AMPAgent(**kwargs))
        runner.player_factory.register_builder('amp_continuous', lambda **kwargs: amp_players.AMPPlayerContinuous(**kwargs))
        model_builder.register_model('continuous_amp', lambda network, **kwargs: amp_models.ModelAMPContinuous(network))
        model_builder.register_network('amp', lambda **kwargs: amp_network_builder.AMPBuilder())

        return runner

    # convert CLI arguments into dictionary
    # create runner and set the settings
    runner = build_runner(MultiObserver(observers))
    runner.load(rlg_config_dict)
    runner.reset()

    # dump config dict
    if not cfg.test:
        experiment_dir = os.path.join('runs', cfg.train.params.config.name + '_{date:%d-%H-%M-%S}'.format(date=datetime.now()))
        os.makedirs(experiment_dir, exist_ok=True)
        with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
            f.write(OmegaConf.to_yaml(cfg))

    runner.run({
        'train': not cfg.test,
        'play': cfg.test,
        'checkpoint': cfg.checkpoint,
        'sigma': cfg.sigma if cfg.sigma != '' else None
    })


if __name__ == "__main__":
    launch_rlg_hydra()
Python
38.113636
159
0.675035
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_models.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import torch.nn as nn
from rl_games.algos_torch.models import ModelA2CContinuousLogStd


class ModelAMPContinuous(ModelA2CContinuousLogStd):
    def __init__(self, network):
        super().__init__(network)
        return

    def build(self, config):
        net = self.network_builder.build('amp', **config)
        for name, _ in net.named_parameters():
            print(name)

        obs_shape = config['input_shape']
        normalize_value = config.get('normalize_value', False)
        normalize_input = config.get('normalize_input', False)
        value_size = config.get('value_size', 1)
        return self.Network(net, obs_shape=obs_shape,
                            normalize_value=normalize_value, normalize_input=normalize_input,
                            value_size=value_size)

    class Network(ModelA2CContinuousLogStd.Network):
        def __init__(self, a2c_network, **kwargs):
            super().__init__(a2c_network, **kwargs)
            return

        def forward(self, input_dict):
            is_train = input_dict.get('is_train', True)
            result = super().forward(input_dict)

            if (is_train):
                amp_obs = input_dict['amp_obs']
                disc_agent_logit = self.a2c_network.eval_disc(amp_obs)
                result["disc_agent_logit"] = disc_agent_logit

                amp_obs_replay = input_dict['amp_obs_replay']
                disc_agent_replay_logit = self.a2c_network.eval_disc(amp_obs_replay)
                result["disc_agent_replay_logit"] = disc_agent_replay_logit

                amp_demo_obs = input_dict['amp_obs_demo']
                disc_demo_logit = self.a2c_network.eval_disc(amp_demo_obs)
                result["disc_demo_logit"] = disc_demo_logit

            return result
3,290
Python
43.472972
100
0.685714
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/hrl_models.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import torch.nn as nn
from rl_games.algos_torch.models import ModelA2CContinuousLogStd


class ModelHRLContinuous(ModelA2CContinuousLogStd):
    def __init__(self, network):
        super().__init__(network)
        return

    def build(self, config):
        net = self.network_builder.build('amp', **config)
        for name, _ in net.named_parameters():
            print(name)
        return ModelHRLContinuous.Network(net)

    class Network(ModelA2CContinuousLogStd.Network):
        def __init__(self, a2c_network):
            super().__init__(a2c_network)
            return
2,142
Python
45.586956
80
0.744631
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_datasets.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import torch
from rl_games.common import datasets


class AMPDataset(datasets.PPODataset):
    def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
        super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len)
        self._idx_buf = torch.randperm(batch_size)
        return

    def update_mu_sigma(self, mu, sigma):
        raise NotImplementedError()
        return

    def _get_item(self, idx):
        start = idx * self.minibatch_size
        end = (idx + 1) * self.minibatch_size
        sample_idx = self._idx_buf[start:end]

        input_dict = {}
        for k, v in self.values_dict.items():
            if k not in self.special_names and v is not None:
                input_dict[k] = v[sample_idx]

        if (end >= self.batch_size):
            self._shuffle_idx_buf()

        return input_dict

    def _shuffle_idx_buf(self):
        self._idx_buf[:] = torch.randperm(self.batch_size)
        return
2,564
Python
41.749999
90
0.704758
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/replay_buffer.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import torch


class ReplayBuffer():
    def __init__(self, buffer_size, device):
        self._head = 0
        self._total_count = 0
        self._buffer_size = buffer_size
        self._device = device
        self._data_buf = None
        self._sample_idx = torch.randperm(buffer_size)
        self._sample_head = 0
        return

    def reset(self):
        self._head = 0
        self._total_count = 0
        self._reset_sample_idx()
        return

    def get_buffer_size(self):
        return self._buffer_size

    def get_total_count(self):
        return self._total_count

    def store(self, data_dict):
        if (self._data_buf is None):
            self._init_data_buf(data_dict)

        n = next(iter(data_dict.values())).shape[0]
        buffer_size = self.get_buffer_size()
        assert(n < buffer_size)

        for key, curr_buf in self._data_buf.items():
            curr_n = data_dict[key].shape[0]
            assert(n == curr_n)

            store_n = min(curr_n, buffer_size - self._head)
            curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]

            remainder = n - store_n
            if (remainder > 0):
                curr_buf[0:remainder] = data_dict[key][store_n:]

        self._head = (self._head + n) % buffer_size
        self._total_count += n

        return

    def sample(self, n):
        total_count = self.get_total_count()
        buffer_size = self.get_buffer_size()

        idx = torch.arange(self._sample_head, self._sample_head + n)
        idx = idx % buffer_size
        rand_idx = self._sample_idx[idx]
        if (total_count < buffer_size):
            rand_idx = rand_idx % self._head

        samples = dict()
        for k, v in self._data_buf.items():
            samples[k] = v[rand_idx]

        self._sample_head += n
        if (self._sample_head >= buffer_size):
            self._reset_sample_idx()

        return samples

    def _reset_sample_idx(self):
        buffer_size = self.get_buffer_size()
        self._sample_idx[:] = torch.randperm(buffer_size)
        self._sample_head = 0
        return

    def _init_data_buf(self, data_dict):
        buffer_size = self.get_buffer_size()
        self._data_buf = dict()

        for k, v in data_dict.items():
            v_shape = v.shape[1:]
            self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)

        return
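
if __name__ == "__main__":
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file). The buffer stores dicts of equally-sized tensor batches, wraps
    # writes around once they pass `buffer_size`, and reshuffles its sampling
    # order after a full pass; the key name and shapes below are arbitrary.
    buf = ReplayBuffer(buffer_size=8, device='cpu')
    for _ in range(2):
        buf.store({'amp_obs': torch.randn(3, 5)})  # two writes of 3 rows each

    samples = buf.sample(4)          # dict with the same keys as stored
    print(samples['amp_obs'].shape)  # torch.Size([4, 5])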
3,986
Python
33.973684
90
0.632965
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_network_builder.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import layers
from rl_games.algos_torch import network_builder

import torch
import torch.nn as nn
import numpy as np

DISC_LOGIT_INIT_SCALE = 1.0


class AMPBuilder(network_builder.A2CBuilder):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        return

    class Network(network_builder.A2CBuilder.Network):
        def __init__(self, params, **kwargs):
            super().__init__(params, **kwargs)

            if self.is_continuous:
                if (not self.space_config['learn_sigma']):
                    actions_num = kwargs.get('actions_num')
                    sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
                    self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=False, dtype=torch.float32), requires_grad=False)
                    sigma_init(self.sigma)

            amp_input_shape = kwargs.get('amp_input_shape')
            self._build_disc(amp_input_shape)

            return

        def load(self, params):
            super().load(params)

            self._disc_units = params['disc']['units']
            self._disc_activation = params['disc']['activation']
            self._disc_initializer = params['disc']['initializer']
            return

        def eval_critic(self, obs):
            c_out = self.critic_cnn(obs)
            c_out = c_out.contiguous().view(c_out.size(0), -1)
            c_out = self.critic_mlp(c_out)
            value = self.value_act(self.value(c_out))
            return value

        def eval_disc(self, amp_obs):
            disc_mlp_out = self._disc_mlp(amp_obs)
            disc_logits = self._disc_logits(disc_mlp_out)
            return disc_logits

        def get_disc_logit_weights(self):
            return torch.flatten(self._disc_logits.weight)

        def get_disc_weights(self):
            weights = []
            for m in self._disc_mlp.modules():
                if isinstance(m, nn.Linear):
                    weights.append(torch.flatten(m.weight))

            weights.append(torch.flatten(self._disc_logits.weight))
            return weights

        def _build_disc(self, input_shape):
            self._disc_mlp = nn.Sequential()

            mlp_args = {
                'input_size': input_shape[0],
                'units': self._disc_units,
                'activation': self._disc_activation,
                'dense_func': torch.nn.Linear
            }
            self._disc_mlp = self._build_mlp(**mlp_args)

            mlp_out_size = self._disc_units[-1]
            self._disc_logits = torch.nn.Linear(mlp_out_size, 1)

            mlp_init = self.init_factory.create(**self._disc_initializer)
            for m in self._disc_mlp.modules():
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)

            torch.nn.init.uniform_(self._disc_logits.weight, -DISC_LOGIT_INIT_SCALE, DISC_LOGIT_INIT_SCALE)
            torch.nn.init.zeros_(self._disc_logits.bias)

            return

    def build(self, name, **kwargs):
        net = AMPBuilder.Network(self.params, **kwargs)
        return net
4,898
Python
39.487603
134
0.620457
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/hrl_continuous.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import copy
from datetime import datetime
from gym import spaces
import numpy as np
import os
import time
import yaml

from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import a2c_common
from rl_games.common import datasets
from rl_games.common import schedulers
from rl_games.common import vecenv

import torch
from torch import optim

import isaacgymenvs.learning.common_agent as common_agent
import isaacgymenvs.learning.gen_amp as gen_amp
import isaacgymenvs.learning.gen_amp_models as gen_amp_models
import isaacgymenvs.learning.gen_amp_network_builder as gen_amp_network_builder

from tensorboardX import SummaryWriter


class HRLAgent(common_agent.CommonAgent):
    def __init__(self, base_name, config):
        with open(os.path.join(os.getcwd(), config['llc_config']), 'r') as f:
            llc_config = yaml.load(f, Loader=yaml.SafeLoader)
            llc_config_params = llc_config['params']
            self._latent_dim = llc_config_params['config']['latent_dim']

        super().__init__(base_name, config)

        self._task_size = self.vec_env.env.get_task_obs_size()

        self._llc_steps = config['llc_steps']
        llc_checkpoint = config['llc_checkpoint']
        assert(llc_checkpoint != "")
        self._build_llc(llc_config_params, llc_checkpoint)

        return

    def env_step(self, actions):
        actions = self.preprocess_actions(actions)
        obs = self.obs['obs']

        rewards = 0.0
        done_count = 0.0
        for t in range(self._llc_steps):
            llc_actions = self._compute_llc_action(obs, actions)
            obs, curr_rewards, curr_dones, infos = self.vec_env.step(llc_actions)

            rewards += curr_rewards
            done_count += curr_dones

        rewards /= self._llc_steps
        dones = torch.zeros_like(done_count)
        dones[done_count > 0] = 1.0

        if self.is_tensor_obses:
            if self.value_size == 1:
                rewards = rewards.unsqueeze(1)
            return self.obs_to_tensors(obs), rewards.to(self.ppo_device), dones.to(self.ppo_device), infos
        else:
            if self.value_size == 1:
                rewards = np.expand_dims(rewards, axis=1)
            return self.obs_to_tensors(obs), torch.from_numpy(rewards).to(self.ppo_device).float(), torch.from_numpy(dones).to(self.ppo_device), infos

    def cast_obs(self, obs):
        obs = super().cast_obs(obs)
        self._llc_agent.is_tensor_obses = self.is_tensor_obses
        return obs

    def preprocess_actions(self, actions):
        clamped_actions = torch.clamp(actions, -1.0, 1.0)
        if not self.is_tensor_obses:
            clamped_actions = clamped_actions.cpu().numpy()
        return clamped_actions

    def _setup_action_space(self):
        super()._setup_action_space()
        self.actions_num = self._latent_dim
        return

    def _build_llc(self, config_params, checkpoint_file):
        network_params = config_params['network']

        network_builder = gen_amp_network_builder.GenAMPBuilder()
        network_builder.load(network_params)

        network = gen_amp_models.ModelGenAMPContinuous(network_builder)

        llc_agent_config = self._build_llc_agent_config(config_params, network)

        self._llc_agent = gen_amp.GenAMPAgent('llc', llc_agent_config)
        self._llc_agent.restore(checkpoint_file)
        print("Loaded LLC checkpoint from {:s}".format(checkpoint_file))
        self._llc_agent.set_eval()

        return

    def _build_llc_agent_config(self, config_params, network):
        llc_env_info = copy.deepcopy(self.env_info)
        obs_space = llc_env_info['observation_space']
        obs_size = obs_space.shape[0]
        obs_size -= self._task_size
        llc_env_info['observation_space'] = spaces.Box(obs_space.low[:obs_size], obs_space.high[:obs_size])

        config = config_params['config']
        config['network'] = network
        config['num_actors'] = self.num_actors
        config['features'] = {'observer': self.algo_observer}
        config['env_info'] = llc_env_info

        return config

    def _compute_llc_action(self, obs, actions):
        llc_obs = self._extract_llc_obs(obs)
        processed_obs = self._llc_agent._preproc_obs(llc_obs)

        z = torch.nn.functional.normalize(actions, dim=-1)
        mu, _ = self._llc_agent.model.a2c_network.eval_actor(obs=processed_obs, amp_latents=z)
        llc_action = mu
        llc_action = self._llc_agent.preprocess_actions(llc_action)

        return llc_action

    def _extract_llc_obs(self, obs):
        obs_size = obs.shape[-1]
        llc_obs = obs[..., :obs_size - self._task_size]
        return llc_obs
6,339
Python
38.625
150
0.675974
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_continuous.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.algos_torch import torch_ext from rl_games.common import a2c_common from rl_games.common import schedulers from rl_games.common import vecenv from isaacgymenvs.utils.torch_jit_utils import to_torch import time from datetime import datetime import numpy as np from torch import optim import torch from torch import nn import isaacgymenvs.learning.replay_buffer as replay_buffer import isaacgymenvs.learning.common_agent as common_agent from tensorboardX import SummaryWriter class AMPAgent(common_agent.CommonAgent): def __init__(self, base_name, params): super().__init__(base_name, params) if self.normalize_value: self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std if self._normalize_amp_input: self._amp_input_mean_std = RunningMeanStd(self._amp_observation_space.shape).to(self.ppo_device) return def init_tensors(self): super().init_tensors() self._build_amp_buffers() return def set_eval(self): super().set_eval() if self._normalize_amp_input: self._amp_input_mean_std.eval() return def set_train(self): super().set_train() if self._normalize_amp_input: self._amp_input_mean_std.train() return def get_stats_weights(self): state = super().get_stats_weights() if self._normalize_amp_input: state['amp_input_mean_std'] = self._amp_input_mean_std.state_dict() return state def set_stats_weights(self, weights): super().set_stats_weights(weights) if self._normalize_amp_input: self._amp_input_mean_std.load_state_dict(weights['amp_input_mean_std']) return def play_steps(self): self.set_eval() epinfos = [] update_list = self.update_list for n in range(self.horizon_length): self.obs, done_env_ids = self._env_reset_done() self.experience_buffer.update_data('obses', n, self.obs['obs']) if self.use_action_masks: masks = self.vec_env.get_action_masks() res_dict = self.get_masked_action_values(self.obs, masks) else: res_dict = 
self.get_action_values(self.obs) for k in update_list: self.experience_buffer.update_data(k, n, res_dict[k]) if self.has_central_value: self.experience_buffer.update_data('states', n, self.obs['states']) self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions']) shaped_rewards = self.rewards_shaper(rewards) self.experience_buffer.update_data('rewards', n, shaped_rewards) self.experience_buffer.update_data('next_obses', n, self.obs['obs']) self.experience_buffer.update_data('dones', n, self.dones) self.experience_buffer.update_data('amp_obs', n, infos['amp_obs']) terminated = infos['terminate'].float() terminated = terminated.unsqueeze(-1) next_vals = self._eval_critic(self.obs) next_vals *= (1.0 - terminated) self.experience_buffer.update_data('next_values', n, next_vals) self.current_rewards += rewards self.current_lengths += 1 all_done_indices = self.dones.nonzero(as_tuple=False) done_indices = all_done_indices[::self.num_agents] self.game_rewards.update(self.current_rewards[done_indices]) self.game_lengths.update(self.current_lengths[done_indices]) self.algo_observer.process_infos(infos, done_indices) not_dones = 1.0 - self.dones.float() self.current_rewards = self.current_rewards * not_dones.unsqueeze(1) self.current_lengths = self.current_lengths * not_dones if (self.vec_env.env.viewer and (n == (self.horizon_length - 1))): self._amp_debug(infos) mb_fdones = self.experience_buffer.tensor_dict['dones'].float() mb_values = self.experience_buffer.tensor_dict['values'] mb_next_values = self.experience_buffer.tensor_dict['next_values'] mb_rewards = self.experience_buffer.tensor_dict['rewards'] mb_amp_obs = self.experience_buffer.tensor_dict['amp_obs'] amp_rewards = self._calc_amp_rewards(mb_amp_obs) mb_rewards = self._combine_rewards(mb_rewards, amp_rewards) mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values) mb_returns = mb_advs + mb_values batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list) batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns) batch_dict['played_frames'] = self.batch_size for k, v in amp_rewards.items(): batch_dict[k] = a2c_common.swap_and_flatten01(v) return batch_dict def prepare_dataset(self, batch_dict): super().prepare_dataset(batch_dict) self.dataset.values_dict['amp_obs'] = batch_dict['amp_obs'] self.dataset.values_dict['amp_obs_demo'] = batch_dict['amp_obs_demo'] self.dataset.values_dict['amp_obs_replay'] = batch_dict['amp_obs_replay'] return def train_epoch(self): play_time_start = time.time() with torch.no_grad(): if self.is_rnn: batch_dict = self.play_steps_rnn() else: batch_dict = self.play_steps() play_time_end = time.time() update_time_start = time.time() rnn_masks = batch_dict.get('rnn_masks', None) self._update_amp_demos() num_obs_samples = batch_dict['amp_obs'].shape[0] amp_obs_demo = self._amp_obs_demo_buffer.sample(num_obs_samples)['amp_obs'] batch_dict['amp_obs_demo'] = amp_obs_demo if (self._amp_replay_buffer.get_total_count() == 0): batch_dict['amp_obs_replay'] = batch_dict['amp_obs'] else: batch_dict['amp_obs_replay'] = self._amp_replay_buffer.sample(num_obs_samples)['amp_obs'] self.set_train() self.curr_frames = batch_dict.pop('played_frames') self.prepare_dataset(batch_dict) self.algo_observer.after_steps() if self.has_central_value: self.train_central_value() train_info = None if self.is_rnn: frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement()) print(frames_mask_ratio) for _ in range(0, self.mini_epochs_num): ep_kls = [] 
for i in range(len(self.dataset)): curr_train_info = self.train_actor_critic(self.dataset[i]) if self.schedule_type == 'legacy': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item()) self.update_lr(self.last_lr) if (train_info is None): train_info = dict() for k, v in curr_train_info.items(): train_info[k] = [v] else: for k, v in curr_train_info.items(): train_info[k].append(v) av_kls = torch_ext.mean_list(train_info['kl']) if self.schedule_type == 'standard': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) if self.schedule_type == 'standard_epoch': self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item()) self.update_lr(self.last_lr) update_time_end = time.time() play_time = play_time_end - play_time_start update_time = update_time_end - update_time_start total_time = update_time_end - play_time_start self._store_replay_amp_obs(batch_dict['amp_obs']) train_info['play_time'] = play_time train_info['update_time'] = update_time train_info['total_time'] = total_time self._record_train_batch_info(batch_dict, train_info) return train_info def calc_gradients(self, input_dict): self.set_train() value_preds_batch = input_dict['old_values'] old_action_log_probs_batch = input_dict['old_logp_actions'] advantage = input_dict['advantages'] old_mu_batch = input_dict['mu'] old_sigma_batch = input_dict['sigma'] return_batch = input_dict['returns'] actions_batch = input_dict['actions'] obs_batch = input_dict['obs'] obs_batch = self._preproc_obs(obs_batch) amp_obs = input_dict['amp_obs'][0:self._amp_minibatch_size] amp_obs = self._preproc_amp_obs(amp_obs) amp_obs_replay = input_dict['amp_obs_replay'][0:self._amp_minibatch_size] amp_obs_replay = self._preproc_amp_obs(amp_obs_replay) amp_obs_demo = input_dict['amp_obs_demo'][0:self._amp_minibatch_size] amp_obs_demo = self._preproc_amp_obs(amp_obs_demo) amp_obs_demo.requires_grad_(True) lr = self.last_lr kl = 1.0 lr_mul = 1.0 curr_e_clip = lr_mul * self.e_clip batch_dict = { 'is_train': True, 'prev_actions': actions_batch, 'obs' : obs_batch, 'amp_obs' : amp_obs, 'amp_obs_replay' : amp_obs_replay, 'amp_obs_demo' : amp_obs_demo } rnn_masks = None if self.is_rnn: rnn_masks = input_dict['rnn_masks'] batch_dict['rnn_states'] = input_dict['rnn_states'] batch_dict['seq_length'] = self.seq_len with torch.cuda.amp.autocast(enabled=self.mixed_precision): res_dict = self.model(batch_dict) action_log_probs = res_dict['prev_neglogp'] values = res_dict['values'] entropy = res_dict['entropy'] mu = res_dict['mus'] sigma = res_dict['sigmas'] disc_agent_logit = res_dict['disc_agent_logit'] disc_agent_replay_logit = res_dict['disc_agent_replay_logit'] disc_demo_logit = res_dict['disc_demo_logit'] a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip) a_loss = a_info['actor_loss'] c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value) c_loss = c_info['critic_loss'] b_loss = self.bound_loss(mu) losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks) a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3] disc_agent_cat_logit = torch.cat([disc_agent_logit, disc_agent_replay_logit], dim=0) disc_info = self._disc_loss(disc_agent_cat_logit, disc_demo_logit, 
amp_obs_demo) disc_loss = disc_info['disc_loss'] loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss \ + self._disc_coef * disc_loss if self.multi_gpu: self.optimizer.zero_grad() else: for param in self.model.parameters(): param.grad = None self.scaler.scale(loss).backward() #TODO: Refactor this ugliest code of the year if self.truncate_grads: if self.multi_gpu: self.optimizer.synchronize() self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) with self.optimizer.skip_synchronize(): self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.step(self.optimizer) self.scaler.update() with torch.no_grad(): reduce_kl = not self.is_rnn kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl) if self.is_rnn: kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask self.train_result = { 'entropy': entropy, 'kl': kl_dist, 'last_lr': self.last_lr, 'lr_mul': lr_mul, 'b_loss': b_loss } self.train_result.update(a_info) self.train_result.update(c_info) self.train_result.update(disc_info) return def _load_config_params(self, config): super()._load_config_params(config) self._task_reward_w = config['task_reward_w'] self._disc_reward_w = config['disc_reward_w'] self._amp_observation_space = self.env_info['amp_observation_space'] self._amp_batch_size = int(config['amp_batch_size']) self._amp_minibatch_size = int(config['amp_minibatch_size']) assert(self._amp_minibatch_size <= self.minibatch_size) self._disc_coef = config['disc_coef'] self._disc_logit_reg = config['disc_logit_reg'] self._disc_grad_penalty = config['disc_grad_penalty'] self._disc_weight_decay = config['disc_weight_decay'] self._disc_reward_scale = config['disc_reward_scale'] self._normalize_amp_input = config.get('normalize_amp_input', True) return def _build_net_config(self): config = super()._build_net_config() config['amp_input_shape'] = self._amp_observation_space.shape return config def _init_train(self): super()._init_train() self._init_amp_demo_buf() return def _disc_loss(self, disc_agent_logit, disc_demo_logit, obs_demo): # prediction loss disc_loss_agent = self._disc_loss_neg(disc_agent_logit) disc_loss_demo = self._disc_loss_pos(disc_demo_logit) disc_loss = 0.5 * (disc_loss_agent + disc_loss_demo) # logit reg logit_weights = self.model.a2c_network.get_disc_logit_weights() disc_logit_loss = torch.sum(torch.square(logit_weights)) disc_loss += self._disc_logit_reg * disc_logit_loss # grad penalty disc_demo_grad = torch.autograd.grad(disc_demo_logit, obs_demo, grad_outputs=torch.ones_like(disc_demo_logit), create_graph=True, retain_graph=True, only_inputs=True) disc_demo_grad = disc_demo_grad[0] disc_demo_grad = torch.sum(torch.square(disc_demo_grad), dim=-1) disc_grad_penalty = torch.mean(disc_demo_grad) disc_loss += self._disc_grad_penalty * disc_grad_penalty # weight decay if (self._disc_weight_decay != 0): disc_weights = self.model.a2c_network.get_disc_weights() disc_weights = torch.cat(disc_weights, dim=-1) disc_weight_decay = torch.sum(torch.square(disc_weights)) disc_loss += self._disc_weight_decay * disc_weight_decay disc_agent_acc, disc_demo_acc = self._compute_disc_acc(disc_agent_logit, disc_demo_logit) disc_info = { 'disc_loss': disc_loss, 'disc_grad_penalty': disc_grad_penalty, 
            'disc_logit_loss': disc_logit_loss,
            'disc_agent_acc': disc_agent_acc,
            'disc_demo_acc': disc_demo_acc,
            'disc_agent_logit': disc_agent_logit,
            'disc_demo_logit': disc_demo_logit
        }
        return disc_info

    def _disc_loss_neg(self, disc_logits):
        bce = torch.nn.BCEWithLogitsLoss()
        loss = bce(disc_logits, torch.zeros_like(disc_logits))
        return loss

    def _disc_loss_pos(self, disc_logits):
        bce = torch.nn.BCEWithLogitsLoss()
        loss = bce(disc_logits, torch.ones_like(disc_logits))
        return loss

    def _compute_disc_acc(self, disc_agent_logit, disc_demo_logit):
        agent_acc = disc_agent_logit < 0
        agent_acc = torch.mean(agent_acc.float())
        demo_acc = disc_demo_logit > 0
        demo_acc = torch.mean(demo_acc.float())
        return agent_acc, demo_acc

    def _fetch_amp_obs_demo(self, num_samples):
        amp_obs_demo = self.vec_env.env.fetch_amp_obs_demo(num_samples)
        return amp_obs_demo

    def _build_amp_buffers(self):
        batch_shape = self.experience_buffer.obs_base_shape
        self.experience_buffer.tensor_dict['amp_obs'] = torch.zeros(batch_shape + self._amp_observation_space.shape, device=self.ppo_device)

        amp_obs_demo_buffer_size = int(self.config['amp_obs_demo_buffer_size'])
        self._amp_obs_demo_buffer = replay_buffer.ReplayBuffer(amp_obs_demo_buffer_size, self.ppo_device)

        self._amp_replay_keep_prob = self.config['amp_replay_keep_prob']
        replay_buffer_size = int(self.config['amp_replay_buffer_size'])
        self._amp_replay_buffer = replay_buffer.ReplayBuffer(replay_buffer_size, self.ppo_device)

        self.tensor_list += ['amp_obs']
        return

    def _init_amp_demo_buf(self):
        buffer_size = self._amp_obs_demo_buffer.get_buffer_size()
        num_batches = int(np.ceil(buffer_size / self._amp_batch_size))

        for i in range(num_batches):
            curr_samples = self._fetch_amp_obs_demo(self._amp_batch_size)
            self._amp_obs_demo_buffer.store({'amp_obs': curr_samples})

        return

    def _update_amp_demos(self):
        new_amp_obs_demo = self._fetch_amp_obs_demo(self._amp_batch_size)
        self._amp_obs_demo_buffer.store({'amp_obs': new_amp_obs_demo})
        return

    def _preproc_amp_obs(self, amp_obs):
        if self._normalize_amp_input:
            amp_obs = self._amp_input_mean_std(amp_obs)
        return amp_obs

    def _combine_rewards(self, task_rewards, amp_rewards):
        disc_r = amp_rewards['disc_rewards']
        combined_rewards = self._task_reward_w * task_rewards \
                         + self._disc_reward_w * disc_r
        return combined_rewards

    def _eval_disc(self, amp_obs):
        proc_amp_obs = self._preproc_amp_obs(amp_obs)
        return self.model.a2c_network.eval_disc(proc_amp_obs)

    def _calc_amp_rewards(self, amp_obs):
        disc_r = self._calc_disc_rewards(amp_obs)
        output = {
            'disc_rewards': disc_r
        }
        return output

    def _calc_disc_rewards(self, amp_obs):
        with torch.no_grad():
            disc_logits = self._eval_disc(amp_obs)
            prob = 1 / (1 + torch.exp(-disc_logits))
            disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.ppo_device)))
            disc_r *= self._disc_reward_scale
        return disc_r

    def _store_replay_amp_obs(self, amp_obs):
        buf_size = self._amp_replay_buffer.get_buffer_size()
        buf_total_count = self._amp_replay_buffer.get_total_count()
        if (buf_total_count > buf_size):
            keep_probs = to_torch(np.array([self._amp_replay_keep_prob] * amp_obs.shape[0]), device=self.ppo_device)
            keep_mask = torch.bernoulli(keep_probs) == 1.0
            amp_obs = amp_obs[keep_mask]

        self._amp_replay_buffer.store({'amp_obs': amp_obs})
        return

    def _record_train_batch_info(self, batch_dict, train_info):
        train_info['disc_rewards'] = batch_dict['disc_rewards']
        return

    def _log_train_info(self, train_info, frame):
        super()._log_train_info(train_info, frame)

        self.writer.add_scalar('losses/disc_loss', torch_ext.mean_list(train_info['disc_loss']).item(), frame)
        self.writer.add_scalar('info/disc_agent_acc', torch_ext.mean_list(train_info['disc_agent_acc']).item(), frame)
        self.writer.add_scalar('info/disc_demo_acc', torch_ext.mean_list(train_info['disc_demo_acc']).item(), frame)
        self.writer.add_scalar('info/disc_agent_logit', torch_ext.mean_list(train_info['disc_agent_logit']).item(), frame)
        self.writer.add_scalar('info/disc_demo_logit', torch_ext.mean_list(train_info['disc_demo_logit']).item(), frame)
        self.writer.add_scalar('info/disc_grad_penalty', torch_ext.mean_list(train_info['disc_grad_penalty']).item(), frame)
        self.writer.add_scalar('info/disc_logit_loss', torch_ext.mean_list(train_info['disc_logit_loss']).item(), frame)

        disc_reward_std, disc_reward_mean = torch.std_mean(train_info['disc_rewards'])
        self.writer.add_scalar('info/disc_reward_mean', disc_reward_mean.item(), frame)
        self.writer.add_scalar('info/disc_reward_std', disc_reward_std.item(), frame)
        return

    def _amp_debug(self, info):
        with torch.no_grad():
            amp_obs = info['amp_obs']
            amp_obs = amp_obs[0:1]
            disc_pred = self._eval_disc(amp_obs)
            amp_rewards = self._calc_amp_rewards(amp_obs)
            disc_reward = amp_rewards['disc_rewards']

            disc_pred = disc_pred.detach().cpu().numpy()[0, 0]
            disc_reward = disc_reward.cpu().numpy()[0, 0]
            print("disc_pred: ", disc_pred, disc_reward)
        return
23314
Python
40.933453
157
0.6035
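The discriminator ("style") reward computed by _calc_disc_rewards above maps a logit d to -log(max(1 - sigmoid(d), 1e-4)) and scales it by disc_reward_scale, so transitions the discriminator scores as demo-like earn large rewards. A minimal standalone sketch of that mapping, assuming nothing beyond torch; the function name, toy logits, and scale value are illustrative, not from the source:

import torch

def disc_reward(disc_logits: torch.Tensor, scale: float = 2.0, eps: float = 0.0001) -> torch.Tensor:
    # r = -log(max(1 - sigmoid(d), eps)): confident "agent" samples (d << 0)
    # earn ~0 reward, confident "demo-like" samples (d >> 0) earn large reward.
    prob = torch.sigmoid(disc_logits)
    r = -torch.log(torch.clamp(1.0 - prob, min=eps))
    return scale * r

print(disc_reward(torch.tensor([-5.0, 0.0, 5.0])))
# ~ tensor([ 0.0134,  1.3863, 10.0139])

Up to the eps clamp, this is identical to torch.nn.functional.softplus(disc_logits) * scale, which is a numerically friendlier form of the same quantity.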
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/amp_players.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch from rl_games.algos_torch import torch_ext from rl_games.algos_torch.running_mean_std import RunningMeanStd from rl_games.common.player import BasePlayer import isaacgymenvs.learning.common_player as common_player class AMPPlayerContinuous(common_player.CommonPlayer): def __init__(self, params): config = params['config'] self._normalize_amp_input = config.get('normalize_amp_input', True) self._disc_reward_scale = config['disc_reward_scale'] self._print_disc_prediction = config.get('print_disc_prediction', False) super().__init__(params) return def restore(self, fn): super().restore(fn) if self._normalize_amp_input: checkpoint = torch_ext.load_checkpoint(fn) self._amp_input_mean_std.load_state_dict(checkpoint['amp_input_mean_std']) return def _build_net(self, config): super()._build_net(config) if self._normalize_amp_input: self._amp_input_mean_std = RunningMeanStd(config['amp_input_shape']).to(self.device) self._amp_input_mean_std.eval() return def _post_step(self, info): super()._post_step(info) if self._print_disc_prediction: self._amp_debug(info) return def _build_net_config(self): config = super()._build_net_config() if (hasattr(self, 'env')): config['amp_input_shape'] = self.env.amp_observation_space.shape else: config['amp_input_shape'] = self.env_info['amp_observation_space'] return config def _amp_debug(self, info): with torch.no_grad(): amp_obs = info['amp_obs'] amp_obs = amp_obs[0:1] disc_pred = self._eval_disc(amp_obs.to(self.device)) amp_rewards = self._calc_amp_rewards(amp_obs.to(self.device)) disc_reward = amp_rewards['disc_rewards'] disc_pred = disc_pred.detach().cpu().numpy()[0, 0] disc_reward = disc_reward.cpu().numpy()[0, 0] print("disc_pred: ", disc_pred, disc_reward) return def _preproc_amp_obs(self, amp_obs): if self._normalize_amp_input: amp_obs = self._amp_input_mean_std(amp_obs) return amp_obs def _eval_disc(self, amp_obs): proc_amp_obs = self._preproc_amp_obs(amp_obs) return self.model.a2c_network.eval_disc(proc_amp_obs) def 
_calc_amp_rewards(self, amp_obs):
        disc_r = self._calc_disc_rewards(amp_obs)
        output = {
            'disc_rewards': disc_r
        }
        return output

    def _calc_disc_rewards(self, amp_obs):
        with torch.no_grad():
            disc_logits = self._eval_disc(amp_obs)
            prob = 1.0 / (1.0 + torch.exp(-disc_logits))
            disc_r = -torch.log(torch.maximum(1 - prob, torch.tensor(0.0001, device=self.device)))
            disc_r *= self._disc_reward_scale
        return disc_r
4535
Python
38.103448
98
0.657773
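restore() above pulls the AMP input normalizer's statistics out of the same checkpoint that holds the policy weights. A minimal sketch of that step, assuming an rl_games-style checkpoint with an 'amp_input_mean_std' entry; the path and observation size are hypothetical:

import torch
from rl_games.algos_torch.running_mean_std import RunningMeanStd

# Hypothetical checkpoint path; the 'amp_input_mean_std' key matches the one
# read by AMPPlayerContinuous.restore() above.
checkpoint = torch.load('runs/HumanoidAMP/nn/HumanoidAMP.pth', map_location='cpu')

amp_input_mean_std = RunningMeanStd((210,))  # illustrative amp_obs shape
amp_input_mean_std.load_state_dict(checkpoint['amp_input_mean_std'])
amp_input_mean_std.eval()  # freeze the running stats; inference only normalizes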
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/common_agent.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import copy
from datetime import datetime
from gym import spaces
import numpy as np
import os
import time
import yaml

from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import a2c_common
from rl_games.common import datasets
from rl_games.common import schedulers
from rl_games.common import vecenv

import torch
from torch import optim
# `nn` is used by calc_gradients() below for gradient clipping.
from torch import nn

from . import amp_datasets as amp_datasets

from tensorboardX import SummaryWriter

class CommonAgent(a2c_continuous.A2CAgent):

    def __init__(self, base_name, params):
        a2c_common.A2CBase.__init__(self, base_name, params)

        config = params['config']
        self._load_config_params(config)

        self.is_discrete = False
        self._setup_action_space()
        self.bounds_loss_coef = config.get('bounds_loss_coef', None)
        self.clip_actions = config.get('clip_actions', True)

        self.network_path = self.nn_dir

        net_config = self._build_net_config()
        self.model = self.network.build(net_config)
        self.model.to(self.ppo_device)
        self.states = None

        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)

        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.has_central_value:
            cv_config = {
                'state_shape' : torch_ext.shape_whc_to_cwh(self.state_shape),
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'num_steps' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'model' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,  # 'writter' is the parameter name expected by rl_games
                'multi_gpu' : self.multi_gpu
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = amp_datasets.AMPDataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)

        self.algo_observer.after_init(self)

        return

    def init_tensors(self):
        super().init_tensors()
        self.experience_buffer.tensor_dict['next_obses'] = torch.zeros_like(self.experience_buffer.tensor_dict['obses'])
        self.experience_buffer.tensor_dict['next_values'] = torch.zeros_like(self.experience_buffer.tensor_dict['values'])
        self.tensor_list += ['next_obses']
        return

    def train(self):
        self.init_tensors()
        self.last_mean_rewards = -100500
        start_time = time.time()
        total_time = 0
        rep_count = 0
        self.frame = 0
        self.obs = self.env_reset()
        self.curr_frames = self.batch_size_envs

        self.model_output_file = os.path.join(self.network_path, self.config['name'] + '_{date:%d-%H-%M-%S}'.format(date=datetime.now()))

        self._init_train()

        # global rank of the GPU
        # multi-gpu training is not currently supported for AMP
        self.global_rank = int(os.getenv("RANK", "0"))

        while True:
            epoch_num = self.update_epoch()
            train_info = self.train_epoch()

            sum_time = train_info['total_time']
            total_time += sum_time
            frame = self.frame
            if self.global_rank == 0:
                scaled_time = sum_time
                scaled_play_time = train_info['play_time']
                curr_frames = self.curr_frames
                self.frame += curr_frames
                if self.print_stats:
                    fps_step = curr_frames / scaled_play_time
                    fps_total = curr_frames / scaled_time
                    print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')

                self.writer.add_scalar('performance/total_fps', curr_frames / scaled_time, frame)
                self.writer.add_scalar('performance/step_fps', curr_frames / scaled_play_time, frame)
                self.writer.add_scalar('info/epochs', epoch_num, frame)
                self._log_train_info(train_info, frame)

                self.algo_observer.after_print_stats(frame, epoch_num, total_time)

                if self.game_rewards.current_size > 0:
                    mean_rewards = self.game_rewards.get_mean()
                    mean_lengths = self.game_lengths.get_mean()

                    for i in range(self.value_size):
                        self.writer.add_scalar('rewards/frame', mean_rewards[i], frame)
                        self.writer.add_scalar('rewards/iter', mean_rewards[i], epoch_num)
                        self.writer.add_scalar('rewards/time', mean_rewards[i], total_time)

                    self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
                    self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)

                    if self.has_self_play_config:
                        self.self_play_manager.update(self)

                if self.save_freq > 0:
                    if (epoch_num % self.save_freq == 0):
                        self.save(self.model_output_file + "_" + str(epoch_num))

                if epoch_num > self.max_epochs:
                    self.save(self.model_output_file)
                    print('MAX EPOCHS NUM!')
                    return self.last_mean_rewards, epoch_num

                update_time = 0

        return

    def train_epoch(self):
        play_time_start = time.time()
        with torch.no_grad():
            if self.is_rnn:
                batch_dict = self.play_steps_rnn()
            else:
                batch_dict = self.play_steps()
        play_time_end = time.time()

        update_time_start = time.time()
        rnn_masks = batch_dict.get('rnn_masks', None)

        self.set_train()

        self.curr_frames = batch_dict.pop('played_frames')
        self.prepare_dataset(batch_dict)
        self.algo_observer.after_steps()

        if self.has_central_value:
            self.train_central_value()

        train_info = None

        if self.is_rnn:
            frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement())
            print(frames_mask_ratio)

        for _ in range(0, self.mini_epochs_num):
            ep_kls = []
            for i in range(len(self.dataset)):
                curr_train_info = self.train_actor_critic(self.dataset[i])

                if self.schedule_type == 'legacy':
                    self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, curr_train_info['kl'].item())
                    self.update_lr(self.last_lr)

                if (train_info is None):
                    train_info = dict()
                    for k, v in curr_train_info.items():
                        train_info[k] = [v]
                else:
                    for k, v in curr_train_info.items():
                        train_info[k].append(v)

            av_kls = torch_ext.mean_list(train_info['kl'])

            if self.schedule_type == 'standard':
                self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
                self.update_lr(self.last_lr)

        if self.schedule_type == 'standard_epoch':
            self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
            self.update_lr(self.last_lr)

        update_time_end = time.time()
        play_time = play_time_end - play_time_start
        update_time = update_time_end - update_time_start
        total_time = update_time_end - play_time_start

        train_info['play_time'] = play_time
        train_info['update_time'] = update_time
        train_info['total_time'] = total_time
        self._record_train_batch_info(batch_dict, train_info)

        return train_info

    def play_steps(self):
        self.set_eval()

        epinfos = []
        update_list = self.update_list

        for n in range(self.horizon_length):
            self.obs, done_env_ids = self._env_reset_done()
            self.experience_buffer.update_data('obses', n, self.obs['obs'])

            if self.use_action_masks:
                masks = self.vec_env.get_action_masks()
                res_dict = self.get_masked_action_values(self.obs, masks)
            else:
                res_dict = self.get_action_values(self.obs)

            for k in update_list:
                self.experience_buffer.update_data(k, n, res_dict[k])

            if self.has_central_value:
                self.experience_buffer.update_data('states', n, self.obs['states'])

            self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
            shaped_rewards = self.rewards_shaper(rewards)
            self.experience_buffer.update_data('rewards', n, shaped_rewards)
            self.experience_buffer.update_data('next_obses', n, self.obs['obs'])
            self.experience_buffer.update_data('dones', n, self.dones)

            terminated = infos['terminate'].float()
            terminated = terminated.unsqueeze(-1)
            next_vals = self._eval_critic(self.obs)
            next_vals *= (1.0 - terminated)
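            # Bootstrapping note: infos['terminate'] flags true early terminations
            # (e.g. the character falling), for which the bootstrap value V(s') is
            # zeroed above; episodes that merely hit the time limit keep their
            # bootstrap value, which is the usual infinite-horizon treatment.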
self.experience_buffer.update_data('next_values', n, next_vals) self.current_rewards += rewards self.current_lengths += 1 all_done_indices = self.dones.nonzero(as_tuple=False) done_indices = all_done_indices[::self.num_agents] self.game_rewards.update(self.current_rewards[done_indices]) self.game_lengths.update(self.current_lengths[done_indices]) self.algo_observer.process_infos(infos, done_indices) not_dones = 1.0 - self.dones.float() self.current_rewards = self.current_rewards * not_dones.unsqueeze(1) self.current_lengths = self.current_lengths * not_dones mb_fdones = self.experience_buffer.tensor_dict['dones'].float() mb_values = self.experience_buffer.tensor_dict['values'] mb_next_values = self.experience_buffer.tensor_dict['next_values'] mb_rewards = self.experience_buffer.tensor_dict['rewards'] mb_advs = self.discount_values(mb_fdones, mb_values, mb_rewards, mb_next_values) mb_returns = mb_advs + mb_values batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list) batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns) batch_dict['played_frames'] = self.batch_size return batch_dict def calc_gradients(self, input_dict): self.set_train() value_preds_batch = input_dict['old_values'] old_action_log_probs_batch = input_dict['old_logp_actions'] advantage = input_dict['advantages'] old_mu_batch = input_dict['mu'] old_sigma_batch = input_dict['sigma'] return_batch = input_dict['returns'] actions_batch = input_dict['actions'] obs_batch = input_dict['obs'] obs_batch = self._preproc_obs(obs_batch) lr = self.last_lr kl = 1.0 lr_mul = 1.0 curr_e_clip = lr_mul * self.e_clip batch_dict = { 'is_train': True, 'prev_actions': actions_batch, 'obs' : obs_batch } rnn_masks = None if self.is_rnn: rnn_masks = input_dict['rnn_masks'] batch_dict['rnn_states'] = input_dict['rnn_states'] batch_dict['seq_length'] = self.seq_len with torch.cuda.amp.autocast(enabled=self.mixed_precision): res_dict = self.model(batch_dict) action_log_probs = res_dict['prev_neglogp'] values = res_dict['value'] entropy = res_dict['entropy'] mu = res_dict['mu'] sigma = res_dict['sigma'] a_info = self._actor_loss(old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip) a_loss = a_info['actor_loss'] c_info = self._critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value) c_loss = c_info['critic_loss'] b_loss = self.bound_loss(mu) losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks) a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3] loss = a_loss + self.critic_coef * c_loss - self.entropy_coef * entropy + self.bounds_loss_coef * b_loss if self.multi_gpu: self.optimizer.zero_grad() else: for param in self.model.parameters(): param.grad = None self.scaler.scale(loss).backward() #TODO: Refactor this ugliest code of the year if self.truncate_grads: if self.multi_gpu: self.optimizer.synchronize() self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) with self.optimizer.skip_synchronize(): self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm) self.scaler.step(self.optimizer) self.scaler.update() else: self.scaler.step(self.optimizer) self.scaler.update() with torch.no_grad(): reduce_kl = not self.is_rnn kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, 
reduce_kl) if self.is_rnn: kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask self.train_result = { 'entropy': entropy, 'kl': kl_dist, 'last_lr': self.last_lr, 'lr_mul': lr_mul, 'b_loss': b_loss } self.train_result.update(a_info) self.train_result.update(c_info) return def discount_values(self, mb_fdones, mb_values, mb_rewards, mb_next_values): lastgaelam = 0 mb_advs = torch.zeros_like(mb_rewards) for t in reversed(range(self.horizon_length)): not_done = 1.0 - mb_fdones[t] not_done = not_done.unsqueeze(1) delta = mb_rewards[t] + self.gamma * mb_next_values[t] - mb_values[t] lastgaelam = delta + self.gamma * self.tau * not_done * lastgaelam mb_advs[t] = lastgaelam return mb_advs def bound_loss(self, mu): if self.bounds_loss_coef is not None: soft_bound = 1.0 mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.ppo_device))**2 mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.ppo_device))**2 b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1) else: b_loss = 0 return b_loss def _load_config_params(self, config): self.last_lr = config['learning_rate'] return def _build_net_config(self): obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape) config = { 'actions_num' : self.actions_num, 'input_shape' : obs_shape, 'num_seqs' : self.num_actors * self.num_agents, 'value_size': self.env_info.get('value_size', 1), 'normalize_value' : self.normalize_value, 'normalize_input': self.normalize_input, } return config def _setup_action_space(self): action_space = self.env_info['action_space'] self.actions_num = action_space.shape[0] # todo introduce device instead of cuda() self.actions_low = torch.from_numpy(action_space.low.copy()).float().to(self.ppo_device) self.actions_high = torch.from_numpy(action_space.high.copy()).float().to(self.ppo_device) return def _init_train(self): return def _env_reset_done(self): obs, done_env_ids = self.vec_env.reset_done() return self.obs_to_tensors(obs), done_env_ids def _eval_critic(self, obs_dict): self.model.eval() obs = obs_dict['obs'] processed_obs = self._preproc_obs(obs) if self.normalize_input: processed_obs = self.model.norm_obs(processed_obs) value = self.model.a2c_network.eval_critic(processed_obs) if self.normalize_value: value = self.value_mean_std(value, True) return value def _actor_loss(self, old_action_log_probs_batch, action_log_probs, advantage, curr_e_clip): clip_frac = None if (self.ppo): ratio = torch.exp(old_action_log_probs_batch - action_log_probs) surr1 = advantage * ratio surr2 = advantage * torch.clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip) a_loss = torch.max(-surr1, -surr2) clipped = torch.abs(ratio - 1.0) > curr_e_clip clip_frac = torch.mean(clipped.float()) clip_frac = clip_frac.detach() else: a_loss = (action_log_probs * advantage) info = { 'actor_loss': a_loss, 'actor_clip_frac': clip_frac } return info def _critic_loss(self, value_preds_batch, values, curr_e_clip, return_batch, clip_value): if clip_value: value_pred_clipped = value_preds_batch + \ (values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip) value_losses = (values - return_batch)**2 value_losses_clipped = (value_pred_clipped - return_batch)**2 c_loss = torch.max(value_losses, value_losses_clipped) else: c_loss = (return_batch - values)**2 info = { 'critic_loss': c_loss } return info def _record_train_batch_info(self, batch_dict, train_info): return def _log_train_info(self, train_info, frame): self.writer.add_scalar('performance/update_time', train_info['update_time'], frame) 
        self.writer.add_scalar('performance/play_time', train_info['play_time'], frame)
        self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(train_info['actor_loss']).item(), frame)
        self.writer.add_scalar('losses/c_loss', torch_ext.mean_list(train_info['critic_loss']).item(), frame)
        self.writer.add_scalar('losses/bounds_loss', torch_ext.mean_list(train_info['b_loss']).item(), frame)
        self.writer.add_scalar('losses/entropy', torch_ext.mean_list(train_info['entropy']).item(), frame)
        self.writer.add_scalar('info/last_lr', train_info['last_lr'][-1] * train_info['lr_mul'][-1], frame)
        self.writer.add_scalar('info/lr_mul', train_info['lr_mul'][-1], frame)
        self.writer.add_scalar('info/e_clip', self.e_clip * train_info['lr_mul'][-1], frame)
        self.writer.add_scalar('info/clip_frac', torch_ext.mean_list(train_info['actor_clip_frac']).item(), frame)
        self.writer.add_scalar('info/kl', torch_ext.mean_list(train_info['kl']).item(), frame)
        return
21575
Python
39.863636
157
0.585724
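discount_values in the file above is a standard GAE(lambda) backward recursion; note that next_values were already zeroed at true terminations during rollout, so no done-masking of the bootstrap term is needed inside the loop. A self-contained sketch with toy shapes (the sizes and random values are illustrative):

import torch

def discount_values(fdones, values, rewards, next_values, gamma=0.99, tau=0.95):
    # Backward GAE(lambda) pass; `tau` plays the role of lambda, matching the
    # attribute names used by the agent above.
    horizon = rewards.shape[0]
    lastgaelam = 0
    advs = torch.zeros_like(rewards)
    for t in reversed(range(horizon)):
        not_done = (1.0 - fdones[t]).unsqueeze(1)
        delta = rewards[t] + gamma * next_values[t] - values[t]
        lastgaelam = delta + gamma * tau * not_done * lastgaelam
        advs[t] = lastgaelam
    return advs

T, N = 4, 2  # horizon, num_envs (toy sizes)
fdones = torch.zeros(T, N)
values = torch.rand(T, N, 1)
next_values = torch.rand(T, N, 1)
rewards = torch.rand(T, N, 1)
returns = discount_values(fdones, values, rewards, next_values) + values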
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/learning/common_player.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# `time` is used by run() below when rendering.
import time

import torch

from rl_games.algos_torch import players
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.player import BasePlayer

class CommonPlayer(players.PpoPlayerContinuous):

    def __init__(self, params):
        BasePlayer.__init__(self, params)

        self.network = self.config['network']

        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config['normalize_value']

        self._setup_action_space()
        self.mask = [False]

        net_config = self._build_net_config()
        self._build_net(net_config)

        return

    def run(self):
        n_games = self.games_num
        render = self.render_env
        n_game_life = self.n_game_life
        is_deterministic = self.is_deterministic
        sum_rewards = 0
        sum_steps = 0
        sum_game_res = 0
        n_games = n_games * n_game_life
        games_played = 0
        has_masks = False
        has_masks_func = getattr(self.env, "has_action_mask", None) is not None

        op_agent = getattr(self.env, "create_agent", None)
        if op_agent:
            agent_inited = True

        if has_masks_func:
            has_masks = self.env.has_action_mask()

        need_init_rnn = self.is_rnn
        for _ in range(n_games):
            if games_played >= n_games:
                break

            obs_dict = self.env_reset(self.env)
            batch_size = 1
            batch_size = self.get_batch_size(obs_dict['obs'], batch_size)

            if need_init_rnn:
                self.init_rnn()
                need_init_rnn = False

            cr = torch.zeros(batch_size, dtype=torch.float32)
            steps = torch.zeros(batch_size, dtype=torch.float32)

            print_game_res = False

            for n in range(self.max_steps):
                obs_dict, done_env_ids = self._env_reset_done()

                if has_masks:
                    masks = self.env.get_action_mask()
                    action = self.get_masked_action(obs_dict, masks, is_deterministic)
                else:
                    action = self.get_action(obs_dict, is_deterministic)
                obs_dict, r, done, info = self.env_step(self.env, action)
                cr += r
                steps += 1

                self._post_step(info)

                if render:
                    self.env.render(mode='human')
                    time.sleep(self.render_sleep)

                all_done_indices = done.nonzero(as_tuple=False)
                done_indices = all_done_indices[::self.num_agents]
                done_count = len(done_indices)
                games_played += done_count

                if done_count > 0:
                    if self.is_rnn:
                        for s in self.states:
                            s[:, all_done_indices, :] = s[:, all_done_indices, :] * 0.0

                    cur_rewards = cr[done_indices].sum().item()
                    cur_steps = steps[done_indices].sum().item()

                    cr = cr * (1.0 - done.float())
                    steps = steps * (1.0 - done.float())

                    sum_rewards += cur_rewards
                    sum_steps += cur_steps

                    game_res = 0.0
                    if isinstance(info, dict):
                        if 'battle_won' in info:
                            print_game_res = True
                            game_res = info.get('battle_won', 0.5)
                        if 'scores' in info:
                            print_game_res = True
                            game_res = info.get('scores', 0.5)
                    if self.print_stats:
                        if print_game_res:
                            print('reward:', cur_rewards / done_count, 'steps:', cur_steps / done_count, 'w:', game_res)
                        else:
                            print('reward:', cur_rewards / done_count, 'steps:', cur_steps / done_count)

                    sum_game_res += game_res
                    if batch_size // self.num_agents == 1 or games_played >= n_games:
                        break

        print(sum_rewards)
        if print_game_res:
            print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
        else:
            print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps / games_played * n_game_life)

        return

    def obs_to_torch(self, obs):
        obs = super().obs_to_torch(obs)
        obs_dict = {
            'obs': obs
        }
        return obs_dict

    def get_action(self, obs_dict, is_deterministic=False):
        output = super().get_action(obs_dict['obs'], is_deterministic)
        return output

    def _build_net(self, config):
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()
        return

    def _env_reset_done(self):
        obs, done_env_ids = self.env.reset_done()
        return self.obs_to_torch(obs), done_env_ids

    def _post_step(self, info):
        return

    def _build_net_config(self):
        obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        return config

    def _setup_action_space(self):
        self.actions_num = self.action_space.shape[0]
        self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
        self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
        return
7570
Python
37.627551
181
0.571731
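The evaluation loop above accumulates per-env reward and step counters and clears them with a (1 - done) multiplicative mask rather than index assignment. A toy illustration of that idiom (the values are illustrative):

import torch

cr = torch.tensor([3.5, 1.2, 7.0])    # cumulative rewards for 3 envs
steps = torch.tensor([10., 4., 25.])
done = torch.tensor([False, True, False])

done_indices = done.nonzero(as_tuple=False)
finished_reward = cr[done_indices].sum().item()  # 1.2, logged on completion

# reset only the finished envs; unfinished envs keep accumulating
cr = cr * (1.0 - done.float())
steps = steps * (1.0 - done.float())
print(cr)     # tensor([3.5000, 0.0000, 7.0000])
print(steps)  # tensor([10.,  0., 25.])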
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_hand.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from isaacgymenvs.tasks.base.vec_task import VecTask class AllegroHand(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.success_tolerance = self.cfg["env"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = 
self.cfg["env"].get("resetTime", -1.0)
        self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
        self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
        self.av_factor = self.cfg["env"].get("averFactor", 0.1)

        self.object_type = self.cfg["env"]["objectType"]
        assert self.object_type in ["block", "egg", "pen"]

        self.ignore_z = (self.object_type == "pen")

        self.asset_files_dict = {
            "block": "urdf/objects/cube_multicolor.urdf",
            "egg": "mjcf/open_ai_assets/hand/egg.xml",
            "pen": "mjcf/open_ai_assets/hand/pen.xml"
        }

        if "asset" in self.cfg["env"]:
            self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
            self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
            self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"])

        # can be "full_no_vel", "full", "full_state"
        self.obs_type = self.cfg["env"]["observationType"]

        if not (self.obs_type in ["full_no_vel", "full", "full_state"]):
            raise Exception(
                "Unknown type of observations!\nobservationType should be one of: [full_no_vel, full, full_state]")

        print("Obs type:", self.obs_type)

        self.num_obs_dict = {
            "full_no_vel": 50,
            "full": 72,
            "full_state": 88
        }

        self.up_axis = 'z'

        self.use_vel_obs = False
        self.fingertip_obs = True
        self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]

        num_states = 0
        if self.asymmetric_obs:
            num_states = 88

        self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
        self.cfg["env"]["numStates"] = num_states
        self.cfg["env"]["numActions"] = 16

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        self.dt = self.sim_params.dt
        control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
        if self.reset_time > 0.0:
            self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))
            print("Reset time: ", self.reset_time)
            print("New episode length: ", self.max_episode_length)

        if self.viewer != None:
            cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
            cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # get gym GPU state tensors
        actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)

        if self.obs_type == "full_state" or self.asymmetric_obs:
            # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            # self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)

            dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs)

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)

        # create some wrapper tensors for different slices
        self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float, device=self.device)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs]
        self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0]
        self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1]
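        # Note: gymtorch.wrap_tensor() returns torch views that alias the
        # simulator's internal buffers, so the pos/vel slices above update in
        # place after each refresh_dof_state_tensor() call rather than copying.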
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs print("Num dofs: ", self.num_dofs) self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1])) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') allegro_hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf" if "asset" in self.cfg["env"]: asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) allegro_hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", allegro_hand_asset_file) object_asset_file = self.asset_files_dict[self.object_type] # load shadow hand_ asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS allegro_hand_asset = self.gym.load_asset(self.sim, asset_root, allegro_hand_asset_file, asset_options) self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(allegro_hand_asset) self.num_shadow_hand_shapes = 
self.gym.get_asset_rigid_shape_count(allegro_hand_asset) self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(allegro_hand_asset) print("Num dofs: ", self.num_shadow_hand_dofs) self.num_shadow_hand_actuators = self.num_shadow_hand_dofs self.actuated_dof_indices = [i for i in range(self.num_shadow_hand_dofs)] # set shadow_hand dof properties shadow_hand_dof_props = self.gym.get_asset_dof_properties(allegro_hand_asset) self.shadow_hand_dof_lower_limits = [] self.shadow_hand_dof_upper_limits = [] self.shadow_hand_dof_default_pos = [] self.shadow_hand_dof_default_vel = [] self.sensors = [] sensor_pose = gymapi.Transform() for i in range(self.num_shadow_hand_dofs): self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i]) self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i]) self.shadow_hand_dof_default_pos.append(0.0) self.shadow_hand_dof_default_vel.append(0.0) print("Max effort: ", shadow_hand_dof_props['effort'][i]) shadow_hand_dof_props['effort'][i] = 0.5 shadow_hand_dof_props['stiffness'][i] = 3 shadow_hand_dof_props['damping'][i] = 0.1 shadow_hand_dof_props['friction'][i] = 0.01 shadow_hand_dof_props['armature'][i] = 0.001 self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device) self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device) self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device) self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) shadow_hand_start_pose = gymapi.Transform() shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) shadow_hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi) object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = shadow_hand_start_pose.p.x pose_dy, pose_dz = -0.2, 0.06 object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz if self.object_type == "pen": object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02 self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_shadow_hand_bodies + 2 max_agg_shapes = self.num_shadow_hand_shapes + 2 self.allegro_hands = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(allegro_hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = 
list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader allegro_hand_actor = self.gym.create_actor(env_ptr, allegro_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z, shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, allegro_hand_actor, shadow_hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, allegro_hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(allegro_hand_actor) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, 
            self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, (self.object_type == "pen")
        )

        self.extras['consecutive_successes'] = self.consecutive_successes.mean()

        if self.print_success_stat:
            self.total_resets = self.total_resets + self.reset_buf.sum()
            direct_average_successes = self.total_successes + self.successes.sum()
            self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
            # The direct average shows the overall result more quickly, but slightly undershoots long term
            # policy performance.
            print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs)))
            if self.total_resets > 0:
                print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets))

    def compute_observations(self):
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)

        if self.obs_type == "full_state" or self.asymmetric_obs:
            self.gym.refresh_force_sensor_tensor(self.sim)
            self.gym.refresh_dof_force_tensor(self.sim)

        self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
        self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
        self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
        self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
        self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]

        self.goal_pose = self.goal_states[:, 0:7]
        self.goal_pos = self.goal_states[:, 0:3]
        self.goal_rot = self.goal_states[:, 3:7]

        if self.obs_type == "full_no_vel":
            self.compute_full_observations(True)
        elif self.obs_type == "full":
            self.compute_full_observations()
        elif self.obs_type == "full_state":
            self.compute_full_state()
        else:
            print("Unknown observations type!")

        if self.asymmetric_obs:
            self.compute_full_state(True)

    def compute_full_observations(self, no_vel=False):
        if no_vel:
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)

            self.obs_buf[:, 16:23] = self.object_pose
            self.obs_buf[:, 23:30] = self.goal_pose
            self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))

            self.obs_buf[:, 34:50] = self.actions
        else:
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel  # 16:32 (2 * 16 = 32)

            self.obs_buf[:, 32:39] = self.object_pose
            self.obs_buf[:, 39:42] = self.object_linvel
            self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel

            self.obs_buf[:, 45:52] = self.goal_pose
            self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))

            self.obs_buf[:, 56:72] = self.actions

    def compute_full_state(self, asymm_obs=False):
        if asymm_obs:
            self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
            self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor

            obj_obs_start = 3*self.num_shadow_hand_dofs  # 48
            self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
            self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
            self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel

            goal_obs_start = obj_obs_start + 13  # 61
            self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
            self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))

            fingertip_obs_start = goal_obs_start + 11  # 72
            # no fingertip states or force-torque readings in this variant, so obs_end = 72
            # obs_total = obs_end + num_actions = 72 + 16 = 88
            obs_end = fingertip_obs_start
            self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions
        else:
            self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits)
            self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel
            self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor

            obj_obs_start = 3*self.num_shadow_hand_dofs  # 48
            self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose
            self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel
            self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel

            goal_obs_start = obj_obs_start + 13  # 61
            self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose
            self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))

            fingertip_obs_start = goal_obs_start + 11  # 72
            # no fingertip states or force-torque readings in this variant, so obs_end = 72
            # obs_total = obs_end + num_actions = 72 + 16 = 88
            obs_end = fingertip_obs_start
            self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions

    def reset_target_pose(self, env_ids, apply_reset=False):
        rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)

        new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])

        self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]
        self.goal_states[env_ids, 3:7] = new_rot
        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor
        self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]
        self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])

        if apply_reset:
            goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)
            self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))
        self.reset_goal_buf[env_ids] = 0

    def reset_idx(self, env_ids, goal_env_ids):
        # generate random values
        rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device)

        # randomize start object poses
        self.reset_target_pose(env_ids)

        # reset rigid body forces
        self.rb_forces[env_ids, :, :] = 0.0

        # reset object
        self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()
        self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \
            self.reset_position_noise * rand_floats[:, 0:2]
        self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \
            self.reset_position_noise * rand_floats[:, self.up_axis_idx]
        new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids])
        if self.object_type == "pen":
            rand_angle_y = torch.tensor(0.3)
            new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y, self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids])

        self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot
        self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13])

        object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32))
        self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices))

        # reset random force probabilities
        self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))

        # reset shadow hand
        delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos
        delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos
        rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1)

        pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta
        self.shadow_hand_dof_pos[env_ids, :] = pos
        self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \
            self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2]
        self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos
        self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos

        hand_indices = self.hand_indices[env_ids].to(torch.int32)
        self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids))
        self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids))

        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0
        self.successes[env_ids] = 0

    def pre_physics_step(self, actions):
        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)

        # if only goals need reset, then call set API
        if len(goal_env_ids) > 0 and len(env_ids) == 0:
            self.reset_target_pose(goal_env_ids, apply_reset=True)
        # if goals need reset in addition to other envs, call set API in reset()
        elif len(goal_env_ids) > 0:
            self.reset_target_pose(goal_env_ids)

        if len(env_ids) > 0:
            self.reset_idx(env_ids, goal_env_ids)

        self.actions = actions.clone().to(self.device)
        if self.use_relative_control:
            targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions
            self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
        else:
            self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])
            self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
            self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices])

        self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))

        if self.force_scale > 0.0:
            self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)

            # apply new forces
            force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
            self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
                self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale

            self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)

    def post_physics_step(self):
        self.progress_buf += 1
        self.randomize_buf += 1

        self.compute_observations()
        self.compute_reward(self.actions)

        if self.viewer and self.debug_viz:
            # draw axes on target object
            self.gym.clear_lines(self.viewer)
            self.gym.refresh_rigid_body_state_tensor(self.sim)

            for i in range(self.num_envs):
                targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

                p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])

                objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

                p0 = self.object_pos[i].cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])


#####################################################################
###=========================jit functions=========================###
#####################################################################


@torch.jit.script
def compute_hand_reward(
    rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes,
    max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
    dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
    actions, action_penalty_scale: float,
    success_tolerance: float, reach_goal_bonus: float, fall_dist:
    float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool
):
    # Distance from the object to the goal
    goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)

    if ignore_z_rot:
        success_tolerance = 2.0 * success_tolerance

    # Orientation alignment for the cube in hand and goal cube
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))

    dist_rew = goal_dist * dist_reward_scale
    rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale

    action_penalty = torch.sum(actions ** 2, dim=-1)

    # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
    reward = dist_rew + rot_rew + action_penalty * action_penalty_scale

    # Find out which envs hit the goal and update successes count
    goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
    successes = successes + goal_resets

    # Success bonus: orientation is within `success_tolerance` of goal orientation
    reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)

    # Fall penalty: distance to the goal is larger than a threshold
    reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)

    # Check env termination conditions, including maximum success number
    resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
    if max_consecutive_successes > 0:
        # Reset progress buffer on goal envs
        progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
        resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
    timed_out = progress_buf >= max_episode_length - 1
    resets = torch.where(timed_out, torch.ones_like(resets), resets)

    # Apply penalty for not reaching the goal
    if max_consecutive_successes > 0:
        reward = torch.where(timed_out, reward + 0.5 * fall_penalty, reward)

    num_resets = torch.sum(resets)
    finished_cons_successes = torch.sum(successes * resets.float())

    cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)

    return reward, resets, goal_resets, progress_buf, successes, cons_successes


@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
    return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor))


@torch.jit.script
def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor):
    rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor), quat_from_angle_axis(rand0 * np.pi, z_unit_tensor))
    return rot
40,972
Python
54.897681
223
0.622157
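A note on the orientation term in compute_hand_reward above: rot_dist = 2 * asin(clamp(|quat_diff_xyz|, max=1)) is the geodesic angle of the relative rotation between the object and the goal. The following self-contained NumPy sketch checks that identity for two rotations about a shared axis; it is illustrative only, assuming Isaac Gym's (x, y, z, w) quaternion layout, and the helper names mirror, but do not import, the isaacgymenvs torch_jit_utils:

import numpy as np

def quat_from_angle_axis(angle, axis):
    # (x, y, z, w) layout, as used throughout Isaac Gym
    axis = axis / np.linalg.norm(axis)
    return np.concatenate([axis * np.sin(angle / 2.0), [np.cos(angle / 2.0)]])

def quat_conjugate(q):
    return np.array([-q[0], -q[1], -q[2], q[3]])

def quat_mul(a, b):
    # Hamilton product in (x, y, z, w) component order
    x1, y1, z1, w1 = a
    x2, y2, z2, w2 = b
    return np.array([w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                     w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
                     w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
                     w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2])

object_rot = quat_from_angle_axis(0.9, np.array([0.0, 0.0, 1.0]))
target_rot = quat_from_angle_axis(0.5, np.array([0.0, 0.0, 1.0]))
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
rot_dist = 2.0 * np.arcsin(min(np.linalg.norm(quat_diff[0:3]), 1.0))
print(rot_dist)  # ~0.4 rad, exactly the relative rotation between the two poses

The clamp matters only as numerical protection: the vector-part norm of a unit quaternion can exceed 1.0 by floating-point error, which would make asin return NaN.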
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ball_balance.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float, tensor_clamp, torch_random_dir_2 from .base.vec_task import VecTask def _indent_xml(elem, level=0): i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: _indent_xml(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i class BallBalance(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.action_speed_scale = self.cfg["env"]["actionSpeedScale"] self.debug_viz = self.cfg["env"]["enableDebugVis"] sensors_per_env = 3 actors_per_env = 2 dofs_per_env = 6 bodies_per_env = 7 + 1 # Observations: # 0:3 - activated DOF positions # 3:6 - activated DOF velocities # 6:9 - ball position # 9:12 - ball linear velocity # 12:15 - sensor force (same for each sensor) # 15:18 - sensor torque 1 # 18:21 - sensor torque 2 # 21:24 - sensor torque 3 self.cfg["env"]["numObservations"] = 24 # Actions: target velocities for the 3 actuated DOFs self.cfg["env"]["numActions"] = 3 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, actors_per_env, 13) vec_dof_tensor = 
gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) vec_sensor_tensor = gymtorch.wrap_tensor(self.sensor_tensor).view(self.num_envs, sensors_per_env, 6) self.root_states = vec_root_tensor self.tray_positions = vec_root_tensor[..., 0, 0:3] self.ball_positions = vec_root_tensor[..., 1, 0:3] self.ball_orientations = vec_root_tensor[..., 1, 3:7] self.ball_linvels = vec_root_tensor[..., 1, 7:10] self.ball_angvels = vec_root_tensor[..., 1, 10:13] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.sensor_forces = vec_sensor_tensor[..., 0:3] self.sensor_torques = vec_sensor_tensor[..., 3:6] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_dof_states = self.dof_states.clone() self.initial_root_states = vec_root_tensor.clone() self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(actors_per_env * self.num_envs, dtype=torch.int32, device=self.device).view(self.num_envs, actors_per_env) self.all_bbot_indices = actors_per_env * torch.arange(self.num_envs, dtype=torch.int32, device=self.device) # vis self.axes_geom = gymutil.AxesGeometry(0.2) def create_sim(self): self.dt = self.sim_params.dt self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_balance_bot_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_balance_bot_asset(self): # there is an asset balance_bot.xml, here we override some features. 
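        # Geometry note (derived from the constants below): the two leg capsules
        # hang at 45 degrees, so each segment contributes leg_length / sqrt(2) of
        # height, which gives the leg_length * sqrt(2) term in tray_height; the
        # 2 * leg_radius term covers the capsule radius at the tray attachment
        # and at the ground contact, plus half the tray plate thickness on top.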
tray_radius = 0.5 tray_thickness = 0.02 leg_radius = 0.02 leg_outer_offset = tray_radius - 0.1 leg_length = leg_outer_offset - 2 * leg_radius leg_inner_offset = leg_outer_offset - leg_length / math.sqrt(2) tray_height = leg_length * math.sqrt(2) + 2 * leg_radius + 0.5 * tray_thickness root = ET.Element('mujoco') root.attrib["model"] = "BalanceBot" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" worldbody = ET.SubElement(root, "worldbody") tray = ET.SubElement(worldbody, "body") tray.attrib["name"] = "tray" tray.attrib["pos"] = "%g %g %g" % (0, 0, tray_height) tray_joint = ET.SubElement(tray, "joint") tray_joint.attrib["name"] = "root_joint" tray_joint.attrib["type"] = "free" tray_geom = ET.SubElement(tray, "geom") tray_geom.attrib["type"] = "cylinder" tray_geom.attrib["size"] = "%g %g" % (tray_radius, 0.5 * tray_thickness) tray_geom.attrib["pos"] = "0 0 0" tray_geom.attrib["density"] = "100" leg_angles = [0.0, 2.0 / 3.0 * math.pi, 4.0 / 3.0 * math.pi] for i in range(len(leg_angles)): angle = leg_angles[i] upper_leg_from = gymapi.Vec3() upper_leg_from.x = leg_outer_offset * math.cos(angle) upper_leg_from.y = leg_outer_offset * math.sin(angle) upper_leg_from.z = -leg_radius - 0.5 * tray_thickness upper_leg_to = gymapi.Vec3() upper_leg_to.x = leg_inner_offset * math.cos(angle) upper_leg_to.y = leg_inner_offset * math.sin(angle) upper_leg_to.z = upper_leg_from.z - leg_length / math.sqrt(2) upper_leg_pos = (upper_leg_from + upper_leg_to) * 0.5 upper_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.75 * math.pi, angle) upper_leg = ET.SubElement(tray, "body") upper_leg.attrib["name"] = "upper_leg" + str(i) upper_leg.attrib["pos"] = "%g %g %g" % (upper_leg_pos.x, upper_leg_pos.y, upper_leg_pos.z) upper_leg.attrib["quat"] = "%g %g %g %g" % (upper_leg_quat.w, upper_leg_quat.x, upper_leg_quat.y, upper_leg_quat.z) upper_leg_geom = ET.SubElement(upper_leg, "geom") upper_leg_geom.attrib["type"] = "capsule" upper_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length) upper_leg_geom.attrib["density"] = "1000" upper_leg_joint = ET.SubElement(upper_leg, "joint") upper_leg_joint.attrib["name"] = "upper_leg_joint" + str(i) upper_leg_joint.attrib["type"] = "hinge" upper_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length) upper_leg_joint.attrib["axis"] = "0 1 0" upper_leg_joint.attrib["limited"] = "true" upper_leg_joint.attrib["range"] = "-45 45" lower_leg_pos = gymapi.Vec3(-0.5 * leg_length, 0, 0.5 * leg_length) lower_leg_quat = gymapi.Quat.from_euler_zyx(0, -0.5 * math.pi, 0) lower_leg = ET.SubElement(upper_leg, "body") lower_leg.attrib["name"] = "lower_leg" + str(i) lower_leg.attrib["pos"] = "%g %g %g" % (lower_leg_pos.x, lower_leg_pos.y, lower_leg_pos.z) lower_leg.attrib["quat"] = "%g %g %g %g" % (lower_leg_quat.w, lower_leg_quat.x, lower_leg_quat.y, lower_leg_quat.z) lower_leg_geom = ET.SubElement(lower_leg, "geom") lower_leg_geom.attrib["type"] = "capsule" lower_leg_geom.attrib["size"] = "%g %g" % (leg_radius, 0.5 * leg_length) lower_leg_geom.attrib["density"] = "1000" lower_leg_joint = ET.SubElement(lower_leg, "joint") lower_leg_joint.attrib["name"] = "lower_leg_joint" + str(i) lower_leg_joint.attrib["type"] = "hinge" lower_leg_joint.attrib["pos"] = "%g %g %g" % (0, 0, -0.5 * leg_length) lower_leg_joint.attrib["axis"] = "0 1 0" lower_leg_joint.attrib["limited"] = "true" lower_leg_joint.attrib["range"] = "-70 90" _indent_xml(root) 
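        # the pretty-printed MJCF is written to the working directory and loaded back as an asset in _create_envs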
ET.ElementTree(root).write("balance_bot.xml") # save some useful robot parameters self.tray_height = tray_height self.leg_radius = leg_radius self.leg_length = leg_length self.leg_outer_offset = leg_outer_offset self.leg_angles = leg_angles def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "." asset_file = "balance_bot.xml" asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) bbot_options = gymapi.AssetOptions() bbot_options.fix_base_link = False bbot_options.slices_per_cylinder = 40 bbot_asset = self.gym.load_asset(self.sim, asset_root, asset_file, bbot_options) # printed view of asset built # self.gym.debug_print_asset(bbot_asset) self.num_bbot_dofs = self.gym.get_asset_dof_count(bbot_asset) bbot_dof_props = self.gym.get_asset_dof_properties(bbot_asset) self.bbot_dof_lower_limits = [] self.bbot_dof_upper_limits = [] for i in range(self.num_bbot_dofs): self.bbot_dof_lower_limits.append(bbot_dof_props['lower'][i]) self.bbot_dof_upper_limits.append(bbot_dof_props['upper'][i]) self.bbot_dof_lower_limits = to_torch(self.bbot_dof_lower_limits, device=self.device) self.bbot_dof_upper_limits = to_torch(self.bbot_dof_upper_limits, device=self.device) bbot_pose = gymapi.Transform() bbot_pose.p.z = self.tray_height # create force sensors attached to the tray body bbot_tray_idx = self.gym.find_asset_rigid_body_index(bbot_asset, "tray") for angle in self.leg_angles: sensor_pose = gymapi.Transform() sensor_pose.p.x = self.leg_outer_offset * math.cos(angle) sensor_pose.p.y = self.leg_outer_offset * math.sin(angle) self.gym.create_asset_force_sensor(bbot_asset, bbot_tray_idx, sensor_pose) # create ball asset self.ball_radius = 0.1 ball_options = gymapi.AssetOptions() ball_options.density = 200 ball_asset = self.gym.create_sphere(self.sim, self.ball_radius, ball_options) self.envs = [] self.bbot_handles = [] self.obj_handles = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) bbot_handle = self.gym.create_actor(env_ptr, bbot_asset, bbot_pose, "bbot", i, 0, 0) actuated_dofs = np.array([1, 3, 5]) free_dofs = np.array([0, 2, 4]) dof_props = self.gym.get_actor_dof_properties(env_ptr, bbot_handle) dof_props['driveMode'][actuated_dofs] = gymapi.DOF_MODE_POS dof_props['stiffness'][actuated_dofs] = 4000.0 dof_props['damping'][actuated_dofs] = 100.0 dof_props['driveMode'][free_dofs] = gymapi.DOF_MODE_NONE dof_props['stiffness'][free_dofs] = 0 dof_props['damping'][free_dofs] = 0 self.gym.set_actor_dof_properties(env_ptr, bbot_handle, dof_props) lower_leg_handles = [] lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg0")) lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg1")) lower_leg_handles.append(self.gym.find_actor_rigid_body_handle(env_ptr, bbot_handle, "lower_leg2")) # create attractors to hold the feet in place attractor_props = gymapi.AttractorProperties() attractor_props.stiffness = 5e7 attractor_props.damping = 5e3 attractor_props.axes = gymapi.AXIS_TRANSLATION for j in range(3): angle = self.leg_angles[j] attractor_props.rigid_handle = lower_leg_handles[j] # attractor world pose to keep the feet 
in place attractor_props.target.p.x = self.leg_outer_offset * math.cos(angle) attractor_props.target.p.z = self.leg_radius attractor_props.target.p.y = self.leg_outer_offset * math.sin(angle) # attractor local pose in lower leg body attractor_props.offset.p.z = 0.5 * self.leg_length self.gym.create_rigid_body_attractor(env_ptr, attractor_props) ball_pose = gymapi.Transform() ball_pose.p.x = 0.2 ball_pose.p.z = 2.0 ball_handle = self.gym.create_actor(env_ptr, ball_asset, ball_pose, "ball", i, 0, 0) self.obj_handles.append(ball_handle) # pretty colors self.gym.set_rigid_body_color(env_ptr, ball_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.99, 0.66, 0.25)) self.gym.set_rigid_body_color(env_ptr, bbot_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.48, 0.65, 0.8)) for j in range(1, 7): self.gym.set_rigid_body_color(env_ptr, bbot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.15, 0.2, 0.3)) self.envs.append(env_ptr) self.bbot_handles.append(bbot_handle) def compute_observations(self): #print("~!~!~!~! Computing obs") actuated_dof_indices = torch.tensor([1, 3, 5], device=self.device) #print(self.dof_states[:, actuated_dof_indices, :]) self.obs_buf[..., 0:3] = self.dof_positions[..., actuated_dof_indices] self.obs_buf[..., 3:6] = self.dof_velocities[..., actuated_dof_indices] self.obs_buf[..., 6:9] = self.ball_positions self.obs_buf[..., 9:12] = self.ball_linvels self.obs_buf[..., 12:15] = self.sensor_forces[..., 0] / 20 # !!! lousy normalization self.obs_buf[..., 15:18] = self.sensor_torques[..., 0] / 20 # !!! lousy normalization self.obs_buf[..., 18:21] = self.sensor_torques[..., 1] / 20 # !!! lousy normalization self.obs_buf[..., 21:24] = self.sensor_torques[..., 2] / 20 # !!! lousy normalization return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_bbot_reward( self.tray_positions, self.ball_positions, self.ball_linvels, self.ball_radius, self.reset_buf, self.progress_buf, self.max_episode_length ) def reset_idx(self, env_ids): num_resets = len(env_ids) # reset bbot and ball root states self.root_states[env_ids] = self.initial_root_states[env_ids] min_d = 0.001 # min horizontal dist from origin max_d = 0.5 # max horizontal dist from origin min_height = 1.0 max_height = 2.0 min_horizontal_speed = 0 max_horizontal_speed = 5 dists = torch_rand_float(min_d, max_d, (num_resets, 1), self.device) dirs = torch_random_dir_2((num_resets, 1), self.device) hpos = dists * dirs speedscales = (dists - min_d) / (max_d - min_d) hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self.device) hvels = -speedscales * hspeeds * dirs vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self.device).squeeze() self.ball_positions[env_ids, 0] = hpos[..., 0] self.ball_positions[env_ids, 2] = torch_rand_float(min_height, max_height, (num_resets, 1), self.device).squeeze() self.ball_positions[env_ids, 1] = hpos[..., 1] self.ball_orientations[env_ids, 0:3] = 0 self.ball_orientations[env_ids, 3] = 1 self.ball_linvels[env_ids, 0] = hvels[..., 0] self.ball_linvels[env_ids, 2] = vspeeds self.ball_linvels[env_ids, 1] = hvels[..., 1] self.ball_angvels[env_ids] = 0 # reset root state for bbots and balls in selected envs actor_indices = self.all_actor_indices[env_ids].flatten() self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), len(actor_indices)) # reset DOF states for bbots in selected envs bbot_indices = self.all_bbot_indices[env_ids].flatten() self.dof_states[env_ids] = 
self.initial_dof_states[env_ids] self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(bbot_indices), len(bbot_indices)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, _actions): # resets reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = _actions.to(self.device) actuated_indices = torch.LongTensor([1, 3, 5]) # update position targets from actions self.dof_position_targets[..., actuated_indices] += self.dt * self.action_speed_scale * actions self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits) # reset position targets for reset envs self.dof_position_targets[reset_env_ids] = 0 self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets)) def post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.compute_observations() self.compute_reward() # vis if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) for i in range(self.num_envs): env = self.envs[i] bbot_handle = self.bbot_handles[i] body_handles = [] body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg0")) body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg1")) body_handles.append(self.gym.find_actor_rigid_body_handle(env, bbot_handle, "upper_leg2")) for lhandle in body_handles: lpose = self.gym.get_rigid_transform(env, lhandle) gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, env, lpose) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_bbot_reward(tray_positions, ball_positions, ball_velocities, ball_radius, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # calculating the norm for ball distance to desired height above the ground plane (i.e. 0.7) ball_dist = torch.sqrt(ball_positions[..., 0] * ball_positions[..., 0] + (ball_positions[..., 2] - 0.7) * (ball_positions[..., 2] - 0.7) + (ball_positions[..., 1]) * ball_positions[..., 1]) ball_speed = torch.sqrt(ball_velocities[..., 0] * ball_velocities[..., 0] + ball_velocities[..., 1] * ball_velocities[..., 1] + ball_velocities[..., 2] * ball_velocities[..., 2]) pos_reward = 1.0 / (1.0 + ball_dist) speed_reward = 1.0 / (1.0 + ball_speed) reward = pos_reward * speed_reward reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) reset = torch.where(ball_positions[..., 2] < ball_radius * 1.5, torch.ones_like(reset_buf), reset) return reward, reset
22,414
Python
45.991614
217
0.605559
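A quick sanity-check sketch of the shaping used in compute_bbot_reward above: each factor lies in (0, 1], so their product does too, and it peaks only when the ball rests at the 0.7 m set point over the tray center. bbot_reward_demo is an illustrative name, not part of the file:

import torch

def bbot_reward_demo(ball_pos, ball_vel):
    # same shaping as compute_bbot_reward: distance to the point 0.7 m above
    # the origin and the ball speed each gate the reward multiplicatively
    ball_dist = torch.norm(ball_pos - torch.tensor([0.0, 0.0, 0.7]), dim=-1)
    ball_speed = torch.norm(ball_vel, dim=-1)
    return (1.0 / (1.0 + ball_dist)) * (1.0 / (1.0 + ball_speed))

print(bbot_reward_demo(torch.tensor([0.0, 0.0, 0.7]), torch.zeros(3)))               # tensor(1.)
print(bbot_reward_demo(torch.tensor([0.3, 0.0, 1.0]), torch.tensor([1.0, 0.0, 0.0])))  # ~0.35

The multiplicative form is a deliberate design choice: a fast ball at the target, or a slow ball far from it, both score poorly, so the policy cannot trade one objective against the other.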
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal_terrain.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os, time from isaacgym import gymtorch from isaacgym import gymapi from .base.vec_task import VecTask import torch from typing import Tuple, Dict from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, normalize, quat_apply, quat_rotate_inverse from isaacgymenvs.tasks.base.vec_task import VecTask class AnymalTerrain(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.height_samples = None self.custom_origins = False self.debug_viz = self.cfg["env"]["enableDebugVis"] self.init_done = False # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"] self.height_meas_scale = self.cfg["env"]["learn"]["heightMeasurementScale"] self.action_scale = self.cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["termination"] = self.cfg["env"]["learn"]["terminalReward"] self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["lin_vel_z"] = self.cfg["env"]["learn"]["linearVelocityZRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["ang_vel_xy"] = self.cfg["env"]["learn"]["angularVelocityXYRewardScale"] self.rew_scales["orient"] = self.cfg["env"]["learn"]["orientationRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] self.rew_scales["joint_acc"] = self.cfg["env"]["learn"]["jointAccRewardScale"] self.rew_scales["base_height"] = self.cfg["env"]["learn"]["baseHeightRewardScale"] self.rew_scales["air_time"] = self.cfg["env"]["learn"]["feetAirTimeRewardScale"] self.rew_scales["collision"] = 
self.cfg["env"]["learn"]["kneeCollisionRewardScale"] self.rew_scales["stumble"] = self.cfg["env"]["learn"]["feetStumbleRewardScale"] self.rew_scales["action_rate"] = self.cfg["env"]["learn"]["actionRateRewardScale"] self.rew_scales["hip"] = self.cfg["env"]["learn"]["hipRewardScale"] #command ranges self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = self.cfg["env"]["baseInitState"]["vAngular"] self.base_init_state = pos + rot + v_lin + v_ang # default joint positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] # other self.decimation = self.cfg["env"]["control"]["decimation"] self.dt = self.decimation * self.cfg["sim"]["dt"] self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s/ self.dt + 0.5) self.push_interval = int(self.cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5) self.allow_knee_contacts = self.cfg["env"]["learn"]["allowKneeContacts"] self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] self.curriculum = self.cfg["env"]["terrain"]["curriculum"] for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.graphics_device_id != -1: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis # initialize some data used later on self.common_step_counter = 0 self.extras = {} self.noise_scale_vec = self._get_noise_scale_vec(self.cfg) self.commands = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) # x vel, y vel, yaw vel, heading self.commands_scale = torch.tensor([self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale], device=self.device, requires_grad=False,) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.forward_vec = to_torch([1., 0., 0.], device=self.device).repeat((self.num_envs, 1)) self.torques = torch.zeros(self.num_envs, self.num_actions, 
dtype=torch.float, device=self.device, requires_grad=False) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.last_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False) self.last_dof_vel = torch.zeros_like(self.dof_vel) self.height_points = self.init_height_points() self.measured_heights = None # joint positions offsets self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.num_actions): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle # reward episode sums torch_zeros = lambda : torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"lin_vel_xy": torch_zeros(), "lin_vel_z": torch_zeros(), "ang_vel_z": torch_zeros(), "ang_vel_xy": torch_zeros(), "orient": torch_zeros(), "torques": torch_zeros(), "joint_acc": torch_zeros(), "base_height": torch_zeros(), "air_time": torch_zeros(), "collision": torch_zeros(), "stumble": torch_zeros(), "action_rate": torch_zeros(), "hip": torch_zeros()} self.reset_idx(torch.arange(self.num_envs, device=self.device)) self.init_done = True def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) terrain_type = self.cfg["env"]["terrain"]["terrainType"] if terrain_type=='plane': self._create_ground_plane() elif terrain_type=='trimesh': self._create_trimesh() self.custom_origins = True self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _get_noise_scale_vec(self, cfg): noise_vec = torch.zeros_like(self.obs_buf[0]) self.add_noise = self.cfg["env"]["learn"]["addNoise"] noise_level = self.cfg["env"]["learn"]["noiseLevel"] noise_vec[:3] = self.cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale noise_vec[3:6] = self.cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale noise_vec[6:9] = self.cfg["env"]["learn"]["gravityNoise"] * noise_level noise_vec[9:12] = 0. # commands noise_vec[12:24] = self.cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale noise_vec[24:36] = self.cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale noise_vec[36:176] = self.cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale noise_vec[176:188] = 0. 
# previous actions return noise_vec def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"] plane_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"] plane_params.restitution = self.cfg["env"]["terrain"]["restitution"] self.gym.add_ground(self.sim, plane_params) def _create_trimesh(self): self.terrain = Terrain(self.cfg["env"]["terrain"], num_robots=self.num_envs) tm_params = gymapi.TriangleMeshParams() tm_params.nb_vertices = self.terrain.vertices.shape[0] tm_params.nb_triangles = self.terrain.triangles.shape[0] tm_params.transform.p.x = -self.terrain.border_size tm_params.transform.p.y = -self.terrain.border_size tm_params.transform.p.z = 0.0 tm_params.static_friction = self.cfg["env"]["terrain"]["staticFriction"] tm_params.dynamic_friction = self.cfg["env"]["terrain"]["dynamicFriction"] tm_params.restitution = self.cfg["env"]["terrain"]["restitution"] self.gym.add_triangle_mesh(self.sim, self.terrain.vertices.flatten(order='C'), self.terrain.triangles.flatten(order='C'), tm_params) self.height_samples = torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device) def _create_envs(self, num_envs, spacing, num_per_row): asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = self.cfg["env"]["urdfAsset"]["file"] asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.flip_visual_attachments = True asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"] asset_options.density = 0.001 asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(anymal_asset) self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset) # prepare friction randomization rigid_shape_prop = self.gym.get_asset_rigid_shape_properties(anymal_asset) friction_range = self.cfg["env"]["learn"]["frictionRange"] num_buckets = 100 friction_buckets = torch_rand_float(friction_range[0], friction_range[1], (num_buckets,1), device=self.device) self.base_init_state = to_torch(self.base_init_state, device=self.device, requires_grad=False) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*self.base_init_state[:3]) body_names = self.gym.get_asset_rigid_body_names(anymal_asset) self.dof_names = self.gym.get_asset_dof_names(anymal_asset) foot_name = self.cfg["env"]["urdfAsset"]["footName"] knee_name = self.cfg["env"]["urdfAsset"]["kneeName"] feet_names = [s for s in body_names if foot_name in s] self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False) knee_names = [s for s in body_names if knee_name in s] self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False) self.base_index = 0 dof_props = self.gym.get_asset_dof_properties(anymal_asset) # env origins self.env_origins = torch.zeros(self.num_envs, 3, device=self.device, requires_grad=False) if 
not self.curriculum: self.cfg["env"]["terrain"]["maxInitMapLevel"] = self.cfg["env"]["terrain"]["numLevels"] - 1 self.terrain_levels = torch.randint(0, self.cfg["env"]["terrain"]["maxInitMapLevel"]+1, (self.num_envs,), device=self.device) self.terrain_types = torch.randint(0, self.cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device) if self.custom_origins: self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float) spacing = 0. env_lower = gymapi.Vec3(-spacing, -spacing, 0.0) env_upper = gymapi.Vec3(spacing, spacing, spacing) self.anymal_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_handle = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row) if self.custom_origins: self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]] pos = self.env_origins[i].clone() pos[:2] += torch_rand_float(-1., 1., (2, 1), device=self.device).squeeze(1) start_pose.p = gymapi.Vec3(*pos) for s in range(len(rigid_shape_prop)): rigid_shape_prop[s].friction = friction_buckets[i % num_buckets] self.gym.set_asset_rigid_shape_properties(anymal_asset, rigid_shape_prop) anymal_handle = self.gym.create_actor(env_handle, anymal_asset, start_pose, "anymal", i, 0, 0) self.gym.set_actor_dof_properties(env_handle, anymal_handle, dof_props) self.envs.append(env_handle) self.anymal_handles.append(anymal_handle) for i in range(len(feet_names)): self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i]) for i in range(len(knee_names)): self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i]) self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base") def check_termination(self): self.reset_buf = torch.norm(self.contact_forces[:, self.base_index, :], dim=1) > 1. if not self.allow_knee_contacts: knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1. self.reset_buf |= torch.any(knee_contact, dim=1) self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) def compute_observations(self): self.measured_heights = self.get_heights() heights = torch.clip(self.root_states[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.) 
* self.height_meas_scale self.obs_buf = torch.cat(( self.base_lin_vel * self.lin_vel_scale, self.base_ang_vel * self.ang_vel_scale, self.projected_gravity, self.commands[:, :3] * self.commands_scale, self.dof_pos * self.dof_pos_scale, self.dof_vel * self.dof_vel_scale, heights, self.actions ), dim=-1) def compute_reward(self): # velocity tracking reward lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * self.rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * self.rew_scales["ang_vel_z"] # other base velocity penalties rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"] rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"] # orientation penalty rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"] # base height penalty rew_base_height = torch.square(self.root_states[:, 2] - 0.52) * self.rew_scales["base_height"] # TODO add target base height to cfg # torque penalty rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"] # joint acc penalty rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"] # collision penalty knee_contact = torch.norm(self.contact_forces[:, self.knee_indices, :], dim=2) > 1. rew_collision = torch.sum(knee_contact, dim=1) * self.rew_scales["collision"] # sum vs any ? # stumbling penalty stumble = (torch.norm(self.contact_forces[:, self.feet_indices, :2], dim=2) > 5.) * (torch.abs(self.contact_forces[:, self.feet_indices, 2]) < 1.) rew_stumble = torch.sum(stumble, dim=1) * self.rew_scales["stumble"] # action rate penalty rew_action_rate = torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"] # air time reward # contact = torch.norm(contact_forces[:, feet_indices, :], dim=2) > 1. contact = self.contact_forces[:, self.feet_indices, 2] > 1. first_contact = (self.feet_air_time > 0.) 
* contact self.feet_air_time += self.dt rew_airTime = torch.sum((self.feet_air_time - 0.5) * first_contact, dim=1) * self.rew_scales["air_time"] # reward only on first contact with the ground rew_airTime *= torch.norm(self.commands[:, :2], dim=1) > 0.1 #no reward for zero command self.feet_air_time *= ~contact # cosmetic penalty for hip motion rew_hip = torch.sum(torch.abs(self.dof_pos[:, [0, 3, 6, 9]] - self.default_dof_pos[:, [0, 3, 6, 9]]), dim=1)* self.rew_scales["hip"] # total reward self.rew_buf = rew_lin_vel_xy + rew_ang_vel_z + rew_lin_vel_z + rew_ang_vel_xy + rew_orient + rew_base_height +\ rew_torque + rew_joint_acc + rew_collision + rew_action_rate + rew_airTime + rew_hip + rew_stumble self.rew_buf = torch.clip(self.rew_buf, min=0., max=None) # add termination reward self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf # log episode reward sums self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy self.episode_sums["ang_vel_z"] += rew_ang_vel_z self.episode_sums["lin_vel_z"] += rew_lin_vel_z self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy self.episode_sums["orient"] += rew_orient self.episode_sums["torques"] += rew_torque self.episode_sums["joint_acc"] += rew_joint_acc self.episode_sums["collision"] += rew_collision self.episode_sums["stumble"] += rew_stumble self.episode_sums["action_rate"] += rew_action_rate self.episode_sums["air_time"] += rew_airTime self.episode_sums["base_height"] += rew_base_height self.episode_sums["hip"] += rew_hip def reset_idx(self, env_ids): positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) if self.custom_origins: self.update_terrain_level(env_ids) self.root_states[env_ids] = self.base_init_state self.root_states[env_ids, :3] += self.env_origins[env_ids] self.root_states[env_ids, :2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device) else: self.root_states[env_ids] = self.base_init_state self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.commands[env_ids, 0] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids, 1] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids, 3] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(1) # set small commands to zero self.last_actions[env_ids] = 0. self.last_dof_vel[env_ids] = 0. self.feet_air_time[env_ids] = 0. self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): self.extras["episode"]['rew_' + key] = torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s self.episode_sums[key][env_ids] = 0. 
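        # also log the average terrain level reached, which tracks curriculum progress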
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float()) def update_terrain_level(self, env_ids): if not self.init_done or not self.curriculum: # don't change on initial reset return distance = torch.norm(self.root_states[env_ids, :2] - self.env_origins[env_ids, :2], dim=1) self.terrain_levels[env_ids] -= 1 * (distance < torch.norm(self.commands[env_ids, :2])*self.max_episode_length_s*0.25) self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2) self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]] def push_robots(self): self.root_states[:, 7:9] = torch_rand_float(-1., 1., (self.num_envs, 2), device=self.device) # lin vel x/y self.gym.set_actor_root_state_tensor(self.sim, gymtorch.unwrap_tensor(self.root_states)) def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) for i in range(self.decimation): torques = torch.clip(self.Kp*(self.action_scale*self.actions + self.default_dof_pos - self.dof_pos) - self.Kd*self.dof_vel, -80., 80.) self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(torques)) self.torques = torques.view(self.torques.shape) self.gym.simulate(self.sim) if self.device == 'cpu': self.gym.fetch_results(self.sim, True) self.gym.refresh_dof_state_tensor(self.sim) def post_physics_step(self): # self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.progress_buf += 1 self.randomize_buf += 1 self.common_step_counter += 1 if self.common_step_counter % self.push_interval == 0: self.push_robots() # prepare quantities self.base_quat = self.root_states[:, 3:7] self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 7:10]) self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.root_states[:, 10:13]) self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec) forward = quat_apply(self.base_quat, self.forward_vec) heading = torch.atan2(forward[:, 1], forward[:, 0]) self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.) # compute observations, rewards, resets, ... 
self.check_termination() self.compute_reward() env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() if self.add_noise: self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec self.last_actions[:] = self.actions[:] self.last_dof_vel[:] = self.dof_vel[:] if self.viewer and self.enable_viewer_sync and self.debug_viz: # draw height lines self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) sphere_geom = gymutil.WireframeSphereGeometry(0.02, 4, 4, None, color=(1, 1, 0)) for i in range(self.num_envs): base_pos = (self.root_states[i, :3]).cpu().numpy() heights = self.measured_heights[i].cpu().numpy() height_points = quat_apply_yaw(self.base_quat[i].repeat(heights.shape[0]), self.height_points[i]).cpu().numpy() for j in range(heights.shape[0]): x = height_points[j, 0] + base_pos[0] y = height_points[j, 1] + base_pos[1] z = heights[j] sphere_pose = gymapi.Transform(gymapi.Vec3(x, y, z), r=None) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], sphere_pose) def init_height_points(self): # 1mx1.6m rectangle (without center line) y = 0.1 * torch.tensor([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False) # 10-50cm on each side x = 0.1 * torch.tensor([-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False) # 20-80cm on each side grid_x, grid_y = torch.meshgrid(x, y) self.num_height_points = grid_x.numel() points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False) points[:, :, 0] = grid_x.flatten() points[:, :, 1] = grid_y.flatten() return points def get_heights(self, env_ids=None): if self.cfg["env"]["terrain"]["terrainType"] == 'plane': return torch.zeros(self.num_envs, self.num_height_points, device=self.device, requires_grad=False) elif self.cfg["env"]["terrain"]["terrainType"] == 'none': raise NameError("Can't measure height with terrain type 'none'") if env_ids: points = quat_apply_yaw(self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]) + (self.root_states[env_ids, :3]).unsqueeze(1) else: points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (self.root_states[:, :3]).unsqueeze(1) points += self.terrain.border_size points = (points/self.terrain.horizontal_scale).long() px = points[:, :, 0].view(-1) py = points[:, :, 1].view(-1) px = torch.clip(px, 0, self.height_samples.shape[0]-2) py = torch.clip(py, 0, self.height_samples.shape[1]-2) heights1 = self.height_samples[px, py] heights2 = self.height_samples[px+1, py+1] heights = torch.min(heights1, heights2) return heights.view(self.num_envs, -1) * self.terrain.vertical_scale # terrain generator from isaacgym.terrain_utils import * class Terrain: def __init__(self, cfg, num_robots) -> None: self.type = cfg["terrainType"] if self.type in ["none", 'plane']: return self.horizontal_scale = 0.1 self.vertical_scale = 0.005 self.border_size = 20 self.num_per_env = 2 self.env_length = cfg["mapLength"] self.env_width = cfg["mapWidth"] self.proportions = [np.sum(cfg["terrainProportions"][:i+1]) for i in range(len(cfg["terrainProportions"]))] self.env_rows = cfg["numLevels"] self.env_cols = cfg["numTerrains"] self.num_maps = self.env_rows * self.env_cols self.num_per_env = int(num_robots / self.num_maps) self.env_origins = np.zeros((self.env_rows, self.env_cols, 3)) self.width_per_env_pixels = int(self.env_width / 
self.horizontal_scale) self.length_per_env_pixels = int(self.env_length / self.horizontal_scale) self.border = int(self.border_size/self.horizontal_scale) self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border self.height_field_raw = np.zeros((self.tot_rows, self.tot_cols), dtype=np.int16) if cfg["curriculum"]: self.curriculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows) else: self.randomized_terrain() self.heightsamples = self.height_field_raw self.vertices, self.triangles = convert_heightfield_to_trimesh(self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"]) def randomized_terrain(self): for k in range(self.num_maps): # Env coordinates in the world (i, j) = np.unravel_index(k, (self.env_rows, self.env_cols)) # Heightfield coordinate system from now on start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels terrain = SubTerrain("terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale) choice = np.random.uniform(0, 1) if choice < 0.1: if np.random.choice([0, 1]): pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2) else: pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3])) elif choice < 0.6: # step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18]) step_height = np.random.choice([-0.15, 0.15]) pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.) elif choice < 1.: discrete_obstacles_terrain(terrain, 0.15, 1., 2., 40, platform_size=3.) self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length/2. - 1) / self.horizontal_scale) x2 = int((self.env_length/2. + 1) / self.horizontal_scale) y1 = int((self.env_width/2. - 1) / self.horizontal_scale) y2 = int((self.env_width/2. + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z] def curriculum(self, num_robots, num_terrains, num_levels): num_robots_per_map = int(num_robots / num_terrains) left_over = num_robots % num_terrains idx = 0 for j in range(num_terrains): for i in range(num_levels): terrain = SubTerrain("terrain", width=self.width_per_env_pixels, length=self.width_per_env_pixels, vertical_scale=self.vertical_scale, horizontal_scale=self.horizontal_scale) difficulty = i / num_levels choice = j / num_terrains slope = difficulty * 0.4 step_height = 0.05 + 0.175 * difficulty discrete_obstacles_height = 0.025 + difficulty * 0.15 stepping_stones_size = 2 - 1.8 * difficulty if choice < self.proportions[0]: if choice < 0.05: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.) elif choice < self.proportions[1]: if choice < 0.15: slope *= -1 pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
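                # rows index difficulty i, columns pick the terrain type j via the
                # cumulative self.proportions thresholds; in this second slope band
                # the uniform height noise below is layered on top of the pyramid
                # slope, so these columns train slope tracking and rough-ground
                # disturbance rejection together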
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2) elif choice < self.proportions[3]: if choice<self.proportions[2]: step_height *= -1 pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.) elif choice < self.proportions[4]: discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1., 2., 40, platform_size=3.) else: stepping_stones_terrain(terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0., platform_size=3.) # Heightfield coordinate system start_x = self.border + i * self.length_per_env_pixels end_x = self.border + (i + 1) * self.length_per_env_pixels start_y = self.border + j * self.width_per_env_pixels end_y = self.border + (j + 1) * self.width_per_env_pixels self.height_field_raw[start_x: end_x, start_y:end_y] = terrain.height_field_raw robots_in_map = num_robots_per_map if j < left_over: robots_in_map +=1 env_origin_x = (i + 0.5) * self.env_length env_origin_y = (j + 0.5) * self.env_width x1 = int((self.env_length/2. - 1) / self.horizontal_scale) x2 = int((self.env_length/2. + 1) / self.horizontal_scale) y1 = int((self.env_width/2. - 1) / self.horizontal_scale) y2 = int((self.env_width/2. + 1) / self.horizontal_scale) env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2])*self.vertical_scale self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z] @torch.jit.script def quat_apply_yaw(quat, vec): quat_yaw = quat.clone().view(-1, 4) quat_yaw[:, :2] = 0. quat_yaw = normalize(quat_yaw) return quat_apply(quat_yaw, vec) @torch.jit.script def wrap_to_pi(angles): angles %= 2*np.pi angles -= 2*np.pi * (angles > np.pi) return angles
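# A minimal sketch (illustrative only; the helper name _demo_wrap_to_pi is not
# part of the task above) of the behaviour the heading command relies on:
# wrap_to_pi maps any angle into (-pi, pi], so the heading error never jumps
# by 2*pi at the wrap-around seam.
def _demo_wrap_to_pi():
    # angles chosen to cross the wrap-around in both directions
    angles = torch.tensor([0.0, 3.0, -3.0, 3.5 * np.pi, -2.25 * np.pi])
    # clone the input: wrap_to_pi mutates its argument in place (%= and -=)
    wrapped = wrap_to_pi(angles.clone())
    # wrapped angles stay in (-pi, pi] ...
    assert torch.all(wrapped <= np.pi) and torch.all(wrapped > -np.pi)
    # ... and still represent the same direction as the inputs
    assert torch.allclose(torch.sin(wrapped), torch.sin(angles), atol=1e-5)
    assert torch.allclose(torch.cos(wrapped), torch.cos(angles), atol=1e-5)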
38,280
Python
54.640988
217
0.610789
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/trifinger.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import quat_mul from collections import OrderedDict project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) from isaacgymenvs.utils.torch_jit_utils import * from isaacgymenvs.tasks.base.vec_task import VecTask from types import SimpleNamespace from collections import deque from typing import Deque, Dict, Tuple, Union # python import enum # ################### # # Dimensions of robot # # ################### # class TrifingerDimensions(enum.Enum): """ Dimensions of the tri-finger robot. Note: While it may not seem necessary for the tri-finger robot since it is fixed base, for floating base systems having this dimensions class is useful. """ # general state # cartesian position + quaternion orientation PoseDim = 7 # linear velocity + angular velocity VelocityDim = 6 # state: pose + velocity StateDim = 13 # force + torque WrenchDim = 6 # for robot # number of fingers NumFingers = 3 # for three fingers JointPositionDim = 9 JointVelocityDim = 9 JointTorqueDim = 9 # generalized coordinates GeneralizedCoordinatesDim = JointPositionDim GeneralizedVelocityDim = JointVelocityDim # for objects ObjectPoseDim = 7 ObjectVelocityDim = 6 # ################# # # Different objects # # ################# # # radius of the area ARENA_RADIUS = 0.195 class CuboidalObject: """ Fields for a cuboidal object. @note Motivation for this class is that if domain randomization is performed over the size of the cuboid, then its attributes are automatically updated as well.
""" # 3D radius of the cuboid radius_3d: float # distance from wall to the center max_com_distance_to_center: float # minimum and mximum height for spawning the object min_height: float max_height = 0.1 NumKeypoints = 8 ObjectPositionDim = 3 KeypointsCoordsDim = NumKeypoints * ObjectPositionDim def __init__(self, size: Union[float, Tuple[float, float, float]]): """Initialize the cuboidal object. Args: size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that object is a cube. """ # decide the size depedning on input type if isinstance(size, float): self._size = (size, size, size) else: self._size = size # compute remaining attributes self.__compute() """ Properties """ @property def size(self) -> Tuple[float, float, float]: """ Returns the dimensions of the cuboid object (x, y, z) in meters. """ return self._size """ Configurations """ @size.setter def size(self, size: Union[float, Tuple[float, float, float]]): """ Set size of the object. Args: size: The size of the object along x, y, z in meters. If a single float is provided, then it is assumed that object is a cube. """ # decide the size depedning on input type if isinstance(size, float): self._size = (size, size, size) else: self._size = size # compute attributes self.__compute() """ Private members """ def __compute(self): """Compute the attributes for the object. """ # compute 3D radius of the cuboid max_len = max(self._size) self.radius_3d = max_len * np.sqrt(3) / 2 # compute distance from wall to the center self.max_com_distance_to_center = ARENA_RADIUS - self.radius_3d # minimum height for spawning the object self.min_height = self._size[2] / 2 class Trifinger(VecTask): # constants # directory where assets for the simulator are present _trifinger_assets_dir = os.path.join(project_dir, "../", "assets", "trifinger") # robot urdf (path relative to `_trifinger_assets_dir`) _robot_urdf_file = "robot_properties_fingers/urdf/pro/trifingerpro.urdf" # stage urdf (path relative to `_trifinger_assets_dir`) # _stage_urdf_file = "robot_properties_fingers/urdf/trifinger_stage.urdf" _table_urdf_file = "robot_properties_fingers/urdf/table_without_border.urdf" _boundary_urdf_file = "robot_properties_fingers/urdf/high_table_boundary.urdf" # object urdf (path relative to `_trifinger_assets_dir`) # TODO: Make object URDF configurable. _object_urdf_file = "objects/urdf/cube_multicolor_rrc.urdf" # physical dimensions of the object # TODO: Make object dimensions configurable. _object_dims = CuboidalObject(0.065) # dimensions of the system _dims = TrifingerDimensions # Constants for limits # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/trifinger_platform.py#L68 # maximum joint torque (in N-m) applicable on each actuator _max_torque_Nm = 0.36 # maximum joint velocity (in rad/s) on each actuator _max_velocity_radps = 10 # History of state: Number of timesteps to save history for # Note: Currently used only to manage history of object and frame states. # This can be extended to other observations (as done in ANYmal). _state_history_len = 2 # buffers to store the simulation data # goal poses for the object [num. of instances, 7] where 7: (x, y, z, quat) _object_goal_poses_buf: torch.Tensor # DOF state of the system [num. of instances, num. of dof, 2] where last index: pos, vel _dof_state: torch.Tensor # Rigid body state of the system [num. of instances, num. of bodies, 13] where 13: (x, y, z, quat, v, omega) _rigid_body_state: torch.Tensor # Root prim states [num. 
of actors, 13] where 13: (x, y, z, quat, v, omega) _actors_root_state: torch.Tensor # Force-torque sensor array [num. of instances, num. of bodies * wrench] _ft_sensors_values: torch.Tensor # DOF position of the system [num. of instances, num. of dof] _dof_position: torch.Tensor # DOF velocity of the system [num. of instances, num. of dof] _dof_velocity: torch.Tensor # DOF torque of the system [num. of instances, num. of dof] _dof_torque: torch.Tensor # Fingertip links state list([num. of instances, num. of fingers, 13]) where 13: (x, y, z, quat, v, omega) # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step. _fingertips_frames_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len) # Object prim state [num. of instances, 13] where 13: (x, y, z, quat, v, omega) # The length of list is the history of the state: 0: t, 1: t-1, 2: t-2, ... step. _object_state_history: Deque[torch.Tensor] = deque(maxlen=_state_history_len) # stores the last action output _last_action: torch.Tensor # keeps track of the number of goal resets _successes: torch.Tensor # keeps track of number of consecutive successes _consecutive_successes: float _robot_limits: dict = { "joint_position": SimpleNamespace( # matches those on the real robot low=np.array([-0.33, 0.0, -2.7] * _dims.NumFingers.value, dtype=np.float32), high=np.array([1.0, 1.57, 0.0] * _dims.NumFingers.value, dtype=np.float32), default=np.array([0.0, 0.9, -2.0] * _dims.NumFingers.value, dtype=np.float32), ), "joint_velocity": SimpleNamespace( low=np.full(_dims.JointVelocityDim.value, -_max_velocity_radps, dtype=np.float32), high=np.full(_dims.JointVelocityDim.value, _max_velocity_radps, dtype=np.float32), default=np.zeros(_dims.JointVelocityDim.value, dtype=np.float32), ), "joint_torque": SimpleNamespace( low=np.full(_dims.JointTorqueDim.value, -_max_torque_Nm, dtype=np.float32), high=np.full(_dims.JointTorqueDim.value, _max_torque_Nm, dtype=np.float32), default=np.zeros(_dims.JointTorqueDim.value, dtype=np.float32), ), "fingertip_position": SimpleNamespace( low=np.array([-0.4, -0.4, 0], dtype=np.float32), high=np.array([0.4, 0.4, 0.5], dtype=np.float32), ), "fingertip_orientation": SimpleNamespace( low=-np.ones(4, dtype=np.float32), high=np.ones(4, dtype=np.float32), ), "fingertip_velocity": SimpleNamespace( low=np.full(_dims.VelocityDim.value, -0.2, dtype=np.float32), high=np.full(_dims.VelocityDim.value, 0.2, dtype=np.float32), ), "fingertip_wrench": SimpleNamespace( low=np.full(_dims.WrenchDim.value, -1.0, dtype=np.float32), high=np.full(_dims.WrenchDim.value, 1.0, dtype=np.float32), ), # used if we want to have joint stiffness/damping as parameters` "joint_stiffness": SimpleNamespace( low=np.array([1.0, 1.0, 1.0] * _dims.NumFingers.value, dtype=np.float32), high=np.array([50.0, 50.0, 50.0] * _dims.NumFingers.value, dtype=np.float32), ), "joint_damping": SimpleNamespace( low=np.array([0.01, 0.03, 0.0001] * _dims.NumFingers.value, dtype=np.float32), high=np.array([1.0, 3.0, 0.01] * _dims.NumFingers.value, dtype=np.float32), ), } # limits of the object (mapped later: str -> torch.tensor) _object_limits: dict = { "position": SimpleNamespace( low=np.array([-0.3, -0.3, 0], dtype=np.float32), high=np.array([0.3, 0.3, 0.3], dtype=np.float32), default=np.array([0, 0, _object_dims.min_height], dtype=np.float32) ), # difference between two positions "position_delta": SimpleNamespace( low=np.array([-0.6, -0.6, 0], dtype=np.float32), high=np.array([0.6, 0.6, 0.3], dtype=np.float32), default=np.array([0, 0, 0], 
dtype=np.float32) ), "orientation": SimpleNamespace( low=-np.ones(4, dtype=np.float32), high=np.ones(4, dtype=np.float32), default=np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32), ), "velocity": SimpleNamespace( low=np.full(_dims.VelocityDim.value, -0.5, dtype=np.float32), high=np.full(_dims.VelocityDim.value, 0.5, dtype=np.float32), default=np.zeros(_dims.VelocityDim.value, dtype=np.float32) ), "scale": SimpleNamespace( low=np.full(1, 0.0, dtype=np.float32), high=np.full(1, 1.0, dtype=np.float32), ), } # PD gains for the robot (mapped later: str -> torch.tensor) # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L49-L65 _robot_dof_gains = { # The kp and kd gains of the PD control of the fingers. # Note: This depends on simulation step size and is set for a rate of 250 Hz. "stiffness": [10.0, 10.0, 10.0] * _dims.NumFingers.value, "damping": [0.1, 0.3, 0.001] * _dims.NumFingers.value, # The kd gains used for damping the joint motor velocities during the # safety torque check on the joint motors. "safety_damping": [0.08, 0.08, 0.04] * _dims.NumFingers.value } action_dim = _dims.JointTorqueDim.value def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.obs_spec = { "robot_q": self._dims.GeneralizedCoordinatesDim.value, "robot_u": self._dims.GeneralizedVelocityDim.value, "object_q": self._dims.ObjectPoseDim.value, "object_q_des": self._dims.ObjectPoseDim.value, "command": self.action_dim } if self.cfg["env"]["asymmetric_obs"]: self.state_spec = { # observations spec **self.obs_spec, # extra observations (added separately to make computations simpler) "object_u": self._dims.ObjectVelocityDim.value, "fingertip_state": self._dims.NumFingers.value * self._dims.StateDim.value, "robot_a": self._dims.GeneralizedVelocityDim.value, "fingertip_wrench": self._dims.NumFingers.value * self._dims.WrenchDim.value, } else: self.state_spec = self.obs_spec self.action_spec = { "command": self.action_dim } self.cfg["env"]["numObservations"] = sum(self.obs_spec.values()) self.cfg["env"]["numStates"] = sum(self.state_spec.values()) self.cfg["env"]["numActions"] = sum(self.action_spec.values()) self.max_episode_length = self.cfg["env"]["episodeLength"] self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] # define prims present in the scene prim_names = ["robot", "table", "boundary", "object", "goal_object"] # mapping from name to asset instance self.gym_assets = dict.fromkeys(prim_names) # mapping from name to gym indices self.gym_indices = dict.fromkeys(prim_names) # mapping from name to gym rigid body handles # name of finger tips links i.e. 
end-effector frames fingertips_frames = ["finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"] self._fingertips_handles = OrderedDict.fromkeys(fingertips_frames, None) # mapping from name to gym dof index robot_dof_names = list() for finger_pos in ['0', '120', '240']: robot_dof_names += [f'finger_base_to_upper_joint_{finger_pos}', f'finger_upper_to_middle_joint_{finger_pos}', f'finger_middle_to_lower_joint_{finger_pos}'] self._robot_dof_indices = OrderedDict.fromkeys(robot_dof_names, None) super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(0.7, 0.0, 0.7) cam_target = gymapi.Vec3(0.0, 0.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # change constant buffers from numpy/lists into torch tensors # limits for robot for limit_name in self._robot_limits: # extract limit simple-namespace limit_dict = self._robot_limits[limit_name].__dict__ # iterate over namespace attributes for prop, value in limit_dict.items(): limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device) # limits for the object for limit_name in self._object_limits: # extract limit simple-namespace limit_dict = self._object_limits[limit_name].__dict__ # iterate over namespace attributes for prop, value in limit_dict.items(): limit_dict[prop] = torch.tensor(value, dtype=torch.float, device=self.device) # PD gains for actuation for gain_name, value in self._robot_dof_gains.items(): self._robot_dof_gains[gain_name] = torch.tensor(value, dtype=torch.float, device=self.device) # store the sampled goal poses for the object: [num. of instances, 7] self._object_goal_poses_buf = torch.zeros((self.num_envs, 7), device=self.device, dtype=torch.float) # get force torque sensor if enabled if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: # # joint torques # dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) # self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, # self._dims.JointTorqueDim.value) # # force-torque sensor num_ft_dims = self._dims.NumFingers.value * self._dims.WrenchDim.value # sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) # self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self._ft_sensors_values = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, num_ft_dims) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self._dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self._dims.JointTorqueDim.value) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) # refresh the buffer (to copy memory?) 
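        # (the refresh_* calls below fill the simulator-owned buffers acquired
        # above; the gymtorch.wrap_tensor views created after them alias that
        # same memory, so every later refresh updates self._dof_state and the
        # other views in place without any copies)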
self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create wrapper tensors for reference (consider everything as pointer to actual memory) # DOF self._dof_state = gymtorch.wrap_tensor(dof_state_tensor).view(self.num_envs, -1, 2) self._dof_position = self._dof_state[..., 0] self._dof_velocity = self._dof_state[..., 1] # rigid body self._rigid_body_state = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) # root actors self._actors_root_state = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) # frames history action_dim = sum(self.action_spec.values()) self._last_action = torch.zeros(self.num_envs, action_dim, dtype=torch.float, device=self.device) fingertip_handles_indices = list(self._fingertips_handles.values()) object_indices = self.gym_indices["object"] # timestep 0 is current tensor curr_history_length = 0 while curr_history_length < self._state_history_len: # add tensors to history list print(self._rigid_body_state.shape) self._fingertips_frames_state_history.append(self._rigid_body_state[:, fingertip_handles_indices]) self._object_state_history.append(self._actors_root_state[object_indices]) # update current history length curr_history_length += 1 self._observations_scale = SimpleNamespace(low=None, high=None) self._states_scale = SimpleNamespace(low=None, high=None) self._action_scale = SimpleNamespace(low=None, high=None) self._successes = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self._successes_pos = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self._successes_quat = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) self.__configure_mdp_spaces() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_scene_assets() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the first sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.013 plane_params.static_friction = 1.0 plane_params.dynamic_friction = 1.0 self.gym.add_ground(self.sim, plane_params) def _create_scene_assets(self): """ Define Gym assets for stage, robot and object.
""" # define assets self.gym_assets["robot"] = self.__define_robot_asset() self.gym_assets["table"] = self.__define_table_asset() self.gym_assets["boundary"] = self.__define_boundary_asset() self.gym_assets["object"] = self.__define_object_asset() self.gym_assets["goal_object"] = self.__define_goal_object_asset() # display the properties (only for debugging) # robot print("Trifinger Robot Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["robot"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["robot"])}') print(f'\t Number of dofs: {self.gym.get_asset_dof_count(self.gym_assets["robot"])}') print(f'\t Number of actuated dofs: {self._dims.JointTorqueDim.value}') # stage print("Trifinger Table Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["table"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["table"])}') print("Trifinger Boundary Asset: ") print(f'\t Number of bodies: {self.gym.get_asset_rigid_body_count(self.gym_assets["boundary"])}') print(f'\t Number of shapes: {self.gym.get_asset_rigid_shape_count(self.gym_assets["boundary"])}') def _create_envs(self, num_envs, spacing, num_per_row): # define the dof properties for the robot robot_dof_props = self.gym.get_asset_dof_properties(self.gym_assets["robot"]) # set dof properites based on the control mode for k, dof_index in enumerate(self._robot_dof_indices.values()): # note: since safety checks are employed, the simulator PD controller is not # used. Instead the torque is computed manually and applied, even if the # command mode is 'position'. robot_dof_props['driveMode'][dof_index] = gymapi.DOF_MODE_EFFORT robot_dof_props['stiffness'][dof_index] = 0.0 robot_dof_props['damping'][dof_index] = 0.0 # set dof limits robot_dof_props['effort'][dof_index] = self._max_torque_Nm robot_dof_props['velocity'][dof_index] = self._max_velocity_radps robot_dof_props['lower'][dof_index] = float(self._robot_limits["joint_position"].low[k]) robot_dof_props['upper'][dof_index] = float(self._robot_limits["joint_position"].high[k]) self.envs = [] # define lower and upper region bound for each environment env_lower_bound = gymapi.Vec3(-self.cfg["env"]["envSpacing"], -self.cfg["env"]["envSpacing"], 0.0) env_upper_bound = gymapi.Vec3(self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"], self.cfg["env"]["envSpacing"]) num_envs_per_row = int(np.sqrt(self.num_envs)) # initialize gym indices buffer as a list # note: later the list is converted to torch tensor for ease in interfacing with IsaacGym. 
for asset_name in self.gym_indices.keys(): self.gym_indices[asset_name] = list() # count number of shapes and bodies max_agg_bodies = 0 max_agg_shapes = 0 for asset in self.gym_assets.values(): max_agg_bodies += self.gym.get_asset_rigid_body_count(asset) max_agg_shapes += self.gym.get_asset_rigid_shape_count(asset) # iterate and create environment instances for env_index in range(self.num_envs): # create environment env_ptr = self.gym.create_env(self.sim, env_lower_bound, env_upper_bound, num_envs_per_row) # begin aggregration mode if enabled - this can improve simulation performance if self.cfg["env"]["aggregate_mode"]: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add trifinger robot to environment trifinger_actor = self.gym.create_actor(env_ptr, self.gym_assets["robot"], gymapi.Transform(), "robot", env_index, 0, 0) trifinger_idx = self.gym.get_actor_index(env_ptr, trifinger_actor, gymapi.DOMAIN_SIM) # add table to environment table_handle = self.gym.create_actor(env_ptr, self.gym_assets["table"], gymapi.Transform(), "table", env_index, 1, 0) table_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM) # add stage to environment boundary_handle = self.gym.create_actor(env_ptr, self.gym_assets["boundary"], gymapi.Transform(), "boundary", env_index, 1, 0) boundary_idx = self.gym.get_actor_index(env_ptr, boundary_handle, gymapi.DOMAIN_SIM) # add object to environment object_handle = self.gym.create_actor(env_ptr, self.gym_assets["object"], gymapi.Transform(), "object", env_index, 0, 0) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) # add goal object to environment goal_handle = self.gym.create_actor(env_ptr, self.gym_assets["goal_object"], gymapi.Transform(), "goal_object", env_index + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) # change settings of DOF self.gym.set_actor_dof_properties(env_ptr, trifinger_actor, robot_dof_props) # add color to instances stage_color = gymapi.Vec3(0.73, 0.68, 0.72) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color) self.gym.set_rigid_body_color(env_ptr, boundary_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, stage_color) # end aggregation mode if enabled if self.cfg["env"]["aggregate_mode"]: self.gym.end_aggregate(env_ptr) # add instances to list self.envs.append(env_ptr) self.gym_indices["robot"].append(trifinger_idx) self.gym_indices["table"].append(table_idx) self.gym_indices["boundary"].append(boundary_idx) self.gym_indices["object"].append(object_idx) self.gym_indices["goal_object"].append(goal_object_idx) # convert gym indices from list to tensor for asset_name, asset_indices in self.gym_indices.items(): self.gym_indices[asset_name] = torch.tensor(asset_indices, dtype=torch.long, device=self.device) def __configure_mdp_spaces(self): """ Configures the observations, state and action spaces. """ # Action scale for the MDP # Note: This is order sensitive. if self.cfg["env"]["command_mode"] == "position": # action space is joint positions self._action_scale.low = self._robot_limits["joint_position"].low self._action_scale.high = self._robot_limits["joint_position"].high elif self.cfg["env"]["command_mode"] == "torque": # action space is joint torques self._action_scale.low = self._robot_limits["joint_torque"].low self._action_scale.high = self._robot_limits["joint_torque"].high else: msg = f"Invalid command mode. 
Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']." raise ValueError(msg) # Observations scale for the MDP # check if policy outputs normalized action [-1, 1] or not. if self.cfg["env"]["normalize_action"]: obs_action_scale = SimpleNamespace( low=torch.full((self.action_dim,), -1, dtype=torch.float, device=self.device), high=torch.full((self.action_dim,), 1, dtype=torch.float, device=self.device) ) else: obs_action_scale = self._action_scale object_obs_low = torch.cat([ self._object_limits["position"].low, self._object_limits["orientation"].low, ]*2) object_obs_high = torch.cat([ self._object_limits["position"].high, self._object_limits["orientation"].high, ]*2) # Note: This is order sensitive. self._observations_scale.low = torch.cat([ self._robot_limits["joint_position"].low, self._robot_limits["joint_velocity"].low, object_obs_low, obs_action_scale.low ]) self._observations_scale.high = torch.cat([ self._robot_limits["joint_position"].high, self._robot_limits["joint_velocity"].high, object_obs_high, obs_action_scale.high ]) # State scale for the MDP if self.cfg["env"]["asymmetric_obs"]: # finger tip scaling fingertip_state_scale = SimpleNamespace( low=torch.cat([ self._robot_limits["fingertip_position"].low, self._robot_limits["fingertip_orientation"].low, self._robot_limits["fingertip_velocity"].low, ]), high=torch.cat([ self._robot_limits["fingertip_position"].high, self._robot_limits["fingertip_orientation"].high, self._robot_limits["fingertip_velocity"].high, ]) ) states_low = [ self._observations_scale.low, self._object_limits["velocity"].low, fingertip_state_scale.low.repeat(self._dims.NumFingers.value), self._robot_limits["joint_torque"].low, self._robot_limits["fingertip_wrench"].low.repeat(self._dims.NumFingers.value), ] states_high = [ self._observations_scale.high, self._object_limits["velocity"].high, fingertip_state_scale.high.repeat(self._dims.NumFingers.value), self._robot_limits["joint_torque"].high, self._robot_limits["fingertip_wrench"].high.repeat(self._dims.NumFingers.value), ] # Note: This is order sensitive. self._states_scale.low = torch.cat(states_low) self._states_scale.high = torch.cat(states_high) # check that dimensions of scalings are correct # count number of dimensions state_dim = sum(self.state_spec.values()) obs_dim = sum(self.obs_spec.values()) action_dim = sum(self.action_spec.values()) # check that dimensions match # observations if self._observations_scale.low.shape[0] != obs_dim or self._observations_scale.high.shape[0] != obs_dim: msg = f"Observation scaling dimensions mismatch. " \ f"\tLow: {self._observations_scale.low.shape[0]}, " \ f"\tHigh: {self._observations_scale.high.shape[0]}, " \ f"\tExpected: {obs_dim}." raise AssertionError(msg) # state if self.cfg["env"]["asymmetric_obs"] \ and (self._states_scale.low.shape[0] != state_dim or self._states_scale.high.shape[0] != state_dim): msg = f"States scaling dimensions mismatch. " \ f"\tLow: {self._states_scale.low.shape[0]}, " \ f"\tHigh: {self._states_scale.high.shape[0]}, " \ f"\tExpected: {state_dim}." raise AssertionError(msg) # actions if self._action_scale.low.shape[0] != action_dim or self._action_scale.high.shape[0] != action_dim: msg = f"Actions scaling dimensions mismatch. " \ f"\tLow: {self._action_scale.low.shape[0]}, " \ f"\tHigh: {self._action_scale.high.shape[0]}, " \ f"\tExpected: {action_dim}." 
raise AssertionError(msg) # print the scaling print(f'MDP Raw observation bounds\n' f'\tLow: {self._observations_scale.low}\n' f'\tHigh: {self._observations_scale.high}') print(f'MDP Raw state bounds\n' f'\tLow: {self._states_scale.low}\n' f'\tHigh: {self._states_scale.high}') print(f'MDP Raw action bounds\n' f'\tLow: {self._action_scale.low}\n' f'\tHigh: {self._action_scale.high}') def compute_reward(self, actions): self.rew_buf[:] = 0. self.reset_buf[:] = 0. self.rew_buf[:], self.reset_buf[:], log_dict = compute_trifinger_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.max_episode_length, self.cfg["sim"]["dt"], self.cfg["env"]["reward_terms"]["finger_move_penalty"]["weight"], self.cfg["env"]["reward_terms"]["finger_reach_object_rate"]["weight"], self.cfg["env"]["reward_terms"]["object_dist"]["weight"], self.cfg["env"]["reward_terms"]["object_rot"]["weight"], self.env_steps_count, self._object_goal_poses_buf, self._object_state_history[0], self._object_state_history[1], self._fingertips_frames_state_history[0], self._fingertips_frames_state_history[1], self.cfg["env"]["reward_terms"]["keypoints_dist"]["activate"] ) self.extras.update({"env/rewards/"+k: v.mean() for k, v in log_dict.items()}) def compute_observations(self): # refresh memory buffers self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) joint_torques = self._dof_torque tip_wrenches = self._ft_sensors_values else: joint_torques = torch.zeros(self.num_envs, self._dims.JointTorqueDim.value, dtype=torch.float32, device=self.device) tip_wrenches = torch.zeros(self.num_envs, self._dims.NumFingers.value * self._dims.WrenchDim.value, dtype=torch.float32, device=self.device) # extract frame handles fingertip_handles_indices = list(self._fingertips_handles.values()) object_indices = self.gym_indices["object"] # update state histories self._fingertips_frames_state_history.appendleft(self._rigid_body_state[:, fingertip_handles_indices]) self._object_state_history.appendleft(self._actors_root_state[object_indices]) # fill the observations and states buffer self.obs_buf[:], self.states_buf[:] = compute_trifinger_observations_states( self.cfg["env"]["asymmetric_obs"], self._dof_position, self._dof_velocity, self._object_state_history[0], self._object_goal_poses_buf, self.actions, self._fingertips_frames_state_history[0], joint_torques, tip_wrenches, ) # normalize observations if flag is enabled if self.cfg["env"]["normalize_obs"]: # for normal obs self.obs_buf = scale_transform( self.obs_buf, lower=self._observations_scale.low, upper=self._observations_scale.high ) def reset_idx(self, env_ids): # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # A) Reset episode stats buffers self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 self._successes[env_ids] = 0 self._successes_pos[env_ids] = 0 self._successes_quat[env_ids] = 0 # B) Various randomizations at the start of the episode: # -- Robot base position. # -- Stage position. # -- Coefficient of restitution and friction for robot, object, stage.
# -- Mass and size of the object # -- Mass of robot links # -- Robot joint state robot_initial_state_config = self.cfg["env"]["reset_distribution"]["robot_initial_state"] self._sample_robot_state( env_ids, distribution=robot_initial_state_config["type"], dof_pos_stddev=robot_initial_state_config["dof_pos_stddev"], dof_vel_stddev=robot_initial_state_config["dof_vel_stddev"] ) # -- Sampling of initial pose of the object object_initial_state_config = self.cfg["env"]["reset_distribution"]["object_initial_state"] self._sample_object_poses( env_ids, distribution=object_initial_state_config["type"], ) # -- Sampling of goal pose of the object self._sample_object_goal_poses( env_ids, difficulty=self.cfg["env"]["task_difficulty"] ) # C) Extract trifinger indices to reset robot_indices = self.gym_indices["robot"][env_ids].to(torch.int32) object_indices = self.gym_indices["object"][env_ids].to(torch.int32) goal_object_indices = self.gym_indices["goal_object"][env_ids].to(torch.int32) all_indices = torch.unique(torch.cat([robot_indices, object_indices, goal_object_indices])) # D) Set values into simulator # -- DOF self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(robot_indices), len(robot_indices)) # -- actor root states self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._actors_root_state), gymtorch.unwrap_tensor(all_indices), len(all_indices)) def _sample_robot_state(self, instances: torch.Tensor, distribution: str = 'default', dof_pos_stddev: float = 0.0, dof_vel_stddev: float = 0.0): """Samples the robot DOF state based on the settings. Type of robot initial state distribution: ["default", "random"] - "default" means that robot is in default configuration. - "random" means that noise is added to default configuration - "none" means that robot is configuration is not reset between episodes. Args: instances: A tensor constraining indices of environment instances to reset. distribution: Name of distribution to sample initial state from: ['default', 'random'] dof_pos_stddev: Noise scale to DOF position (used if 'type' is 'random') dof_vel_stddev: Noise scale to DOF velocity (used if 'type' is 'random') """ # number of samples to generate num_samples = instances.size()[0] # sample dof state based on distribution type if distribution == "none": return elif distribution == "default": # set to default configuration self._dof_position[instances] = self._robot_limits["joint_position"].default self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default elif distribution == "random": # sample uniform random from (-1, 1) dof_state_dim = self._dims.JointPositionDim.value + self._dims.JointVelocityDim.value dof_state_noise = 2 * torch.rand((num_samples, dof_state_dim,), dtype=torch.float, device=self.device) - 1 # set to default configuration self._dof_position[instances] = self._robot_limits["joint_position"].default self._dof_velocity[instances] = self._robot_limits["joint_velocity"].default # add noise # DOF position start_offset = 0 end_offset = self._dims.JointPositionDim.value self._dof_position[instances] += dof_pos_stddev * dof_state_noise[:, start_offset:end_offset] # DOF velocity start_offset = end_offset end_offset += self._dims.JointVelocityDim.value self._dof_velocity[instances] += dof_vel_stddev * dof_state_noise[:, start_offset:end_offset] else: msg = f"Invalid robot initial state distribution. Input: {distribution} not in [`default`, `random`]." 
raise ValueError(msg) # reset robot fingertips state history for idx in range(1, self._state_history_len): self._fingertips_frames_state_history[idx][instances] = 0.0 def _sample_object_poses(self, instances: torch.Tensor, distribution: str): """Sample poses for the cube. Type of distribution: ["default", "random", "none"] - "default" means that pose is default configuration. - "random" means that pose is randomly sampled on the table. - "none" means no resetting of object pose between episodes. Args: instances: A tensor constraining indices of environment instances to reset. distribution: Name of distribution to sample initial state from: ['default', 'random'] """ # number of samples to generate num_samples = instances.size()[0] # sample poses based on distribution type if distribution == "none": return elif distribution == "default": pos_x, pos_y, pos_z = self._object_limits["position"].default orientation = self._object_limits["orientation"].default elif distribution == "random": # For initialization pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) # add a small offset to the height to account for scale randomisation (prevent ground intersection) pos_z = self._object_dims.size[2] / 2 + 0.0015 orientation = random_yaw_orientation(num_samples, self.device) else: msg = f"Invalid object initial state distribution. Input: {distribution} " \ "not in [`default`, `random`, `none`]." raise ValueError(msg) # set buffers into simulator # extract indices for goal object object_indices = self.gym_indices["object"][instances] # set values into buffer # object buffer self._object_state_history[0][instances, 0] = pos_x self._object_state_history[0][instances, 1] = pos_y self._object_state_history[0][instances, 2] = pos_z self._object_state_history[0][instances, 3:7] = orientation self._object_state_history[0][instances, 7:13] = 0 # reset object state history for idx in range(1, self._state_history_len): self._object_state_history[idx][instances] = 0.0 # root actor buffer self._actors_root_state[object_indices] = self._object_state_history[0][instances] def _sample_object_goal_poses(self, instances: torch.Tensor, difficulty: int): """Sample goal poses for the cube and sets them into the desired goal pose buffer. Args: instances: A tensor constraining indices of environment instances to reset. difficulty: Difficulty level. The higher, the more difficult is the goal. Possible levels are: - -1: Random goal position on the table, including yaw orientation. - 1: Random goal position on the table, no orientation. - 2: Fixed goal position in the air with x,y = 0. No orientation. - 3: Random goal position in the air, no orientation. - 4: Random goal pose in the air, including orientation. """ # number of samples to generate num_samples = instances.size()[0] # sample poses based on task difficulty if difficulty == -1: # For initialization pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = self._object_dims.size[2] / 2 orientation = random_yaw_orientation(num_samples, self.device) elif difficulty == 1: # Random goal position on the table, no orientation. pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = self._object_dims.size[2] / 2 orientation = default_orientation(num_samples, self.device) elif difficulty == 2: # Fixed goal position in the air with x,y = 0. No orientation. 
pos_x, pos_y = 0.0, 0.0 pos_z = self._object_dims.min_height + 0.05 orientation = default_orientation(num_samples, self.device) elif difficulty == 3: # Random goal position in the air, no orientation. pos_x, pos_y = random_xy(num_samples, self._object_dims.max_com_distance_to_center, self.device) pos_z = random_z(num_samples, self._object_dims.min_height, self._object_dims.max_height, self.device) orientation = default_orientation(num_samples, self.device) elif difficulty == 4: # Random goal pose in the air, including orientation. # Note: Set minimum height such that the cube does not intersect with the # ground in any orientation max_goal_radius = self._object_dims.max_com_distance_to_center max_height = self._object_dims.max_height orientation = random_orientation(num_samples, self.device) # pick x, y, z according to the maximum height / radius at the current point # in the cirriculum pos_x, pos_y = random_xy(num_samples, max_goal_radius, self.device) pos_z = random_z(num_samples, self._object_dims.radius_3d, max_height, self.device) else: msg = f"Invalid difficulty index for task: {difficulty}." raise ValueError(msg) # extract indices for goal object goal_object_indices = self.gym_indices["goal_object"][instances] # set values into buffer # object goal buffer self._object_goal_poses_buf[instances, 0] = pos_x self._object_goal_poses_buf[instances, 1] = pos_y self._object_goal_poses_buf[instances, 2] = pos_z self._object_goal_poses_buf[instances, 3:7] = orientation # root actor buffer self._actors_root_state[goal_object_indices, 0:7] = self._object_goal_poses_buf[instances] # self._actors_root_state[goal_object_indices, 2] = -10 def pre_physics_step(self, actions): env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.gym.simulate(self.sim) self.actions = actions.clone().to(self.device) # if normalized_action is true, then denormalize them. if self.cfg["env"]["normalize_action"]: # TODO: Default action should correspond to normalized value of 0. action_transformed = unscale_transform( self.actions, lower=self._action_scale.low, upper=self._action_scale.high ) else: action_transformed = self.actions # compute command on the basis of mode selected if self.cfg["env"]["command_mode"] == 'torque': # command is the desired joint torque computed_torque = action_transformed elif self.cfg["env"]["command_mode"] == 'position': # command is the desired joint positions desired_dof_position = action_transformed # compute torque to apply computed_torque = self._robot_dof_gains["stiffness"] * (desired_dof_position - self._dof_position) computed_torque -= self._robot_dof_gains["damping"] * self._dof_velocity else: msg = f"Invalid command mode. Input: {self.cfg['env']['command_mode']} not in ['torque', 'position']." raise ValueError(msg) # apply clamping of computed torque to actuator limits applied_torque = saturate( computed_torque, lower=self._robot_limits["joint_torque"].low, upper=self._robot_limits["joint_torque"].high ) # apply safety damping and clamping of the action torque if enabled if self.cfg["env"]["apply_safety_damping"]: # apply damping by joint velocity applied_torque -= self._robot_dof_gains["safety_damping"] * self._dof_velocity # clamp input applied_torque = saturate( applied_torque, lower=self._robot_limits["joint_torque"].low, upper=self._robot_limits["joint_torque"].high ) # set computed torques to simulator buffer. 
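        # (summary of the command pipeline above: raw action -> optional
        # denormalization -> PD law in 'position' mode or passthrough in
        # 'torque' mode -> clamp to actuator torque limits -> optional safety
        # damping -> clamp again, mirroring the safety torque check referenced
        # in the gain table)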
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(applied_torque)) def post_physics_step(self): self._step_info = {} self.progress_buf += 1 self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) # check termination conditions (success only) self._check_termination() if torch.sum(self.reset_buf) > 0: self._step_info['consecutive_successes'] = np.mean(self._successes.float().cpu().numpy()) self._step_info['consecutive_successes_pos'] = np.mean(self._successes_pos.float().cpu().numpy()) self._step_info['consecutive_successes_quat'] = np.mean(self._successes_quat.float().cpu().numpy()) def _check_termination(self): """Check whether the episode is done per environment. """ # Extract configuration for termination conditions termination_config = self.cfg["env"]["termination_conditions"] # Termination condition - successful completion # Calculate distance between current object and goal object_goal_position_dist = torch.norm( self._object_goal_poses_buf[:, 0:3] - self._object_state_history[0][:, 0:3], p=2, dim=-1 ) # log theoretical number of resets goal_position_reset = torch.le(object_goal_position_dist, termination_config["success"]["position_tolerance"]) self._step_info['env/current_position_goal/per_env'] = np.mean(goal_position_reset.float().cpu().numpy()) # For task with difficulty 4, we need to check if orientation matches as well. # Compute the difference in orientation between object and goal pose object_goal_orientation_dist = quat_diff_rad(self._object_state_history[0][:, 3:7], self._object_goal_poses_buf[:, 3:7]) # Check for distance within tolerance goal_orientation_reset = torch.le(object_goal_orientation_dist, termination_config["success"]["orientation_tolerance"]) self._step_info['env/current_orientation_goal/per_env'] = np.mean(goal_orientation_reset.float().cpu().numpy()) if self.cfg["env"]['task_difficulty'] < 4: # Check for task completion if position goal is within a threshold task_completion_reset = goal_position_reset elif self.cfg["env"]['task_difficulty'] == 4: # Check for task completion if both position + orientation goals are within a threshold task_completion_reset = torch.logical_and(goal_position_reset, goal_orientation_reset) else: # Check for task completion if the orientation goal is within a threshold task_completion_reset = goal_orientation_reset self._successes = task_completion_reset self._successes_pos = goal_position_reset self._successes_quat = goal_orientation_reset """ Helper functions - define assets """ def __define_robot_asset(self): """ Define Gym asset for robot.
""" # define tri-finger asset robot_asset_options = gymapi.AssetOptions() robot_asset_options.flip_visual_attachments = False robot_asset_options.fix_base_link = True robot_asset_options.collapse_fixed_joints = False robot_asset_options.disable_gravity = False robot_asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT robot_asset_options.thickness = 0.001 robot_asset_options.angular_damping = 0.01 robot_asset_options.vhacd_enabled = True robot_asset_options.vhacd_params = gymapi.VhacdParams() robot_asset_options.vhacd_params.resolution = 100000 robot_asset_options.vhacd_params.concavity = 0.0025 robot_asset_options.vhacd_params.alpha = 0.04 robot_asset_options.vhacd_params.beta = 1.0 robot_asset_options.vhacd_params.convex_hull_downsampling = 4 robot_asset_options.vhacd_params.max_num_vertices_per_ch = 256 if self.physics_engine == gymapi.SIM_PHYSX: robot_asset_options.use_physx_armature = True # load tri-finger asset trifinger_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._robot_urdf_file, robot_asset_options) # set the link properties for the robot # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/sim_finger.py#L563 trifinger_props = self.gym.get_asset_rigid_shape_properties(trifinger_asset) for p in trifinger_props: p.friction = 1.0 p.torsion_friction = 1.0 p.restitution = 0.8 self.gym.set_asset_rigid_shape_properties(trifinger_asset, trifinger_props) # extract the frame handles for frame_name in self._fingertips_handles.keys(): self._fingertips_handles[frame_name] = self.gym.find_asset_rigid_body_index(trifinger_asset, frame_name) # check valid handle if self._fingertips_handles[frame_name] == gymapi.INVALID_HANDLE: msg = f"Invalid handle received for frame: `{frame_name}`." print(msg) if self.cfg["env"]["enable_ft_sensors"] or self.cfg["env"]["asymmetric_obs"]: sensor_pose = gymapi.Transform() for fingertip_handle in self._fingertips_handles.values(): self.gym.create_asset_force_sensor(trifinger_asset, fingertip_handle, sensor_pose) # extract the dof indices # Note: need to write actuated dofs manually since the system contains fixed joints as well which show up. for dof_name in self._robot_dof_indices.keys(): self._robot_dof_indices[dof_name] = self.gym.find_asset_dof_index(trifinger_asset, dof_name) # check valid handle if self._robot_dof_indices[dof_name] == gymapi.INVALID_HANDLE: msg = f"Invalid index received for DOF: `{dof_name}`." print(msg) # return the asset return trifinger_asset def __define_table_asset(self): """ Define Gym asset for stage. """ # define stage asset table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = True table_asset_options.fix_base_link = True table_asset_options.thickness = 0.001 # load stage asset table_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._table_urdf_file, table_asset_options) # set stage properties table_props = self.gym.get_asset_rigid_shape_properties(table_asset) # iterate over each mesh for p in table_props: p.friction = 0.1 p.torsion_friction = 0.1 self.gym.set_asset_rigid_shape_properties(table_asset, table_props) # return the asset return table_asset def __define_boundary_asset(self): """ Define Gym asset for stage. 
""" # define stage asset boundary_asset_options = gymapi.AssetOptions() boundary_asset_options.disable_gravity = True boundary_asset_options.fix_base_link = True boundary_asset_options.thickness = 0.001 boundary_asset_options.vhacd_enabled = True boundary_asset_options.vhacd_params = gymapi.VhacdParams() boundary_asset_options.vhacd_params.resolution = 100000 boundary_asset_options.vhacd_params.concavity = 0.0 boundary_asset_options.vhacd_params.alpha = 0.04 boundary_asset_options.vhacd_params.beta = 1.0 boundary_asset_options.vhacd_params.max_num_vertices_per_ch = 1024 # load stage asset boundary_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._boundary_urdf_file, boundary_asset_options) # set stage properties boundary_props = self.gym.get_asset_rigid_shape_properties(boundary_asset) self.gym.set_asset_rigid_shape_properties(boundary_asset, boundary_props) # return the asset return boundary_asset def __define_object_asset(self): """ Define Gym asset for object. """ # define object asset object_asset_options = gymapi.AssetOptions() object_asset_options.disable_gravity = False object_asset_options.thickness = 0.001 object_asset_options.flip_visual_attachments = True # load object asset object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._object_urdf_file, object_asset_options) # set object properties # Ref: https://github.com/rr-learning/rrc_simulation/blob/master/python/rrc_simulation/collision_objects.py#L96 object_props = self.gym.get_asset_rigid_shape_properties(object_asset) for p in object_props: p.friction = 1.0 p.torsion_friction = 0.001 p.restitution = 0.0 self.gym.set_asset_rigid_shape_properties(object_asset, object_props) # return the asset return object_asset def __define_goal_object_asset(self): """ Define Gym asset for goal object. """ # define object asset object_asset_options = gymapi.AssetOptions() object_asset_options.disable_gravity = True object_asset_options.fix_base_link = True object_asset_options.thickness = 0.001 object_asset_options.flip_visual_attachments = True # load object asset goal_object_asset = self.gym.load_asset(self.sim, self._trifinger_assets_dir, self._object_urdf_file, object_asset_options) # return the asset return goal_object_asset @property def env_steps_count(self) -> int: """Returns the total number of environment steps aggregated across parallel environments.""" return self.gym.get_frame_count(self.sim) * self.num_envs ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def lgsk_kernel(x: torch.Tensor, scale: float = 50.0, eps:float=2) -> torch.Tensor: """Defines logistic kernel function to bound input to [-0.25, 0) Ref: https://arxiv.org/abs/1901.08652 (page 15) Args: x: Input tensor. scale: Scaling of the kernel function (controls how wide the 'bell' shape is') eps: Controls how 'tall' the 'bell' shape is. Returns: Output tensor computed using kernel. 
""" scaled = x * scale return 1.0 / (scaled.exp() + eps + (-scaled).exp()) @torch.jit.script def gen_keypoints(pose: torch.Tensor, num_keypoints: int = 8, size: Tuple[float, float, float] = (0.065, 0.065, 0.065)): num_envs = pose.shape[0] keypoints_buf = torch.ones(num_envs, num_keypoints, 3, dtype=torch.float32, device=pose.device) for i in range(num_keypoints): # which dimensions to negate n = [((i >> k) & 1) == 0 for k in range(3)] corner_loc = [(1 if n[k] else -1) * s / 2 for k, s in enumerate(size)], corner = torch.tensor(corner_loc, dtype=torch.float32, device=pose.device) * keypoints_buf[:, i, :] keypoints_buf[:, i, :] = local_to_world_space(corner, pose) return keypoints_buf @torch.jit.script def compute_trifinger_reward( obs_buf: torch.Tensor, reset_buf: torch.Tensor, progress_buf: torch.Tensor, episode_length: int, dt: float, finger_move_penalty_weight: float, finger_reach_object_weight: float, object_dist_weight: float, object_rot_weight: float, env_steps_count: int, object_goal_poses_buf: torch.Tensor, object_state: torch.Tensor, last_object_state: torch.Tensor, fingertip_state: torch.Tensor, last_fingertip_state: torch.Tensor, use_keypoints: bool ) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]: ft_sched_start = 0 ft_sched_end = 5e7 # Reward penalising finger movement fingertip_vel = (fingertip_state[:, :, 0:3] - last_fingertip_state[:, :, 0:3]) / dt finger_movement_penalty = finger_move_penalty_weight * fingertip_vel.pow(2).view(-1, 9).sum(dim=-1) # Reward for finger reaching the object # distance from each finger to the centroid of the object, shape (N, 3). curr_norms = torch.stack([ torch.norm(fingertip_state[:, i, 0:3] - object_state[:, 0:3], p=2, dim=-1) for i in range(3) ], dim=-1) # distance from each finger to the centroid of the object in the last timestep, shape (N, 3). prev_norms = torch.stack([ torch.norm(last_fingertip_state[:, i, 0:3] - last_object_state[:, 0:3], p=2, dim=-1) for i in range(3) ], dim=-1) ft_sched_val = 1.0 if ft_sched_start <= env_steps_count <= ft_sched_end else 0.0 finger_reach_object_reward = finger_reach_object_weight * ft_sched_val * (curr_norms - prev_norms).sum(dim=-1) if use_keypoints: object_keypoints = gen_keypoints(object_state[:, 0:7]) goal_keypoints = gen_keypoints(object_goal_poses_buf[:, 0:7]) delta = object_keypoints - goal_keypoints dist_l2 = torch.norm(delta, p=2, dim=-1) keypoints_kernel_sum = lgsk_kernel(dist_l2, scale=30., eps=2.).mean(dim=-1) pose_reward = object_dist_weight * dt * keypoints_kernel_sum else: # Reward for object distance object_dist = torch.norm(object_state[:, 0:3] - object_goal_poses_buf[:, 0:3], p=2, dim=-1) object_dist_reward = object_dist_weight * dt * lgsk_kernel(object_dist, scale=50., eps=2.) # Reward for object rotation # extract quaternion orientation quat_a = object_state[:, 3:7] quat_b = object_goal_poses_buf[:, 3:7] angles = quat_diff_rad(quat_a, quat_b) object_rot_reward = object_rot_weight * dt / (3. 
        pose_reward = object_dist_reward + object_rot_reward

    total_reward = (
        finger_movement_penalty
        + finger_reach_object_reward
        + pose_reward
    )

    # reset agents
    reset = torch.zeros_like(reset_buf)
    reset = torch.where(progress_buf >= episode_length - 1, torch.ones_like(reset_buf), reset)

    info: Dict[str, torch.Tensor] = {
        'finger_movement_penalty': finger_movement_penalty,
        'finger_reach_object_reward': finger_reach_object_reward,
        'pose_reward': pose_reward,
        'reward': total_reward,
    }

    return total_reward, reset, info

@torch.jit.script
def compute_trifinger_observations_states(
        asymmetric_obs: bool,
        dof_position: torch.Tensor,
        dof_velocity: torch.Tensor,
        object_state: torch.Tensor,
        object_goal_poses: torch.Tensor,
        actions: torch.Tensor,
        fingertip_state: torch.Tensor,
        joint_torques: torch.Tensor,
        tip_wrenches: torch.Tensor
):
    num_envs = dof_position.shape[0]

    obs_buf = torch.cat([
        dof_position,
        dof_velocity,
        object_state[:, 0:7],  # pose
        object_goal_poses,
        actions
    ], dim=-1)

    if asymmetric_obs:
        states_buf = torch.cat([
            obs_buf,
            object_state[:, 7:13],  # linear / angular velocity
            fingertip_state.reshape(num_envs, -1),
            joint_torques,
            tip_wrenches
        ], dim=-1)
    else:
        states_buf = obs_buf

    return obs_buf, states_buf

"""
Sampling of cuboidal object
"""

@torch.jit.script
def random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]:
    """Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)"""
    # sample radius of circle
    radius = torch.sqrt(torch.rand(num, dtype=torch.float, device=device))
    radius *= max_com_distance_to_center
    # sample theta of point
    theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
    # x,y-position of the cube
    x = radius * torch.cos(theta)
    y = radius * torch.sin(theta)
    return x, y

@torch.jit.script
def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:
    """Returns sampled height of the goal object."""
    z = torch.rand(num, dtype=torch.float, device=device)
    z = (max_height - min_height) * z + min_height
    return z

@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
    """Returns identity rotation transform."""
    quat = torch.zeros((num, 4,), dtype=torch.float, device=device)
    quat[..., -1] = 1.0
    return quat

@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation in 3D as quaternion.
    Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
    """
    # sample random orientation from normal distribution
    quat = torch.randn((num, 4,), dtype=torch.float, device=device)
    # normalize the quaternion
    quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12)
    return quat

@torch.jit.script
def random_orientation_within_angle(num: int, device: str, base: torch.Tensor, max_angle: float):
    """Generates random quaternions within max_angle of base
    Ref: https://math.stackexchange.com/a/3448434
    """
    quat = torch.zeros((num, 4,), dtype=torch.float, device=device)
    rand = torch.rand((num, 3), dtype=torch.float, device=device)

    c = torch.cos(rand[:, 0]*max_angle)
    n = torch.sqrt((1.-c)/2.)

    quat[:, 3] = torch.sqrt((1+c)/2.)
    quat[:, 2] = (rand[:, 1]*2.-1.) * n
    quat[:, 0] = (torch.sqrt(1-quat[:, 2]**2.) * torch.cos(2*np.pi*rand[:, 2])) * n
    quat[:, 1] = (torch.sqrt(1-quat[:, 2]**2.)
* torch.sin(2*np.pi*rand[:, 2])) * n # floating point errors can cause it to be slightly off, re-normalise quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12) return quat_mul(quat, base) @torch.jit.script def random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor: """Samples a random angular velocity with standard deviation `magnitude_stdev`""" axis = torch.randn((num, 3,), dtype=torch.float, device=device) axis /= torch.norm(axis, p=2, dim=-1).view(-1, 1) magnitude = torch.randn((num, 1,), dtype=torch.float, device=device) magnitude *= magnitude_stdev return magnitude * axis @torch.jit.script def random_yaw_orientation(num: int, device: str) -> torch.Tensor: """Returns sampled rotation around z-axis.""" roll = torch.zeros(num, dtype=torch.float, device=device) pitch = torch.zeros(num, dtype=torch.float, device=device) yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device) return quat_from_euler_xyz(roll, pitch, yaw)
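

# A minimal usage sketch of the samplers above. This helper is illustrative
# only: the name `_sample_object_pose_example`, the 0.11 m circle radius and
# the 0.0325 m resting height are assumed example values, not taken from the
# task configuration.
def _sample_object_pose_example(num: int = 128, device: str = 'cpu') -> torch.Tensor:
    """Compose the helpers above into a (num, 7) position + quaternion batch."""
    x, y = random_xy(num, 0.11, device)
    z = random_z(num, 0.0325, 0.0325, device)
    quat = random_yaw_orientation(num, device)
    # stack into the same [pos(3), quat(4)] layout used by the pose buffers
    return torch.cat([x.unsqueeze(-1), y.unsqueeze(-1), z.unsqueeze(-1), quat], dim=-1)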
70,571
Python
45.643754
217
0.611568
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/shadow_hand.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
import os
import torch

from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \
    to_torch, get_axis_params, torch_rand_float, tensor_clamp

from isaacgymenvs.tasks.base.vec_task import VecTask


class ShadowHand(VecTask):

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.cfg = cfg

        self.randomize = self.cfg["task"]["randomize"]
        self.randomization_params = self.cfg["task"]["randomization_params"]
        self.aggregate_mode = self.cfg["env"]["aggregateMode"]

        self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
        self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
        self.success_tolerance = self.cfg["env"]["successTolerance"]
        self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
        self.fall_dist = self.cfg["env"]["fallDistance"]
        self.fall_penalty = self.cfg["env"]["fallPenalty"]
        self.rot_eps = self.cfg["env"]["rotEps"]

        self.vel_obs_scale = 0.2  # scale factor of velocity based observations
        self.force_torque_obs_scale = 10.0  # scale factor of force and torque based observations

        self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
        self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
        self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
        self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]

        self.force_scale = self.cfg["env"].get("forceScale", 0.0)
        self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
        self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
        self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)

        self.shadow_hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
        self.use_relative_control = self.cfg["env"]["useRelativeControl"]
        self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
        self.debug_viz = self.cfg["env"]["enableDebugVis"]

        self.max_episode_length = self.cfg["env"]["episodeLength"]
        self.reset_time = self.cfg["env"].get("resetTime", -1.0)
        self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
        self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
        self.av_factor = self.cfg["env"].get("averFactor", 0.1)

        self.object_type = self.cfg["env"]["objectType"]
        assert self.object_type in ["block", "egg", "pen"]

        self.ignore_z = (self.object_type == "pen")

        self.asset_files_dict = {
            "block": "urdf/objects/cube_multicolor.urdf",
            "egg": "mjcf/open_ai_assets/hand/egg.xml",
            "pen": "mjcf/open_ai_assets/hand/pen.xml"
        }

        if "asset" in self.cfg["env"]:
            self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"])
            self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"])
            self.asset_files_dict["pen"] = self.cfg["env"]["asset"].get("assetFileNamePen", self.asset_files_dict["pen"])

        # can be "openai", "full_no_vel", "full", "full_state"
        self.obs_type = self.cfg["env"]["observationType"]

        if self.obs_type not in ["openai", "full_no_vel", "full", "full_state"]:
            raise Exception(
                "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]")

        print("Obs type:", self.obs_type)

        self.num_obs_dict = {
            "openai": 42,
            "full_no_vel": 77,
            "full": 157,
            "full_state": 211
        }

        self.up_axis = 'z'

        self.fingertips = ["robot0:ffdistal", "robot0:mfdistal", "robot0:rfdistal", "robot0:lfdistal", "robot0:thdistal"]
        self.num_fingertips = len(self.fingertips)

        self.use_vel_obs = False
        self.fingertip_obs = True
        self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"]

        num_states = 0
        if self.asymmetric_obs:
            num_states = 211

        self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
        self.cfg["env"]["numStates"] = num_states
        self.cfg["env"]["numActions"] = 20

        super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)

        self.dt = self.sim_params.dt
        control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
        if self.reset_time > 0.0:
            self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt)))
            print("Reset time: ", self.reset_time)
            print("New episode length: ", self.max_episode_length)

        if self.viewer is not None:
            cam_pos = gymapi.Vec3(10.0, 5.0, 1.0)
            cam_target = gymapi.Vec3(6.0, 5.0, 0.0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # get gym GPU state tensors
        actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)

        if self.obs_type == "full_state" or self.asymmetric_obs:
            sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
            self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)

            dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
            self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_shadow_hand_dofs)

        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)

        # create some wrapper tensors for different slices
        self.shadow_hand_default_dof_pos = torch.zeros(self.num_shadow_hand_dofs, dtype=torch.float,
                                                       device=self.device)
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.shadow_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_shadow_hand_dofs]
        self.shadow_hand_dof_pos = self.shadow_hand_dof_state[..., 0]
        self.shadow_hand_dof_vel = self.shadow_hand_dof_state[..., 1]

        self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
        self.num_bodies = self.rigid_body_states.shape[1]

        self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)

        self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
        self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
        self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)

        self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
        self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
        self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))

        self.reset_goal_buf = self.reset_buf.clone()
        self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
        self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)

        self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)

        self.total_successes = 0
        self.total_resets = 0

        # object apply random forces parameters
        self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
        self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
        self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]))

        self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)

    def create_sim(self):
        self.dt = self.cfg["sim"]["dt"]
        self.up_axis_idx = 2 if self.up_axis == 'z' else 1  # index of up axis: Y=1, Z=2

        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))

        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

    def _create_ground_plane(self):
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        asset_root = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets'))
        shadow_hand_asset_file = os.path.normpath("mjcf/open_ai_assets/hand/shadow_hand.xml")

        if "asset" in self.cfg["env"]:
            # asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root)
            shadow_hand_asset_file = os.path.normpath(self.cfg["env"]["asset"].get("assetFileName", shadow_hand_asset_file))

        object_asset_file = self.asset_files_dict[self.object_type]

        # load shadow hand asset
        asset_options = gymapi.AssetOptions()
        asset_options.flip_visual_attachments = False
        asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE shadow_hand_asset = self.gym.load_asset(self.sim, asset_root, shadow_hand_asset_file, asset_options) self.num_shadow_hand_bodies = self.gym.get_asset_rigid_body_count(shadow_hand_asset) self.num_shadow_hand_shapes = self.gym.get_asset_rigid_shape_count(shadow_hand_asset) self.num_shadow_hand_dofs = self.gym.get_asset_dof_count(shadow_hand_asset) self.num_shadow_hand_actuators = self.gym.get_asset_actuator_count(shadow_hand_asset) self.num_shadow_hand_tendons = self.gym.get_asset_tendon_count(shadow_hand_asset) # tendon set up limit_stiffness = 30 t_damping = 0.1 relevant_tendons = ["robot0:T_FFJ1c", "robot0:T_MFJ1c", "robot0:T_RFJ1c", "robot0:T_LFJ1c"] tendon_props = self.gym.get_asset_tendon_properties(shadow_hand_asset) for i in range(self.num_shadow_hand_tendons): for rt in relevant_tendons: if self.gym.get_asset_tendon_name(shadow_hand_asset, i) == rt: tendon_props[i].limit_stiffness = limit_stiffness tendon_props[i].damping = t_damping self.gym.set_asset_tendon_properties(shadow_hand_asset, tendon_props) actuated_dof_names = [self.gym.get_asset_actuator_joint_name(shadow_hand_asset, i) for i in range(self.num_shadow_hand_actuators)] self.actuated_dof_indices = [self.gym.find_asset_dof_index(shadow_hand_asset, name) for name in actuated_dof_names] # get shadow_hand dof properties, loaded by Isaac Gym from the MJCF file shadow_hand_dof_props = self.gym.get_asset_dof_properties(shadow_hand_asset) self.shadow_hand_dof_lower_limits = [] self.shadow_hand_dof_upper_limits = [] self.shadow_hand_dof_default_pos = [] self.shadow_hand_dof_default_vel = [] for i in range(self.num_shadow_hand_dofs): self.shadow_hand_dof_lower_limits.append(shadow_hand_dof_props['lower'][i]) self.shadow_hand_dof_upper_limits.append(shadow_hand_dof_props['upper'][i]) self.shadow_hand_dof_default_pos.append(0.0) self.shadow_hand_dof_default_vel.append(0.0) self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.shadow_hand_dof_lower_limits = to_torch(self.shadow_hand_dof_lower_limits, device=self.device) self.shadow_hand_dof_upper_limits = to_torch(self.shadow_hand_dof_upper_limits, device=self.device) self.shadow_hand_dof_default_pos = to_torch(self.shadow_hand_dof_default_pos, device=self.device) self.shadow_hand_dof_default_vel = to_torch(self.shadow_hand_dof_default_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips] # create fingertip force sensors, if needed if self.obs_type == "full_state" or self.asymmetric_obs: sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(shadow_hand_asset, ft_handle, sensor_pose) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) shadow_hand_start_pose = gymapi.Transform() shadow_hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) object_start_pose 
= gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = shadow_hand_start_pose.p.x pose_dy, pose_dz = -0.39, 0.10 object_start_pose.p.y = shadow_hand_start_pose.p.y + pose_dy object_start_pose.p.z = shadow_hand_start_pose.p.z + pose_dz if self.object_type == "pen": object_start_pose.p.z = shadow_hand_start_pose.p.z + 0.02 self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_shadow_hand_bodies + 2 max_agg_shapes = self.num_shadow_hand_shapes + 2 self.shadow_hands = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(shadow_hand_asset, name) for name in self.fingertips] shadow_hand_rb_count = self.gym.get_asset_rigid_body_count(shadow_hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = list(range(shadow_hand_rb_count, shadow_hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader shadow_hand_actor = self.gym.create_actor(env_ptr, shadow_hand_asset, shadow_hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([shadow_hand_start_pose.p.x, shadow_hand_start_pose.p.y, shadow_hand_start_pose.p.z, shadow_hand_start_pose.r.x, shadow_hand_start_pose.r.y, shadow_hand_start_pose.r.z, shadow_hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, shadow_hand_actor, shadow_hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, shadow_hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) # enable DOF force sensors, if needed if self.obs_type == "full_state" or self.asymmetric_obs: self.gym.enable_actor_dof_force_sensors(env_ptr, shadow_hand_actor) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.shadow_hands.append(shadow_hand_actor) # we are not using new mass values after DR when calculating random forces 
applied to an object, # which should be ok as long as the randomization range is not too big object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], self.successes[:], self.consecutive_successes[:] = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, (self.object_type == "pen") ) self.extras['consecutive_successes'] = self.consecutive_successes.mean() if self.print_success_stat: self.total_resets = self.total_resets + self.reset_buf.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. 
print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs))) if self.total_resets > 0: print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets)) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state" or self.asymmetric_obs: self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13] self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3] if self.obs_type == "openai": self.compute_fingertip_observations(True) elif self.obs_type == "full_no_vel": self.compute_full_observations(True) elif self.obs_type == "full": self.compute_full_observations() elif self.obs_type == "full_state": self.compute_full_state() else: print("Unknown observations type!") if self.asymmetric_obs: self.compute_full_state(True) def compute_fingertip_observations(self, no_vel=False): if no_vel: # Per https://arxiv.org/pdf/1808.00177.pdf Table 2 # Fingertip positions # Object Position, but not orientation # Relative target orientation # 3*self.num_fingertips = 15 self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15) self.obs_buf[:, 15:18] = self.object_pose[:, 0:3] self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 22:42] = self.actions else: # 13*self.num_fingertips = 65 self.obs_buf[:, 0:65] = self.fingertip_state.reshape(self.num_envs, 65) self.obs_buf[:, 65:72] = self.object_pose self.obs_buf[:, 72:75] = self.object_linvel self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 78:85] = self.goal_pose self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.obs_buf[:, 89:109] = self.actions def compute_full_observations(self, no_vel=False): if no_vel: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, 24:31] = self.object_pose self.obs_buf[:, 31:38] = self.goal_pose self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # 3*self.num_fingertips = 15 self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 15) self.obs_buf[:, 57:77] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.obs_buf[:, 48:55] = self.object_pose self.obs_buf[:, 55:58] = self.object_linvel self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel self.obs_buf[:, 61:68] = self.goal_pose self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # 
13*self.num_fingertips = 65 self.obs_buf[:, 72:137] = self.fingertip_state.reshape(self.num_envs, 65) self.obs_buf[:, 137:157] = self.actions def compute_full_state(self, asymm_obs=False): if asymm_obs: self.states_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.states_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.states_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 72 self.states_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.states_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel self.states_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 85 self.states_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.states_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 96 self.states_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states) self.states_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states + num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 211 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.states_buf[:, obs_end:obs_end + self.num_actions] = self.actions else: self.obs_buf[:, 0:self.num_shadow_hand_dofs] = unscale(self.shadow_hand_dof_pos, self.shadow_hand_dof_lower_limits, self.shadow_hand_dof_upper_limits) self.obs_buf[:, self.num_shadow_hand_dofs:2*self.num_shadow_hand_dofs] = self.vel_obs_scale * self.shadow_hand_dof_vel self.obs_buf[:, 2*self.num_shadow_hand_dofs:3*self.num_shadow_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor obj_obs_start = 3*self.num_shadow_hand_dofs # 72 self.obs_buf[:, obj_obs_start:obj_obs_start + 7] = self.object_pose self.obs_buf[:, obj_obs_start + 7:obj_obs_start + 10] = self.object_linvel self.obs_buf[:, obj_obs_start + 10:obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel goal_obs_start = obj_obs_start + 13 # 85 self.obs_buf[:, goal_obs_start:goal_obs_start + 7] = self.goal_pose self.obs_buf[:, goal_obs_start + 7:goal_obs_start + 11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) # fingertip observations, state(pose and vel) + force-torque sensors num_ft_states = 13 * self.num_fingertips # 65 num_ft_force_torques = 6 * self.num_fingertips # 30 fingertip_obs_start = goal_obs_start + 11 # 96 self.obs_buf[:, fingertip_obs_start:fingertip_obs_start + num_ft_states] = self.fingertip_state.reshape(self.num_envs, num_ft_states) self.obs_buf[:, fingertip_obs_start + num_ft_states:fingertip_obs_start + num_ft_states + num_ft_force_torques] = self.force_torque_obs_scale * self.vec_sensor_tensor # obs_end = 96 + 65 + 30 = 191 # obs_total = obs_end + num_actions = 211 obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques self.obs_buf[:, obs_end:obs_end + self.num_actions] = self.actions def reset_target_pose(self, env_ids, 
apply_reset=False): rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3] self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]) if apply_reset: goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids)) self.reset_goal_buf[env_ids] = 0 def reset_idx(self, env_ids, goal_env_ids): # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # generate random values rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_shadow_hand_dofs * 2 + 5), device=self.device) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone() self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \ self.reset_position_noise * rand_floats[:, 0:2] self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \ self.reset_position_noise * rand_floats[:, self.up_axis_idx] new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) if self.object_type == "pen": rand_angle_y = torch.tensor(0.3) new_object_rot = randomize_rotation_pen(rand_floats[:, 3], rand_floats[:, 4], rand_angle_y, self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids], self.z_unit_tensor[env_ids]) self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13]) object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices)) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1])) # reset shadow hand delta_max = self.shadow_hand_dof_upper_limits - self.shadow_hand_dof_default_pos delta_min = self.shadow_hand_dof_lower_limits - self.shadow_hand_dof_default_pos rand_delta = delta_min + (delta_max - delta_min) * 0.5 * (rand_floats[:, 5:5+self.num_shadow_hand_dofs] + 1) pos = self.shadow_hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta self.shadow_hand_dof_pos[env_ids, :] = pos self.shadow_hand_dof_vel[env_ids, :] = self.shadow_hand_dof_default_vel + \ 
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_shadow_hand_dofs:5+self.num_shadow_hand_dofs*2] self.prev_targets[env_ids, :self.num_shadow_hand_dofs] = pos self.cur_targets[env_ids, :self.num_shadow_hand_dofs] = pos hand_indices = self.hand_indices[env_ids].to(torch.int32) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.successes[env_ids] = 0 def pre_physics_step(self, actions): env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids) == 0: self.reset_target_pose(goal_env_ids, apply_reset=True) # if goals need reset in addition to other envs, call set API in reset_idx() elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids) > 0: self.reset_idx(env_ids, goal_env_ids) self.actions = actions.clone().to(self.device) if self.use_relative_control: targets = self.prev_targets[:, self.actuated_dof_indices] + self.shadow_hand_dof_speed_scale * self.dt * self.actions self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(targets, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) else: self.cur_targets[:, self.actuated_dof_indices] = scale(self.actions, self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices] + (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.shadow_hand_dof_lower_limits[self.actuated_dof_indices], self.shadow_hand_dof_upper_limits[self.actuated_dof_indices]) self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn( self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 self.compute_observations() self.compute_reward(self.actions) if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() targetz = (self.goal_pos[i] + 
quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85]) objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.object_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_hand_reward( rew_buf, reset_buf, reset_goal_buf, progress_buf, successes, consecutive_successes, max_episode_length: float, object_pos, object_rot, target_pos, target_rot, dist_reward_scale: float, rot_reward_scale: float, rot_eps: float, actions, action_penalty_scale: float, success_tolerance: float, reach_goal_bonus: float, fall_dist: float, fall_penalty: float, max_consecutive_successes: int, av_factor: float, ignore_z_rot: bool ): # Distance from the hand to the object goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) if ignore_z_rot: success_tolerance = 2.0 * success_tolerance # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) dist_rew = goal_dist * dist_reward_scale rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale action_penalty = torch.sum(actions ** 2, dim=-1) # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty reward = dist_rew + rot_rew + action_penalty * action_penalty_scale # Find out which envs hit the goal and update successes count goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf) successes = successes + goal_resets # Success bonus: orientation is within `success_tolerance` of goal orientation reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward) # Fall penalty: distance to the goal is larger than a threshold reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward) # Check env termination conditions, including maximum success number resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf) if max_consecutive_successes > 0: # Reset progress buffer on goal envs if max_consecutive_successes > 0 progress_buf = torch.where(torch.abs(rot_dist) <= 
success_tolerance, torch.zeros_like(progress_buf), progress_buf) resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets) # Apply penalty for not reaching the goal if max_consecutive_successes > 0: reward = torch.where(progress_buf >= max_episode_length - 1, reward + 0.5 * fall_penalty, reward) num_resets = torch.sum(resets) finished_cons_successes = torch.sum(successes * resets.float()) cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes) return reward, resets, goal_resets, progress_buf, successes, cons_successes @torch.jit.script def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)) @torch.jit.script def randomize_rotation_pen(rand0, rand1, max_angle, x_unit_tensor, y_unit_tensor, z_unit_tensor): rot = quat_mul(quat_from_angle_axis(0.5 * np.pi + rand0 * max_angle, x_unit_tensor), quat_from_angle_axis(rand0 * np.pi, z_unit_tensor)) return rot
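

# Illustrative sketch (assumes only the imports already at the top of this
# file; the function name and the example axis/angle are hypothetical): for a
# pure rotation of `theta` radians about a fixed axis, the orientation distance
# used in compute_hand_reward, 2 * asin(||q_xyz||), recovers `theta`.
def _rot_dist_example(theta: float = 0.7) -> torch.Tensor:
    axis = to_torch([0.0, 0.0, 1.0], device='cpu').unsqueeze(0)   # arbitrary example axis, shape (1, 3)
    object_rot = quat_from_angle_axis(torch.tensor([theta]), axis)
    target_rot = quat_from_angle_axis(torch.tensor([0.0]), axis)  # identity rotation
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    # same formula as in compute_hand_reward; returns a tensor close to `theta`
    return 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))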
45,910
Python
55.40172
217
0.624439
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cabinet.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, tensor_clamp, \ tf_vector, tf_combine from .base.vec_task import VecTask class FrankaCabinet(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.action_scale = self.cfg["env"]["actionScale"] self.start_position_noise = self.cfg["env"]["startPositionNoise"] self.start_rotation_noise = self.cfg["env"]["startRotationNoise"] self.num_props = self.cfg["env"]["numProps"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self.cfg["env"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self.cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self.cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self.cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.up_axis = "z" self.up_axis_idx = 2 self.distX_offset = 0.04 self.dt = 1/60. 
# prop dimensions self.prop_width = 0.08 self.prop_height = 0.08 self.prop_length = 0.08 self.prop_spacing = 0.09 num_obs = 23 num_acts = 9 self.cfg["env"]["numObservations"] = 23 self.cfg["env"]["numActions"] = 9 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.franka_default_dof_pos = to_torch([1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.franka_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_franka_dofs] self.franka_dof_pos = self.franka_dof_state[..., 0] self.franka_dof_vel = self.franka_dof_state[..., 1] self.cabinet_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, self.num_franka_dofs:] self.cabinet_dof_pos = self.cabinet_dof_state[..., 0] self.cabinet_dof_vel = self.cabinet_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(self.num_envs, -1, 13) if self.num_props > 0: self.prop_states = self.root_state_tensor[:, 2:] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.franka_dof_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * (2 + self.num_props), dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim( self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") franka_asset_file = "urdf/franka_description/robots/franka_panda.urdf" cabinet_asset_file = "urdf/sektion_cabinet_model/urdf/sektion_cabinet_2.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file) cabinet_asset_file = self.cfg["env"]["asset"].get("assetFileNameCabinet", cabinet_asset_file) # load franka asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = True asset_options.fix_base_link = True 
asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS asset_options.use_mesh_materials = True franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options) # load cabinet asset asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = False asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.armature = 0.005 cabinet_asset = self.gym.load_asset(self.sim, asset_root, cabinet_asset_file, asset_options) franka_dof_stiffness = to_torch([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float, device=self.device) franka_dof_damping = to_torch([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device) self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset) self.num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset) self.num_cabinet_dofs = self.gym.get_asset_dof_count(cabinet_asset) print("num franka bodies: ", self.num_franka_bodies) print("num franka dofs: ", self.num_franka_dofs) print("num cabinet bodies: ", self.num_cabinet_bodies) print("num cabinet dofs: ", self.num_cabinet_dofs) # set franka dof properties franka_dof_props = self.gym.get_asset_dof_properties(franka_asset) self.franka_dof_lower_limits = [] self.franka_dof_upper_limits = [] for i in range(self.num_franka_dofs): franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if self.physics_engine == gymapi.SIM_PHYSX: franka_dof_props['stiffness'][i] = franka_dof_stiffness[i] franka_dof_props['damping'][i] = franka_dof_damping[i] else: franka_dof_props['stiffness'][i] = 7000.0 franka_dof_props['damping'][i] = 50.0 self.franka_dof_lower_limits.append(franka_dof_props['lower'][i]) self.franka_dof_upper_limits.append(franka_dof_props['upper'][i]) self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device) self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[[7, 8]] = 0.1 franka_dof_props['effort'][7] = 200 franka_dof_props['effort'][8] = 200 # set cabinet dof properties cabinet_dof_props = self.gym.get_asset_dof_properties(cabinet_asset) for i in range(self.num_cabinet_dofs): cabinet_dof_props['damping'][i] = 10.0 # create prop assets box_opts = gymapi.AssetOptions() box_opts.density = 400 prop_asset = self.gym.create_box(self.sim, self.prop_width, self.prop_height, self.prop_width, box_opts) franka_start_pose = gymapi.Transform() franka_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0) franka_start_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) cabinet_start_pose = gymapi.Transform() cabinet_start_pose.p = gymapi.Vec3(*get_axis_params(0.4, self.up_axis_idx)) # compute aggregate size num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset) num_cabinet_bodies = self.gym.get_asset_rigid_body_count(cabinet_asset) num_cabinet_shapes = self.gym.get_asset_rigid_shape_count(cabinet_asset) num_prop_bodies = self.gym.get_asset_rigid_body_count(prop_asset) num_prop_shapes = self.gym.get_asset_rigid_shape_count(prop_asset) max_agg_bodies = num_franka_bodies + num_cabinet_bodies + self.num_props * num_prop_bodies max_agg_shapes = num_franka_shapes + 
num_cabinet_shapes + self.num_props * num_prop_shapes self.frankas = [] self.cabinets = [] self.default_prop_states = [] self.prop_start = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 3: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 1, 0) self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props) if self.aggregate_mode == 2: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) cabinet_pose = cabinet_start_pose cabinet_pose.p.x += self.start_position_noise * (np.random.rand() - 0.5) dz = 0.5 * np.random.rand() dy = np.random.rand() - 0.5 cabinet_pose.p.y += self.start_position_noise * dy cabinet_pose.p.z += self.start_position_noise * dz cabinet_actor = self.gym.create_actor(env_ptr, cabinet_asset, cabinet_pose, "cabinet", i, 2, 0) self.gym.set_actor_dof_properties(env_ptr, cabinet_actor, cabinet_dof_props) if self.aggregate_mode == 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) if self.num_props > 0: self.prop_start.append(self.gym.get_sim_actor_count(self.sim)) drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top") drawer_pose = self.gym.get_rigid_transform(env_ptr, drawer_handle) props_per_row = int(np.ceil(np.sqrt(self.num_props))) xmin = -0.5 * self.prop_spacing * (props_per_row - 1) yzmin = -0.5 * self.prop_spacing * (props_per_row - 1) prop_count = 0 for j in range(props_per_row): prop_up = yzmin + j * self.prop_spacing for k in range(props_per_row): if prop_count >= self.num_props: break propx = xmin + k * self.prop_spacing prop_state_pose = gymapi.Transform() prop_state_pose.p.x = drawer_pose.p.x + propx propz, propy = 0, prop_up prop_state_pose.p.y = drawer_pose.p.y + propy prop_state_pose.p.z = drawer_pose.p.z + propz prop_state_pose.r = gymapi.Quat(0, 0, 0, 1) prop_handle = self.gym.create_actor(env_ptr, prop_asset, prop_state_pose, "prop{}".format(prop_count), i, 0, 0) prop_count += 1 prop_idx = j * props_per_row + k self.default_prop_states.append([prop_state_pose.p.x, prop_state_pose.p.y, prop_state_pose.p.z, prop_state_pose.r.x, prop_state_pose.r.y, prop_state_pose.r.z, prop_state_pose.r.w, 0, 0, 0, 0, 0, 0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.frankas.append(franka_actor) self.cabinets.append(cabinet_actor) self.hand_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_link7") self.drawer_handle = self.gym.find_actor_rigid_body_handle(env_ptr, cabinet_actor, "drawer_top") self.lfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_leftfinger") self.rfinger_handle = self.gym.find_actor_rigid_body_handle(env_ptr, franka_actor, "panda_rightfinger") self.default_prop_states = to_torch(self.default_prop_states, device=self.device, dtype=torch.float).view(self.num_envs, self.num_props, 13) self.init_data() def init_data(self): hand = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_link7") lfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_leftfinger") rfinger = self.gym.find_actor_rigid_body_handle(self.envs[0], self.frankas[0], "panda_rightfinger") hand_pose = self.gym.get_rigid_transform(self.envs[0], hand) lfinger_pose = self.gym.get_rigid_transform(self.envs[0], lfinger) 
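        # Note: the rigid transforms gathered around here (hand link and both
        # finger links, sampled from env 0) feed the grasp-frame computation
        # below: the grasp point is taken as the midpoint of the two fingertips,
        # offset 0.04 m along the grasp axis, and expressed in the hand link's
        # local frame.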
rfinger_pose = self.gym.get_rigid_transform(self.envs[0], rfinger) finger_pose = gymapi.Transform() finger_pose.p = (lfinger_pose.p + rfinger_pose.p) * 0.5 finger_pose.r = lfinger_pose.r hand_pose_inv = hand_pose.inverse() grasp_pose_axis = 1 franka_local_grasp_pose = hand_pose_inv * finger_pose franka_local_grasp_pose.p += gymapi.Vec3(*get_axis_params(0.04, grasp_pose_axis)) self.franka_local_grasp_pos = to_torch([franka_local_grasp_pose.p.x, franka_local_grasp_pose.p.y, franka_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1)) self.franka_local_grasp_rot = to_torch([franka_local_grasp_pose.r.x, franka_local_grasp_pose.r.y, franka_local_grasp_pose.r.z, franka_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1)) drawer_local_grasp_pose = gymapi.Transform() drawer_local_grasp_pose.p = gymapi.Vec3(*get_axis_params(0.01, grasp_pose_axis, 0.3)) drawer_local_grasp_pose.r = gymapi.Quat(0, 0, 0, 1) self.drawer_local_grasp_pos = to_torch([drawer_local_grasp_pose.p.x, drawer_local_grasp_pose.p.y, drawer_local_grasp_pose.p.z], device=self.device).repeat((self.num_envs, 1)) self.drawer_local_grasp_rot = to_torch([drawer_local_grasp_pose.r.x, drawer_local_grasp_pose.r.y, drawer_local_grasp_pose.r.z, drawer_local_grasp_pose.r.w], device=self.device).repeat((self.num_envs, 1)) self.gripper_forward_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1)) self.drawer_inward_axis = to_torch([-1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.gripper_up_axis = to_torch([0, 1, 0], device=self.device).repeat((self.num_envs, 1)) self.drawer_up_axis = to_torch([0, 0, 1], device=self.device).repeat((self.num_envs, 1)) self.franka_grasp_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_grasp_rot = torch.zeros_like(self.franka_local_grasp_rot) self.franka_grasp_rot[..., -1] = 1 # xyzw self.drawer_grasp_pos = torch.zeros_like(self.drawer_local_grasp_pos) self.drawer_grasp_rot = torch.zeros_like(self.drawer_local_grasp_rot) self.drawer_grasp_rot[..., -1] = 1 self.franka_lfinger_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_rfinger_pos = torch.zeros_like(self.franka_local_grasp_pos) self.franka_lfinger_rot = torch.zeros_like(self.franka_local_grasp_rot) self.franka_rfinger_rot = torch.zeros_like(self.franka_local_grasp_rot) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos, self.franka_grasp_pos, self.drawer_grasp_pos, self.franka_grasp_rot, self.drawer_grasp_rot, self.franka_lfinger_pos, self.franka_rfinger_pos, self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis, self.num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale, self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self.max_episode_length ) def compute_observations(self): self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) hand_pos = self.rigid_body_states[:, self.hand_handle][:, 0:3] hand_rot = self.rigid_body_states[:, self.hand_handle][:, 3:7] drawer_pos = self.rigid_body_states[:, self.drawer_handle][:, 0:3] drawer_rot = self.rigid_body_states[:, self.drawer_handle][:, 3:7] self.franka_grasp_rot[:], self.franka_grasp_pos[:], self.drawer_grasp_rot[:], self.drawer_grasp_pos[:] = \ compute_grasp_transforms(hand_rot, hand_pos, 
self.franka_local_grasp_rot, self.franka_local_grasp_pos, drawer_rot, drawer_pos, self.drawer_local_grasp_rot, self.drawer_local_grasp_pos ) self.franka_lfinger_pos = self.rigid_body_states[:, self.lfinger_handle][:, 0:3] self.franka_rfinger_pos = self.rigid_body_states[:, self.rfinger_handle][:, 0:3] self.franka_lfinger_rot = self.rigid_body_states[:, self.lfinger_handle][:, 3:7] self.franka_rfinger_rot = self.rigid_body_states[:, self.rfinger_handle][:, 3:7] dof_pos_scaled = (2.0 * (self.franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0) to_target = self.drawer_grasp_pos - self.franka_grasp_pos self.obs_buf = torch.cat((dof_pos_scaled, self.franka_dof_vel * self.dof_vel_scale, to_target, self.cabinet_dof_pos[:, 3].unsqueeze(-1), self.cabinet_dof_vel[:, 3].unsqueeze(-1)), dim=-1) return self.obs_buf def reset_idx(self, env_ids): env_ids_int32 = env_ids.to(dtype=torch.int32) # reset franka pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self.device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits) self.franka_dof_pos[env_ids, :] = pos self.franka_dof_vel[env_ids, :] = torch.zeros_like(self.franka_dof_vel[env_ids]) self.franka_dof_targets[env_ids, :self.num_franka_dofs] = pos # reset cabinet self.cabinet_dof_state[env_ids, :] = torch.zeros_like(self.cabinet_dof_state[env_ids]) # reset props if self.num_props > 0: prop_indices = self.global_indices[env_ids, 2:].flatten() self.prop_states[env_ids] = self.default_prop_states[env_ids] self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(prop_indices), len(prop_indices)) multi_env_ids_int32 = self.global_indices[env_ids, :2].flatten() self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.franka_dof_targets), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) targets = self.franka_dof_targets[:, :self.num_franka_dofs] + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:, :self.num_franka_dofs] = tensor_clamp( targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self.num_envs, dtype=torch.int32, device=self.device) self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.franka_dof_targets)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) for i in range(self.num_envs): px = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_grasp_pos[i] + quat_apply(self.franka_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 
0.2)).cpu().numpy() p0 = self.franka_grasp_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85]) px = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.drawer_grasp_pos[i] + quat_apply(self.drawer_grasp_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.drawer_grasp_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) px = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_lfinger_pos[i] + quat_apply(self.franka_lfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.franka_lfinger_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) px = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (self.franka_rfinger_pos[i] + quat_apply(self.franka_rfinger_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = self.franka_rfinger_pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [1, 0, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0, 1, 0]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0, 0, 1]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_franka_reward( reset_buf, progress_buf, actions, cabinet_dof_pos, franka_grasp_pos, drawer_grasp_pos, franka_grasp_rot, drawer_grasp_rot, franka_lfinger_pos, franka_rfinger_pos, gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis, num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale, finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float) -> Tuple[Tensor, 
Tensor] # distance from hand to the drawer d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1) dist_reward = 1.0 / (1.0 + d ** 2) dist_reward *= dist_reward dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward) axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis) axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis) dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper # reward for matching the orientation of the hand to the drawer (fingers wrapped) rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2) # bonus if left finger is above the drawer handle and right below around_handle_reward = torch.zeros_like(rot_reward) around_handle_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward), around_handle_reward) # reward for distance of each finger from the drawer finger_dist_reward = torch.zeros_like(rot_reward) lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2]) finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2], torch.where(franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], (0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward), finger_dist_reward) # regularization on the actions (summed for each environment) action_penalty = torch.sum(actions ** 2, dim=-1) # how far the cabinet has been opened out open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint rewards = dist_reward_scale * dist_reward + rot_reward_scale * rot_reward \ + around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward \ + finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty # bonus for opening drawer properly rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards) rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards) # prevent bad style in opening drawer rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, torch.ones_like(rewards) * -1, rewards) rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset, torch.ones_like(rewards) * -1, rewards) # reset if drawer is open or max length reached reset_buf = torch.where(cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(reset_buf), reset_buf) reset_buf = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf) return rewards, reset_buf @torch.jit.script def compute_grasp_transforms(hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos, drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor] global_franka_rot, global_franka_pos = tf_combine( hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos) global_drawer_rot, global_drawer_pos = 
tf_combine( drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos) return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
32,782
Python
56.716549
217
0.613141
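The squared-inverse distance shaping used in compute_franka_reward above is worth seeing in isolation. Below is a minimal standalone sketch (the sample distances are illustrative, not taken from the task): the reward rises smoothly as the hand-to-handle distance d shrinks, and doubles once the gripper is within 2 cm.

import torch

d = torch.tensor([0.30, 0.10, 0.02, 0.005])  # illustrative hand-to-handle distances (m)
dist_reward = 1.0 / (1.0 + d ** 2)           # smooth falloff with distance
dist_reward = dist_reward * dist_reward      # squared to sharpen the gradient near the handle
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)  # close-range bonus
print(dist_reward)  # tensor([0.8417, 0.9803, 1.9984, 1.9999])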
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .ant import Ant
from .anymal import Anymal
from .anymal_terrain import AnymalTerrain
from .ball_balance import BallBalance
from .cartpole import Cartpole
from .factory.factory_task_gears import FactoryTaskGears
from .factory.factory_task_insertion import FactoryTaskInsertion
from .factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick
from .factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace
from .factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew
from .franka_cabinet import FrankaCabinet
from .franka_cube_stack import FrankaCubeStack
from .humanoid import Humanoid
from .humanoid_amp import HumanoidAMP
from .ingenuity import Ingenuity
from .quadcopter import Quadcopter
from .shadow_hand import ShadowHand
from .allegro_hand import AllegroHand
from .dextreme.allegro_hand_dextreme import AllegroHandDextremeManualDR, AllegroHandDextremeADR
from .trifinger import Trifinger
from .allegro_kuka.allegro_kuka_reorientation import AllegroKukaReorientation
from .allegro_kuka.allegro_kuka_regrasping import AllegroKukaRegrasping
from .allegro_kuka.allegro_kuka_throw import AllegroKukaThrow
from .allegro_kuka.allegro_kuka_two_arms_regrasping import AllegroKukaTwoArmsRegrasping
from .allegro_kuka.allegro_kuka_two_arms_reorientation import AllegroKukaTwoArmsReorientation
from .industreal.industreal_task_pegs_insert import IndustRealTaskPegsInsert
from .industreal.industreal_task_gears_insert import IndustRealTaskGearsInsert


def resolve_allegro_kuka(cfg, *args, **kwargs):
    subtask_name: str = cfg["env"]["subtask"]
    subtask_map = dict(
        reorientation=AllegroKukaReorientation,
        throw=AllegroKukaThrow,
        regrasping=AllegroKukaRegrasping,
    )
    if subtask_name not in subtask_map:
        raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")
    return subtask_map[subtask_name](cfg, *args, **kwargs)


def resolve_allegro_kuka_two_arms(cfg, *args, **kwargs):
    subtask_name: str = cfg["env"]["subtask"]
    subtask_map = dict(
        reorientation=AllegroKukaTwoArmsReorientation,
        regrasping=AllegroKukaTwoArmsRegrasping,
    )
    if subtask_name not in subtask_map:
        raise ValueError(f"Unknown subtask={subtask_name} in {subtask_map}")
    return subtask_map[subtask_name](cfg, *args, **kwargs)


# Mappings from strings to environments
isaacgym_task_map = {
    "AllegroHand": AllegroHand,
    "AllegroKuka": resolve_allegro_kuka,
    "AllegroKukaTwoArms": resolve_allegro_kuka_two_arms,
    "AllegroHandManualDR": AllegroHandDextremeManualDR,
    "AllegroHandADR": AllegroHandDextremeADR,
    "Ant": Ant,
    "Anymal": Anymal,
    "AnymalTerrain": AnymalTerrain,
    "BallBalance": BallBalance,
    "Cartpole": Cartpole,
    "FactoryTaskGears": FactoryTaskGears,
    "FactoryTaskInsertion": FactoryTaskInsertion,
    "FactoryTaskNutBoltPick": FactoryTaskNutBoltPick,
    "FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace,
    "FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew,
    "IndustRealTaskPegsInsert": IndustRealTaskPegsInsert,
    "IndustRealTaskGearsInsert": IndustRealTaskGearsInsert,
    "FrankaCabinet": FrankaCabinet,
    "FrankaCubeStack": FrankaCubeStack,
    "Humanoid": Humanoid,
    "HumanoidAMP": HumanoidAMP,
    "Ingenuity": Ingenuity,
    "Quadcopter": Quadcopter,
    "ShadowHand": ShadowHand,
    "Trifinger": Trifinger,
}
4,960
Python
42.13913
95
0.777218
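Every value in isaacgym_task_map above is callable with the same configuration arguments, whether it is a VecTask subclass or a resolver such as resolve_allegro_kuka, so task construction reduces to a dictionary lookup. The make_task helper below is a hypothetical usage sketch, not part of the module; the keyword names mirror the task constructors defined elsewhere in this repository.

from isaacgymenvs.tasks import isaacgym_task_map

def make_task(task_name, cfg, rl_device="cuda:0", sim_device="cuda:0",
              graphics_device_id=0, headless=False):
    # Hypothetical factory: look the task up by name and instantiate it.
    if task_name not in isaacgym_task_map:
        raise ValueError(f"Unknown task: {task_name}")
    return isaacgym_task_map[task_name](
        cfg=cfg, rl_device=rl_device, sim_device=sim_device,
        graphics_device_id=graphics_device_id, headless=headless,
        virtual_screen_capture=False, force_render=not headless)

# env = make_task("FrankaCabinet", cfg)  # cfg: the parsed task configuration dict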
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid_amp.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from enum import Enum import numpy as np import torch import os from gym import spaces from isaacgym import gymapi from isaacgym import gymtorch from isaacgymenvs.tasks.amp.humanoid_amp_base import HumanoidAMPBase, dof_to_obs from isaacgymenvs.tasks.amp.utils_amp import gym_util from isaacgymenvs.tasks.amp.utils_amp.motion_lib import MotionLib from isaacgymenvs.utils.torch_jit_utils import quat_mul, to_torch, calc_heading_quat_inv, quat_to_tan_norm, my_quat_rotate NUM_AMP_OBS_PER_STEP = 13 + 52 + 28 + 12 # [root_h, root_rot, root_vel, root_ang_vel, dof_pos, dof_vel, key_body_pos] class HumanoidAMP(HumanoidAMPBase): class StateInit(Enum): Default = 0 Start = 1 Random = 2 Hybrid = 3 def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg state_init = cfg["env"]["stateInit"] self._state_init = HumanoidAMP.StateInit[state_init] self._hybrid_init_prob = cfg["env"]["hybridInitProb"] self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"] assert(self._num_amp_obs_steps >= 2) self._reset_default_env_ids = [] self._reset_ref_env_ids = [] super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) motion_file = cfg['env'].get('motion_file', "amp_humanoid_backflip.npy") motion_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets/amp/motions/" + motion_file) self._load_motion(motion_file_path) self.num_amp_obs = self._num_amp_obs_steps * NUM_AMP_OBS_PER_STEP self._amp_obs_space = spaces.Box(np.ones(self.num_amp_obs) * -np.Inf, np.ones(self.num_amp_obs) * np.Inf) self._amp_obs_buf = torch.zeros((self.num_envs, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float) self._curr_amp_obs_buf = self._amp_obs_buf[:, 0] self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:] self._amp_obs_demo_buf = None return def 
post_physics_step(self): super().post_physics_step() self._update_hist_amp_obs() self._compute_amp_observations() amp_obs_flat = self._amp_obs_buf.view(-1, self.get_num_amp_obs()) self.extras["amp_obs"] = amp_obs_flat return def get_num_amp_obs(self): return self.num_amp_obs @property def amp_observation_space(self): return self._amp_obs_space def fetch_amp_obs_demo(self, num_samples): dt = self.dt motion_ids = self._motion_lib.sample_motions(num_samples) if (self._amp_obs_demo_buf is None): self._build_amp_obs_demo_buf(num_samples) else: assert(self._amp_obs_demo_buf.shape[0] == num_samples) motion_times0 = self._motion_lib.sample_time(motion_ids) motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps]) motion_times = np.expand_dims(motion_times0, axis=-1) time_steps = -dt * np.arange(0, self._num_amp_obs_steps) motion_times = motion_times + time_steps motion_ids = motion_ids.flatten() motion_times = motion_times.flatten() root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1) amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos, self._local_root_obs) self._amp_obs_demo_buf[:] = amp_obs_demo.view(self._amp_obs_demo_buf.shape) amp_obs_demo_flat = self._amp_obs_demo_buf.view(-1, self.get_num_amp_obs()) return amp_obs_demo_flat def _build_amp_obs_demo_buf(self, num_samples): self._amp_obs_demo_buf = torch.zeros((num_samples, self._num_amp_obs_steps, NUM_AMP_OBS_PER_STEP), device=self.device, dtype=torch.float) return def _load_motion(self, motion_file): self._motion_lib = MotionLib(motion_file=motion_file, num_dofs=self.num_dof, key_body_ids=self._key_body_ids.cpu().numpy(), device=self.device) return def reset_idx(self, env_ids): super().reset_idx(env_ids) self._init_amp_obs(env_ids) return def _reset_actors(self, env_ids): if (self._state_init == HumanoidAMP.StateInit.Default): self._reset_default(env_ids) elif (self._state_init == HumanoidAMP.StateInit.Start or self._state_init == HumanoidAMP.StateInit.Random): self._reset_ref_state_init(env_ids) elif (self._state_init == HumanoidAMP.StateInit.Hybrid): self._reset_hybrid_state_init(env_ids) else: assert(False), "Unsupported state initialization strategy: {:s}".format(str(self._state_init)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self._terminate_buf[env_ids] = 0 return def _reset_default(self, env_ids): self._dof_pos[env_ids] = self._initial_dof_pos[env_ids] self._dof_vel[env_ids] = self._initial_dof_vel[env_ids] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self._reset_default_env_ids = env_ids return def _reset_ref_state_init(self, env_ids): num_envs = env_ids.shape[0] motion_ids = self._motion_lib.sample_motions(num_envs) if (self._state_init == HumanoidAMP.StateInit.Random or self._state_init == HumanoidAMP.StateInit.Hybrid): motion_times = self._motion_lib.sample_time(motion_ids) elif (self._state_init == HumanoidAMP.StateInit.Start): motion_times = np.zeros(num_envs) else: assert(False), "Unsupported state 
initialization strategy: {:s}".format(str(self._state_init)) root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) self._set_env_state(env_ids=env_ids, root_pos=root_pos, root_rot=root_rot, dof_pos=dof_pos, root_vel=root_vel, root_ang_vel=root_ang_vel, dof_vel=dof_vel) self._reset_ref_env_ids = env_ids self._reset_ref_motion_ids = motion_ids self._reset_ref_motion_times = motion_times return def _reset_hybrid_state_init(self, env_ids): num_envs = env_ids.shape[0] ref_probs = to_torch(np.array([self._hybrid_init_prob] * num_envs), device=self.device) ref_init_mask = torch.bernoulli(ref_probs) == 1.0 ref_reset_ids = env_ids[ref_init_mask] if (len(ref_reset_ids) > 0): self._reset_ref_state_init(ref_reset_ids) default_reset_ids = env_ids[torch.logical_not(ref_init_mask)] if (len(default_reset_ids) > 0): self._reset_default(default_reset_ids) return def _init_amp_obs(self, env_ids): self._compute_amp_observations(env_ids) if (len(self._reset_default_env_ids) > 0): self._init_amp_obs_default(self._reset_default_env_ids) if (len(self._reset_ref_env_ids) > 0): self._init_amp_obs_ref(self._reset_ref_env_ids, self._reset_ref_motion_ids, self._reset_ref_motion_times) return def _init_amp_obs_default(self, env_ids): curr_amp_obs = self._curr_amp_obs_buf[env_ids].unsqueeze(-2) self._hist_amp_obs_buf[env_ids] = curr_amp_obs return def _init_amp_obs_ref(self, env_ids, motion_ids, motion_times): dt = self.dt motion_ids = np.tile(np.expand_dims(motion_ids, axis=-1), [1, self._num_amp_obs_steps - 1]) motion_times = np.expand_dims(motion_times, axis=-1) time_steps = -dt * (np.arange(0, self._num_amp_obs_steps - 1) + 1) motion_times = motion_times + time_steps motion_ids = motion_ids.flatten() motion_times = motion_times.flatten() root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos \ = self._motion_lib.get_motion_state(motion_ids, motion_times) root_states = torch.cat([root_pos, root_rot, root_vel, root_ang_vel], dim=-1) amp_obs_demo = build_amp_observations(root_states, dof_pos, dof_vel, key_pos, self._local_root_obs) self._hist_amp_obs_buf[env_ids] = amp_obs_demo.view(self._hist_amp_obs_buf[env_ids].shape) return def _set_env_state(self, env_ids, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel): self._root_states[env_ids, 0:3] = root_pos self._root_states[env_ids, 3:7] = root_rot self._root_states[env_ids, 7:10] = root_vel self._root_states[env_ids, 10:13] = root_ang_vel self._dof_pos[env_ids] = dof_pos self._dof_vel[env_ids] = dof_vel env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) return def _update_hist_amp_obs(self, env_ids=None): if (env_ids is None): for i in reversed(range(self._amp_obs_buf.shape[1] - 1)): self._amp_obs_buf[:, i + 1] = self._amp_obs_buf[:, i] else: for i in reversed(range(self._amp_obs_buf.shape[1] - 1)): self._amp_obs_buf[env_ids, i + 1] = self._amp_obs_buf[env_ids, i] return def _compute_amp_observations(self, env_ids=None): key_body_pos = self._rigid_body_pos[:, self._key_body_ids, :] if (env_ids is None): self._curr_amp_obs_buf[:] = build_amp_observations(self._root_states, self._dof_pos, self._dof_vel, key_body_pos, self._local_root_obs) else: 
self._curr_amp_obs_buf[env_ids] = build_amp_observations(self._root_states[env_ids], self._dof_pos[env_ids], self._dof_vel[env_ids], key_body_pos[env_ids], self._local_root_obs) return ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def build_amp_observations(root_states, dof_pos, dof_vel, key_body_pos, local_root_obs): # type: (Tensor, Tensor, Tensor, Tensor, bool) -> Tensor root_pos = root_states[:, 0:3] root_rot = root_states[:, 3:7] root_vel = root_states[:, 7:10] root_ang_vel = root_states[:, 10:13] root_h = root_pos[:, 2:3] heading_rot = calc_heading_quat_inv(root_rot) if (local_root_obs): root_rot_obs = quat_mul(heading_rot, root_rot) else: root_rot_obs = root_rot root_rot_obs = quat_to_tan_norm(root_rot_obs) local_root_vel = my_quat_rotate(heading_rot, root_vel) local_root_ang_vel = my_quat_rotate(heading_rot, root_ang_vel) root_pos_expand = root_pos.unsqueeze(-2) local_key_body_pos = key_body_pos - root_pos_expand heading_rot_expand = heading_rot.unsqueeze(-2) heading_rot_expand = heading_rot_expand.repeat((1, local_key_body_pos.shape[1], 1)) flat_end_pos = local_key_body_pos.view(local_key_body_pos.shape[0] * local_key_body_pos.shape[1], local_key_body_pos.shape[2]) flat_heading_rot = heading_rot_expand.view(heading_rot_expand.shape[0] * heading_rot_expand.shape[1], heading_rot_expand.shape[2]) local_end_pos = my_quat_rotate(flat_heading_rot, flat_end_pos) flat_local_key_pos = local_end_pos.view(local_key_body_pos.shape[0], local_key_body_pos.shape[1] * local_key_body_pos.shape[2]) dof_obs = dof_to_obs(dof_pos) obs = torch.cat((root_h, root_rot_obs, local_root_vel, local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos), dim=-1) return obs
14,984
Python
44
217
0.602309
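NUM_AMP_OBS_PER_STEP = 13 + 52 + 28 + 12 = 105 in the file above; the layout below is inferred from the tensor shapes in build_amp_observations (the 6-dim tangent-normal rotation encoding and the four key bodies are deductions from those shapes, so treat the labels as annotation rather than API).

AMP_OBS_LAYOUT = [
    ("root_h", 1),              # root height, root_pos[:, 2:3]
    ("root_rot_tan_norm", 6),   # quat_to_tan_norm of the (optionally heading-local) root rotation
    ("local_root_vel", 3),      # root linear velocity in the heading frame
    ("local_root_ang_vel", 3),  # root angular velocity in the heading frame
    ("dof_obs", 52),            # dof_to_obs(dof_pos)
    ("dof_vel", 28),            # one value per dof
    ("key_body_pos", 12),       # 4 key bodies x 3 coords, root-relative, heading-local
]
assert sum(n for _, n in AMP_OBS_LAYOUT) == 13 + 52 + 28 + 12 == 105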
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/humanoid.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp, compute_heading_and_up, compute_rot, normalize_angle from isaacgymenvs.tasks.base.vec_task import VecTask class Humanoid(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.angular_velocity_scale = self.cfg["env"].get("angularVelocityScale", 0.1) self.contact_force_scale = self.cfg["env"]["contactForceScale"] self.power_scale = self.cfg["env"]["powerScale"] self.heading_weight = self.cfg["env"]["headingWeight"] self.up_weight = self.cfg["env"]["upWeight"] self.actions_cost_scale = self.cfg["env"]["actionsCost"] self.energy_cost_scale = self.cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"] self.death_cost = self.cfg["env"]["deathCost"] self.termination_height = self.cfg["env"]["terminationHeight"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.cfg["env"]["numObservations"] = 108 self.cfg["env"]["numActions"] = 21 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(50.0, 25.0, 2.4) cam_target = gymapi.Vec3(45.0, 25.0, 0.0) 
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) sensors_per_env = 2 self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dof) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.root_states = gymtorch.wrap_tensor(actor_root_state) self.initial_root_states = self.root_states.clone() self.initial_root_states[:, 7:13] = 0 # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float) zero_tensor = torch.tensor([0.0], device=self.device) self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower, torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos)) self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float) # initialize some data used later on self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1)) self.basis_vec0 = self.heading_vec.clone() self.basis_vec1 = self.up_vec.clone() self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.dt = self.cfg["sim"]["dt"] self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs) self.prev_potentials = self.potentials.clone() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the first sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction plane_params.restitution = self.plane_restitution self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "mjcf/nv_humanoid.xml" if "asset" in self.cfg["env"]: asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = 
gymapi.AssetOptions() asset_options.angular_damping = 0.01 asset_options.max_angular_velocity = 100.0 # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE humanoid_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) # Note - for this asset we are loading the actuator info from the MJCF actuator_props = self.gym.get_asset_actuator_properties(humanoid_asset) motor_efforts = [prop.motor_effort for prop in actuator_props] # create force sensors at the feet right_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "right_foot") left_foot_idx = self.gym.find_asset_rigid_body_index(humanoid_asset, "left_foot") sensor_pose = gymapi.Transform() self.gym.create_asset_force_sensor(humanoid_asset, right_foot_idx, sensor_pose) self.gym.create_asset_force_sensor(humanoid_asset, left_foot_idx, sensor_pose) self.max_motor_effort = max(motor_efforts) self.motor_efforts = to_torch(motor_efforts, device=self.device) self.torso_index = 0 self.num_bodies = self.gym.get_asset_rigid_body_count(humanoid_asset) self.num_dof = self.gym.get_asset_dof_count(humanoid_asset) self.num_joints = self.gym.get_asset_joint_count(humanoid_asset) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*get_axis_params(1.34, self.up_axis_idx)) start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device) self.humanoid_handles = [] self.envs = [] self.dof_limits_lower = [] self.dof_limits_upper = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) handle = self.gym.create_actor(env_ptr, humanoid_asset, start_pose, "humanoid", i, 0, 0) self.gym.enable_actor_dof_force_sensors(env_ptr, handle) for j in range(self.num_bodies): self.gym.set_rigid_body_color( env_ptr, handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) self.envs.append(env_ptr) self.humanoid_handles.append(handle) dof_prop = self.gym.get_actor_dof_properties(env_ptr, handle) for j in range(self.num_dof): if dof_prop['lower'][j] > dof_prop['upper'][j]: self.dof_limits_lower.append(dof_prop['upper'][j]) self.dof_limits_upper.append(dof_prop['lower'][j]) else: self.dof_limits_lower.append(dof_prop['lower'][j]) self.dof_limits_upper.append(dof_prop['upper'][j]) self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device) self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device) self.extremities = to_torch([5, 8], device=self.device, dtype=torch.long) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf = compute_humanoid_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.joints_at_limit_cost_scale, self.max_motor_effort, self.motor_efforts, self.termination_height, self.death_cost, self.max_episode_length ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_humanoid_observations( self.obs_buf, self.root_states, self.targets, self.potentials, self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_force_tensor, 
self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.angular_velocity_scale, self.basis_vec0, self.basis_vec1) def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper) self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3] to_target[:, self.up_axis_idx] = 0 self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt self.potentials[env_ids] = self.prev_potentials[env_ids].clone() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.to(self.device).clone() forces = self.actions * self.motor_efforts.unsqueeze(0) * self.power_scale force_tensor = gymtorch.unwrap_tensor(forces) self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) points = [] colors = [] for i in range(self.num_envs): origin = self.gym.get_env_origin(self.envs[i]) pose = self.root_states[:, 0:3][i].cpu().numpy() glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()]) colors.append([0.97, 0.1, 0.06]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()]) colors.append([0.05, 0.99, 0.04]) self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_humanoid_reward( obs_buf, reset_buf, progress_buf, actions, up_weight, heading_weight, potentials, prev_potentials, actions_cost_scale, energy_cost_scale, joints_at_limit_cost_scale, max_motor_effort, motor_efforts, termination_height, death_cost, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, Tensor, float, float, float) -> Tuple[Tensor, Tensor] # reward from the direction headed heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * 
heading_weight heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8) # reward for being upright up_reward = torch.zeros_like(heading_reward) up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward) actions_cost = torch.sum(actions ** 2, dim=-1) # energy cost reward motor_effort_ratio = motor_efforts / max_motor_effort scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02 dof_at_limit_cost = torch.sum((torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1) electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 33:54]) * motor_effort_ratio.unsqueeze(0), dim=-1) # reward for duration of being alive alive_reward = torch.ones_like(potentials) * 2.0 progress_reward = potentials - prev_potentials total_reward = progress_reward + alive_reward + up_reward + heading_reward - \ actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost # adjust reward for fallen agents total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward) # reset agents reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return total_reward, reset @torch.jit.script def compute_humanoid_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel, dof_force, dof_limits_lower, dof_limits_upper, dof_vel_scale, sensor_force_torques, actions, dt, contact_force_scale, angular_velocity_scale, basis_vec0, basis_vec1): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, float, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] torso_position = root_states[:, 0:3] torso_rotation = root_states[:, 3:7] velocity = root_states[:, 7:10] ang_velocity = root_states[:, 10:13] to_target = targets - torso_position to_target[:, 2] = 0 prev_potentials_new = potentials.clone() potentials = -torch.norm(to_target, p=2, dim=-1) / dt torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up( torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2) vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot( torso_quat, velocity, ang_velocity, targets, torso_position) roll = normalize_angle(roll).unsqueeze(-1) yaw = normalize_angle(yaw).unsqueeze(-1) angle_to_target = normalize_angle(angle_to_target).unsqueeze(-1) dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper) # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs (21), num_dofs (21), 6, num_acts (21) obs = torch.cat((torso_position[:, 2].view(-1, 1), vel_loc, angvel_loc * angular_velocity_scale, yaw, roll, angle_to_target, up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled, dof_vel * dof_vel_scale, dof_force * contact_force_scale, sensor_force_torques.view(-1, 12) * contact_force_scale, actions), dim=-1) return obs, potentials, prev_potentials_new, up_vec, heading_vec
20,168
Python
47.717391
217
0.631743
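One correction worth noting in humanoid.py above: the inline "obs_buf shapes" comment in compute_humanoid_observations undercounts the buffer (it omits the 21 dof forces and lists the two 6-axis foot sensors as 6 rather than 12 values). The actual 108-dim layout, consistent with numObservations = 108 set in __init__, is sketched below.

HUMANOID_OBS_LAYOUT = [
    ("torso_height", 1), ("vel_loc", 3), ("angvel_loc", 3),
    ("yaw", 1), ("roll", 1), ("angle_to_target", 1),
    ("up_proj", 1), ("heading_proj", 1),
    ("dof_pos_scaled", 21), ("dof_vel", 21), ("dof_force", 21),
    ("foot_sensor_force_torques", 12),  # 2 force sensors x 6 (force + torque)
    ("actions", 21),
]
assert sum(n for _, n in HUMANOID_OBS_LAYOUT) == 108  # matches numObservations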
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ant.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgym.gymtorch import * from isaacgymenvs.utils.torch_jit_utils import * from isaacgymenvs.tasks.base.vec_task import VecTask class Ant(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self.contact_force_scale = self.cfg["env"]["contactForceScale"] self.power_scale = self.cfg["env"]["powerScale"] self.heading_weight = self.cfg["env"]["headingWeight"] self.up_weight = self.cfg["env"]["upWeight"] self.actions_cost_scale = self.cfg["env"]["actionsCost"] self.energy_cost_scale = self.cfg["env"]["energyCost"] self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"] self.death_cost = self.cfg["env"]["deathCost"] self.termination_height = self.cfg["env"]["terminationHeight"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] self.cfg["env"]["numObservations"] = 60 self.cfg["env"]["numActions"] = 8 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) if self.viewer != None: cam_pos = gymapi.Vec3(50.0, 25.0, 2.4) cam_target = gymapi.Vec3(45.0, 25.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) 
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) sensors_per_env = 4 self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, sensors_per_env * 6) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.root_states = gymtorch.wrap_tensor(actor_root_state) self.initial_root_states = self.root_states.clone() self.initial_root_states[:, 7:13] = 0  # set lin_vel and ang_vel to 0 # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.initial_dof_pos = torch.zeros_like(self.dof_pos, device=self.device, dtype=torch.float) zero_tensor = torch.tensor([0.0], device=self.device) self.initial_dof_pos = torch.where(self.dof_limits_lower > zero_tensor, self.dof_limits_lower, torch.where(self.dof_limits_upper < zero_tensor, self.dof_limits_upper, self.initial_dof_pos)) self.initial_dof_vel = torch.zeros_like(self.dof_vel, device=self.device, dtype=torch.float) # initialize some data used later on self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1)) self.basis_vec0 = self.heading_vec.clone() self.basis_vec1 = self.up_vec.clone() self.targets = to_torch([1000, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.target_dirs = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1)) self.dt = self.cfg["sim"]["dt"] self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs) self.prev_potentials = self.potentials.clone() def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() print(f'num envs {self.num_envs} env spacing {self.cfg["env"]["envSpacing"]}') self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the first sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "mjcf/nv_ant.xml" if "asset" in self.cfg["env"]: asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() # Note - DOF mode is set in the MJCF file and loaded by Isaac Gym asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.angular_damping = 0.0 ant_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(ant_asset) self.num_bodies = 
self.gym.get_asset_rigid_body_count(ant_asset) # Note - for this asset we are loading the actuator info from the MJCF actuator_props = self.gym.get_asset_actuator_properties(ant_asset) motor_efforts = [prop.motor_effort for prop in actuator_props] self.joint_gears = to_torch(motor_efforts, device=self.device) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*get_axis_params(0.44, self.up_axis_idx)) self.start_rotation = torch.tensor([start_pose.r.x, start_pose.r.y, start_pose.r.z, start_pose.r.w], device=self.device) self.torso_index = 0 self.num_bodies = self.gym.get_asset_rigid_body_count(ant_asset) body_names = [self.gym.get_asset_rigid_body_name(ant_asset, i) for i in range(self.num_bodies)] extremity_names = [s for s in body_names if "foot" in s] self.extremities_index = torch.zeros(len(extremity_names), dtype=torch.long, device=self.device) # create force sensors attached to the "feet" extremity_indices = [self.gym.find_asset_rigid_body_index(ant_asset, name) for name in extremity_names] sensor_pose = gymapi.Transform() for body_idx in extremity_indices: self.gym.create_asset_force_sensor(ant_asset, body_idx, sensor_pose) self.ant_handles = [] self.envs = [] self.dof_limits_lower = [] self.dof_limits_upper = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) ant_handle = self.gym.create_actor(env_ptr, ant_asset, start_pose, "ant", i, 1, 0) for j in range(self.num_bodies): self.gym.set_rigid_body_color( env_ptr, ant_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) self.envs.append(env_ptr) self.ant_handles.append(ant_handle) dof_prop = self.gym.get_actor_dof_properties(env_ptr, ant_handle) for j in range(self.num_dof): if dof_prop['lower'][j] > dof_prop['upper'][j]: self.dof_limits_lower.append(dof_prop['upper'][j]) self.dof_limits_upper.append(dof_prop['lower'][j]) else: self.dof_limits_lower.append(dof_prop['lower'][j]) self.dof_limits_upper.append(dof_prop['upper'][j]) self.dof_limits_lower = to_torch(self.dof_limits_lower, device=self.device) self.dof_limits_upper = to_torch(self.dof_limits_upper, device=self.device) for i in range(len(extremity_names)): self.extremities_index[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.ant_handles[0], extremity_names[i]) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_ant_reward( self.obs_buf, self.reset_buf, self.progress_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials, self.actions_cost_scale, self.energy_cost_scale, self.joints_at_limit_cost_scale, self.termination_height, self.death_cost, self.max_episode_length ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_force_sensor_tensor(self.sim) self.obs_buf[:], self.potentials[:], self.prev_potentials[:], self.up_vec[:], self.heading_vec[:] = compute_ant_observations( self.obs_buf, self.root_states, self.targets, self.potentials, self.inv_start_rot, self.dof_pos, self.dof_vel, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale, self.vec_sensor_tensor, self.actions, self.dt, self.contact_force_scale, self.basis_vec0, self.basis_vec1, self.up_axis_idx) # Required for PBT training def compute_true_objective(self): velocity = self.root_states[:, 7:10] # We optimize for the maximum velocity along the x-axis (forward) self.extras['true_objective'] = velocity[:, 0].squeeze() def reset_idx(self, env_ids): # 
Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions = torch_rand_float(-0.2, 0.2, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = tensor_clamp(self.initial_dof_pos[env_ids] + positions, self.dof_limits_lower, self.dof_limits_upper) self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) to_target = self.targets[env_ids] - self.initial_root_states[env_ids, 0:3] to_target[:, 2] = 0.0 self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt self.potentials[env_ids] = self.prev_potentials[env_ids].clone() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) forces = self.actions * self.joint_gears * self.power_scale force_tensor = gymtorch.unwrap_tensor(forces) self.gym.set_dof_actuation_force_tensor(self.sim, force_tensor) def post_physics_step(self): self.progress_buf += 1 self.randomize_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) self.compute_true_objective() # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_actor_root_state_tensor(self.sim) points = [] colors = [] for i in range(self.num_envs): origin = self.gym.get_env_origin(self.envs[i]) pose = self.root_states[:, 0:3][i].cpu().numpy() glob_pos = gymapi.Vec3(origin.x + pose[0], origin.y + pose[1], origin.z + pose[2]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.heading_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.heading_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.heading_vec[i, 2].cpu().numpy()]) colors.append([0.97, 0.1, 0.06]) points.append([glob_pos.x, glob_pos.y, glob_pos.z, glob_pos.x + 4 * self.up_vec[i, 0].cpu().numpy(), glob_pos.y + 4 * self.up_vec[i, 1].cpu().numpy(), glob_pos.z + 4 * self.up_vec[i, 2].cpu().numpy()]) colors.append([0.05, 0.99, 0.04]) self.gym.add_lines(self.viewer, None, self.num_envs * 2, points, colors) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_ant_reward( obs_buf, reset_buf, progress_buf, actions, up_weight, heading_weight, potentials, prev_potentials, actions_cost_scale, energy_cost_scale, joints_at_limit_cost_scale, termination_height, death_cost, max_episode_length ): # type: (Tensor, Tensor, Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, float, float) -> Tuple[Tensor, Tensor] # reward from direction headed heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8) # aligning up axis of ant and environment up_reward = torch.zeros_like(heading_reward) up_reward = 
torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward) # energy penalty for movement actions_cost = torch.sum(actions ** 2, dim=-1) electricity_cost = torch.sum(torch.abs(actions * obs_buf[:, 20:28]), dim=-1) dof_at_limit_cost = torch.sum(obs_buf[:, 12:20] > 0.99, dim=-1) # reward for duration of staying alive alive_reward = torch.ones_like(potentials) * 0.5 progress_reward = potentials - prev_potentials total_reward = progress_reward + alive_reward + up_reward + heading_reward - \ actions_cost_scale * actions_cost - energy_cost_scale * electricity_cost - dof_at_limit_cost * joints_at_limit_cost_scale # adjust reward for fallen agents total_reward = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward) # reset agents reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf) reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset) return total_reward, reset @torch.jit.script def compute_ant_observations(obs_buf, root_states, targets, potentials, inv_start_rot, dof_pos, dof_vel, dof_limits_lower, dof_limits_upper, dof_vel_scale, sensor_force_torques, actions, dt, contact_force_scale, basis_vec0, basis_vec1, up_axis_idx): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float, float, Tensor, Tensor, int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] torso_position = root_states[:, 0:3] torso_rotation = root_states[:, 3:7] velocity = root_states[:, 7:10] ang_velocity = root_states[:, 10:13] to_target = targets - torso_position to_target[:, 2] = 0.0 prev_potentials_new = potentials.clone() potentials = -torch.norm(to_target, p=2, dim=-1) / dt torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up( torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2) vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot( torso_quat, velocity, ang_velocity, targets, torso_position) dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper) # obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs(8), num_dofs(8), 24, num_dofs(8) obs = torch.cat((torso_position[:, up_axis_idx].view(-1, 1), vel_loc, angvel_loc, yaw.unsqueeze(-1), roll.unsqueeze(-1), angle_to_target.unsqueeze(-1), up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1), dof_pos_scaled, dof_vel * dof_vel_scale, sensor_force_torques.view(-1, 24) * contact_force_scale, actions), dim=-1) return obs, potentials, prev_potentials_new, up_vec, heading_vec
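

# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the task above): the progress
# term in compute_ant_reward is potential-based shaping. The potential is
# phi(s) = -||target - torso_xy|| / dt, and the per-step reward
# phi(s') - phi(s) telescopes over an episode to the net reduction in
# distance to the target, i.e. it rewards speed toward the target.
# All names below are hypothetical and chosen for the demo only.
def _progress_shaping_demo():
    dt = 1.0 / 60.0
    target = torch.tensor([1000.0, 0.0])
    pos_before = torch.tensor([0.0, 0.0])
    pos_after = torch.tensor([0.1, 0.0])  # moved 0.1 m toward the target

    def phi(p):
        return -torch.norm(target - p) / dt

    # equals 0.1 / dt = 6.0: the component of velocity toward the target
    return phi(pos_after) - phi(pos_before)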
19,545
Python
46.906863
217
0.626349
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/cartpole.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymutil, gymtorch, gymapi from .base.vec_task import VecTask class Cartpole(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.reset_dist = self.cfg["env"]["resetDist"] self.max_push_effort = self.cfg["env"]["maxEffort"] self.max_episode_length = 500 self.cfg["env"]["numObservations"] = 4 self.cfg["env"]["numActions"] = 1 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] def create_sim(self): # set the up axis to be z-up given that assets are y-up by default self.up_axis = self.cfg["sim"]["up_axis"] self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() # set the normal force to be z dimension plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): # define plane on which environments are initialized lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing) upper = gymapi.Vec3(0.5 * spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") asset_file = "urdf/cartpole.urdf" if "asset" in 
self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file) asset_path = os.path.join(asset_root, asset_file) asset_root = os.path.dirname(asset_path) asset_file = os.path.basename(asset_path) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True cartpole_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(cartpole_asset) pose = gymapi.Transform() if self.up_axis == 'z': pose.p.z = 2.0 # asset is rotated z-up by default, no additional rotations needed pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) else: pose.p.y = 2.0 pose.r = gymapi.Quat(-np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) self.cartpole_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) cartpole_handle = self.gym.create_actor(env_ptr, cartpole_asset, pose, "cartpole", i, 1, 0) dof_props = self.gym.get_actor_dof_properties(env_ptr, cartpole_handle) dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE dof_props['stiffness'][:] = 0.0 dof_props['damping'][:] = 0.0 self.gym.set_actor_dof_properties(env_ptr, cartpole_handle, dof_props) self.envs.append(env_ptr) self.cartpole_handles.append(cartpole_handle) def compute_reward(self): # retrieve environment observations from buffer pole_angle = self.obs_buf[:, 2] pole_vel = self.obs_buf[:, 3] cart_vel = self.obs_buf[:, 1] cart_pos = self.obs_buf[:, 0] self.rew_buf[:], self.reset_buf[:] = compute_cartpole_reward( pole_angle, pole_vel, cart_vel, cart_pos, self.reset_dist, self.reset_buf, self.progress_buf, self.max_episode_length ) def compute_observations(self, env_ids=None): if env_ids is None: env_ids = np.arange(self.num_envs) self.gym.refresh_dof_state_tensor(self.sim) self.obs_buf[env_ids, 0] = self.dof_pos[env_ids, 0].squeeze() self.obs_buf[env_ids, 1] = self.dof_vel[env_ids, 0].squeeze() self.obs_buf[env_ids, 2] = self.dof_pos[env_ids, 1].squeeze() self.obs_buf[env_ids, 3] = self.dof_vel[env_ids, 1].squeeze() return self.obs_buf def reset_idx(self, env_ids): positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5) self.dof_pos[env_ids, :] = positions[:] self.dof_vel[env_ids, :] = velocities[:] env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, actions): actions_tensor = torch.zeros(self.num_envs * self.num_dof, device=self.device, dtype=torch.float) actions_tensor[::self.num_dof] = actions.to(self.device).squeeze() * self.max_push_effort forces = gymtorch.unwrap_tensor(actions_tensor) self.gym.set_dof_actuation_force_tensor(self.sim, forces) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward() ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def 
compute_cartpole_reward(pole_angle, pole_vel, cart_vel, cart_pos,
                            reset_dist, reset_buf, progress_buf, max_episode_length):
    # type: (Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, float) -> Tuple[Tensor, Tensor]

    # reward combines the pole's deviation from upright with small penalties
    # on cart velocity and pole angular velocity
    reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)

    # adjust reward for agents that need to be reset
    reward = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reward) * -2.0, reward)
    reward = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)

    reset = torch.where(torch.abs(cart_pos) > reset_dist, torch.ones_like(reset_buf), reset_buf)
    reset = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reset_buf), reset)
    reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)

    return reward, reset
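

# Editor's sketch (illustrative, hypothetical values): a quick sanity check of
# compute_cartpole_reward. An upright, motionless pole at the center scores
# 1.0; tipping past pi/2 clamps the reward to -2.0 and flags a reset.
def _cartpole_reward_demo():
    pole_angle = torch.tensor([0.0, 1.7])  # upright vs. fallen (> pi/2)
    zeros = torch.zeros(2)
    buf = torch.zeros(2, dtype=torch.long)
    reward, reset = compute_cartpole_reward(
        pole_angle, zeros, zeros, zeros, 3.0, buf, buf, 500.0)
    return reward, reset  # reward == [1.0, -2.0], reset == [0, 1]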
9,134
Python
45.370558
217
0.629297
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/franka_cube_stack.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy as np
import os

import torch

from isaacgym import gymtorch
from isaacgym import gymapi

# quat_apply is used by the debug visualization in post_physics_step
from isaacgymenvs.utils.torch_jit_utils import quat_mul, quat_apply, to_torch, tensor_clamp

from isaacgymenvs.tasks.base.vec_task import VecTask


@torch.jit.script
def axisangle2quat(vec, eps=1e-6):
    """
    Converts scaled axis-angle to quat.
Args: vec (tensor): (..., 3) tensor where final dim is (ax,ay,az) axis-angle exponential coordinates eps (float): Stability value below which small values will be mapped to 0 Returns: tensor: (..., 4) tensor where final dim is (x,y,z,w) vec4 float quaternion """ # type: (Tensor, float) -> Tensor # store input shape and reshape input_shape = vec.shape[:-1] vec = vec.reshape(-1, 3) # Grab angle angle = torch.norm(vec, dim=-1, keepdim=True) # Create return array quat = torch.zeros(torch.prod(torch.tensor(input_shape)), 4, device=vec.device) quat[:, 3] = 1.0 # Grab indexes where angle is not zero an convert the input to its quaternion form idx = angle.reshape(-1) > eps quat[idx, :] = torch.cat([ vec[idx, :] * torch.sin(angle[idx, :] / 2.0) / angle[idx, :], torch.cos(angle[idx, :] / 2.0) ], dim=-1) # Reshape and return output quat = quat.reshape(list(input_shape) + [4, ]) return quat class FrankaCubeStack(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["episodeLength"] self.action_scale = self.cfg["env"]["actionScale"] self.start_position_noise = self.cfg["env"]["startPositionNoise"] self.start_rotation_noise = self.cfg["env"]["startRotationNoise"] self.franka_position_noise = self.cfg["env"]["frankaPositionNoise"] self.franka_rotation_noise = self.cfg["env"]["frankaRotationNoise"] self.franka_dof_noise = self.cfg["env"]["frankaDofNoise"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] # Create dicts to pass to reward function self.reward_settings = { "r_dist_scale": self.cfg["env"]["distRewardScale"], "r_lift_scale": self.cfg["env"]["liftRewardScale"], "r_align_scale": self.cfg["env"]["alignRewardScale"], "r_stack_scale": self.cfg["env"]["stackRewardScale"], } # Controller type self.control_type = self.cfg["env"]["controlType"] assert self.control_type in {"osc", "joint_tor"},\ "Invalid control type specified. 
Must be one of: {osc, joint_tor}" # dimensions # obs include: cubeA_pose (7) + cubeB_pos (3) + eef_pose (7) + q_gripper (2) self.cfg["env"]["numObservations"] = 19 if self.control_type == "osc" else 26 # actions include: delta EEF if OSC (6) or joint torques (7) + bool gripper (1) self.cfg["env"]["numActions"] = 7 if self.control_type == "osc" else 8 # Values to be filled in at runtime self.states = {} # will be dict filled with relevant states to use for reward calculation self.handles = {} # will be dict mapping names to relevant sim handles self.num_dofs = None # Total number of DOFs per env self.actions = None # Current actions to be deployed self._init_cubeA_state = None # Initial state of cubeA for the current env self._init_cubeB_state = None # Initial state of cubeB for the current env self._cubeA_state = None # Current state of cubeA for the current env self._cubeB_state = None # Current state of cubeB for the current env self._cubeA_id = None # Actor ID corresponding to cubeA for a given env self._cubeB_id = None # Actor ID corresponding to cubeB for a given env # Tensor placeholders self._root_state = None # State of root body (n_envs, 13) self._dof_state = None # State of all joints (n_envs, n_dof) self._q = None # Joint positions (n_envs, n_dof) self._qd = None # Joint velocities (n_envs, n_dof) self._rigid_body_state = None # State of all rigid bodies (n_envs, n_bodies, 13) self._contact_forces = None # Contact forces in sim self._eef_state = None # end effector state (at grasping point) self._eef_lf_state = None # end effector state (at left fingertip) self._eef_rf_state = None # end effector state (at left fingertip) self._j_eef = None # Jacobian for end effector self._mm = None # Mass matrix self._arm_control = None # Tensor buffer for controlling arm self._gripper_control = None # Tensor buffer for controlling gripper self._pos_control = None # Position actions self._effort_control = None # Torque actions self._franka_effort_limits = None # Actuator effort limits for franka self._global_indices = None # Unique indices corresponding to all envs in flattened array self.debug_viz = self.cfg["env"]["enableDebugVis"] self.up_axis = "z" self.up_axis_idx = 2 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # Franka defaults self.franka_default_dof_pos = to_torch( [0, 0.1963, 0, -2.6180, 0, 2.9416, 0.7854, 0.035, 0.035], device=self.device ) # OSC Gains self.kp = to_torch([150.] * 6, device=self.device) self.kd = 2 * torch.sqrt(self.kp) self.kp_null = to_torch([10.] 
* 7, device=self.device) self.kd_null = 2 * torch.sqrt(self.kp_null) #self.cmd_limit = None # filled in later # Set control limits self.cmd_limit = to_torch([0.1, 0.1, 0.1, 0.5, 0.5, 0.5], device=self.device).unsqueeze(0) if \ self.control_type == "osc" else self._franka_effort_limits[:7].unsqueeze(0) # Reset all environments self.reset_idx(torch.arange(self.num_envs, device=self.device)) # Refresh tensors self._refresh() def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim( self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets") franka_asset_file = "urdf/franka_description/robots/franka_panda_gripper.urdf" if "asset" in self.cfg["env"]: asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root)) franka_asset_file = self.cfg["env"]["asset"].get("assetFileNameFranka", franka_asset_file) # load franka asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = True asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT asset_options.use_mesh_materials = True franka_asset = self.gym.load_asset(self.sim, asset_root, franka_asset_file, asset_options) franka_dof_stiffness = to_torch([0, 0, 0, 0, 0, 0, 0, 5000., 5000.], dtype=torch.float, device=self.device) franka_dof_damping = to_torch([0, 0, 0, 0, 0, 0, 0, 1.0e2, 1.0e2], dtype=torch.float, device=self.device) # Create table asset table_pos = [0.0, 0.0, 1.0] table_thickness = 0.05 table_opts = gymapi.AssetOptions() table_opts.fix_base_link = True table_asset = self.gym.create_box(self.sim, *[1.2, 1.2, table_thickness], table_opts) # Create table stand asset table_stand_height = 0.1 table_stand_pos = [-0.5, 0.0, 1.0 + table_thickness / 2 + table_stand_height / 2] table_stand_opts = gymapi.AssetOptions() table_stand_opts.fix_base_link = True table_stand_asset = self.gym.create_box(self.sim, *[0.2, 0.2, table_stand_height], table_opts) self.cubeA_size = 0.050 self.cubeB_size = 0.070 # Create cubeA asset cubeA_opts = gymapi.AssetOptions() cubeA_asset = self.gym.create_box(self.sim, *([self.cubeA_size] * 3), cubeA_opts) cubeA_color = gymapi.Vec3(0.6, 0.1, 0.0) # Create cubeB asset cubeB_opts = gymapi.AssetOptions() cubeB_asset = self.gym.create_box(self.sim, *([self.cubeB_size] * 3), cubeB_opts) cubeB_color = gymapi.Vec3(0.0, 0.4, 0.1) self.num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) self.num_franka_dofs = self.gym.get_asset_dof_count(franka_asset) print("num franka bodies: ", self.num_franka_bodies) print("num franka dofs: ", self.num_franka_dofs) # set franka dof properties franka_dof_props = self.gym.get_asset_dof_properties(franka_asset) self.franka_dof_lower_limits = [] self.franka_dof_upper_limits = [] 
self._franka_effort_limits = [] for i in range(self.num_franka_dofs): franka_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS if i > 6 else gymapi.DOF_MODE_EFFORT if self.physics_engine == gymapi.SIM_PHYSX: franka_dof_props['stiffness'][i] = franka_dof_stiffness[i] franka_dof_props['damping'][i] = franka_dof_damping[i] else: franka_dof_props['stiffness'][i] = 7000.0 franka_dof_props['damping'][i] = 50.0 self.franka_dof_lower_limits.append(franka_dof_props['lower'][i]) self.franka_dof_upper_limits.append(franka_dof_props['upper'][i]) self._franka_effort_limits.append(franka_dof_props['effort'][i]) self.franka_dof_lower_limits = to_torch(self.franka_dof_lower_limits, device=self.device) self.franka_dof_upper_limits = to_torch(self.franka_dof_upper_limits, device=self.device) self._franka_effort_limits = to_torch(self._franka_effort_limits, device=self.device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[[7, 8]] = 0.1 franka_dof_props['effort'][7] = 200 franka_dof_props['effort'][8] = 200 # Define start pose for franka franka_start_pose = gymapi.Transform() franka_start_pose.p = gymapi.Vec3(-0.45, 0.0, 1.0 + table_thickness / 2 + table_stand_height) franka_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # Define start pose for table table_start_pose = gymapi.Transform() table_start_pose.p = gymapi.Vec3(*table_pos) table_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self._table_surface_pos = np.array(table_pos) + np.array([0, 0, table_thickness / 2]) self.reward_settings["table_height"] = self._table_surface_pos[2] # Define start pose for table stand table_stand_start_pose = gymapi.Transform() table_stand_start_pose.p = gymapi.Vec3(*table_stand_pos) table_stand_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # Define start pose for cubes (doesn't really matter since they're get overridden during reset() anyways) cubeA_start_pose = gymapi.Transform() cubeA_start_pose.p = gymapi.Vec3(-1.0, 0.0, 0.0) cubeA_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) cubeB_start_pose = gymapi.Transform() cubeB_start_pose.p = gymapi.Vec3(1.0, 0.0, 0.0) cubeB_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) # compute aggregate size num_franka_bodies = self.gym.get_asset_rigid_body_count(franka_asset) num_franka_shapes = self.gym.get_asset_rigid_shape_count(franka_asset) max_agg_bodies = num_franka_bodies + 4 # 1 for table, table stand, cubeA, cubeB max_agg_shapes = num_franka_shapes + 4 # 1 for table, table stand, cubeA, cubeB self.frankas = [] self.envs = [] # Create environments for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) # Create actors and define aggregate group appropriately depending on setting # NOTE: franka should ALWAYS be loaded first in sim! if self.aggregate_mode >= 3: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create franka # Potentially randomize start pose if self.franka_position_noise > 0: rand_xy = self.franka_position_noise * (-1. + np.random.rand(2) * 2.0) franka_start_pose.p = gymapi.Vec3(-0.45 + rand_xy[0], 0.0 + rand_xy[1], 1.0 + table_thickness / 2 + table_stand_height) if self.franka_rotation_noise > 0: rand_rot = torch.zeros(1, 3) rand_rot[:, -1] = self.franka_rotation_noise * (-1. 
+ np.random.rand() * 2.0) new_quat = axisangle2quat(rand_rot).squeeze().numpy().tolist() franka_start_pose.r = gymapi.Quat(*new_quat) franka_actor = self.gym.create_actor(env_ptr, franka_asset, franka_start_pose, "franka", i, 0, 0) self.gym.set_actor_dof_properties(env_ptr, franka_actor, franka_dof_props) if self.aggregate_mode == 2: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create table table_actor = self.gym.create_actor(env_ptr, table_asset, table_start_pose, "table", i, 1, 0) table_stand_actor = self.gym.create_actor(env_ptr, table_stand_asset, table_stand_start_pose, "table_stand", i, 1, 0) if self.aggregate_mode == 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # Create cubes self._cubeA_id = self.gym.create_actor(env_ptr, cubeA_asset, cubeA_start_pose, "cubeA", i, 2, 0) self._cubeB_id = self.gym.create_actor(env_ptr, cubeB_asset, cubeB_start_pose, "cubeB", i, 4, 0) # Set colors self.gym.set_rigid_body_color(env_ptr, self._cubeA_id, 0, gymapi.MESH_VISUAL, cubeA_color) self.gym.set_rigid_body_color(env_ptr, self._cubeB_id, 0, gymapi.MESH_VISUAL, cubeB_color) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) # Store the created env pointers self.envs.append(env_ptr) self.frankas.append(franka_actor) # Setup init state buffer self._init_cubeA_state = torch.zeros(self.num_envs, 13, device=self.device) self._init_cubeB_state = torch.zeros(self.num_envs, 13, device=self.device) # Setup data self.init_data() def init_data(self): # Setup sim handles env_ptr = self.envs[0] franka_handle = 0 self.handles = { # Franka "hand": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_hand"), "leftfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_leftfinger_tip"), "rightfinger_tip": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_rightfinger_tip"), "grip_site": self.gym.find_actor_rigid_body_handle(env_ptr, franka_handle, "panda_grip_site"), # Cubes "cubeA_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeA_id, "box"), "cubeB_body_handle": self.gym.find_actor_rigid_body_handle(self.envs[0], self._cubeB_id, "box"), } # Get total DOFs self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs # Setup tensor buffers _actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) _dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) _rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self._root_state = gymtorch.wrap_tensor(_actor_root_state_tensor).view(self.num_envs, -1, 13) self._dof_state = gymtorch.wrap_tensor(_dof_state_tensor).view(self.num_envs, -1, 2) self._rigid_body_state = gymtorch.wrap_tensor(_rigid_body_state_tensor).view(self.num_envs, -1, 13) self._q = self._dof_state[..., 0] self._qd = self._dof_state[..., 1] self._eef_state = self._rigid_body_state[:, self.handles["grip_site"], :] self._eef_lf_state = self._rigid_body_state[:, self.handles["leftfinger_tip"], :] self._eef_rf_state = self._rigid_body_state[:, self.handles["rightfinger_tip"], :] _jacobian = self.gym.acquire_jacobian_tensor(self.sim, "franka") jacobian = gymtorch.wrap_tensor(_jacobian) hand_joint_index = self.gym.get_actor_joint_dict(env_ptr, franka_handle)['panda_hand_joint'] self._j_eef = jacobian[:, hand_joint_index, :, :7] _massmatrix = self.gym.acquire_mass_matrix_tensor(self.sim, "franka") mm = gymtorch.wrap_tensor(_massmatrix) self._mm = mm[:, :7, :7] self._cubeA_state = self._root_state[:, 
self._cubeA_id, :] self._cubeB_state = self._root_state[:, self._cubeB_id, :] # Initialize states self.states.update({ "cubeA_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeA_size, "cubeB_size": torch.ones_like(self._eef_state[:, 0]) * self.cubeB_size, }) # Initialize actions self._pos_control = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self._effort_control = torch.zeros_like(self._pos_control) # Initialize control self._arm_control = self._effort_control[:, :7] self._gripper_control = self._pos_control[:, 7:9] # Initialize indices self._global_indices = torch.arange(self.num_envs * 5, dtype=torch.int32, device=self.device).view(self.num_envs, -1) def _update_states(self): self.states.update({ # Franka "q": self._q[:, :], "q_gripper": self._q[:, -2:], "eef_pos": self._eef_state[:, :3], "eef_quat": self._eef_state[:, 3:7], "eef_vel": self._eef_state[:, 7:], "eef_lf_pos": self._eef_lf_state[:, :3], "eef_rf_pos": self._eef_rf_state[:, :3], # Cubes "cubeA_quat": self._cubeA_state[:, 3:7], "cubeA_pos": self._cubeA_state[:, :3], "cubeA_pos_relative": self._cubeA_state[:, :3] - self._eef_state[:, :3], "cubeB_quat": self._cubeB_state[:, 3:7], "cubeB_pos": self._cubeB_state[:, :3], "cubeA_to_cubeB_pos": self._cubeB_state[:, :3] - self._cubeA_state[:, :3], }) def _refresh(self): self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_jacobian_tensors(self.sim) self.gym.refresh_mass_matrix_tensors(self.sim) # Refresh states self._update_states() def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = compute_franka_reward( self.reset_buf, self.progress_buf, self.actions, self.states, self.reward_settings, self.max_episode_length ) def compute_observations(self): self._refresh() obs = ["cubeA_quat", "cubeA_pos", "cubeA_to_cubeB_pos", "eef_pos", "eef_quat"] obs += ["q_gripper"] if self.control_type == "osc" else ["q"] self.obs_buf = torch.cat([self.states[ob] for ob in obs], dim=-1) maxs = {ob: torch.max(self.states[ob]).item() for ob in obs} return self.obs_buf def reset_idx(self, env_ids): env_ids_int32 = env_ids.to(dtype=torch.int32) # Reset cubes, sampling cube B first, then A # if not self._i: self._reset_init_cube_state(cube='B', env_ids=env_ids, check_valid=False) self._reset_init_cube_state(cube='A', env_ids=env_ids, check_valid=True) # self._i = True # Write these new init states to the sim states self._cubeA_state[env_ids] = self._init_cubeA_state[env_ids] self._cubeB_state[env_ids] = self._init_cubeB_state[env_ids] # Reset agent reset_noise = torch.rand((len(env_ids), 9), device=self.device) pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + self.franka_dof_noise * 2.0 * (reset_noise - 0.5), self.franka_dof_lower_limits.unsqueeze(0), self.franka_dof_upper_limits) # Overwrite gripper init pos (no noise since these are always position controlled) pos[:, -2:] = self.franka_default_dof_pos[-2:] # Reset the internal obs accordingly self._q[env_ids, :] = pos self._qd[env_ids, :] = torch.zeros_like(self._qd[env_ids]) # Set any position control to the current position, and any vel / effort control to be 0 # NOTE: Task takes care of actually propagating these controls in sim using the SimActions API self._pos_control[env_ids, :] = pos self._effort_control[env_ids, :] = torch.zeros_like(pos) # Deploy updates multi_env_ids_int32 = self._global_indices[env_ids, 0].flatten() 
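        # The *_indexed tensor setters below take the full simulation-wide
        # buffer plus an int32 index tensor, and write back only the rows for
        # the actors/DOFs being reset; the other environments are untouched.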
self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._pos_control), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_actuation_force_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._effort_control), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self._dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) # Update cube states multi_env_ids_cubes_int32 = self._global_indices[env_ids, -2:].flatten() self.gym.set_actor_root_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self._root_state), gymtorch.unwrap_tensor(multi_env_ids_cubes_int32), len(multi_env_ids_cubes_int32)) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 def _reset_init_cube_state(self, cube, env_ids, check_valid=True): """ Simple method to sample @cube's position based on self.startPositionNoise and self.startRotationNoise, and automaticlly reset the pose internally. Populates the appropriate self._init_cubeX_state If @check_valid is True, then this will also make sure that the sampled position is not in contact with the other cube. Args: cube(str): Which cube to sample location for. Either 'A' or 'B' env_ids (tensor or None): Specific environments to reset cube for check_valid (bool): Whether to make sure sampled position is collision-free with the other cube. """ # If env_ids is None, we reset all the envs if env_ids is None: env_ids = torch.arange(start=0, end=self.num_envs, device=self.device, dtype=torch.long) # Initialize buffer to hold sampled values num_resets = len(env_ids) sampled_cube_state = torch.zeros(num_resets, 13, device=self.device) # Get correct references depending on which one was selected if cube.lower() == 'a': this_cube_state_all = self._init_cubeA_state other_cube_state = self._init_cubeB_state[env_ids, :] cube_heights = self.states["cubeA_size"] elif cube.lower() == 'b': this_cube_state_all = self._init_cubeB_state other_cube_state = self._init_cubeA_state[env_ids, :] cube_heights = self.states["cubeA_size"] else: raise ValueError(f"Invalid cube specified, options are 'A' and 'B'; got: {cube}") # Minimum cube distance for guarenteed collision-free sampling is the sum of each cube's effective radius min_dists = (self.states["cubeA_size"] + self.states["cubeB_size"])[env_ids] * np.sqrt(2) / 2.0 # We scale the min dist by 2 so that the cubes aren't too close together min_dists = min_dists * 2.0 # Sampling is "centered" around middle of table centered_cube_xy_state = torch.tensor(self._table_surface_pos[:2], device=self.device, dtype=torch.float32) # Set z value, which is fixed height sampled_cube_state[:, 2] = self._table_surface_pos[2] + cube_heights.squeeze(-1)[env_ids] / 2 # Initialize rotation, which is no rotation (quat w = 1) sampled_cube_state[:, 6] = 1.0 # If we're verifying valid sampling, we need to check and re-sample if any are not collision-free # We use a simple heuristic of checking based on cubes' radius to determine if a collision would occur if check_valid: success = False # Indexes corresponding to envs we're still actively sampling for active_idx = torch.arange(num_resets, device=self.device) num_active_idx = len(active_idx) for i in range(100): # Sample x y values sampled_cube_state[active_idx, :2] = centered_cube_xy_state + \ 2.0 * self.start_position_noise * ( torch.rand_like(sampled_cube_state[active_idx, :2]) - 0.5) # Check if sampled values are valid cube_dist = 
torch.linalg.norm(sampled_cube_state[:, :2] - other_cube_state[:, :2], dim=-1) active_idx = torch.nonzero(cube_dist < min_dists, as_tuple=True)[0] num_active_idx = len(active_idx) # If active idx is empty, then all sampling is valid :D if num_active_idx == 0: success = True break # Make sure we succeeded at sampling assert success, "Sampling cube locations was unsuccessful! ):" else: # We just directly sample sampled_cube_state[:, :2] = centered_cube_xy_state.unsqueeze(0) + \ 2.0 * self.start_position_noise * ( torch.rand(num_resets, 2, device=self.device) - 0.5) # Sample rotation value if self.start_rotation_noise > 0: aa_rot = torch.zeros(num_resets, 3, device=self.device) aa_rot[:, 2] = 2.0 * self.start_rotation_noise * (torch.rand(num_resets, device=self.device) - 0.5) sampled_cube_state[:, 3:7] = quat_mul(axisangle2quat(aa_rot), sampled_cube_state[:, 3:7]) # Lastly, set these sampled values as the new init state this_cube_state_all[env_ids, :] = sampled_cube_state def _compute_osc_torques(self, dpose): # Solve for Operational Space Control # Paper: khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf # Helpful resource: studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/ q, qd = self._q[:, :7], self._qd[:, :7] mm_inv = torch.inverse(self._mm) m_eef_inv = self._j_eef @ mm_inv @ torch.transpose(self._j_eef, 1, 2) m_eef = torch.inverse(m_eef_inv) # Transform our cartesian action `dpose` into joint torques `u` u = torch.transpose(self._j_eef, 1, 2) @ m_eef @ ( self.kp * dpose - self.kd * self.states["eef_vel"]).unsqueeze(-1) # Nullspace control torques `u_null` prevents large changes in joint configuration # They are added into the nullspace of OSC so that the end effector orientation remains constant # roboticsproceedings.org/rss07/p31.pdf j_eef_inv = m_eef @ self._j_eef @ mm_inv u_null = self.kd_null * -qd + self.kp_null * ( (self.franka_default_dof_pos[:7] - q + np.pi) % (2 * np.pi) - np.pi) u_null[:, 7:] *= 0 u_null = self._mm @ u_null.unsqueeze(-1) u += (torch.eye(7, device=self.device).unsqueeze(0) - torch.transpose(self._j_eef, 1, 2) @ j_eef_inv) @ u_null # Clip the values to be within valid effort range u = tensor_clamp(u.squeeze(-1), -self._franka_effort_limits[:7].unsqueeze(0), self._franka_effort_limits[:7].unsqueeze(0)) return u def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) # Split arm and gripper command u_arm, u_gripper = self.actions[:, :-1], self.actions[:, -1] # print(u_arm, u_gripper) # print(self.cmd_limit, self.action_scale) # Control arm (scale value first) u_arm = u_arm * self.cmd_limit / self.action_scale if self.control_type == "osc": u_arm = self._compute_osc_torques(dpose=u_arm) self._arm_control[:, :] = u_arm # Control gripper u_fingers = torch.zeros_like(self._gripper_control) u_fingers[:, 0] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-2].item(), self.franka_dof_lower_limits[-2].item()) u_fingers[:, 1] = torch.where(u_gripper >= 0.0, self.franka_dof_upper_limits[-1].item(), self.franka_dof_lower_limits[-1].item()) # Write gripper command to appropriate tensor buffer self._gripper_control[:, :] = u_fingers # Deploy actions self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self._pos_control)) self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(self._effort_control)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) 
self.compute_observations() self.compute_reward(self.actions) # debug viz if self.viewer and self.debug_viz: self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) # Grab relevant states to visualize eef_pos = self.states["eef_pos"] eef_rot = self.states["eef_quat"] cubeA_pos = self.states["cubeA_pos"] cubeA_rot = self.states["cubeA_quat"] cubeB_pos = self.states["cubeB_pos"] cubeB_rot = self.states["cubeB_quat"] # Plot visualizations for i in range(self.num_envs): for pos, rot in zip((eef_pos, cubeA_pos, cubeB_pos), (eef_rot, cubeA_rot, cubeB_rot)): px = (pos[i] + quat_apply(rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy() py = (pos[i] + quat_apply(rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy() pz = (pos[i] + quat_apply(rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy() p0 = pos[i].cpu().numpy() self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], px[0], px[1], px[2]], [0.85, 0.1, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], py[0], py[1], py[2]], [0.1, 0.85, 0.1]) self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], pz[0], pz[1], pz[2]], [0.1, 0.1, 0.85]) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_franka_reward( reset_buf, progress_buf, actions, states, reward_settings, max_episode_length ): # type: (Tensor, Tensor, Tensor, Dict[str, Tensor], Dict[str, float], float) -> Tuple[Tensor, Tensor] # Compute per-env physical parameters target_height = states["cubeB_size"] + states["cubeA_size"] / 2.0 cubeA_size = states["cubeA_size"] cubeB_size = states["cubeB_size"] # distance from hand to the cubeA d = torch.norm(states["cubeA_pos_relative"], dim=-1) d_lf = torch.norm(states["cubeA_pos"] - states["eef_lf_pos"], dim=-1) d_rf = torch.norm(states["cubeA_pos"] - states["eef_rf_pos"], dim=-1) dist_reward = 1 - torch.tanh(10.0 * (d + d_lf + d_rf) / 3) # reward for lifting cubeA cubeA_height = states["cubeA_pos"][:, 2] - reward_settings["table_height"] cubeA_lifted = (cubeA_height - cubeA_size) > 0.04 lift_reward = cubeA_lifted # how closely aligned cubeA is to cubeB (only provided if cubeA is lifted) offset = torch.zeros_like(states["cubeA_to_cubeB_pos"]) offset[:, 2] = (cubeA_size + cubeB_size) / 2 d_ab = torch.norm(states["cubeA_to_cubeB_pos"] + offset, dim=-1) align_reward = (1 - torch.tanh(10.0 * d_ab)) * cubeA_lifted # Dist reward is maximum of dist and align reward dist_reward = torch.max(dist_reward, align_reward) # final reward for stacking successfully (only if cubeA is close to target height and corresponding location, and gripper is not grasping) cubeA_align_cubeB = (torch.norm(states["cubeA_to_cubeB_pos"][:, :2], dim=-1) < 0.02) cubeA_on_cubeB = torch.abs(cubeA_height - target_height) < 0.02 gripper_away_from_cubeA = (d > 0.04) stack_reward = cubeA_align_cubeB & cubeA_on_cubeB & gripper_away_from_cubeA # Compose rewards # We either provide the stack reward or the align + dist reward rewards = torch.where( stack_reward, reward_settings["r_stack_scale"] * stack_reward, reward_settings["r_dist_scale"] * dist_reward + reward_settings["r_lift_scale"] * lift_reward + reward_settings[ "r_align_scale"] * align_reward, ) # Compute resets reset_buf = torch.where((progress_buf >= max_episode_length - 1) | (stack_reward > 0), 
torch.ones_like(reset_buf), reset_buf) return rewards, reset_buf
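

# Editor's sketch (illustrative): axisangle2quat above maps a scaled
# axis-angle vector to an (x, y, z, w) quaternion. For example, a rotation of
# pi/2 about the z axis is the axis-angle vector (0, 0, pi/2):
def _axisangle2quat_demo():
    aa = torch.tensor([[0.0, 0.0, np.pi / 2.0]])
    # ~ [[0, 0, sin(pi/4), cos(pi/4)]] = [[0, 0, 0.7071, 0.7071]]
    return axisangle2quat(aa)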
37,426
Python
49.036096
217
0.595816
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/quadcopter.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgym import gymutil, gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import * from .base.vec_task import VecTask class Quadcopter(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.debug_viz = self.cfg["env"]["enableDebugVis"] dofs_per_env = 8 bodies_per_env = 9 # Observations: # 0:13 - root state # 13:29 - DOF states num_obs = 21 # Actions: # 0:8 - rotor DOF position targets # 8:12 - rotor thrust magnitudes num_acts = 12 self.cfg["env"]["numObservations"] = num_obs self.cfg["env"]["numActions"] = num_acts super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 13) vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) self.root_states = vec_root_tensor self.root_positions = vec_root_tensor[..., 0:3] self.root_quats = vec_root_tensor[..., 3:7] self.root_linvels = vec_root_tensor[..., 7:10] self.root_angvels = vec_root_tensor[..., 10:13] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_root_states = vec_root_tensor.clone() self.initial_dof_states = vec_dof_tensor.clone() max_thrust = 2 self.thrust_lower_limits = torch.zeros(4, device=self.device, dtype=torch.float32) self.thrust_upper_limits = max_thrust * torch.ones(4, 
device=self.device, dtype=torch.float32) # control tensors self.dof_position_targets = torch.zeros((self.num_envs, dofs_per_env), dtype=torch.float32, device=self.device, requires_grad=False) self.thrusts = torch.zeros((self.num_envs, 4), dtype=torch.float32, device=self.device, requires_grad=False) self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(self.num_envs, dtype=torch.int32, device=self.device) if self.viewer: cam_pos = gymapi.Vec3(1.0, 1.0, 1.8) cam_target = gymapi.Vec3(2.2, 2.0, 1.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # need rigid body states for visualizing thrusts self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13) self.rb_positions = self.rb_states[..., 0:3] self.rb_quats = self.rb_states[..., 3:7] def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self.dt = self.sim_params.dt self._create_quadcopter_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_quadcopter_asset(self): chassis_radius = 0.1 chassis_thickness = 0.03 rotor_radius = 0.04 rotor_thickness = 0.01 rotor_arm_radius = 0.01 root = ET.Element('mujoco') root.attrib["model"] = "Quadcopter" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" worldbody = ET.SubElement(root, "worldbody") chassis = ET.SubElement(worldbody, "body") chassis.attrib["name"] = "chassis" chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0) chassis_geom = ET.SubElement(chassis, "geom") chassis_geom.attrib["type"] = "cylinder" chassis_geom.attrib["size"] = "%g %g" % (chassis_radius, 0.5 * chassis_thickness) chassis_geom.attrib["pos"] = "0 0 0" chassis_geom.attrib["density"] = "50" chassis_joint = ET.SubElement(chassis, "joint") chassis_joint.attrib["name"] = "root_joint" chassis_joint.attrib["type"] = "free" zaxis = gymapi.Vec3(0, 0, 1) rotor_arm_offset = gymapi.Vec3(chassis_radius + 0.25 * rotor_arm_radius, 0, 0) pitch_joint_offset = gymapi.Vec3(0, 0, 0) rotor_offset = gymapi.Vec3(rotor_radius + 0.25 * rotor_arm_radius, 0, 0) rotor_angles = [0.25 * math.pi, 0.75 * math.pi, 1.25 * math.pi, 1.75 * math.pi] for i in range(len(rotor_angles)): angle = rotor_angles[i] rotor_arm_quat = gymapi.Quat.from_axis_angle(zaxis, angle) rotor_arm_pos = rotor_arm_quat.rotate(rotor_arm_offset) pitch_joint_pos = pitch_joint_offset rotor_pos = rotor_offset rotor_quat = gymapi.Quat() rotor_arm = ET.SubElement(chassis, "body") rotor_arm.attrib["name"] = "rotor_arm" + str(i) rotor_arm.attrib["pos"] = "%g %g %g" % (rotor_arm_pos.x, rotor_arm_pos.y, rotor_arm_pos.z) rotor_arm.attrib["quat"] = "%g %g %g %g" % (rotor_arm_quat.w, rotor_arm_quat.x, rotor_arm_quat.y, rotor_arm_quat.z) rotor_arm_geom = ET.SubElement(rotor_arm, "geom") rotor_arm_geom.attrib["type"] = "sphere" rotor_arm_geom.attrib["size"] = "%g" % rotor_arm_radius rotor_arm_geom.attrib["density"] = "200" pitch_joint = ET.SubElement(rotor_arm, "joint") pitch_joint.attrib["name"] = "rotor_pitch" + str(i) pitch_joint.attrib["type"] = "hinge" 
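            # the pitch hinge (y axis) configured here and the roll hinge
            # (x axis) added below give each rotor a +/-30 degree gimbal,
            # letting the thrust direction tilt relative to the chassis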
pitch_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) pitch_joint.attrib["axis"] = "0 1 0" pitch_joint.attrib["limited"] = "true" pitch_joint.attrib["range"] = "-30 30" rotor = ET.SubElement(rotor_arm, "body") rotor.attrib["name"] = "rotor" + str(i) rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_geom = ET.SubElement(rotor, "geom") rotor_geom.attrib["type"] = "cylinder" rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness) #rotor_geom.attrib["type"] = "box" #rotor_geom.attrib["size"] = "%g %g %g" % (rotor_radius, rotor_radius, 0.5 * rotor_thickness) rotor_geom.attrib["density"] = "1000" roll_joint = ET.SubElement(rotor, "joint") roll_joint.attrib["name"] = "rotor_roll" + str(i) roll_joint.attrib["type"] = "hinge" roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) roll_joint.attrib["axis"] = "1 0 0" roll_joint.attrib["limited"] = "true" roll_joint.attrib["range"] = "-30 30" gymutil._indent_xml(root) ET.ElementTree(root).write("quadcopter.xml") def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "." asset_file = "quadcopter.xml" asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 4 * math.pi asset_options.slices_per_cylinder = 40 asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dofs = self.gym.get_asset_dof_count(asset) dof_props = self.gym.get_asset_dof_properties(asset) self.dof_lower_limits = [] self.dof_upper_limits = [] for i in range(self.num_dofs): self.dof_lower_limits.append(dof_props['lower'][i]) self.dof_upper_limits.append(dof_props['upper'][i]) self.dof_lower_limits = to_torch(self.dof_lower_limits, device=self.device) self.dof_upper_limits = to_torch(self.dof_upper_limits, device=self.device) self.dof_ranges = self.dof_upper_limits - self.dof_lower_limits default_pose = gymapi.Transform() default_pose.p.z = 1.0 self.envs = [] for i in range(self.num_envs): # create env instance env = self.gym.create_env(self.sim, lower, upper, num_per_row) actor_handle = self.gym.create_actor(env, asset, default_pose, "quadcopter", i, 1, 0) dof_props = self.gym.get_actor_dof_properties(env, actor_handle) dof_props['driveMode'].fill(gymapi.DOF_MODE_POS) dof_props['stiffness'].fill(1000.0) dof_props['damping'].fill(0.0) self.gym.set_actor_dof_properties(env, actor_handle, dof_props) # pretty colors chassis_color = gymapi.Vec3(0.8, 0.6, 0.2) rotor_color = gymapi.Vec3(0.1, 0.2, 0.6) arm_color = gymapi.Vec3(0.0, 0.0, 0.0) self.gym.set_rigid_body_color(env, actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, chassis_color) self.gym.set_rigid_body_color(env, actor_handle, 1, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 3, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 5, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 7, gymapi.MESH_VISUAL_AND_COLLISION, arm_color) self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 4, 
gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, rotor_color) #self.gym.set_rigid_body_color(env, actor_handle, 2, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0)) #self.gym.set_rigid_body_color(env, actor_handle, 4, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 1, 0)) #self.gym.set_rigid_body_color(env, actor_handle, 6, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0, 0, 1)) #self.gym.set_rigid_body_color(env, actor_handle, 8, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0)) self.envs.append(env) if self.debug_viz: # need env offsets for the rotors self.rotor_env_offsets = torch.zeros((self.num_envs, 4, 3), device=self.device) for i in range(self.num_envs): env_origin = self.gym.get_env_origin(self.envs[i]) self.rotor_env_offsets[i, ..., 0] = env_origin.x self.rotor_env_offsets[i, ..., 1] = env_origin.y self.rotor_env_offsets[i, ..., 2] = env_origin.z def reset_idx(self, env_ids): num_resets = len(env_ids) self.dof_states[env_ids] = self.initial_dof_states[env_ids] actor_indices = self.all_actor_indices[env_ids].flatten() self.root_states[env_ids] = self.initial_root_states[env_ids] self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten() self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.dof_positions[env_ids] = torch_rand_float(-0.2, 0.2, (num_resets, 8), self.device) self.dof_velocities[env_ids] = 0.0 self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, _actions): # resets reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) actions = _actions.to(self.device) dof_action_speed_scale = 8 * math.pi self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8] self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits) thrust_action_speed_scale = 200 self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12] self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits) self.forces[:, 2, 2] = self.thrusts[:, 0] self.forces[:, 4, 2] = self.thrusts[:, 1] self.forces[:, 6, 2] = self.thrusts[:, 2] self.forces[:, 8, 2] = self.thrusts[:, 3] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 self.dof_position_targets[reset_env_ids] = self.dof_positions[reset_env_ids] # apply actions self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.dof_position_targets)) self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE) def post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.compute_observations() self.compute_reward() # debug viz if self.viewer and self.debug_viz: # compute start and end positions for visualizing thrust lines 
self.gym.refresh_rigid_body_state_tensor(self.sim) rotor_indices = torch.LongTensor([2, 4, 6, 8]) quats = self.rb_quats[:, rotor_indices] dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3) starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs # submit debug line geometry verts = torch.stack([starts, ends], dim=2).cpu().numpy() colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32) colors[..., 0] = 1.0 self.gym.clear_lines(self.viewer) self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors) def compute_observations(self): target_x = 0.0 target_y = 0.0 target_z = 1.0 self.obs_buf[..., 0] = (target_x - self.root_positions[..., 0]) / 3 self.obs_buf[..., 1] = (target_y - self.root_positions[..., 1]) / 3 self.obs_buf[..., 2] = (target_z - self.root_positions[..., 2]) / 3 self.obs_buf[..., 3:7] = self.root_quats self.obs_buf[..., 7:10] = self.root_linvels / 2 self.obs_buf[..., 10:13] = self.root_angvels / math.pi self.obs_buf[..., 13:21] = self.dof_positions return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_quadcopter_reward( self.root_positions, self.root_quats, self.root_linvels, self.root_angvels, self.reset_buf, self.progress_buf, self.max_episode_length ) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_quadcopter_reward(root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # distance to target target_dist = torch.sqrt(root_positions[..., 0] * root_positions[..., 0] + root_positions[..., 1] * root_positions[..., 1] + (1 - root_positions[..., 2]) * (1 - root_positions[..., 2])) pos_reward = 1.0 / (1.0 + target_dist * target_dist) # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 1.0 / (1.0 + tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + spinnage * spinnage) # combined reward # uprigness and spinning only matter when close to the target reward = pos_reward + pos_reward * (up_reward + spinnage_reward) # resets due to misbehavior ones = torch.ones_like(reset_buf) die = torch.zeros_like(reset_buf) die = torch.where(target_dist > 3.0, ones, die) die = torch.where(root_positions[..., 2] < 0.3, ones, die) # resets due to episode length reset = torch.where(progress_buf >= max_episode_length - 1, ones, die) return reward, reset
19725
Python
46.078759
217
0.61308
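The combined reward in compute_quadcopter_reward above gates the uprightness and spin terms by the position term, so attitude and spin only pay off once the copter is near the hover target. Below is a minimal standalone sketch of that shaping on dummy tensors, in plain PyTorch with no Isaac Gym dependency; the function name and inputs are illustrative, not part of the repo.

import torch

def quadcopter_reward_shape(target_dist, tiltage, spinnage):
    # each term is a 1/(1+x^2) kernel, as in compute_quadcopter_reward
    pos_reward = 1.0 / (1.0 + target_dist * target_dist)
    up_reward = 1.0 / (1.0 + tiltage * tiltage)
    spinnage_reward = 1.0 / (1.0 + spinnage * spinnage)
    # uprightness and spin are multiplied by pos_reward, so they only
    # contribute meaningfully when the copter is close to the target
    return pos_reward + pos_reward * (up_reward + spinnage_reward)

d = torch.linspace(0.0, 3.0, 7)
print(quadcopter_reward_shape(d, torch.zeros_like(d), torch.zeros_like(d)))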
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/ingenuity.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy as np import os import torch import xml.etree.ElementTree as ET from isaacgymenvs.utils.torch_jit_utils import * from .base.vec_task import VecTask from isaacgym import gymutil, gymtorch, gymapi class Ingenuity(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.max_episode_length = self.cfg["env"]["maxEpisodeLength"] self.debug_viz = self.cfg["env"]["enableDebugVis"] # Observations: # 0:13 - root state self.cfg["env"]["numObservations"] = 13 # Actions: # 0:3 - xyz force vector for lower rotor # 4:6 - xyz force vector for upper rotor self.cfg["env"]["numActions"] = 6 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) dofs_per_env = 4 bodies_per_env = 6 self.root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) self.dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) vec_root_tensor = gymtorch.wrap_tensor(self.root_tensor).view(self.num_envs, 2, 13) vec_dof_tensor = gymtorch.wrap_tensor(self.dof_state_tensor).view(self.num_envs, dofs_per_env, 2) self.root_states = vec_root_tensor[:, 0, :] self.root_positions = self.root_states[:, 0:3] self.target_root_positions = torch.zeros((self.num_envs, 3), device=self.device, dtype=torch.float32) self.target_root_positions[:, 2] = 1 self.root_quats = self.root_states[:, 3:7] self.root_linvels = self.root_states[:, 7:10] self.root_angvels = self.root_states[:, 10:13] self.marker_states = vec_root_tensor[:, 1, :] self.marker_positions = self.marker_states[:, 0:3] self.dof_states = vec_dof_tensor self.dof_positions = vec_dof_tensor[..., 0] self.dof_velocities = vec_dof_tensor[..., 1] self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.initial_root_states = self.root_states.clone() 
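# note: startup snapshots -- reset_idx() restores per-env root states from these clones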
self.initial_dof_states = self.dof_states.clone() self.thrust_lower_limit = 0 self.thrust_upper_limit = 2000 self.thrust_lateral_component = 0.2 # control tensors self.thrusts = torch.zeros((self.num_envs, 2, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.forces = torch.zeros((self.num_envs, bodies_per_env, 3), dtype=torch.float32, device=self.device, requires_grad=False) self.all_actor_indices = torch.arange(self.num_envs * 2, dtype=torch.int32, device=self.device).reshape((self.num_envs, 2)) if self.viewer: cam_pos = gymapi.Vec3(2.25, 2.25, 3.0) cam_target = gymapi.Vec3(3.5, 4.0, 1.9) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # need rigid body states for visualizing thrusts self.rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.rb_states = gymtorch.wrap_tensor(self.rb_state_tensor).view(self.num_envs, bodies_per_env, 13) self.rb_positions = self.rb_states[..., 0:3] self.rb_quats = self.rb_states[..., 3:7] def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z # Mars gravity self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -3.721 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self.dt = self.sim_params.dt self._create_ingenuity_asset() self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ingenuity_asset(self): chassis_size = 0.06 rotor_axis_length = 0.2 rotor_radius = 0.15 rotor_thickness = 0.01 rotor_arm_radius = 0.01 root = ET.Element('mujoco') root.attrib["model"] = "Ingenuity" compiler = ET.SubElement(root, "compiler") compiler.attrib["angle"] = "degree" compiler.attrib["coordinate"] = "local" compiler.attrib["inertiafromgeom"] = "true" mesh_asset = ET.SubElement(root, "asset") model_path = "../assets/glb/ingenuity/" mesh = ET.SubElement(mesh_asset, "mesh") mesh.attrib["file"] = model_path + "chassis.glb" mesh.attrib["name"] = "ingenuity_mesh" lower_prop_mesh = ET.SubElement(mesh_asset, "mesh") lower_prop_mesh.attrib["file"] = model_path + "lower_prop.glb" lower_prop_mesh.attrib["name"] = "lower_prop_mesh" upper_prop_mesh = ET.SubElement(mesh_asset, "mesh") upper_prop_mesh.attrib["file"] = model_path + "upper_prop.glb" upper_prop_mesh.attrib["name"] = "upper_prop_mesh" worldbody = ET.SubElement(root, "worldbody") chassis = ET.SubElement(worldbody, "body") chassis.attrib["name"] = "chassis" chassis.attrib["pos"] = "%g %g %g" % (0, 0, 0) chassis_geom = ET.SubElement(chassis, "geom") chassis_geom.attrib["type"] = "box" chassis_geom.attrib["size"] = "%g %g %g" % (chassis_size, chassis_size, chassis_size) chassis_geom.attrib["pos"] = "0 0 0" chassis_geom.attrib["density"] = "50" mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0) mesh_geom = ET.SubElement(chassis, "geom") mesh_geom.attrib["type"] = "mesh" mesh_geom.attrib["quat"] = "%g %g %g %g" % (mesh_quat.w, mesh_quat.x, mesh_quat.y, mesh_quat.z) mesh_geom.attrib["mesh"] = "ingenuity_mesh" mesh_geom.attrib["pos"] = "%g %g %g" % (0, 0, 0) mesh_geom.attrib["contype"] = "0" mesh_geom.attrib["conaffinity"] = "0" chassis_joint = ET.SubElement(chassis, "joint") chassis_joint.attrib["name"] = "root_joint" chassis_joint.attrib["type"] = "hinge" chassis_joint.attrib["limited"] = "true" chassis_joint.attrib["range"] = "0 0" zaxis = gymapi.Vec3(0, 0, 1) low_rotor_pos = gymapi.Vec3(0, 0, 0) rotor_separation = gymapi.Vec3(0, 0, 0.025) for i, mesh_name in 
enumerate(["lower_prop_mesh", "upper_prop_mesh"]): angle = 0 rotor_quat = gymapi.Quat.from_axis_angle(zaxis, angle) rotor_pos = low_rotor_pos + (rotor_separation * i) rotor = ET.SubElement(chassis, "body") rotor.attrib["name"] = "rotor_physics_" + str(i) rotor.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_geom = ET.SubElement(rotor, "geom") rotor_geom.attrib["type"] = "cylinder" rotor_geom.attrib["size"] = "%g %g" % (rotor_radius, 0.5 * rotor_thickness) rotor_geom.attrib["density"] = "1000" roll_joint = ET.SubElement(rotor, "joint") roll_joint.attrib["name"] = "rotor_roll" + str(i) roll_joint.attrib["type"] = "hinge" roll_joint.attrib["limited"] = "true" roll_joint.attrib["range"] = "0 0" roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) rotor_dummy = ET.SubElement(chassis, "body") rotor_dummy.attrib["name"] = "rotor_visual_" + str(i) rotor_dummy.attrib["pos"] = "%g %g %g" % (rotor_pos.x, rotor_pos.y, rotor_pos.z) rotor_dummy.attrib["quat"] = "%g %g %g %g" % (rotor_quat.w, rotor_quat.x, rotor_quat.y, rotor_quat.z) rotor_mesh_geom = ET.SubElement(rotor_dummy, "geom") rotor_mesh_geom.attrib["type"] = "mesh" rotor_mesh_geom.attrib["mesh"] = mesh_name rotor_mesh_quat = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0) rotor_mesh_geom.attrib["quat"] = "%g %g %g %g" % (rotor_mesh_quat.w, rotor_mesh_quat.x, rotor_mesh_quat.y, rotor_mesh_quat.z) rotor_mesh_geom.attrib["contype"] = "0" rotor_mesh_geom.attrib["conaffinity"] = "0" dummy_roll_joint = ET.SubElement(rotor_dummy, "joint") dummy_roll_joint.attrib["name"] = "rotor_roll" + str(i) dummy_roll_joint.attrib["type"] = "hinge" dummy_roll_joint.attrib["axis"] = "0 0 1" dummy_roll_joint.attrib["pos"] = "%g %g %g" % (0, 0, 0) gymutil._indent_xml(root) ET.ElementTree(root).write("ingenuity.xml") def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = "./" asset_file = "ingenuity.xml" asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 0.0 asset_options.max_angular_velocity = 4 * math.pi asset_options.slices_per_cylinder = 40 asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) asset_options.fix_base_link = True marker_asset = self.gym.create_sphere(self.sim, 0.1, asset_options) default_pose = gymapi.Transform() default_pose.p.z = 1.0 self.envs = [] self.actor_handles = [] for i in range(self.num_envs): # create env instance env = self.gym.create_env(self.sim, lower, upper, num_per_row) actor_handle = self.gym.create_actor(env, asset, default_pose, "ingenuity", i, 1, 1) dof_props = self.gym.get_actor_dof_properties(env, actor_handle) dof_props['stiffness'].fill(0) dof_props['damping'].fill(0) self.gym.set_actor_dof_properties(env, actor_handle, dof_props) marker_handle = self.gym.create_actor(env, marker_asset, default_pose, "marker", i, 1, 1) self.gym.set_rigid_body_color(env, marker_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 0, 0)) self.actor_handles.append(actor_handle) self.envs.append(env) if self.debug_viz: # need env offsets for the rotors self.rotor_env_offsets = torch.zeros((self.num_envs, 2, 3), device=self.device) for i in range(self.num_envs): env_origin = 
self.gym.get_env_origin(self.envs[i]) self.rotor_env_offsets[i, ..., 0] = env_origin.x self.rotor_env_offsets[i, ..., 1] = env_origin.y self.rotor_env_offsets[i, ..., 2] = env_origin.z def set_targets(self, env_ids): num_sets = len(env_ids) # set target position randomly with x, y in (-5, 5) and z in (1, 2) self.target_root_positions[env_ids, 0:2] = (torch.rand(num_sets, 2, device=self.device) * 10) - 5 self.target_root_positions[env_ids, 2] = torch.rand(num_sets, device=self.device) + 1 self.marker_positions[env_ids] = self.target_root_positions[env_ids] # copter "position" is at the bottom of the legs, so shift the target up so it visually aligns better self.marker_positions[env_ids, 2] += 0.4 actor_indices = self.all_actor_indices[env_ids, 1].flatten() return actor_indices def reset_idx(self, env_ids): # set rotor speeds self.dof_velocities[:, 1] = -50 self.dof_velocities[:, 3] = 50 num_resets = len(env_ids) target_actor_indices = self.set_targets(env_ids) actor_indices = self.all_actor_indices[env_ids, 0].flatten() self.root_states[env_ids] = self.initial_root_states[env_ids] self.root_states[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), self.device).flatten() self.root_states[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), self.device).flatten() self.gym.set_dof_state_tensor_indexed(self.sim, self.dof_state_tensor, gymtorch.unwrap_tensor(actor_indices), num_resets) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 return torch.unique(torch.cat([target_actor_indices, actor_indices])) def pre_physics_step(self, _actions): # resets set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1) target_actor_indices = torch.tensor([], device=self.device, dtype=torch.int32) if len(set_target_ids) > 0: target_actor_indices = self.set_targets(set_target_ids) reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) actor_indices = torch.tensor([], device=self.device, dtype=torch.int32) if len(reset_env_ids) > 0: actor_indices = self.reset_idx(reset_env_ids) reset_indices = torch.unique(torch.cat([target_actor_indices, actor_indices])) if len(reset_indices) > 0: self.gym.set_actor_root_state_tensor_indexed(self.sim, self.root_tensor, gymtorch.unwrap_tensor(reset_indices), len(reset_indices)) actions = _actions.to(self.device) thrust_action_speed_scale = 2000 vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit) vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * thrust_action_speed_scale, -self.thrust_upper_limit, self.thrust_upper_limit) lateral_fraction_prop_0 = torch.clamp(actions[:, 0:2], -self.thrust_lateral_component, self.thrust_lateral_component) lateral_fraction_prop_1 = torch.clamp(actions[:, 3:5], -self.thrust_lateral_component, self.thrust_lateral_component) self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0 self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0 self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1 self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1 self.forces[:, 1] = self.thrusts[:, 0] self.forces[:, 3] = self.thrusts[:, 1] # clear actions for reset envs self.thrusts[reset_env_ids] = 0.0 self.forces[reset_env_ids] = 0.0 # apply actions self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE) def 
post_physics_step(self): self.progress_buf += 1 self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.compute_observations() self.compute_reward() # debug viz if self.viewer and self.debug_viz: # compute start and end positions for visualizing thrust lines self.gym.refresh_rigid_body_state_tensor(self.sim) rotor_indices = torch.LongTensor([2, 4, 6, 8]) quats = self.rb_quats[:, rotor_indices] dirs = -quat_axis(quats.view(self.num_envs * 4, 4), 2).view(self.num_envs, 4, 3) starts = self.rb_positions[:, rotor_indices] + self.rotor_env_offsets ends = starts + 0.1 * self.thrusts.view(self.num_envs, 4, 1) * dirs # submit debug line geometry verts = torch.stack([starts, ends], dim=2).cpu().numpy() colors = np.zeros((self.num_envs * 4, 3), dtype=np.float32) colors[..., 0] = 1.0 self.gym.clear_lines(self.viewer) self.gym.add_lines(self.viewer, None, self.num_envs * 4, verts, colors) def compute_observations(self): self.obs_buf[..., 0:3] = (self.target_root_positions - self.root_positions) / 3 self.obs_buf[..., 3:7] = self.root_quats self.obs_buf[..., 7:10] = self.root_linvels / 2 self.obs_buf[..., 10:13] = self.root_angvels / math.pi return self.obs_buf def compute_reward(self): self.rew_buf[:], self.reset_buf[:] = compute_ingenuity_reward( self.root_positions, self.target_root_positions, self.root_quats, self.root_linvels, self.root_angvels, self.reset_buf, self.progress_buf, self.max_episode_length ) ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_ingenuity_reward(root_positions, target_root_positions, root_quats, root_linvels, root_angvels, reset_buf, progress_buf, max_episode_length): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor] # distance to target target_dist = torch.sqrt(torch.square(target_root_positions - root_positions).sum(-1)) pos_reward = 1.0 / (1.0 + target_dist * target_dist) # uprightness ups = quat_axis(root_quats, 2) tiltage = torch.abs(1 - ups[..., 2]) up_reward = 5.0 / (1.0 + tiltage * tiltage) # spinning spinnage = torch.abs(root_angvels[..., 2]) spinnage_reward = 1.0 / (1.0 + spinnage * spinnage) # combined reward # uprigness and spinning only matter when close to the target reward = pos_reward + pos_reward * (up_reward + spinnage_reward) # resets due to misbehavior ones = torch.ones_like(reset_buf) die = torch.zeros_like(reset_buf) die = torch.where(target_dist > 8.0, ones, die) die = torch.where(root_positions[..., 2] < 0.5, ones, die) # resets due to episode length reset = torch.where(progress_buf >= max_episode_length - 1, ones, die) return reward, reset
19671
Python
43.60771
217
0.614763
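Ingenuity.pre_physics_step above turns each rotor's action triple into a body-frame force: the vertical component is scaled and clamped to the thrust limits, and the lateral components are a fraction (capped at thrust_lateral_component) of that vertical thrust. A standalone sketch for a single rotor follows; dt is a placeholder constant here (the task reads it from sim_params) and the action values are synthetic.

import torch

dt = 1.0 / 60.0                       # placeholder step size
thrust_action_speed_scale = 2000.0    # constants copied from the file above
thrust_upper_limit = 2000.0
thrust_lateral_component = 0.2

actions = torch.tensor([[0.1, -0.05, 0.8]])  # [lat_x, lat_y, vertical] for one rotor
vertical = torch.clamp(actions[:, 2] * thrust_action_speed_scale,
                       -thrust_upper_limit, thrust_upper_limit)
lateral_fraction = torch.clamp(actions[:, 0:2],
                               -thrust_lateral_component, thrust_lateral_component)

thrust = torch.zeros(1, 3)
thrust[:, 2] = dt * vertical                             # vertical thrust
thrust[:, 0:2] = thrust[:, 2, None] * lateral_fraction   # lateral force capped at 20% of vertical
print(thrust)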
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/anymal.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import torch from isaacgym import gymtorch from isaacgym import gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse from isaacgymenvs.tasks.base.vec_task import VecTask from typing import Tuple, Dict class Anymal(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg # normalization self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"] self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"] self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"] self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"] self.action_scale = self.cfg["env"]["control"]["actionScale"] # reward scales self.rew_scales = {} self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"] self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"] self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"] # randomization self.randomization_params = self.cfg["task"]["randomization_params"] self.randomize = self.cfg["task"]["randomize"] # command ranges self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"] self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"] self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"] # plane params self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"] self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"] self.plane_restitution = self.cfg["env"]["plane"]["restitution"] # base init state pos = self.cfg["env"]["baseInitState"]["pos"] rot = self.cfg["env"]["baseInitState"]["rot"] v_lin = self.cfg["env"]["baseInitState"]["vLinear"] v_ang = self.cfg["env"]["baseInitState"]["vAngular"] state = pos + rot + v_lin + v_ang self.base_init_state = state # default joint 
positions self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"] self.cfg["env"]["numObservations"] = 48 self.cfg["env"]["numActions"] = 12 super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # other self.dt = self.sim_params.dt self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"] self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5) self.Kp = self.cfg["env"]["control"]["stiffness"] self.Kd = self.cfg["env"]["control"]["damping"] for key in self.rew_scales.keys(): self.rew_scales[key] *= self.dt if self.viewer != None: p = self.cfg["env"]["viewer"]["pos"] lookat = self.cfg["env"]["viewer"]["lookat"] cam_pos = gymapi.Vec3(p[0], p[1], p[2]) cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2]) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym state tensors actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim) torques = self.gym.acquire_dof_force_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) # create some wrapper tensors for different slices self.root_states = gymtorch.wrap_tensor(actor_root_state) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1] self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof) self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False) self.commands_y = self.commands.view(self.num_envs, 3)[..., 1] self.commands_x = self.commands.view(self.num_envs, 3)[..., 0] self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2] self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False) for i in range(self.cfg["env"]["numActions"]): name = self.dof_names[i] angle = self.named_default_joint_angles[name] self.default_dof_pos[:, i] = angle # initialize some data used later on self.extras = {} self.initial_root_states = self.root_states.clone() self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False) self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1)) self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False) self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) # If randomizing, apply once immediately on startup before the fist sim step if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params 
= gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.static_friction = self.plane_static_friction plane_params.dynamic_friction = self.plane_dynamic_friction self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../assets') asset_file = "urdf/anymal_c/urdf/anymal.urdf" asset_options = gymapi.AssetOptions() asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE asset_options.collapse_fixed_joints = True asset_options.replace_cylinder_with_capsule = True asset_options.flip_visual_attachments = True asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"] asset_options.density = 0.001 asset_options.angular_damping = 0.0 asset_options.linear_damping = 0.0 asset_options.armature = 0.0 asset_options.thickness = 0.01 asset_options.disable_gravity = False anymal_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) self.num_dof = self.gym.get_asset_dof_count(anymal_asset) self.num_bodies = self.gym.get_asset_rigid_body_count(anymal_asset) start_pose = gymapi.Transform() start_pose.p = gymapi.Vec3(*self.base_init_state[:3]) body_names = self.gym.get_asset_rigid_body_names(anymal_asset) self.dof_names = self.gym.get_asset_dof_names(anymal_asset) extremity_name = "SHANK" if asset_options.collapse_fixed_joints else "FOOT" feet_names = [s for s in body_names if extremity_name in s] self.feet_indices = torch.zeros(len(feet_names), dtype=torch.long, device=self.device, requires_grad=False) knee_names = [s for s in body_names if "THIGH" in s] self.knee_indices = torch.zeros(len(knee_names), dtype=torch.long, device=self.device, requires_grad=False) self.base_index = 0 dof_props = self.gym.get_asset_dof_properties(anymal_asset) for i in range(self.num_dof): dof_props['driveMode'][i] = gymapi.DOF_MODE_POS dof_props['stiffness'][i] = self.cfg["env"]["control"]["stiffness"] #self.Kp dof_props['damping'][i] = self.cfg["env"]["control"]["damping"] #self.Kd env_lower = gymapi.Vec3(-spacing, -spacing, 0.0) env_upper = gymapi.Vec3(spacing, spacing, spacing) self.anymal_handles = [] self.envs = [] for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row) anymal_handle = self.gym.create_actor(env_ptr, anymal_asset, start_pose, "anymal", i, 1, 0) self.gym.set_actor_dof_properties(env_ptr, anymal_handle, dof_props) self.gym.enable_actor_dof_force_sensors(env_ptr, anymal_handle) self.envs.append(env_ptr) self.anymal_handles.append(anymal_handle) for i in range(len(feet_names)): self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], feet_names[i]) for i in range(len(knee_names)): self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], knee_names[i]) self.base_index = self.gym.find_actor_rigid_body_handle(self.envs[0], self.anymal_handles[0], "base") def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) targets = self.action_scale * self.actions + self.default_dof_pos self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(targets)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.compute_observations() self.compute_reward(self.actions) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:] = 
compute_anymal_reward( # tensors self.root_states, self.commands, self.torques, self.contact_forces, self.knee_indices, self.progress_buf, # Dict self.rew_scales, # other self.base_index, self.max_episode_length, ) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) # done in step self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.obs_buf[:] = compute_anymal_observations( # tensors self.root_states, self.commands, self.dof_pos, self.default_dof_pos, self.dof_vel, self.gravity_vec, self.actions, # scales self.lin_vel_scale, self.ang_vel_scale, self.dof_pos_scale, self.dof_vel_scale ) def reset_idx(self, env_ids): # Randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device) velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device) self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset self.dof_vel[env_ids] = velocities env_ids_int32 = env_ids.to(dtype=torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.initial_root_states), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze() self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze() self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 1 ##################################################################### ###=========================jit functions=========================### ##################################################################### @torch.jit.script def compute_anymal_reward( # tensors root_states, commands, torques, contact_forces, knee_indices, episode_lengths, # Dict rew_scales, # other base_index, max_episode_length ): # (reward, reset, feet_in air, feet_air_time, episode sums) # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], int, int) -> Tuple[Tensor, Tensor] # prepare quantities (TODO: return from obs ?) base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) # velocity tracking reward lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1) ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2]) rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"] rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"] # torque penalty rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"] total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque total_reward = torch.clip(total_reward, 0., None) # reset agents reset = torch.norm(contact_forces[:, base_index, :], dim=1) > 1. 
reset = reset | torch.any(torch.norm(contact_forces[:, knee_indices, :], dim=2) > 1., dim=1) time_out = episode_lengths >= max_episode_length - 1 # no terminal reward for time-outs reset = reset | time_out return total_reward.detach(), reset @torch.jit.script def compute_anymal_observations(root_states, commands, dof_pos, default_dof_pos, dof_vel, gravity_vec, actions, lin_vel_scale, ang_vel_scale, dof_pos_scale, dof_vel_scale ): # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> Tensor base_quat = root_states[:, 3:7] base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) * lin_vel_scale base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) * ang_vel_scale projected_gravity = quat_rotate(base_quat, gravity_vec) dof_pos_scaled = (dof_pos - default_dof_pos) * dof_pos_scale commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device) obs = torch.cat((base_lin_vel, base_ang_vel, projected_gravity, commands_scaled, dof_pos_scaled, dof_vel*dof_vel_scale, actions ), dim=-1) return obs
18546
Python
46.925064
217
0.602071
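The velocity-tracking terms in compute_anymal_reward above are squared-exponential kernels around the commanded base velocities, with a 0.25 temperature. A standalone sketch on synthetic commands and body-frame velocities; the rew_scales values here are illustrative placeholders (the task multiplies the config scales by dt in __init__).

import torch

commands = torch.tensor([[1.0, 0.0, 0.5]])       # commanded [vx, vy, yaw_rate]
base_lin_vel = torch.tensor([[0.8, 0.1, 0.0]])   # body-frame linear velocity
base_ang_vel = torch.tensor([[0.0, 0.0, 0.4]])   # body-frame angular velocity
rew_scales = {"lin_vel_xy": 1.0, "ang_vel_z": 0.5}  # placeholder scales

lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * rew_scales["ang_vel_z"]
print(rew_lin_vel_xy, rew_ang_vel_z)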
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/allegro_hand_dextreme.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os from typing import Tuple, List import itertools from itertools import permutations from tkinter import W from typing import Tuple, Dict, List, Set import numpy as np import torch from isaacgym import gymapi from isaacgym import gymtorch from isaacgymenvs.utils.torch_jit_utils import scale, unscale, quat_mul, quat_conjugate, quat_from_angle_axis, \ to_torch, get_axis_params, torch_rand_float, tensor_clamp from torch import Tensor from isaacgymenvs.tasks.dextreme.adr_vec_task import ADRVecTask from isaacgymenvs.utils.torch_jit_utils import quaternion_to_matrix, matrix_to_quaternion from isaacgymenvs.utils.rna_util import RandomNetworkAdversary class AllegroHandDextreme(ADRVecTask): dict_obs_cls = True def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): ''' obligatory constructor to fill-in class variables and setting up the simulation. self._read_cfg() is about initialising class variables from a config file. 
self._init_pre_sim_buffers() initialises particular tensors that are useful in storing various states randomised or otherwise self._init_post_sim_buffers() initialises the root tensors and other auxiliary variables that can be provided as input to the controller or the value function ''' self.cfg = cfg # Read the task config file and store all the relevant variables in the class self._read_cfg() self.fingertips = [s+"_link_3" for s in ["index", "middle", "ring", "thumb"]] self.num_fingertips = len(self.fingertips) num_dofs = 16 self.num_obs_dict = self.get_num_obs_dict(num_dofs) self.cfg["env"]["obsDims"] = {} for o in self.num_obs_dict.keys(): if o not in self.num_obs_dict: raise Exception(f"Unknown type of observation {o}!") self.cfg["env"]["obsDims"][o] = (self.num_obs_dict[o],) self.up_axis = 'z' self.use_vel_obs = False self.fingertip_obs = True self.asymmetric_obs = self.cfg["env"]["asymmetric_observations"] self.cfg["env"]["numActions"] = 16 self.sim_device = sim_device rl_device = self.cfg.get("rl_device", "cuda:0") self._init_pre_sim_buffers() super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, use_dict_obs=True) self._init_post_sim_buffers() reward_keys = ['dist_rew', 'rot_rew', 'action_penalty', 'action_delta_penalty', 'velocity_penalty', 'reach_goal_rew', 'fall_rew', 'timeout_rew'] self.rewards_episode = {key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys} if self.use_adr: self.apply_reset_buf = torch.zeros(self.num_envs, dtype=torch.long, device=self.device) if self.print_success_stat: self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.last_ep_successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.successes_count = torch.zeros(self.max_consecutive_successes + 1, dtype=torch.float, device=self.device) from tensorboardX import SummaryWriter self.eval_summary_dir = './eval_summaries' # remove the old directory if it exists if os.path.exists(self.eval_summary_dir): import shutil shutil.rmtree(self.eval_summary_dir) self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3) def get_env_state(self): env_dict=dict(act_moving_average=self.act_moving_average) if self.use_adr: env_dict = dict(**env_dict, **super().get_env_state()) return env_dict def get_save_tensors(self): if hasattr(self, 'actions'): actions = self.actions else: actions = torch.zeros((self.num_envs, self.cfg["env"]["numActions"])).to(self.device) # scale is [-1, 1] -> [low, upper] # unscale is [low, upper] -> [-1, 1] # self.actions are in [-1, 1] as they are raw # actions returned by the policy return { # 'observations': self.obs_buf, 'actions': actions, 'cube_state': self.root_state_tensor[self.object_indices], 'goal_state': self.goal_states, 'joint_positions': self.dof_pos, 'joint_velocities': self.dof_vel, 'root_state': self.root_state_tensor[self.hand_indices], } def save_step(self): self.capture.append_experience(self.get_save_tensors()) def get_num_obs_dict(self, num_dofs): # This is what we use for ADR num_obs = { "dof_pos": num_dofs, "dof_pos_randomized": num_dofs, "dof_vel": num_dofs, "dof_force": num_dofs, # generalised forces "object_vels": 6, "last_actions": num_dofs, "cube_random_params": 3, "hand_random_params": 1, 
"gravity_vec": 3, "ft_states": 13 * self.num_fingertips, # (pos, quat, linvel, angvel) per fingertip "ft_force_torques": 6 * self.num_fingertips, # wrenches "rb_forces": 3, # random forces being applied to the cube "rot_dist": 2, "stochastic_delay_params": 4, # cube obs + action delay prob, action fixed latency, pose refresh rate "affine_params": 16*2 + 7*2 + 16*2, "object_pose": 7, "goal_pose": 7, "goal_relative_rot": 4, "object_pose_cam_randomized": 7, "goal_relative_rot_cam_randomized": 4, } return num_obs def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../assets') hand_asset_file = "urdf/kuka_allegro_description/allegro.urdf" if "asset" in self.cfg["env"]: asset_root = self.cfg["env"]["asset"].get("assetRoot", asset_root) hand_asset_file = self.cfg["env"]["asset"].get("assetFileName", hand_asset_file) object_asset_file = self.asset_files_dict[self.object_type] # load allegro hand_ asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True # The control interface i.e. 
we will be sending target positions to the robot asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS hand_asset = self.gym.load_asset(self.sim, asset_root, hand_asset_file, asset_options) self.num_hand_bodies = self.gym.get_asset_rigid_body_count(hand_asset) self.num_hand_shapes = self.gym.get_asset_rigid_shape_count(hand_asset) self.num_hand_dofs = self.gym.get_asset_dof_count(hand_asset) print("Num dofs: ", self.num_hand_dofs) self.num_hand_actuators = self.num_hand_dofs self.actuated_dof_indices = [i for i in range(self.num_hand_dofs)] # set allegro_hand dof properties hand_dof_props = self.gym.get_asset_dof_properties(hand_asset) self.hand_dof_lower_limits = [] self.hand_dof_upper_limits = [] self.hand_dof_default_pos = [] self.hand_dof_default_vel = [] self.sensors = [] sensor_pose = gymapi.Transform() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips] # create fingertip force sensors sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(hand_asset, ft_handle, sensor_pose) for i in range(self.num_hand_dofs): self.hand_dof_lower_limits.append(hand_dof_props['lower'][i]) self.hand_dof_upper_limits.append(hand_dof_props['upper'][i]) self.hand_dof_default_pos.append(0.0) self.hand_dof_default_vel.append(0.0) hand_dof_props['effort'][i] = self.max_effort hand_dof_props['stiffness'][i] = 2 hand_dof_props['damping'][i] = 0.1 hand_dof_props['friction'][i] = 0.01 hand_dof_props['armature'][i] = 0.002 self.actuated_dof_indices = to_torch(self.actuated_dof_indices, dtype=torch.long, device=self.device) self.hand_dof_lower_limits = to_torch(self.hand_dof_lower_limits, device=self.device) self.hand_dof_upper_limits = to_torch(self.hand_dof_upper_limits, device=self.device) self.hand_dof_default_pos = to_torch(self.hand_dof_default_pos, device=self.device) self.hand_dof_default_vel = to_torch(self.hand_dof_default_vel, device=self.device) # load manipulated object and goal assets object_asset_options = gymapi.AssetOptions() object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) hand_start_pose = gymapi.Transform() hand_start_pose.p = gymapi.Vec3(*get_axis_params(0.5, self.up_axis_idx)) hand_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) * \ gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.47 * np.pi) * \ gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.25 * np.pi) object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = hand_start_pose.p.x pose_dy, pose_dz = self.start_object_pose_dy, self.start_object_pose_dz object_start_pose.p.y = hand_start_pose.p.y + pose_dy object_start_pose.p.z = hand_start_pose.p.z + pose_dz self.goal_displacement = gymapi.Vec3(-0.2, -0.06, 0.12) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement goal_start_pose.p.y -= 0.02 goal_start_pose.p.z -= 0.04 # compute aggregate size max_agg_bodies = self.num_hand_bodies + 2 max_agg_shapes = self.num_hand_shapes + 2 self.allegro_hands = [] self.object_handles = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] 
self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(hand_asset, name) for name in self.fingertips] hand_rb_count = self.gym.get_asset_rigid_body_count(hand_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) self.object_rb_handles = list(range(hand_rb_count, hand_rb_count + object_rb_count)) for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add hand - collision filter = -1 to use asset collision filters set in mjcf loader hand_actor = self.gym.create_actor(env_ptr, hand_asset, hand_start_pose, "hand", i, -1, 0) self.hand_start_states.append([hand_start_pose.p.x, hand_start_pose.p.y, hand_start_pose.p.z, hand_start_pose.r.x, hand_start_pose.r.y, hand_start_pose.r.z, hand_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, hand_actor, hand_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, hand_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.enable_actor_dof_force_sensors(env_ptr, hand_actor) # add object object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 0) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 0) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(hand_actor) self.object_handles.append(object_handle) self.palm_link_handle = self.gym.find_actor_rigid_body_handle(env_ptr, hand_actor, "palm_link"), object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, 
device=self.device) # Random Network Adversary # As mentioned in OpenAI et al. 2019 (Appendix B.3) https://arxiv.org/abs/1910.07113 # and DeXtreme, 2022 (Section 2.6.2) https://arxiv.org/abs/2210.13702 if self.enable_rna: softmax_bins = 32 num_dofs = len(self.hand_dof_lower_limits) self.discretised_dofs = torch.zeros((num_dofs, softmax_bins)).to(self.device) # Discretising the joing angles into 32 bins for i in range(0, len(self.hand_dof_lower_limits)): self.discretised_dofs[i] = torch.linspace(self.hand_dof_lower_limits[i], self.hand_dof_upper_limits[i], steps=softmax_bins).to(self.device) # input is the joint angles and cube pose (pos: 3 + quat: 4), therefore a total of 16+7 dimensions self.rna_network = RandomNetworkAdversary(num_envs=self.num_envs, in_dims=num_dofs+7, \ out_dims=num_dofs, softmax_bins=softmax_bins, device=self.device) # Random cube observations. Need this tensor for Random Cube Pose Injection self.random_cube_poses = torch.zeros(self.num_envs, 7, device=self.device) def compute_reward(self, actions): self.rew_buf[:], self.reset_buf[:], self.reset_goal_buf[:], self.progress_buf[:], \ self.hold_count_buf[:], self.successes[:], self.consecutive_successes[:], \ dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew = compute_hand_reward( self.rew_buf, self.reset_buf, self.reset_goal_buf, self.progress_buf, self.hold_count_buf, self.cur_targets, self.prev_targets, self.dof_vel, self.successes, self.consecutive_successes, self.max_episode_length, self.object_pos, self.object_rot, self.goal_pos, self.goal_rot, self.dist_reward_scale, self.rot_reward_scale, self.rot_eps, self.actions, self.action_penalty_scale, self.action_delta_penalty_scale, self.success_tolerance, self.reach_goal_bonus, self.fall_dist, self.fall_penalty, self.max_consecutive_successes, self.av_factor, self.num_success_hold_steps ) # update best rotation distance in the current episode self.best_rotation_dist = torch.minimum(self.best_rotation_dist, self.curr_rotation_dist) self.extras['consecutive_successes'] = self.consecutive_successes.mean() self.extras['true_objective'] = self.successes episode_cumulative = dict() episode_cumulative['dist_rew'] = dist_rew episode_cumulative['rot_rew'] = rot_rew episode_cumulative['action_penalty'] = action_penalty episode_cumulative['action_delta_penalty'] = action_delta_penalty episode_cumulative['velocity_penalty'] = velocity_penalty episode_cumulative['reach_goal_rew'] = reach_goal_rew episode_cumulative['fall_rew'] = fall_rew episode_cumulative['timeout_rew'] = timeout_rew self.extras['episode_cumulative'] = episode_cumulative if self.print_success_stat: is_success = self.reset_goal_buf.to(torch.bool) frame_ = torch.empty_like(self.last_success_step).fill_(self.frame) self.success_time = torch.where(is_success, frame_ - self.last_success_step, self.success_time) self.last_success_step = torch.where(is_success, frame_, self.last_success_step) mask_ = self.success_time > 0 if any(mask_): avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item() else: avg_time_mean = math.nan envs_reset = self.reset_buf if self.use_adr: envs_reset = self.reset_buf & ~self.apply_reset_buf self.total_resets = self.total_resets + envs_reset.sum() direct_average_successes = self.total_successes + self.successes.sum() self.total_successes = self.total_successes + (self.successes * envs_reset).sum() self.total_num_resets += envs_reset self.last_ep_successes = torch.where(envs_reset > 0, self.successes, 
self.last_ep_successes) reset_ids = envs_reset.nonzero().squeeze() last_successes = self.successes[reset_ids].long() self.successes_count[last_successes] += 1 if self.frame % 100 == 0: # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. print("Direct average consecutive successes = {:.1f}".format(direct_average_successes/(self.total_resets + self.num_envs))) if self.total_resets > 0: print("Post-Reset average consecutive successes = {:.1f}".format(self.total_successes/self.total_resets)) print(f"Max num successes: {self.successes.max().item()}") print(f"Average consecutive successes: {self.consecutive_successes.mean().item():.2f}") print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}") print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}") print(f"Last ep successes: {self.last_ep_successes.mean().item():.2f} {self.last_ep_successes}") self.eval_summaries.add_scalar("consecutive_successes", self.consecutive_successes.mean().item(), self.frame) self.eval_summaries.add_scalar("last_ep_successes", self.last_ep_successes.mean().item(), self.frame) self.eval_summaries.add_scalar("reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, self.frame) self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), self.frame) self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, self.frame) frame_time = self.control_freq_inv * self.dt self.eval_summaries.add_scalar("policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, self.frame) self.eval_summaries.add_scalar("policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), self.frame) print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}") dof_delta = self.dof_delta.abs() print(f"Max dof deltas: {dof_delta.max(dim=0).values}, max across dofs: {self.dof_delta.abs().max().item():.2f}, mean: {self.dof_delta.abs().mean().item():.2f}") print(f"Max dof delta radians per sec: {dof_delta.max().item() / frame_time:.2f}, mean: {dof_delta.mean().item() / frame_time:.2f}") # create a matplotlib bar chart of the self.successes_count import matplotlib.pyplot as plt plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy()) plt.title("Successes histogram") plt.xlabel("Successes") plt.ylabel("Frequency") plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png") plt.clf() def compute_poses_wrt_wrist(self, object_pose, palm_link_pose, goal_pose=None): object_pos = object_pose[:, 0:3] object_rot = object_pose[:, 3:7] palm_link_pos = palm_link_pose[:, 0:3] palm_link_quat_xyzw = palm_link_pose[:, 3:7] palm_link_quat_wxyz = palm_link_quat_xyzw[:, [3, 0, 1, 2]] R_W_P = quaternion_to_matrix(palm_link_quat_wxyz) T_W_P = torch.eye(4).repeat(R_W_P.shape[0], 1, 1).to(R_W_P.device) T_W_P[:, 0:3, 0:3] = R_W_P T_W_P[:, 0:3, 3] = palm_link_pos object_quat_xyzw = object_rot object_quat_wxyz = object_quat_xyzw[:, [3, 0, 1, 2]] R_W_O = quaternion_to_matrix(object_quat_wxyz) T_W_O = torch.eye(4).repeat(R_W_O.shape[0], 1, 1).to(R_W_O.device) T_W_O[:, 0:3, 0:3] = R_W_O T_W_O[:, 0:3, 3] = object_pos relative_pose = torch.matmul(torch.inverse(T_W_P), T_W_O) relative_translation = relative_pose[:, 0:3, 3] relative_quat_wxyz = matrix_to_quaternion(relative_pose[:, 0:3, 0:3]) relative_quat_xyzw = relative_quat_wxyz[:, [1, 2, 3, 0]] object_pos_wrt_wrist = relative_translation 
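# the wrist-relative pose is T_P_O = inv(T_W_P) @ T_W_O, repacked as position + xyzw quaternion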
        object_quat_wrt_wrist = relative_quat_xyzw
        object_pose_wrt_wrist = torch.cat((object_pos_wrt_wrist, object_quat_wrt_wrist), axis=-1)

        if goal_pose is None:
            return object_pose_wrt_wrist

        goal_pos = goal_pose[:, 0:3]
        goal_quat_xyzw = goal_pose[:, 3:7]
        goal_quat_wxyz = goal_quat_xyzw[:, [3, 0, 1, 2]]

        R_W_G = quaternion_to_matrix(goal_quat_wxyz)
        T_W_G = torch.eye(4).repeat(R_W_G.shape[0], 1, 1).to(R_W_G.device)
        T_W_G[:, 0:3, 0:3] = R_W_G
        T_W_G[:, 0:3, 3] = goal_pos

        relative_goal_pose = torch.matmul(torch.inverse(T_W_P), T_W_G)
        relative_goal_translation = relative_goal_pose[:, 0:3, 3]
        relative_goal_quat_wxyz = matrix_to_quaternion(relative_goal_pose[:, 0:3, 0:3])
        relative_goal_quat_xyzw = relative_goal_quat_wxyz[:, [1, 2, 3, 0]]
        goal_pose_wrt_wrist = torch.cat((relative_goal_translation, relative_goal_quat_xyzw), axis=-1)

        return object_pose_wrt_wrist, goal_pose_wrt_wrist

    def convert_pos_quat_to_mat(self, obj_pose_pos_quat):
        pos = obj_pose_pos_quat[:, 0:3]
        quat_xyzw = obj_pose_pos_quat[:, 3:7]
        quat_wxyz = quat_xyzw[:, [3, 0, 1, 2]]
        R = quaternion_to_matrix(quat_wxyz)
        T = torch.eye(4).repeat(R.shape[0], 1, 1).to(R.device)
        T[:, 0:3, 0:3] = R
        T[:, 0:3, 3] = pos
        return T

    def compute_observations(self):
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        self.gym.refresh_force_sensor_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)

        self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
        self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
        self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
        self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
        self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]

        self.goal_pose = self.goal_states[:, 0:7]
        self.goal_pos = self.goal_states[:, 0:3]
        self.goal_rot = self.goal_states[:, 3:7]

        # Need to update the pose of the cube so that it is represented wrt wrist
        self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
        self.object_pose_wrt_wrist, self.goal_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose, self.palm_link_pose, self.goal_pose)
        self.goal_wrt_wrist_rot = self.goal_pose_wrt_wrist[:, 3:7]

        self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]
        self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]

        if not self.use_adr and self.randomize:
            update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rates) == 0
            self.obs_object_pose_freq[update_freq] = self.object_pose_wrt_wrist[update_freq]

            # simulate adding delay; a uniform draw makes cube_obs_delay_prob act as a true probability
            update_delay = torch.rand(self.num_envs, device=self.device) > self.cube_obs_delay_prob
            self.obs_object_pose[update_delay] = self.obs_object_pose_freq[update_delay]

        # increment the frame counter both for manual DR and ADR
        self.frame += 1

        cube_scale = self.cube_random_params[:, 0]
        cube_scale = cube_scale.reshape(-1, 1)

        # unscale maps [lower, upper] -> [-1, 1]
        self.obs_dict["dof_pos"][:] = unscale(self.dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
        self.obs_dict["dof_vel"][:] = self.dof_vel
        self.obs_dict["dof_force"][:] = self.force_torque_obs_scale * self.dof_force_tensor

        self.obs_dict["object_pose"][:] = self.object_pose_wrt_wrist
        self.obs_dict["object_vels"][:, 0:3] = self.object_linvel
        self.obs_dict["object_vels"][:, 3:6] = self.vel_obs_scale * self.object_angvel

        self.obs_dict["goal_pose"][:] = self.goal_pose_wrt_wrist
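        # The relative rotation below, q_rel = q_obj * conj(q_goal), encodes the
        # rotation still needed to reach the goal; it equals the identity quaternion
        # when the object orientation matches the goal orientation.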
self.obs_dict["goal_relative_rot"][:] = quat_mul(self.object_pose_wrt_wrist[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot)) # This is only needed for manul DR experiments if not self.use_adr: self.obs_dict["object_pose_cam"][:] = self.obs_object_pose self.obs_dict["goal_relative_rot_cam"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot)) self.obs_dict["ft_states"][:] = self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips) self.obs_dict["ft_force_torques"][:] = self.force_torque_obs_scale * self.vec_sensor_tensor # wrenches self.obs_dict["rb_forces"] = self.rb_forces[:, self.object_rb_handles, :].view(-1, 3) self.obs_dict["last_actions"][:] = self.actions if self.randomize: self.obs_dict["cube_random_params"][:] = self.cube_random_params self.obs_dict["hand_random_params"][:] = self.hand_random_params self.obs_dict["gravity_vec"][:] = self.gravity_vec quat_diff = quat_mul(self.object_rot, quat_conjugate(self.goal_rot)) self.curr_rotation_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0)) self.best_rotation_dist = torch.where(self.best_rotation_dist < 0.0, self.curr_rotation_dist, self.best_rotation_dist) # add rotation distances to the observations so that critic could predict the rewards better self.obs_dict["rot_dist"][:, 0] = self.curr_rotation_dist self.obs_dict["rot_dist"][:, 1] = self.best_rotation_dist def get_random_quat(self, env_ids): # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261 uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device) q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1])) q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1])) q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2])) q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2])) new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1) return new_rot def reset_target_pose(self, env_ids, apply_reset=False): rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device) if self.apply_random_quat: new_rot = self.get_random_quat(env_ids) else: new_rot = randomize_rotation(rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3] self.goal_states[env_ids, 3:7] = new_rot self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7] self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]) if apply_reset: goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(goal_object_indices), len(env_ids)) self.reset_goal_buf[env_ids] = 0 # change back to non-initialized state self.best_rotation_dist[env_ids] = -1 def get_relative_rot(self, obj_rot, goal_rot): return quat_mul(obj_rot, quat_conjugate(goal_rot)) def get_random_cube_observation(self, current_cube_pose): ''' This function replaces cube pose in some environments with a random cube pose to simulate noisy perception estimates in the 
real world. It is also called random cube pose injection. ''' env_ids = np.arange(0, self.num_envs) rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 5), device=self.device) if self.apply_random_quat: new_object_rot = self.get_random_quat(env_ids) else: new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.random_cube_poses[:, 0:2] = self.object_init_state[env_ids, 0:2] +\ 0.5 * rand_floats[:, 0:2] self.random_cube_poses[:, 2] = self.object_init_state[env_ids, 2] + \ 0.5 * rand_floats[:, 2] self.random_cube_poses[:, 3:7] = new_object_rot random_cube_pose_mask = torch.rand(len(env_ids), 1, device=self.device) < self.random_cube_pose_prob current_cube_pose = current_cube_pose * ~random_cube_pose_mask + self.random_cube_poses * random_cube_pose_mask return current_cube_pose def reset_idx(self, env_ids, goal_env_ids): # generate random values rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_dofs * 2 + 5), device=self.device) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone() self.root_state_tensor[self.object_indices[env_ids], 0:2] = self.object_init_state[env_ids, 0:2] + \ self.reset_position_noise * rand_floats[:, 0:2] self.root_state_tensor[self.object_indices[env_ids], self.up_axis_idx] = self.object_init_state[env_ids, self.up_axis_idx] + \ self.reset_position_noise_z * rand_floats[:, self.up_axis_idx] if self.apply_random_quat: new_object_rot = self.get_random_quat(env_ids) else: new_object_rot = randomize_rotation(rand_floats[:, 3], rand_floats[:, 4], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]) self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(self.root_state_tensor[self.object_indices[env_ids], 7:13]) object_indices = torch.unique(torch.cat([self.object_indices[env_ids], self.goal_object_indices[env_ids], self.goal_object_indices[goal_env_ids]]).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(object_indices), len(object_indices)) # reset random force probabilities self.random_force_prob[env_ids] = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1])) # reset allegro hand delta_max = self.hand_dof_upper_limits - self.hand_dof_default_pos delta_min = self.hand_dof_lower_limits - self.hand_dof_default_pos rand_floats_dof_pos = (rand_floats[:, 5:5+self.num_hand_dofs] + 1) / 2 rand_delta = delta_min + (delta_max - delta_min) * rand_floats_dof_pos pos = self.hand_default_dof_pos + self.reset_dof_pos_noise * rand_delta self.dof_pos[env_ids, :] = pos self.dof_vel[env_ids, :] = self.hand_dof_default_vel + \ self.reset_dof_vel_noise * rand_floats[:, 5+self.num_hand_dofs:5+self.num_hand_dofs*2] self.prev_targets[env_ids, :self.num_hand_dofs] = pos self.cur_targets[env_ids, :self.num_hand_dofs] = pos self.prev_prev_targets[env_ids, :self.num_hand_dofs] = pos hand_indices = self.hand_indices[env_ids].to(torch.int32) self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)) 
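        # Both the PD position targets (set above) and the raw DOF state (set below)
        # must be pushed to the simulator for the envs being reset; updating only the
        # targets would leave the joints at their pre-reset state for a step.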
        self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids))

        # Need to update the pose of the cube so that it is represented wrt wrist
        self.palm_link_pose = self.rigid_body_states[:, self.palm_link_handle, 0:7].view(-1, 7)
        self.object_pose_wrt_wrist = self.compute_poses_wrt_wrist(self.object_pose, self.palm_link_pose)

        # object pose is represented with respect to the wrist
        self.obs_object_pose[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()
        self.obs_object_pose_freq[env_ids] = self.object_pose_wrt_wrist[env_ids].clone()

        if self.use_adr and len(env_ids) == self.num_envs:
            self.progress_buf = torch.randint(0, self.max_episode_length, size=(self.num_envs,), dtype=torch.long, device=self.device)
        else:
            self.progress_buf[env_ids] = 0

        self.reset_buf[env_ids] = 0
        if self.use_adr:
            self.apply_reset_buf[env_ids] = 0

        self.successes[env_ids] = 0
        self.best_rotation_dist[env_ids] = -1
        self.hold_count_buf[env_ids] = 0

    def get_rna_alpha(self):
        """Function to get RNA alpha value."""
        raise NotImplementedError

    def get_random_network_adversary_action(self, canonical_action):
        if self.enable_rna:
            if self.last_step > 0 and self.last_step % self.random_adversary_weight_sample_freq == 0:
                self.rna_network._refresh()

            rand_action_softmax = self.rna_network(torch.cat([self.dof_pos, self.object_pose_wrt_wrist], axis=-1))
            rand_action_inds = torch.argmax(rand_action_softmax, axis=-1)
            rand_action_inds = torch.permute(rand_action_inds, (1, 0))

            rand_perturbation = torch.gather(self.discretised_dofs, 1, rand_action_inds)
            rand_perturbation = torch.permute(rand_perturbation, (1, 0))

            # unscale it first (normalise it to [-1, 1])
            rand_perturbation = unscale(rand_perturbation, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices])

            if not self.use_adr:
                action_perturb_mask = torch.rand(self.num_envs, 1, device=self.device) < self.action_perturb_prob
                rand_perturbation = ~action_perturb_mask * canonical_action + action_perturb_mask * rand_perturbation

            rna_alpha = self.get_rna_alpha()
            rand_perturbation = rna_alpha * rand_perturbation + (1 - rna_alpha) * canonical_action
            return rand_perturbation
        else:
            return canonical_action

    def update_action_moving_average(self):
        # scheduling action moving average
        if self.last_step > 0 and self.last_step % self.act_moving_average_scheduled_freq == 0:
            sched_scaling = 1.0 / self.act_moving_average_scheduled_steps * min(self.last_step, self.act_moving_average_scheduled_steps)
            self.act_moving_average = self.act_moving_average_upper + (self.act_moving_average_lower - self.act_moving_average_upper) * \
                                      sched_scaling

            print('action moving average: {}'.format(self.act_moving_average))
            print('last_step: {}'.format(self.last_step), ' scheduled steps: {}'.format(self.act_moving_average_scheduled_steps))

        self.extras['annealing/action_moving_average_scalar'] = self.act_moving_average

    def pre_physics_step(self, actions):

        # Anneal action moving average
        self.update_action_moving_average()

        env_ids_reset = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)

        if self.randomize and not self.use_adr:
            self.apply_randomizations(dr_params=self.randomization_params, randomisation_callback=self.randomisation_callback)

        elif self.randomize and self.use_adr:
            # NB - when we are doing ADR, we must calculate the ADR or new DR vals one step BEFORE applying randomisations
            # this is because reset needs to be applied on the next step
for it to take effect env_mask_randomize = (self.reset_buf & ~self.apply_reset_buf).bool() env_ids_reset = self.apply_reset_buf.nonzero(as_tuple=False).flatten() if len(env_mask_randomize.nonzero(as_tuple=False).flatten()) > 0: self.apply_randomizations(dr_params=self.randomization_params, randomize_buf=env_mask_randomize, adr_objective=self.successes, randomisation_callback=self.randomisation_callback) self.apply_reset_buf[env_mask_randomize] = 1 # if only goals need reset, then call set API if len(goal_env_ids) > 0 and len(env_ids_reset) == 0: self.reset_target_pose(goal_env_ids, apply_reset=True) # if goals need reset in addition to other envs, call set API in reset() elif len(goal_env_ids) > 0: self.reset_target_pose(goal_env_ids) if len(env_ids_reset) > 0: self.reset_idx(env_ids_reset, goal_env_ids) self.apply_actions(actions) self.apply_random_forces() def apply_action_noise_latency(self): return self.actions def apply_actions(self, actions): self.actions = actions.clone().to(self.device) refreshed = self.progress_buf == 0 self.prev_actions_queue[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits, self.hand_dof_upper_limits).view(-1, 1, self.num_actions) # Needed for the first step and every refresh # you don't want to mix with zeros self.prev_actions[refreshed] = unscale(self.dof_pos[refreshed], self.hand_dof_lower_limits, self.hand_dof_upper_limits).view(-1, self.num_actions) # update the actions queue self.prev_actions_queue[:, 1:] = self.prev_actions_queue[:, :-1].detach() self.prev_actions_queue[:, 0, :] = self.actions # apply action delay actions_delayed = self.apply_action_noise_latency() # apply random network adversary actions_delayed = self.get_random_network_adversary_action(actions_delayed) if self.use_relative_control: targets = self.prev_targets[:, self.actuated_dof_indices] + self.hand_dof_speed_scale * self.dt * actions_delayed self.cur_targets[:, self.actuated_dof_indices] = targets elif self.use_capped_dof_control: # This is capping the maximum dof velocity targets = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) delta = targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices] max_dof_delta = self.max_dof_radians_per_second * self.dt * self.control_freq_inv delta = torch.clamp_(delta, -max_dof_delta, max_dof_delta) self.cur_targets[:, self.actuated_dof_indices] = self.prev_targets[:, self.actuated_dof_indices] + delta else: self.cur_targets[:, self.actuated_dof_indices] = scale(actions_delayed, self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) self.cur_targets[:, self.actuated_dof_indices] = self.act_moving_average * self.cur_targets[:,self.actuated_dof_indices] + \ (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices] self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(self.cur_targets[:, self.actuated_dof_indices], self.hand_dof_lower_limits[self.actuated_dof_indices], self.hand_dof_upper_limits[self.actuated_dof_indices]) self.dof_delta = self.cur_targets[:, self.actuated_dof_indices] - self.prev_targets[:, self.actuated_dof_indices] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) self.prev_actions[:] = self.actions.clone() def apply_random_forces(self): """Applies random forces to the object. 
        Forces are applied as in https://arxiv.org/abs/1808.00177
        """
        if self.force_scale > 0.0:
            self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)

            # apply new forces
            force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
            self.rb_forces[force_indices, self.object_rb_handles, :] = torch.randn(
                self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale

            self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE)

    def post_physics_step(self):
        self.progress_buf += 1

        # This is for manual DR so ADR has to be OFF
        if self.randomize and not self.use_adr:
            # This buffer is needed for manual DR randomisation
            self.randomize_buf += 1

        self.compute_observations()
        self.compute_reward(self.actions)

        # update the previous targets
        self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]

        # save and viz dr params changing on the fly
        self.track_dr_params()

        if self.viewer and self.debug_viz:
            # draw axes on target object
            self.gym.clear_lines(self.viewer)
            self.gym.refresh_rigid_body_state_tensor(self.sim)

            for i in range(self.num_envs):
                targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

                p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])

                objectx = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()
                objecty = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()
                objectz = (self.object_pos[i] + quat_apply(self.object_rot[i], to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()

                p0 = self.object_pos[i].cpu().numpy()
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])
                self.gym.add_lines(self.viewer, self.envs[i], 1, [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])

    def track_dr_params(self):
        '''
        Track any DR parameters you wish to monitor here
        '''
        pass

    def _read_cfg(self):
        '''
        Reads various variables from the config file
        '''
        self.randomize = self.cfg["task"]["randomize"]
        self.randomization_params = self.cfg["task"]["randomization_params"]
        self.aggregate_mode = self.cfg["env"]["aggregateMode"]

        self.dist_reward_scale = self.cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self.cfg["env"]["rotRewardScale"]
        self.action_penalty_scale = self.cfg["env"]["actionPenaltyScale"]
        self.action_delta_penalty_scale = self.cfg["env"]["actionDeltaPenaltyScale"]
        self.success_tolerance = self.cfg["env"]["successTolerance"]
        self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"]
        self.fall_dist = self.cfg["env"]["fallDistance"]
        self.fall_penalty = self.cfg["env"]["fallPenalty"]
        self.rot_eps = self.cfg["env"]["rotEps"]

        self.vel_obs_scale = 0.2  # scale factor of velocity based observations
        self.force_torque_obs_scale = 10.0  # scale factor of force/torque based observations

        if "max_effort" in self.cfg["env"]:
            self.max_effort = self.cfg["env"]["max_effort"]
        else:
            self.max_effort = 0.35

        self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
        self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"]
        self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
        self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
        self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]

        self.start_object_pose_dy = self.cfg["env"]["startObjectPoseDY"]
        self.start_object_pose_dz = self.cfg["env"]["startObjectPoseDZ"]

        self.force_scale = self.cfg["env"].get("forceScale", 0.0)
        self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
        self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
        self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)

        self.dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
        self.use_relative_control = self.cfg["env"]["useRelativeControl"]
        self.use_capped_dof_control = self.cfg["env"]["use_capped_dof_control"]
        self.max_dof_radians_per_second = self.cfg["env"]["max_dof_radians_per_second"]

        self.num_success_hold_steps = self.cfg["env"].get("num_success_hold_steps", 1)

        # Moving average related
        self.act_moving_average_range = self.cfg["env"]["actionsMovingAverage"]["range"]
        self.act_moving_average_scheduled_steps = self.cfg["env"]["actionsMovingAverage"]["schedule_steps"]
        self.act_moving_average_scheduled_freq = self.cfg["env"]["actionsMovingAverage"]["schedule_freq"]

        self.act_moving_average_lower = self.act_moving_average_range[0]
        self.act_moving_average_upper = self.act_moving_average_range[1]
        self.act_moving_average = self.act_moving_average_upper

        # Random cube observation
        has_random_cube_obs = 'random_cube_observation' in self.cfg["env"]
        if has_random_cube_obs:
            self.enable_random_obs = self.cfg["env"]["random_cube_observation"]["enable"]
            self.random_cube_pose_prob = self.cfg["env"]["random_cube_observation"]["prob"]
        else:
            self.enable_random_obs = False

        # We have two ways to sample quaternions where one of the samplings is biased
        # If this flag is enabled, the sampling will be UNBIASED
        self.apply_random_quat = self.cfg['env'].get("apply_random_quat", True)

        self.debug_viz = self.cfg["env"]["enableDebugVis"]

        self.max_episode_length = self.cfg["env"]["episodeLength"]
        self.reset_time = self.cfg["env"].get("resetTime", -1.0)
        self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
        self.eval_stats_name = self.cfg["env"].get("evalStatsName", '')
        self.num_eval_frames = self.cfg["env"].get("numEvalFrames", None)

        self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
        self.av_factor = self.cfg["env"].get("averFactor", 0.1)

        self.cube_obs_delay_prob = self.cfg["env"].get("cubeObsDelayProb", 0.0)

        # Action delay
        self.action_delay_prob_max = self.cfg["env"]["actionDelayProbMax"]
        self.action_latency_max = self.cfg["env"]["actionLatencyMax"]
        self.action_latency_scheduled_steps = self.cfg["env"]["actionLatencyScheduledSteps"]

        self.frame = 0
        self.max_skip_obs = self.cfg["env"].get("maxObjectSkipObs", 1)

        self.object_type = self.cfg["env"]["objectType"]
        assert self.object_type in ["block", "egg"]

        self.asset_files_dict = {
            "block":
"urdf/objects/cube_multicolor.urdf", # "block": "urdf/objects/cube_multicolor_sdf.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", } if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) # Random Network Adversary self.enable_rna = "random_network_adversary" in self.cfg["env"] and self.cfg["env"]["random_network_adversary"]["enable"] if self.enable_rna: if "prob" in self.cfg["env"]["random_network_adversary"]: self.action_perturb_prob = self.cfg["env"]["random_network_adversary"]["prob"] # how often we want to resample the weights of the random neural network self.random_adversary_weight_sample_freq = self.cfg["env"]["random_network_adversary"]["weight_sample_freq"] def _init_pre_sim_buffers(self): """Initialise buffers that must be initialised before sim startup.""" # 0 - scale, 1 - mass, 2 - friction self.cube_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device) # 0 - scale self.hand_random_params = torch.zeros((self.cfg["env"]["numEnvs"], 1), dtype=torch.float, device=self.sim_device) self.gravity_vec = torch.zeros((self.cfg["env"]["numEnvs"], 3), dtype=torch.float, device=self.sim_device) def _init_post_sim_buffers(self): """Initialise buffers that must be initialised after sim startup.""" self.dt = self.sim_params.dt control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time/(control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_hand_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.hand_default_dof_pos = torch.zeros(self.num_hand_dofs, dtype=torch.float, device=self.device) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_hand_dofs] self.dof_pos = self.dof_state[..., 0] self.dof_vel = self.dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs print("Num dofs: ", self.num_dofs) self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, 
device=self.device) self.prev_prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.hold_count_buf = self.progress_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1])) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) # object observations parameters self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] # buffer storing object poses which are only refreshed every n steps self.obs_object_pose_freq = self.object_pose.clone() # buffer storing object poses with added delay which are only refreshed every n steps self.obs_object_pose = self.object_pose.clone() self.current_object_pose = self.object_pose.clone() self.object_pose_wrt_wrist = torch.zeros_like(self.object_pose) self.object_pose_wrt_wrist[:, 6] = 1.0 self.prev_object_pose = self.object_pose.clone() # inverse refresh rate for each environment self.cube_pose_refresh_rates = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device) # offset so not all the environments have it each time self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device) self.prev_actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device) # Related to action delay self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], \ self.action_latency_max+1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device) # We have action latency MIN and MAX (declared in _read_cfg() function reading from a config file) self.action_latency_min = 1 self.action_latency = torch.randint(0, self.action_latency_min + 1, \ size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device) # tensors for rotation approach reward (-1 stands for not initialized) self.curr_rotation_dist = None self.best_rotation_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device) self.unique_cube_rotations = torch.tensor(unique_cube_rotations_3d(), dtype=torch.float, device=self.device) self.unique_cube_rotations = 
matrix_to_quaternion(self.unique_cube_rotations)
        self.num_unique_cube_rotations = self.unique_cube_rotations.shape[0]

    def randomisation_callback(self, param_name, param_val, env_id=None, actor=None):
        if param_name == "gravity":
            self.gravity_vec[:, 0] = param_val.x
            self.gravity_vec[:, 1] = param_val.y
            self.gravity_vec[:, 2] = param_val.z
        elif param_name == "scale" and actor == "object":
            self.cube_random_params[env_id, 0] = param_val.mean()
        elif param_name == "mass" and actor == "object":
            self.cube_random_params[env_id, 1] = np.mean(param_val)
        elif param_name == "friction" and actor == "object":
            self.cube_random_params[env_id, 2] = np.mean(param_val)
        elif param_name == "scale" and actor == "hand":
            self.hand_random_params[env_id, 0] = param_val.mean()


class AllegroHandDextremeADR(AllegroHandDextreme):

    def _init_pre_sim_buffers(self):
        """Initialise buffers that must be initialised before sim startup."""
        super()._init_pre_sim_buffers()

        self.cube_pose_refresh_rate = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)
        # offset so not all the environments have it each time
        self.cube_pose_refresh_offset = torch.zeros(self.cfg["env"]["numEnvs"], device=self.sim_device, dtype=torch.long)

        # stores previous actions
        self.prev_actions_queue = torch.zeros(self.cfg["env"]["numEnvs"], self.action_latency_max + 1, self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)

        # tensors to store random affine transforms
        self.affine_actions_scaling = torch.ones(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)
        self.affine_actions_additive = torch.zeros(self.cfg["env"]["numEnvs"], self.cfg["env"]["numActions"], dtype=torch.float, device=self.sim_device)

        self.affine_cube_pose_scaling = torch.ones(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)
        self.affine_cube_pose_additive = torch.zeros(self.cfg["env"]["numEnvs"], 7, dtype=torch.float, device=self.sim_device)

        self.affine_dof_pos_scaling = torch.ones(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)
        self.affine_dof_pos_additive = torch.zeros(self.cfg["env"]["numEnvs"], 16, dtype=torch.float, device=self.sim_device)

        self.action_latency = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=self.sim_device)

    def sample_discrete_adr(self, param_name, env_ids):
        """Samples a discrete value from the ADR continuous distribution.

        E.g., given a continuous ADR value of 0.6, this will sample 0 with 40%
        probability and 1 with 60% probability.
        """
        adr_value = self.get_adr_tensor(param_name, env_ids=env_ids)
        continuous_fuzzed = adr_value + (- (torch.rand_like(adr_value) - 0.5))
        return continuous_fuzzed.round().long()

    def sample_gaussian_adr(self, param_name, env_ids, trailing_dim=1):
        adr_value = self.get_adr_tensor(param_name, env_ids=env_ids).view(-1, 1)
        nonlinearity = torch.exp(torch.pow(adr_value, 2.)) - 1.
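        # A sketch of the mapping above: an ADR scalar v is turned into a noise stdev
        # of exp(v^2) - 1 when v > 0 (and clamped to 0 otherwise, see below), so the
        # injected Gaussian noise stays at zero until ADR expands the parameter past
        # zero and then grows superlinearly.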
        stdev = torch.where(adr_value > 0, nonlinearity, torch.zeros_like(adr_value))

        return torch.randn(len(env_ids), trailing_dim, device=self.device, dtype=torch.float) * stdev

    def get_rna_alpha(self):
        return self.get_adr_tensor('rna_alpha').view(-1, 1)

    def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None):
        super().apply_randomizations(dr_params, randomize_buf, adr_objective, randomisation_callback=self.randomisation_callback)

        randomize_env_ids = randomize_buf.nonzero(as_tuple=False).squeeze(-1)

        self.action_latency[randomize_env_ids] = self.sample_discrete_adr("action_latency", randomize_env_ids)

        self.cube_pose_refresh_rate[randomize_env_ids] = self.sample_discrete_adr("cube_pose_refresh_rate", randomize_env_ids)

        # NB - we want a uniform integer from 1 to max_skip_obs (inclusive), but can't use
        # torch.uniform as it doesn't support a different max/min value on each environment
        self.cube_pose_refresh_offset[randomize_buf] = \
            (torch.rand(randomize_env_ids.shape, device=self.device, dtype=torch.float) \
                * (self.cube_pose_refresh_rate[randomize_env_ids].view(-1).float()) - 0.5).round().long()  # offset range shifted back by one

        self.affine_actions_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_action_scaling", randomize_env_ids, trailing_dim=self.num_actions)
        self.affine_actions_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_action_additive", randomize_env_ids, trailing_dim=self.num_actions)

        self.affine_cube_pose_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_cube_pose_scaling", randomize_env_ids, trailing_dim=7)
        self.affine_cube_pose_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_cube_pose_additive", randomize_env_ids, trailing_dim=7)

        self.affine_dof_pos_scaling[randomize_env_ids] = 1. + self.sample_gaussian_adr("affine_dof_pos_scaling", randomize_env_ids, trailing_dim=16)
        self.affine_dof_pos_additive[randomize_env_ids] = self.sample_gaussian_adr("affine_dof_pos_additive", randomize_env_ids, trailing_dim=16)

    def create_sim(self):
        super().create_sim()

        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize and self.use_adr:
            adr_objective = torch.zeros(self.num_envs, dtype=float, device=self.device) if self.use_adr else None
            apply_rand_ones = torch.ones(self.num_envs, dtype=bool, device=self.device)
            self.apply_randomizations(self.randomization_params, apply_rand_ones, adr_objective=adr_objective,
                                      randomisation_callback=self.randomisation_callback)

    def apply_action_noise_latency(self):

        action_delay_mask = (torch.rand(self.num_envs, device=self.device) < self.get_adr_tensor("action_delay_prob")).view(-1, 1)

        actions = \
            self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * ~action_delay_mask \
            + self.prev_actions * action_delay_mask

        white_noise = self.sample_gaussian_adr("affine_action_white", self.all_env_ids, trailing_dim=self.num_actions)
        actions = self.affine_actions_scaling * actions + self.affine_actions_additive + white_noise

        return actions

    def compute_observations(self):
        super().compute_observations()

        update_freq = torch.remainder(self.frame + self.cube_pose_refresh_offset, self.cube_pose_refresh_rate) == 0

        # get white noise
        white_noise_pose = self.sample_gaussian_adr("affine_cube_pose_white", self.all_env_ids, trailing_dim=7)
        # compute noisy object pose as a stochastic affine transform of the actual one
        noisy_object_pose = self.get_random_cube_observation(
            self.affine_cube_pose_scaling * self.object_pose_wrt_wrist + self.affine_cube_pose_additive + white_noise_pose
        )

        self.obs_object_pose_freq[update_freq] = noisy_object_pose[update_freq]

        # simulate adding delay
        cube_obs_delay_prob = self.get_adr_tensor("cube_obs_delay_prob", self.all_env_ids).view(self.num_envs,)
        update_delay = torch.rand(self.num_envs, device=self.device) < cube_obs_delay_prob

        # update environments that are NOT delayed
        self.obs_object_pose[~update_delay] = self.obs_object_pose_freq[~update_delay]

        white_noise_dof_pos = self.sample_gaussian_adr("affine_dof_pos_white", self.all_env_ids, trailing_dim=16)
        self.dof_pos_randomized = self.affine_dof_pos_scaling * self.dof_pos + self.affine_dof_pos_additive + white_noise_dof_pos

        cube_scale = self.cube_random_params[:, 0]
        cube_scale = cube_scale.reshape(-1, 1)

        self.obs_dict["dof_pos_randomized"][:] = unscale(self.dof_pos_randomized, self.hand_dof_lower_limits, self.hand_dof_upper_limits)
        self.obs_dict["object_pose_cam_randomized"][:] = self.obs_object_pose
        self.obs_dict["goal_relative_rot_cam_randomized"][:] = quat_mul(self.obs_object_pose[:, 3:7], quat_conjugate(self.goal_wrt_wrist_rot))

        self.obs_dict["stochastic_delay_params"][:] = torch.stack([
            self.get_adr_tensor("cube_obs_delay_prob"),
            self.cube_pose_refresh_rate.float() / 6.0,
            self.get_adr_tensor("action_delay_prob"),
            self.action_latency.float() / 60.0,
        ], dim=1)

        self.obs_dict["affine_params"][:] = torch.cat([
            self.affine_actions_scaling,
            self.affine_actions_additive,
            self.affine_cube_pose_scaling,
            self.affine_cube_pose_additive,
            self.affine_dof_pos_scaling,
            self.affine_dof_pos_additive
        ], dim=-1)

    def _read_cfg(self):
        super()._read_cfg()

        self.vel_obs_scale = 1.0  # scale factor of velocity based observations
        self.force_torque_obs_scale = 1.0  # scale factor of force/torque based observations

        return


class AllegroHandDextremeManualDR(AllegroHandDextreme):

    def _init_post_sim_buffers(self):
        super()._init_post_sim_buffers()

        # We could potentially update this regularly
        self.action_delay_prob = self.action_delay_prob_max * \
            torch.rand(self.cfg["env"]["numEnvs"], dtype=torch.float, device=self.device)

        # inverse refresh rate for each environment
        self.cube_pose_refresh_rate = torch.randint(1, self.max_skip_obs+1, size=(self.num_envs,), device=self.device)
        # offset so not all the environments have it each time
        self.cube_pose_refresh_offset = torch.randint(0, self.max_skip_obs, size=(self.num_envs,), device=self.device)

    def get_num_obs_dict(self, num_dofs=16):
        return {"dof_pos": num_dofs,
                "dof_vel": num_dofs,
                "dof_force": num_dofs,  # generalised forces
                "object_pose": 7,
                "object_vels": 6,
                "goal_pose": 7,
                "goal_relative_rot": 4,
                "object_pose_cam": 7,
                "goal_relative_rot_cam": 4,
                "last_actions": num_dofs,
                "cube_random_params": 3,
                "hand_random_params": 1,
                "gravity_vec": 3,
                "rot_dist": 2,
                "ft_states": 13 * self.num_fingertips,  # (pos, quat, linvel, angvel) per fingertip
                "ft_force_torques": 6 * self.num_fingertips,  # wrenches
                }

    def get_rna_alpha(self):
        if self.randomize:
            return torch.rand(self.num_envs, 1, device=self.device)
        else:
            return torch.zeros(self.num_envs, 1, device=self.device)

    def create_sim(self):
        super().create_sim()

        # If randomizing, apply once immediately on startup before the first sim step
        # ADR has its own create_sim and randomisation is called there with appropriate
        # inputs
        if self.randomize and not self.use_adr:
            self.apply_randomizations(self.randomization_params, randomisation_callback=self.randomisation_callback)

    def apply_randomizations(self, dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=None):
        super().apply_randomizations(dr_params, randomize_buf=None, adr_objective=None, randomisation_callback=self.randomisation_callback)

    def apply_action_noise_latency(self):

        # anneal action latency
        if self.randomize:
            self.cur_action_latency = 1.0 / self.action_latency_scheduled_steps \
                * min(self.last_step, self.action_latency_scheduled_steps)

            self.cur_action_latency = min(max(int(self.cur_action_latency), self.action_latency_min), self.action_latency_max)

            self.extras['annealing/cur_action_latency_max'] = self.cur_action_latency

            self.action_latency = torch.randint(0, self.cur_action_latency + 1, \
                size=(self.cfg["env"]["numEnvs"],), dtype=torch.long, device=self.device)

        # probability of not updating the action this step (on top of the delay)
        action_delay_mask = (torch.rand(self.num_envs, device=self.device) > self.action_delay_prob).view(-1, 1)

        actions_delayed = \
            self.prev_actions_queue[torch.arange(self.prev_actions_queue.shape[0]), self.action_latency] * action_delay_mask \
            + self.prev_actions * ~action_delay_mask

        return actions_delayed

    def compute_observations(self):
        super().compute_observations()


#####################################################################
###=========================jit functions=========================###
#####################################################################


@torch.jit.script
def compute_hand_reward(
    rew_buf, reset_buf, reset_goal_buf, progress_buf, hold_count_buf, cur_targets, prev_targets, hand_dof_vel, successes, consecutive_successes,
    max_episode_length: float, object_pos, object_rot, target_pos, target_rot,
    dist_reward_scale: float, rot_reward_scale: float, rot_eps: float,
    actions, action_penalty_scale: float, action_delta_penalty_scale: float,
    #max_velocity: float,
    success_tolerance: float, reach_goal_bonus: float, fall_dist: float,
    fall_penalty: float, max_consecutive_successes: int, av_factor: float, num_success_hold_steps: int
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:

    # Distance from the hand to the object
    goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)

    # Orientation alignment for the cube in hand and goal cube
    quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
    rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 0:3], p=2, dim=-1), max=1.0))

    dist_rew = goal_dist * dist_reward_scale
    rot_rew = 1.0/(torch.abs(rot_dist) + rot_eps) * rot_reward_scale

    action_penalty = action_penalty_scale * torch.sum(actions ** 2, dim=-1)
    action_delta_penalty = action_delta_penalty_scale * torch.sum((cur_targets - prev_targets) ** 2, dim=-1)

    max_velocity = 5.0  # rad/s
    vel_tolerance = 1.0
    velocity_penalty_coef = -0.05

    # todo add actions regularization

    velocity_penalty = velocity_penalty_coef * torch.sum((hand_dof_vel/(max_velocity - vel_tolerance)) ** 2, dim=-1)

    # Find out which envs hit the goal and update successes count
    goal_reached = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
    hold_count_buf = torch.where(goal_reached, hold_count_buf + 1, torch.zeros_like(goal_reached))

    goal_resets = torch.where(hold_count_buf > num_success_hold_steps, torch.ones_like(reset_goal_buf), reset_goal_buf)
    successes = successes + goal_resets

    # Success bonus: orientation is within `success_tolerance` of goal orientation
    reach_goal_rew = (goal_resets == 1) * reach_goal_bonus

    # Fall penalty: distance to the goal is larger than a threshold
    fall_rew = (goal_dist >= fall_dist) * fall_penalty

    # Check env termination conditions, including maximum success number
    resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
    if max_consecutive_successes > 0:
        # Reset progress buffer on goal envs if max_consecutive_successes > 0
        progress_buf = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf)
        resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)

    timed_out = progress_buf >= max_episode_length - 1
    resets = torch.where(timed_out, torch.ones_like(resets), resets)

    # Apply penalty for not reaching the goal
    timeout_rew = timed_out * 0.5 * fall_penalty

    # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
    reward = dist_rew + rot_rew + action_penalty + action_delta_penalty + velocity_penalty + reach_goal_rew + fall_rew + timeout_rew

    num_resets = torch.sum(resets)
    finished_cons_successes = torch.sum(successes * resets.float())

    cons_successes = torch.where(num_resets > 0, av_factor*finished_cons_successes/num_resets + (1.0 - av_factor)*consecutive_successes, consecutive_successes)

    return reward, resets, goal_resets, progress_buf, hold_count_buf, successes, cons_successes, \
        dist_rew, rot_rew, action_penalty, action_delta_penalty, velocity_penalty, reach_goal_rew, fall_rew, timeout_rew  # return individual rewards for visualization


@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
    return quat_mul(quat_from_angle_axis(rand0 * np.pi, x_unit_tensor),
                    quat_from_angle_axis(rand1 * np.pi, y_unit_tensor))


def unique_cube_rotations_3d() -> List[np.ndarray]:
    """
    Returns the list of all possible 90-degree cube rotations in 3D.
Based on https://stackoverflow.com/a/70413438/1645784 """ all_rotations = [] for x, y, z in permutations([0, 1, 2]): for sx, sy, sz in itertools.product([-1, 1], repeat=3): rotation_matrix = np.zeros((3, 3)) rotation_matrix[0, x] = sx rotation_matrix[1, y] = sy rotation_matrix[2, z] = sz if np.linalg.det(rotation_matrix) == 1: all_rotations.append(rotation_matrix) return all_rotations
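# Illustrative sanity check (a sketch, not part of the task API): the proper
# rotation group of a cube has exactly 24 elements, and every matrix returned
# by unique_cube_rotations_3d() should be orthogonal with determinant +1.
if __name__ == "__main__":
    rotations = unique_cube_rotations_3d()
    assert len(rotations) == 24, f"expected 24 rotations, got {len(rotations)}"
    for rotation in rotations:
        assert np.allclose(rotation @ rotation.T, np.eye(3))  # orthogonality
        assert np.isclose(np.linalg.det(rotation), 1.0)       # proper rotation
    print(f"verified {len(rotations)} unique cube rotations")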
83,095
Python
48.198342
183
0.619592
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/dextreme/adr_vec_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import copy
from typing import Dict, Any, Tuple, List, Set

import gym
from gym import spaces

from isaacgym import gymtorch, gymapi
from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \
    get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples

import torch
import numpy as np
import operator, random
from copy import deepcopy

from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr

from collections import deque

from enum import Enum
import sys

import abc
from abc import ABC

from omegaconf import ListConfig


class RolloutWorkerModes:
    ADR_ROLLOUT = 0  # rollout with current ADR params
    ADR_BOUNDARY = 1  # rollout with params on boundaries of ADR, used to decide whether to expand ranges
    TEST_ENV = 2  # rollout with default DR params, used to measure overall success rate. (currently unused)


from isaacgymenvs.tasks.base.vec_task import Env, VecTask


class EnvDextreme(Env):

    def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool, use_dict_obs: bool):

        Env.__init__(self, config, rl_device, sim_device, graphics_device_id, headless)

        self.use_dict_obs = use_dict_obs

        if self.use_dict_obs:
            self.obs_dims = config["env"]["obsDims"]
            self.obs_space = spaces.Dict(
                {
                    k: spaces.Box(
                        np.ones(shape=dims) * -np.Inf, np.ones(shape=dims) * np.Inf
                    )
                    for k, dims in self.obs_dims.items()
                }
            )
        else:
            self.num_observations = config["env"]["numObservations"]
            self.num_states = config["env"].get("numStates", 0)

            self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
            self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)

    def get_env_state(self):
        """
        Return serializable environment state to be saved to checkpoint.
        Can be used for stateful training sessions, i.e. with adaptive curriculums.
""" return None def set_env_state(self, env_state): pass class VecTaskDextreme(EnvDextreme, VecTask): def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False): """Initialise the `VecTask`. Args: config: config dictionary for the environment. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to False to disable viewer rendering. """ EnvDextreme.__init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs) self.sim_params = self._VecTask__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"]) if self.cfg["physics_engine"] == "physx": self.physics_engine = gymapi.SIM_PHYSX elif self.cfg["physics_engine"] == "flex": self.physics_engine = gymapi.SIM_FLEX else: msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}" raise ValueError(msg) self.virtual_display = None # optimization flags for pytorch JIT torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) self.gym = gymapi.acquire_gym() self.first_randomization = True self.randomize = self.cfg["task"]["randomize"] self.randomize_obs_builtin = "observations" in self.cfg["task"].get("randomization_params", {}) self.randomize_act_builtin = "actions" in self.cfg["task"].get("randomization_params", {}) self.randomized_suffix = "randomized" if self.use_dict_obs and self.randomize and self.randomize_obs_builtin: self.randomisation_obs = set(self.obs_space.keys()).intersection(set(self.randomization_params['observations'].keys())) for obs_name in self.randomisation_obs: self.obs_space[f"{obs_name}_{self.randomized_suffix}"] = self.obs_space[obs_name] self.obs_dims[f"{obs_name}_{self.randomized_suffix}"] = self.obs_dims[obs_name] self.obs_randomizations = {} elif self.randomize_obs_builtin: self.obs_randomizations = None self.action_randomizations = None self.original_props = {} self.actor_params_generator = None self.extern_actor_params = {} self.last_step = -1 self.last_rand_step = -1 for env_id in range(self.num_envs): self.extern_actor_params[env_id] = None # create envs, sim and viewer self.sim_initialized = False self.create_sim() self.gym.prepare_sim(self.sim) self.sim_initialized = True self.set_viewer() self.allocate_buffers() def allocate_buffers(self): """Allocate the observation, states, etc. buffers. These are what is used to set observations and states in the environment classes which inherit from this one, and are read in `step` and other related functions. 
""" # allocate buffers if self.use_dict_obs: self.obs_dict = { k: torch.zeros( (self.num_envs, *dims), device=self.device, dtype=torch.float ) for k, dims in self.obs_dims.items() } print("Obs dictinary: ") print(self.obs_dims) # print(self.obs_dict) for k, dims in self.obs_dims.items(): print("1") print(dims) self.obs_dict_repeat = { k: torch.zeros( (self.num_envs, *dims), device=self.device, dtype=torch.float ) for k, dims in self.obs_dims.items() } else: self.obs_dict = {} self.obs_buf = torch.zeros( (self.num_envs, self.num_obs), device=self.device, dtype=torch.float) self.states_buf = torch.zeros( (self.num_envs, self.num_states), device=self.device, dtype=torch.float) self.rew_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.float) self.reset_buf = torch.ones( self.num_envs, device=self.device, dtype=torch.long) self.timeout_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.progress_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.randomize_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.extras = {} def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams): """Create an Isaac Gym sim object. Args: compute_device: ID of compute device to use. graphics_device: ID of graphics device to use. physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`) sim_params: sim params to use. Returns: the Isaac Gym sim object. """ sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params) if sim is None: print("*** Failed to create sim") quit() return sim def get_state(self): """Returns the state buffer of the environment (the priviledged observations for asymmetric training).""" if self.use_dict_obs: raise NotImplementedError("No states in vec task when `use_dict_obs=True`") return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) @abc.abstractmethod def pre_physics_step(self, actions: torch.Tensor): """Apply the actions to the environment (eg by setting torques, position targets). Args: actions: the actions to apply """ @abc.abstractmethod def post_physics_step(self): """Compute reward and observations, reset any environments that require it.""" def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ # randomize actions if self.action_randomizations is not None and self.randomize_act_builtin: actions = self.action_randomizations['noise_lambda'](actions) action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions) # apply actions self.pre_physics_step(action_tensor) # step physics and render each frame for i in range(self.control_freq_inv): self.render() self.gym.simulate(self.sim) if self.device == 'cpu': self.gym.fetch_results(self.sim, True) # compute observations, rewards, resets, ... self.post_physics_step() # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1. 
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0) # randomize observations # cannot randomise in the env because of missing suffix in the observation dict if self.randomize and self.randomize_obs_builtin and self.use_dict_obs and len(self.obs_randomizations) > 0: for obs_name, v in self.obs_randomizations.items(): self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] = v['noise_lambda'](self.obs_dict[obs_name]) # Random cube pose if hasattr(self, 'enable_random_obs') and self.enable_random_obs and obs_name == 'object_pose_cam': self.obs_dict[f"{obs_name}_{self.randomized_suffix}"] \ = self.get_random_cube_observation(self.obs_dict[f"{obs_name}_{self.randomized_suffix}"]) if hasattr(self, 'enable_random_obs') and self.enable_random_obs: relative_rot = self.get_relative_rot(self.obs_dict['object_pose_cam_'+ self.randomized_suffix][:, 3:7], self.obs_dict['goal_pose'][:, 3:7]) v = self.obs_randomizations['goal_relative_rot_cam'] self.obs_dict["goal_relative_rot_cam_" + self.randomized_suffix] = v['noise_lambda'](relative_rot) elif self.randomize and self.randomize_obs_builtin and not self.use_dict_obs and self.obs_randomizations is not None: self.obs_buf = self.obs_randomizations['noise_lambda'](self.obs_buf) self.extras["time_outs"] = self.timeout_buf.to(self.rl_device) if self.use_dict_obs: obs_dict_ret = { k: torch.clone(torch.clamp(t, -self.clip_obs, self.clip_obs)).to( self.rl_device ) for k, t in self.obs_dict.items() } return obs_dict_ret, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras else: self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras def reset(self) -> torch.Tensor: """Reset the environment. Returns: Observation dictionary """ zero_actions = self.zero_actions() # step the simulator self.step(zero_actions) if self.use_dict_obs: obs_dict_ret = { k: torch.clone( torch.clamp(t, -self.clip_obs, self.clip_obs).to(self.rl_device) ) for k, t in self.obs_dict.items() } return obs_dict_ret else: self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict """ Domain Randomization methods """ def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. 
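When ADR is in use (see `ADRVecTask` below) this returns the dict of current ADR parameter ranges; otherwise an empty dict is returned.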
""" if self.use_adr: return dict(adr_params=self.adr_params) else: return {} def set_env_state(self, env_state): if env_state is None: return for key in self.get_env_state().keys(): if key == "adr_params" and self.use_adr and not self.adr_load_from_checkpoint: print("Skipping loading ADR params from checkpoint...") continue value = env_state.get(key, None) if value is None: continue self.__dict__[key] = value print(f'Loaded env state value {key}:{value}') if self.use_adr: print(f'ADR Params after loading from checkpoint: {self.adr_params}') def get_randomization_dict(self, dr_params, obs_shape): dist = dr_params["distribution"] op_type = dr_params["operation"] sched_type = dr_params["schedule"] if "schedule" in dr_params else None sched_step = dr_params["schedule_steps"] if "schedule" in dr_params else None op = operator.add if op_type == 'additive' else operator.mul if not self.use_adr: apply_white_noise_prob = dr_params.get("apply_white_noise", 0.5) if sched_type == 'linear': sched_scaling = 1.0 / sched_step * \ min(self.last_step, sched_step) elif sched_type == 'constant': sched_scaling = 0 if self.last_step < sched_step else 1 else: sched_scaling = 1 if dist == 'gaussian': mu, var = dr_params["range"] mu_corr, var_corr = dr_params.get("range_correlated", [0., 0.]) if op_type == 'additive': mu *= sched_scaling var *= sched_scaling mu_corr *= sched_scaling var_corr *= sched_scaling elif op_type == 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate var_corr = var_corr * sched_scaling # scale up var over time mu_corr = mu_corr * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate local_params = { 'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'corr': torch.randn(self.num_envs, *obs_shape, device=self.device) } if not self.use_adr: local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float() def noise_lambda(tensor, params=local_params): corr = local_params['corr'] corr = corr * params['var_corr'] + params['mu_corr'] if self.use_adr: return op( tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu']) else: return op( tensor, corr + torch.randn_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * params['var'] + params['mu']) elif dist == 'uniform': lo, hi = dr_params["range"] lo_corr, hi_corr = dr_params.get("range_correlated", [0., 0.]) if op_type == 'additive': lo *= sched_scaling hi *= sched_scaling lo_corr *= sched_scaling hi_corr *= sched_scaling elif op_type == 'scaling': lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling) hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling) lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) local_params = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'corr': torch.rand(self.num_envs, *obs_shape, device=self.device) } if not self.use_adr: local_params['apply_white_noise_mask'] = (torch.rand(self.num_envs, device=self.device) < apply_white_noise_prob).float() def noise_lambda(tensor, params=local_params): corr = params['corr'] corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr'] if self.use_adr: return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo']) else: return op(tensor, corr + torch.rand_like(tensor) * params['apply_white_noise_mask'].view(-1, 1) * (params['hi'] - params['lo']) + 
params['lo']) else: raise NotImplementedError # return {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda} return {'noise_lambda': noise_lambda, 'corr_val': local_params['corr']} class ADRVecTask(VecTaskDextreme): def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=False): self.adr_cfg = self.cfg["task"].get("adr", {}) self.use_adr = self.adr_cfg.get("use_adr", False) self.all_env_ids = torch.tensor(list(range(self.cfg["env"]["numEnvs"])), dtype=torch.long, device=sim_device) if self.use_adr: self.worker_adr_boundary_fraction = self.adr_cfg["worker_adr_boundary_fraction"] self.adr_queue_threshold_length = self.adr_cfg["adr_queue_threshold_length"] self.adr_objective_threshold_low = self.adr_cfg["adr_objective_threshold_low"] self.adr_objective_threshold_high = self.adr_cfg["adr_objective_threshold_high"] self.adr_extended_boundary_sample = self.adr_cfg["adr_extended_boundary_sample"] self.adr_rollout_perf_alpha = self.adr_cfg["adr_rollout_perf_alpha"] self.update_adr_ranges = self.adr_cfg["update_adr_ranges"] self.adr_clear_other_queues = self.adr_cfg["clear_other_queues"] self.adr_rollout_perf_last = None self.adr_load_from_checkpoint = self.adr_cfg["adr_load_from_checkpoint"] assert self.randomize, "Worker mode currently only supported when Domain Randomization is turned on" # 0 = rollout worker # 1 = ADR worker (see https://arxiv.org/pdf/1910.07113.pdf Section 5) # 2 = eval worker # rollout type is selected when an environment gets randomized self.worker_types = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device) self.adr_tensor_values = {} self.adr_params = self.adr_cfg["params"] self.adr_params_keys = list(self.adr_params.keys()) # list of params which rely on patching the built in domain randomisation self.adr_params_builtin_keys = [] for k in self.adr_params: self.adr_params[k]["range"] = self.adr_params[k]["init_range"] if "limits" not in self.adr_params[k]: self.adr_params[k]["limits"] = [None, None] if "delta_style" in self.adr_params[k]: assert self.adr_params[k]["delta_style"] in ["additive", "multiplicative"] else: self.adr_params[k]["delta_style"] = "additive" if "range_path" in self.adr_params[k]: self.adr_params_builtin_keys.append(k) else: # normal tensorised ADR param param_type = self.adr_params[k].get("type", "uniform") dtype = torch.long if param_type == "categorical" else torch.float self.adr_tensor_values[k] = torch.zeros(self.cfg["env"]["numEnvs"], device=sim_device, dtype=dtype) self.num_adr_params = len(self.adr_params) # modes for ADR workers. # there are 2n modes, where mode 2n is lower range and mode 2n+1 is upper range for DR parameter n self.adr_modes = torch.zeros(self.cfg["env"]["numEnvs"], dtype=torch.long, device=sim_device) self.adr_objective_queues = [deque(maxlen=self.adr_queue_threshold_length) for _ in range(2*self.num_adr_params)] super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs=use_dict_obs) def get_current_adr_params(self, dr_params): """Splices the current ADR parameters into the required ranges""" current_adr_params = copy.deepcopy(dr_params) for k in self.adr_params_builtin_keys: nested_dict_set_attr(current_adr_params, self.adr_params[k]["range_path"], self.adr_params[k]["range"]) return current_adr_params def get_dr_params_by_env_id(self, env_id, default_dr_params, current_adr_params): """Returns the (dictionary) DR params for a particular env ID.
(only applies to env randomisations, for tensor randomisations see `sample_adr_tensor`.) Params: env_id: which env ID to get the dict for. default_dr_params: environment default DR params. current_adr_params: current dictionary of DR params with current ADR ranges patched in. Returns: a patched dictionary with the env randomisations corresponding to the env ID. """ env_type = self.worker_types[env_id] if env_type == RolloutWorkerModes.ADR_ROLLOUT: # rollout worker, uses current ADR params return current_adr_params elif env_type == RolloutWorkerModes.ADR_BOUNDARY: # ADR worker, substitute upper or lower bound as entire range for this env adr_mode = int(self.adr_modes[env_id]) env_adr_params = copy.deepcopy(current_adr_params) adr_id = adr_mode // 2 # which adr parameter adr_bound = adr_mode % 2 # 0 = lower, 1 = upper param_name = self.adr_params_keys[adr_id] # this DR parameter is randomised as a tensor not through normal DR api # if not "range_path" in self.adr_params[self.adr_params_keys[adr_id]]: if param_name not in self.adr_params_builtin_keys: return env_adr_params if self.adr_extended_boundary_sample: boundary_value = self.adr_params[param_name]["next_limits"][adr_bound] else: boundary_value = self.adr_params[param_name]["range"][adr_bound] new_range = [boundary_value, boundary_value] nested_dict_set_attr(env_adr_params, self.adr_params[param_name]["range_path"], new_range) return env_adr_params elif env_type == RolloutWorkerModes.TEST_ENV: # eval worker, uses default fixed params return default_dr_params else: raise NotImplementedError def modify_adr_param(self, param, direction, adr_param_dict, param_limit=None): """Modify an ADR param. Args: param: current value of the param. direction: what direction to move the ADR parameter ('up' or 'down') adr_param_dict: dictionary of ADR parameter, used to read delta and method of applying delta param_limit: limit of the parameter (upper bound for 'up' and lower bound for 'down' mode) Returns: a `(new_val, changed)` tuple: the updated parameter value, and whether it actually changed (clamping at `param_limit` may leave it unchanged) """ op = adr_param_dict["delta_style"] delta = adr_param_dict["delta"] if direction == 'up': if op == "additive": new_val = param + delta elif op == "multiplicative": assert delta > 1.0, "Must have delta>1 for multiplicative ADR update." new_val = param * delta else: raise NotImplementedError if param_limit is not None: new_val = min(new_val, param_limit) changed = abs(new_val - param) > 1e-9 return new_val, changed elif direction == 'down': if op == "additive": new_val = param - delta elif op == "multiplicative": assert delta > 1.0, "Must have delta>1 for multiplicative ADR update." new_val = param / delta else: raise NotImplementedError if param_limit is not None: new_val = max(new_val, param_limit) changed = abs(new_val - param) > 1e-9 return new_val, changed else: raise NotImplementedError @staticmethod def env_ids_from_mask(mask): return torch.nonzero(mask, as_tuple=False).squeeze(-1) def sample_adr_tensor(self, param_name, env_ids=None): """Samples the values for a particular ADR parameter as a tensor. Sets the value as a side-effect in the dictionary of current adr tensors.
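Example (hypothetical config): for a uniform parameter with range [0.9, 1.1], rollout workers receive i.i.d. samples in [0.9, 1.1], while a boundary worker probing the upper bound receives exactly 1.1 (or `next_limits[1]` when `adr_extended_boundary_sample` is set).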
Args: param_name: name of the parameter to sample env_ids: env ids to sample Returns: (len(env_ids), tensor_dim) tensor of sampled parameter values, where tensor_dim is the trailing dimension of the generated tensor as specified in the ADR config """ if env_ids is None: env_ids = self.all_env_ids sample_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) sample_mask[env_ids] = True params = self.adr_params[param_name] param_range = params["range"] next_limits = params.get("next_limits", None) param_type = params.get("type", "uniform") n = self.adr_params_keys.index(param_name) low_idx = 2*n high_idx = 2*n + 1 adr_workers_low_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) & sample_mask adr_workers_high_mask = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) & sample_mask rollout_workers_mask = (~adr_workers_low_mask) & (~adr_workers_high_mask) & sample_mask rollout_workers_env_ids = self.env_ids_from_mask(rollout_workers_mask) if param_type == "uniform": result = torch.zeros((len(env_ids),), device=self.device, dtype=torch.float) uniform_noise_rollout_workers = \ torch.rand((rollout_workers_env_ids.shape[0],), device=self.device, dtype=torch.float) \ * (param_range[1] - param_range[0]) + param_range[0] result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers if self.adr_extended_boundary_sample: result[adr_workers_low_mask[env_ids]] = next_limits[0] result[adr_workers_high_mask[env_ids]] = next_limits[1] else: result[adr_workers_low_mask[env_ids]] = param_range[0] result[adr_workers_high_mask[env_ids]] = param_range[1] elif param_type == "categorical": result = torch.zeros((len(env_ids), ), device=self.device, dtype=torch.long) uniform_noise_rollout_workers = torch.randint(int(param_range[0]), int(param_range[1])+1, size=(rollout_workers_env_ids.shape[0], ), device=self.device) result[rollout_workers_mask[env_ids]] = uniform_noise_rollout_workers result[adr_workers_low_mask[env_ids]] = int(next_limits[0] if self.adr_extended_boundary_sample else param_range[0]) result[adr_workers_high_mask[env_ids]] = int(next_limits[1] if self.adr_extended_boundary_sample else param_range[1]) else: raise NotImplementedError(f"Unknown distribution type {param_type}") self.adr_tensor_values[param_name][env_ids] = result return result def get_adr_tensor(self, param_name, env_ids=None): """Returns the current value of an ADR tensor. """ if env_ids is None: return self.adr_tensor_values[param_name] else: return self.adr_tensor_values[param_name][env_ids] def recycle_envs(self, recycle_envs): """Recycle workers that have finished their episodes so that they can be reassigned.
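Each recycled env becomes an ADR_ROLLOUT worker with probability `worker_adr_boundary_fraction` and an ADR_BOUNDARY worker otherwise; `adr_modes` is resampled uniformly from `[0, 2 * num_adr_params)` (only meaningful for boundary workers).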
Args: recycle_envs: env_ids of environments to be recycled """ worker_types_rand = torch.rand(len(recycle_envs), device=self.device, dtype=torch.float) new_worker_types = torch.zeros(len(recycle_envs), device=self.device, dtype=torch.long) # Choose new types for workers new_worker_types[(worker_types_rand < self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_ROLLOUT new_worker_types[(worker_types_rand >= self.worker_adr_boundary_fraction)] = RolloutWorkerModes.ADR_BOUNDARY self.worker_types[recycle_envs] = new_worker_types # resample the ADR modes (which boundary values to sample) for the given environments (only applies to ADR_BOUNDARY mode) self.adr_modes[recycle_envs] = torch.randint(0, self.num_adr_params * 2, (len(recycle_envs),), dtype=torch.long, device=self.device) def adr_update(self, rand_envs, adr_objective): """Performs ADR update step (implements algorithm 1 from https://arxiv.org/pdf/1910.07113.pdf). """ rand_env_mask = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) rand_env_mask[rand_envs] = True total_nats = 0.0 # measuring entropy if self.update_adr_ranges: adr_params_iter = list(enumerate(self.adr_params)) random.shuffle(adr_params_iter) # only recycle once already_recycled = False for n, adr_param_name in adr_params_iter: # mode index for environments evaluating lower ADR bound low_idx = 2*n # mode index for environments evaluating upper ADR bound high_idx = 2*n+1 adr_workers_low = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == low_idx) adr_workers_high = (self.worker_types == RolloutWorkerModes.ADR_BOUNDARY) & (self.adr_modes == high_idx) # environments which will be evaluated for ADR (finished the episode) and which are evaluating performance at the # lower and upper boundaries adr_done_low = rand_env_mask & adr_workers_low adr_done_high = rand_env_mask & adr_workers_high # objective value at environments which have been evaluating the lower bound of ADR param n objective_low_bounds = adr_objective[adr_done_low] # objective value at environments which have been evaluating the upper bound of ADR param n objective_high_bounds = adr_objective[adr_done_high] # add the success of objectives to queues self.adr_objective_queues[low_idx].extend(objective_low_bounds.cpu().numpy().tolist()) self.adr_objective_queues[high_idx].extend(objective_high_bounds.cpu().numpy().tolist()) low_queue = self.adr_objective_queues[low_idx] high_queue = self.adr_objective_queues[high_idx] mean_low = np.mean(low_queue) if len(low_queue) > 0 else 0. mean_high = np.mean(high_queue) if len(high_queue) > 0 else 0.
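# Boundary update rule, per ADR parameter: once a queue holds adr_queue_threshold_length results, # the corresponding bound is widened when mean boundary performance exceeds adr_objective_threshold_high, # and narrowed back towards init_range when it drops below adr_objective_threshold_low.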
current_range = self.adr_params[adr_param_name]["range"] range_lower = current_range[0] range_upper = current_range[1] range_limits = self.adr_params[adr_param_name]["limits"] init_range = self.adr_params[adr_param_name]["init_range"] # one step beyond the current ADR values [next_limit_lower, next_limit_upper] = self.adr_params[adr_param_name].get("next_limits", [None, None]) changed_low, changed_high = False, False if len(low_queue) >= self.adr_queue_threshold_length: changed_low = False if mean_low < self.adr_objective_threshold_low: # increase lower bound range_lower, changed_low = self.modify_adr_param( range_lower, 'up', self.adr_params[adr_param_name], param_limit=init_range[0] ) elif mean_low > self.adr_objective_threshold_high: # reduce lower bound range_lower, changed_low = self.modify_adr_param( range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0] ) # if the ADR boundary is changed, workers working from the old parameters become invalid. # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary if changed_low: print(f'Changing {adr_param_name} lower bound. Queue length {len(self.adr_objective_queues[low_idx])}. Mean perf: {mean_low}. Old val: {current_range[0]}. New val: {range_lower}') self.adr_objective_queues[low_idx].clear() self.worker_types[adr_workers_low] = RolloutWorkerModes.ADR_ROLLOUT if len(high_queue) >= self.adr_queue_threshold_length: if mean_high < self.adr_objective_threshold_low: # reduce upper bound range_upper, changed_high = self.modify_adr_param( range_upper, 'down', self.adr_params[adr_param_name], param_limit=init_range[1] ) elif mean_high > self.adr_objective_threshold_high: # increase upper bound range_upper, changed_high = self.modify_adr_param( range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1] ) # if the ADR boundary is changed, workers working from the old parameters become invalid. # Therefore, while we use the data from them to train, we can no longer use them to evaluate DR at the boundary if changed_high: print(f'Changing upper bound {adr_param_name}. Queue length {len(self.adr_objective_queues[high_idx])}. Mean perf {mean_high}. Old val: {current_range[1]}.
New val: {range_upper}') self.adr_objective_queues[high_idx].clear() self.worker_types[adr_workers_high] = RolloutWorkerModes.ADR_ROLLOUT if changed_low or next_limit_lower is None: next_limit_lower, _ = self.modify_adr_param(range_lower, 'down', self.adr_params[adr_param_name], param_limit=range_limits[0]) if changed_high or next_limit_upper is None: next_limit_upper, _ = self.modify_adr_param(range_upper, 'up', self.adr_params[adr_param_name], param_limit=range_limits[1]) self.adr_params[adr_param_name]["range"] = [range_lower, range_upper] if not self.adr_params[adr_param_name]["delta"] < 1e-9: # disabled upper_lower_delta = range_upper - range_lower if upper_lower_delta < 1e-3: upper_lower_delta = 1e-3 nats = np.log(upper_lower_delta) total_nats += nats # print(f'nats {nats} delta {upper_lower_delta} range lower {range_lower} range upper {range_upper}') self.adr_params[adr_param_name]["next_limits"] = [next_limit_lower, next_limit_upper] if hasattr(self, 'extras') and ((changed_high or changed_low) or self.last_step % 100 == 0): # only log so often to prevent huge log files with ADR vars self.extras[f'adr/params/{adr_param_name}/lower'] = range_lower self.extras[f'adr/params/{adr_param_name}/upper'] = range_upper self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/value'] = mean_low self.extras[f'adr/objective_perf/boundary/{adr_param_name}/lower/queue_len'] = len(low_queue) self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/value'] = mean_high self.extras[f'adr/objective_perf/boundary/{adr_param_name}/upper/queue_len'] = len(high_queue) if self.adr_clear_other_queues and (changed_low or changed_high): for q in self.adr_objective_queues: q.clear() recycle_envs = torch.nonzero((self.worker_types == RolloutWorkerModes.ADR_BOUNDARY), as_tuple=False).squeeze(-1) self.recycle_envs(recycle_envs) already_recycled = True break if hasattr(self, 'extras') and self.last_step % 100 == 0: # only log so often to prevent huge log files with ADR vars mean_perf = adr_objective[rand_env_mask & (self.worker_types == RolloutWorkerModes.ADR_ROLLOUT)].mean() if self.adr_rollout_perf_last is None: self.adr_rollout_perf_last = mean_perf else: self.adr_rollout_perf_last = self.adr_rollout_perf_last * self.adr_rollout_perf_alpha + mean_perf * (1-self.adr_rollout_perf_alpha) self.extras[f'adr/objective_perf/rollouts'] = self.adr_rollout_perf_last self.extras[f'adr/npd'] = total_nats / len(self.adr_params) if not already_recycled: self.recycle_envs(rand_envs) else: self.worker_types[rand_envs] = RolloutWorkerModes.ADR_ROLLOUT # ensure tensors get re-sampled before new episode for k in self.adr_tensor_values: self.sample_adr_tensor(k, rand_envs) def apply_randomizations(self, dr_params, randomize_buf, adr_objective=None, randomisation_callback=None): """Apply domain randomizations to the environment. Note that currently we can only apply randomizations on resets, due to current PhysX limitations Args: dr_params: parameters for domain randomization to use.
randomize_buf: selective randomisation of environments adr_objective: consecutive successes scalar randomisation_callback: callbacks we may want to use from the environment class """ # If we don't have a randomization frequency, randomize every step rand_freq = dr_params.get("frequency", 1) # First, determine what to randomize: # - non-environment parameters when > frequency steps have passed since the last non-environment randomization # - physical environments in the reset buffer, which have exceeded the randomization frequency threshold # - on the first call, randomize everything self.last_step = self.gym.get_frame_count(self.sim) # for ADR if self.use_adr: if self.first_randomization: adr_env_ids = list(range(self.num_envs)) else: adr_env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist() self.adr_update(adr_env_ids, adr_objective) current_adr_params = self.get_current_adr_params(dr_params) if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq env_ids = torch.nonzero(randomize_buf, as_tuple=False).squeeze(-1).tolist() if do_nonenv_randomize: self.last_rand_step = self.last_step # For Manual DR if not self.use_adr: if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: # randomise if the number of steps since the last randomization is greater than the randomization frequency do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf)) rand_envs = torch.logical_and(rand_envs, self.reset_buf) env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist() self.randomize_buf[rand_envs] = 0 if do_nonenv_randomize: self.last_rand_step = self.last_step # We don't use it for ADR(!) if self.randomize_act_builtin: self.action_randomizations = self.get_randomization_dict(dr_params['actions'], (self.num_actions,)) if self.use_dict_obs and self.randomize_obs_builtin: for nonphysical_param in self.randomisation_obs: self.obs_randomizations[nonphysical_param] = self.get_randomization_dict(dr_params['observations'][nonphysical_param], self.obs_space[nonphysical_param].shape) elif self.randomize_obs_builtin: self.observation_randomizations = self.get_randomization_dict(dr_params['observations'], self.obs_space.shape) param_setters_map = get_property_setter_map(self.gym) param_setter_defaults_map = get_default_setter_args(self.gym) param_getters_map = get_property_getter_map(self.gym) # On first iteration, check the number of buckets if self.first_randomization: check_buckets(self.gym, self.envs, dr_params) # Randomize non-environment parameters e.g. gravity, timestep, rest_offset etc. if "sim_params" in dr_params and do_nonenv_randomize: prop_attrs = dr_params["sim_params"] prop = self.gym.get_sim_params(self.sim) # Get the list of original parameters set in the yaml; we do add/scale # on these values if self.first_randomization: self.original_props["sim_params"] = { attr: getattr(prop, attr) for attr in dir(prop)} # Get prop attrs randomised by add/scale of the original_props values # attr is [gravity, rest_offset, ...
] # attr_randomization_params can be {'range': [0, 0.5], 'operation': 'additive', 'distribution': 'gaussian'} # therefore, prop.val = original_val <operator> random sample # where operator is add/mul for attr, attr_randomization_params in prop_attrs.items(): apply_random_samples( prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step) if attr == "gravity": randomisation_callback('gravity', prop.gravity) # Randomize physical environments # if self.last_step % 10 == 0 and self.last_step > 0: # print('random rest offset = ', prop.physx.rest_offset) self.gym.set_sim_params(self.sim, prop) # If self.actor_params_generator is initialized: use it to # sample actor simulation params. This gives users the # freedom to generate samples from arbitrary distributions, # e.g. use full-covariance distributions instead of the DR's # default of treating each simulation parameter independently. extern_offsets = {} if self.actor_params_generator is not None: for env_id in env_ids: self.extern_actor_params[env_id] = \ self.actor_params_generator.sample() extern_offsets[env_id] = 0 # randomise all attributes of each actor (hand, cube etc..) # actor_properties are (stiffness, damping etc..) # Loop over envs, then loop over actors, then loop over their props # and lastly loop over the ranges of the params for i_, env_id in enumerate(env_ids): if self.use_adr: # need to generate a custom dictionary for ADR parameters env_dr_params = self.get_dr_params_by_env_id(env_id, dr_params, current_adr_params) else: env_dr_params = dr_params for actor, actor_properties in env_dr_params["actor_params"].items(): if self.first_randomization and i_ % 1000 == 0: print(f'Initializing domain randomization for {actor} env={i_}') env = self.envs[env_id] handle = self.gym.find_actor_handle(env, actor) extern_sample = self.extern_actor_params[env_id] # randomise dof_props, rigid_body, rigid_shape properties # all obtained from the YAML file # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape_properties # prop_attrs: # {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'} # {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'} for prop_name, prop_attrs in actor_properties.items(): # These properties apply to the actor's whole mesh if prop_name == 'color': num_bodies = self.gym.get_actor_rigid_body_count( env, handle) for n in range(num_bodies): self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL, gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))) continue if prop_name == 'scale': setup_only = prop_attrs.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: attr_randomization_params = prop_attrs sample = generate_random_samples(attr_randomization_params, 1, self.last_step, None) og_scale = 1 if attr_randomization_params['operation'] == 'scaling': new_scale = og_scale * sample elif attr_randomization_params['operation'] == 'additive': new_scale = og_scale + sample self.gym.set_actor_scale(env, handle, new_scale) if hasattr(self, 'cube_random_params') and actor == 'object': randomisation_callback('scale', new_scale, actor=actor, env_id=env_id) if hasattr(self, 'hand_random_params') and actor == 'object': self.hand_random_params[env_id, 0] = new_scale.mean() continue # Get the properties from the sim API # prop_names is dof_properties, rigid_body_properties, rigid_shape_properties prop = param_getters_map[prop_name](env, handle)
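# `prop` is either a list of per-body/per-shape structs (rigid_body_properties, rigid_shape_properties) # or a single struct/array (e.g. dof_properties); both cases are handled below.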
set_random_properties = True # if list it is likely to be # - rigid_body_properties # - rigid_shape_properties if isinstance(prop, list): # Read the original values; remember that # randomised_prop_val = original_prop_val <operator> random sample if self.first_randomization: self.original_props[prop_name] = [ {attr: getattr(p, attr) for attr in dir(p)} for p in prop] # # list to record value of attr for each body. # recorded_attrs = {"mass": [], "friction": []} # Loop over all the rigid bodies of the actor and then the corresponding # attribute ranges for attr, attr_randomization_params_cfg in prop_attrs.items(): # for curr_prop, og_p in zip(prop, self.original_props[prop_name]): for body_idx, (p, og_p) in enumerate(zip(prop, self.original_props[prop_name])): curr_prop = p if self.use_adr and isinstance(attr_randomization_params_cfg['range'], dict): # we have custom ranges for different bodies in this actor # first: let's find out which group of bodies this body belongs to body_group_name = None for group_name, list_of_bodies in self.custom_body_handles[actor].items(): if body_idx in list_of_bodies: body_group_name = group_name break if body_group_name is None: raise ValueError( f'Could not find body group for body {body_idx} in actor {actor}.\n' f'Body groups: {self.custom_body_handles}', ) # now: get the range for this body group rand_range = attr_randomization_params_cfg['range'][body_group_name] attr_randomization_params = copy.deepcopy(attr_randomization_params_cfg) attr_randomization_params['range'] = rand_range # we need to store original params as ADR generated samples need to be bucketed original_randomization_params = copy.deepcopy(dr_params['actor_params'][actor][prop_name][attr]) original_randomization_params['range'] = original_randomization_params['range'][body_group_name] else: attr_randomization_params = attr_randomization_params_cfg # we need to store original params as ADR generated samples need to be bucketed original_randomization_params = dr_params['actor_params'][actor][prop_name][attr] assert isinstance(attr_randomization_params['range'], (list, tuple, ListConfig)), \ f'range for {prop_name} must be a list or tuple, got {attr_randomization_params["range"]}' # attrs: # if rigid_body_properties, it is mass # if rigid_shape_properties it is friction etc. setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], curr_prop, attr) # generate the samples and add them to props # e.g.
curr_prop is rigid_body_properties # attr is 'mass' (string) # mass_val = getattr(curr_prop, 'mass') # new_mass_val = mass_val <operator> sample # setattr(curr_prop, 'mass', new_mass_val) apply_random_samples( curr_prop, og_p, attr, attr_randomization_params, self.last_step, smpl, bucketing_randomization_params=original_randomization_params) # if attr in recorded_attrs: # recorded_attrs[attr] = getattr(curr_prop, attr) if hasattr(self, 'cube_random_params') and actor == 'object': assert len(self.original_props[prop_name]) == 1 if attr == 'mass': self.cube_random_params[env_id, 1] = p.mass elif attr == 'friction': self.cube_random_params[env_id, 2] = p.friction else: set_random_properties = False # # call the callback with the list of attr values that have just been set (for each rigid body / shape in the actor) # for attr, val_list in recorded_attrs.items(): # randomisation_callback(attr, val_list, actor=actor, env_id=env_id) # if it is not a list, it is likely an array # which means it is for dof_properties else: # prop_name is e.g. dof_properties with corresponding meta-data if self.first_randomization: self.original_props[prop_name] = deepcopy(prop) # attrs is damping, stiffness etc. # attrs_randomisation_params is range, distr, schedule for attr, attr_randomization_params in prop_attrs.items(): setup_only = attr_randomization_params.get('setup_only', False) if (setup_only and not self.sim_initialized) or not setup_only: smpl = None if self.actor_params_generator is not None: smpl, extern_offsets[env_id] = get_attr_val_from_sample( extern_sample, extern_offsets[env_id], prop, attr) # we need to store original params as ADR generated samples need to be bucketed original_randomization_params = dr_params['actor_params'][actor][prop_name][attr] # generate random samples and add them to props # and we set the props back in sim later on apply_random_samples( prop, self.original_props[prop_name], attr, attr_randomization_params, self.last_step, smpl, bucketing_randomization_params=original_randomization_params) else: set_random_properties = False if set_random_properties: setter = param_setters_map[prop_name] default_args = param_setter_defaults_map[prop_name] setter(env, handle, prop, *default_args) if self.actor_params_generator is not None: for env_id in env_ids: # check that we used all dims in sample if extern_offsets[env_id] > 0: extern_sample = self.extern_actor_params[env_id] if extern_offsets[env_id] != extern_sample.shape[0]: print('env_id', env_id, 'extern_offset', extern_offsets[env_id], 'vs extern_sample.shape', extern_sample.shape) raise Exception("Invalid extern_sample size") self.first_randomization = False
60236
Python
47.151079
204
0.55671
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/base/vec_task.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import time from datetime import datetime from os.path import join from typing import Dict, Any, Tuple, List, Set import gym from gym import spaces from isaacgym import gymtorch, gymapi from isaacgymenvs.utils.torch_jit_utils import to_torch from isaacgymenvs.utils.dr_utils import get_property_setter_map, get_property_getter_map, \ get_default_setter_args, apply_random_samples, check_buckets, generate_random_samples import torch import numpy as np import operator, random from copy import deepcopy from isaacgymenvs.utils.utils import nested_dict_get_attr, nested_dict_set_attr from collections import deque import sys import abc from abc import ABC EXISTING_SIM = None SCREEN_CAPTURE_RESOLUTION = (1027, 768) def _create_sim_once(gym, *args, **kwargs): global EXISTING_SIM if EXISTING_SIM is not None: return EXISTING_SIM else: EXISTING_SIM = gym.create_sim(*args, **kwargs) return EXISTING_SIM class Env(ABC): def __init__(self, config: Dict[str, Any], rl_device: str, sim_device: str, graphics_device_id: int, headless: bool): """Initialise the env. Args: config: the configuration dictionary. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to True to disable viewer rendering. """ split_device = sim_device.split(":") self.device_type = split_device[0] self.device_id = int(split_device[1]) if len(split_device) > 1 else 0 self.device = "cpu" if config["sim"]["use_gpu_pipeline"]: if self.device_type.lower() == "cuda" or self.device_type.lower() == "gpu": self.device = "cuda" + ":" + str(self.device_id) else: print("GPU Pipeline can only be used with GPU simulation.
Forcing CPU Pipeline.") config["sim"]["use_gpu_pipeline"] = False self.rl_device = rl_device # Rendering # if training in a headless mode self.headless = headless enable_camera_sensors = config["env"].get("enableCameraSensors", False) self.graphics_device_id = graphics_device_id if enable_camera_sensors == False and self.headless == True: self.graphics_device_id = -1 self.num_environments = config["env"]["numEnvs"] self.num_agents = config["env"].get("numAgents", 1) # used for multi-agent environments self.num_observations = config["env"].get("numObservations", 0) self.num_states = config["env"].get("numStates", 0) self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf) self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf) self.num_actions = config["env"]["numActions"] self.control_freq_inv = config["env"].get("controlFrequencyInv", 1) self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.) self.clip_obs = config["env"].get("clipObservations", np.Inf) self.clip_actions = config["env"].get("clipActions", np.Inf) # Total number of training frames since the beginning of the experiment. # We get this information from the learning algorithm rather than tracking ourselves. # The learning algorithm tracks the total number of frames since the beginning of training and accounts for # experiments restart/resumes. This means this number can be > 0 right after initialization if we resume the # experiment. self.total_train_env_frames: int = 0 # number of control steps self.control_steps: int = 0 self.render_fps: int = config["env"].get("renderFPS", -1) self.last_frame_time: float = 0.0 self.record_frames: bool = False self.record_frames_dir = join("recorded_frames", datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) @abc.abstractmethod def allocate_buffers(self): """Create torch buffers for observations, rewards, actions dones and any additional data.""" @abc.abstractmethod def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ @abc.abstractmethod def reset(self)-> Dict[str, torch.Tensor]: """Reset the environment. Returns: Observation dictionary """ @abc.abstractmethod def reset_idx(self, env_ids: torch.Tensor): """Reset environments having the provided indices. Args: env_ids: environments to reset """ @property def observation_space(self) -> gym.Space: """Get the environment's observation space.""" return self.obs_space @property def action_space(self) -> gym.Space: """Get the environment's action space.""" return self.act_space @property def num_envs(self) -> int: """Get the number of environments.""" return self.num_environments @property def num_acts(self) -> int: """Get the number of actions in the environment.""" return self.num_actions @property def num_obs(self) -> int: """Get the number of observations in the environment.""" return self.num_observations def set_train_info(self, env_frames, *args, **kwargs): """ Send the information in the direction algo->environment. Most common use case: tell the environment how far along we are in the training process. This is useful for implementing curriculums and things such as that. 
""" self.total_train_env_frames = env_frames # print(f'env_frames updated to {self.total_train_env_frames}') def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. """ return None def set_env_state(self, env_state): pass class VecTask(Env): metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 24} def __init__(self, config, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture: bool = False, force_render: bool = False): """Initialise the `VecTask`. Args: config: config dictionary for the environment. sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu' graphics_device_id: the device ID to render with. headless: Set to False to disable viewer rendering. virtual_screen_capture: Set to True to allow the users get captured screen in RGB array via `env.render(mode='rgb_array')`. force_render: Set to True to always force rendering in the steps (if the `control_freq_inv` is greater than 1 we suggest stting this arg to True) """ # super().__init__(config, rl_device, sim_device, graphics_device_id, headless, use_dict_obs) super().__init__(config, rl_device, sim_device, graphics_device_id, headless) self.virtual_screen_capture = virtual_screen_capture self.virtual_display = None if self.virtual_screen_capture: from pyvirtualdisplay.smartdisplay import SmartDisplay self.virtual_display = SmartDisplay(size=SCREEN_CAPTURE_RESOLUTION) self.virtual_display.start() self.force_render = force_render self.sim_params = self.__parse_sim_params(self.cfg["physics_engine"], self.cfg["sim"]) if self.cfg["physics_engine"] == "physx": self.physics_engine = gymapi.SIM_PHYSX elif self.cfg["physics_engine"] == "flex": self.physics_engine = gymapi.SIM_FLEX else: msg = f"Invalid physics engine backend: {self.cfg['physics_engine']}" raise ValueError(msg) self.dt: float = self.sim_params.dt # optimization flags for pytorch JIT torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) self.gym = gymapi.acquire_gym() self.first_randomization = True self.original_props = {} self.dr_randomizations = {} self.actor_params_generator = None self.extern_actor_params = {} self.last_step = -1 self.last_rand_step = -1 for env_id in range(self.num_envs): self.extern_actor_params[env_id] = None # create envs, sim and viewer self.sim_initialized = False self.create_sim() self.gym.prepare_sim(self.sim) self.sim_initialized = True self.set_viewer() self.allocate_buffers() self.obs_dict = {} def set_viewer(self): """Create the viewer.""" # todo: read from config self.enable_viewer_sync = True self.viewer = None # if running with a viewer, set up keyboard shortcuts and camera if self.headless == False: # subscribe to keyboard shortcuts self.viewer = self.gym.create_viewer( self.sim, gymapi.CameraProperties()) self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_ESCAPE, "QUIT") self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_V, "toggle_viewer_sync") self.gym.subscribe_viewer_keyboard_event( self.viewer, gymapi.KEY_R, "record_frames") # set the camera position based on up axis sim_params = self.gym.get_sim_params(self.sim) if sim_params.up_axis == gymapi.UP_AXIS_Z: cam_pos = gymapi.Vec3(20.0, 25.0, 3.0) cam_target = gymapi.Vec3(10.0, 15.0, 0.0) else: cam_pos = gymapi.Vec3(20.0, 3.0, 25.0) cam_target = gymapi.Vec3(10.0, 0.0, 15.0) self.gym.viewer_camera_look_at( self.viewer, None, cam_pos, cam_target) def 
allocate_buffers(self): """Allocate the observation, states, etc. buffers. These are what is used to set observations and states in the environment classes which inherit from this one, and are read in `step` and other related functions. """ # allocate buffers self.obs_buf = torch.zeros( (self.num_envs, self.num_obs), device=self.device, dtype=torch.float) self.states_buf = torch.zeros( (self.num_envs, self.num_states), device=self.device, dtype=torch.float) self.rew_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.float) self.reset_buf = torch.ones( self.num_envs, device=self.device, dtype=torch.long) self.timeout_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.progress_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.randomize_buf = torch.zeros( self.num_envs, device=self.device, dtype=torch.long) self.extras = {} def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams): """Create an Isaac Gym sim object. Args: compute_device: ID of compute device to use. graphics_device: ID of graphics device to use. physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`) sim_params: sim params to use. Returns: the Isaac Gym sim object. """ sim = _create_sim_once(self.gym, compute_device, graphics_device, physics_engine, sim_params) if sim is None: print("*** Failed to create sim") quit() return sim def get_state(self): """Returns the state buffer of the environment (the privileged observations for asymmetric training).""" return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) @abc.abstractmethod def pre_physics_step(self, actions: torch.Tensor): """Apply the actions to the environment (eg by setting torques, position targets). Args: actions: the actions to apply """ @abc.abstractmethod def post_physics_step(self): """Compute reward and observations, reset any environments that require it.""" def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]: """Step the physics of the environment. Args: actions: actions to apply Returns: Observations, rewards, resets, info Observations are dict of observations (currently only one member called 'obs') """ # randomize actions if self.dr_randomizations.get('actions', None): actions = self.dr_randomizations['actions']['noise_lambda'](actions) action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions) # apply actions self.pre_physics_step(action_tensor) # step physics and render each frame for i in range(self.control_freq_inv): if self.force_render: self.render() self.gym.simulate(self.sim) # to fix! if self.device == 'cpu': self.gym.fetch_results(self.sim, True) # compute observations, rewards, resets, ... self.post_physics_step() self.control_steps += 1 # fill time out buffer: set to 1 if we reached the max episode length AND the reset buffer is 1. Timeout == 1 makes sense only if the reset buffer is 1. 
self.timeout_buf = (self.progress_buf >= self.max_episode_length - 1) & (self.reset_buf != 0) # randomize observations if self.dr_randomizations.get('observations', None): self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf) self.extras["time_outs"] = self.timeout_buf.to(self.rl_device) self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, self.rew_buf.to(self.rl_device), self.reset_buf.to(self.rl_device), self.extras def zero_actions(self) -> torch.Tensor: """Returns a buffer with zero actions. Returns: A buffer of zero torch actions """ actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.rl_device) return actions def reset_idx(self, env_idx): """Reset environments with indices in env_idx. Should be implemented in an environment class inherited from VecTask. """ pass def reset(self): """Called only once when the environment starts, to provide the first observations. Doesn't calculate observations. Actual reset and observation calculation need to be implemented by the user. Returns: Observation dictionary """ self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict def reset_done(self): """Reset the environment. Returns: Observation dictionary, indices of environments being reset """ done_env_ids = self.reset_buf.nonzero(as_tuple=False).flatten() if len(done_env_ids) > 0: self.reset_idx(done_env_ids) self.obs_dict["obs"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device) # asymmetric actor-critic if self.num_states > 0: self.obs_dict["states"] = self.get_state() return self.obs_dict, done_env_ids def render(self, mode="rgb_array"): """Draw the frame to the viewer, and check for keyboard events.""" if self.viewer: # check for window closed if self.gym.query_viewer_has_closed(self.viewer): sys.exit() # check for keyboard events for evt in self.gym.query_viewer_action_events(self.viewer): if evt.action == "QUIT" and evt.value > 0: sys.exit() elif evt.action == "toggle_viewer_sync" and evt.value > 0: self.enable_viewer_sync = not self.enable_viewer_sync elif evt.action == "record_frames" and evt.value > 0: self.record_frames = not self.record_frames # fetch results if self.device != 'cpu': self.gym.fetch_results(self.sim, True) # step graphics if self.enable_viewer_sync: self.gym.step_graphics(self.sim) self.gym.draw_viewer(self.viewer, self.sim, True) # Wait for dt to elapse in real time. # This synchronizes the physics simulation with the rendering rate.
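# e.g. with dt = 1/60 and control_freq_inv = 2 a control step spans 1/30 s of sim time, # so viewer-synced stepping targets roughly 30 control steps per wall-clock second.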
self.gym.sync_frame_time(self.sim) # it seems like in some cases sync_frame_time still results in higher-than-realtime framerate # this code will slow down the rendering to real time now = time.time() delta = now - self.last_frame_time if self.render_fps < 0: # render at control frequency render_dt = self.dt * self.control_freq_inv # render every control step else: render_dt = 1.0 / self.render_fps if delta < render_dt: time.sleep(render_dt - delta) self.last_frame_time = time.time() else: self.gym.poll_viewer_events(self.viewer) if self.record_frames: if not os.path.isdir(self.record_frames_dir): os.makedirs(self.record_frames_dir, exist_ok=True) self.gym.write_viewer_image_to_file(self.viewer, join(self.record_frames_dir, f"frame_{self.control_steps}.png")) if self.virtual_display and mode == "rgb_array": img = self.virtual_display.grab() return np.array(img) def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams: """Parse the config dictionary for physics stepping settings. Args: physics_engine: which physics engine to use. "physx" or "flex" config_sim: dict of sim configuration parameters Returns IsaacGym SimParams object with updated settings. """ sim_params = gymapi.SimParams() # check correct up-axis if config_sim["up_axis"] not in ["z", "y"]: msg = f"Invalid physics up-axis: {config_sim['up_axis']}" print(msg) raise ValueError(msg) # assign general sim parameters sim_params.dt = config_sim["dt"] sim_params.num_client_threads = config_sim.get("num_client_threads", 0) sim_params.use_gpu_pipeline = config_sim["use_gpu_pipeline"] sim_params.substeps = config_sim.get("substeps", 2) # assign up-axis if config_sim["up_axis"] == "z": sim_params.up_axis = gymapi.UP_AXIS_Z else: sim_params.up_axis = gymapi.UP_AXIS_Y # assign gravity sim_params.gravity = gymapi.Vec3(*config_sim["gravity"]) # configure physics parameters if physics_engine == "physx": # set the parameters if "physx" in config_sim: for opt in config_sim["physx"].keys(): if opt == "contact_collection": setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim["physx"][opt])) else: setattr(sim_params.physx, opt, config_sim["physx"][opt]) else: # set the parameters if "flex" in config_sim: for opt in config_sim["flex"].keys(): setattr(sim_params.flex, opt, config_sim["flex"][opt]) # return the configured params return sim_params """ Domain Randomization methods """ def get_actor_params_info(self, dr_params: Dict[str, Any], env): """Generate a flat array of actor params, their names and ranges. 
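Entries are named `<prop>_<index>_<attr>` (e.g. a hypothetical `rigid_body_properties_0_mass`), each paired with the [low, high] range from the DR config; non-uniform distributions are reported with infinite bounds.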
Returns: The array """ if "actor_params" not in dr_params: return None params = [] names = [] lows = [] highs = [] param_getters_map = get_property_getter_map(self.gym) for actor, actor_properties in dr_params["actor_params"].items(): handle = self.gym.find_actor_handle(env, actor) for prop_name, prop_attrs in actor_properties.items(): if prop_name == 'color': continue # this is set randomly props = param_getters_map[prop_name](env, handle) if not isinstance(props, list): props = [props] for prop_idx, prop in enumerate(props): for attr, attr_randomization_params in prop_attrs.items(): name = prop_name+'_' + str(prop_idx) + '_'+attr lo_hi = attr_randomization_params['range'] distr = attr_randomization_params['distribution'] if 'uniform' not in distr: lo_hi = (-1.0*float('Inf'), float('Inf')) if isinstance(prop, np.ndarray): for attr_idx in range(prop[attr].shape[0]): params.append(prop[attr][attr_idx]) names.append(name+'_'+str(attr_idx)) lows.append(lo_hi[0]) highs.append(lo_hi[1]) else: params.append(getattr(prop, attr)) names.append(name) lows.append(lo_hi[0]) highs.append(lo_hi[1]) return params, names, lows, highs def apply_randomizations(self, dr_params): """Apply domain randomizations to the environment. Note that currently we can only apply randomizations on resets, due to current PhysX limitations Args: dr_params: parameters for domain randomization to use. """ # If we don't have a randomization frequency, randomize every step rand_freq = dr_params.get("frequency", 1) # First, determine what to randomize: # - non-environment parameters when > frequency steps have passed since the last non-environment randomization # - physical environments in the reset buffer, which have exceeded the randomization frequency threshold # - on the first call, randomize everything self.last_step = self.gym.get_frame_count(self.sim) if self.first_randomization: do_nonenv_randomize = True env_ids = list(range(self.num_envs)) else: do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf)) rand_envs = torch.logical_and(rand_envs, self.reset_buf) env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist() self.randomize_buf[rand_envs] = 0 if do_nonenv_randomize: self.last_rand_step = self.last_step param_setters_map = get_property_setter_map(self.gym) param_setter_defaults_map = get_default_setter_args(self.gym) param_getters_map = get_property_getter_map(self.gym) # On first iteration, check the number of buckets if self.first_randomization: check_buckets(self.gym, self.envs, dr_params) for nonphysical_param in ["observations", "actions"]: if nonphysical_param in dr_params and do_nonenv_randomize: dist = dr_params[nonphysical_param]["distribution"] op_type = dr_params[nonphysical_param]["operation"] sched_type = dr_params[nonphysical_param]["schedule"] if "schedule" in dr_params[nonphysical_param] else None sched_step = dr_params[nonphysical_param]["schedule_steps"] if "schedule" in dr_params[nonphysical_param] else None op = operator.add if op_type == 'additive' else operator.mul if sched_type == 'linear': sched_scaling = 1.0 / sched_step * \ min(self.last_step, sched_step) elif sched_type == 'constant': sched_scaling = 0 if self.last_step < sched_step else 1 else: sched_scaling = 1 if dist == 'gaussian': mu, var = dr_params[nonphysical_param]["range"] mu_corr, var_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.]) if op_type ==
'additive': mu *= sched_scaling var *= sched_scaling mu_corr *= sched_scaling var_corr *= sched_scaling elif op_type == 'scaling': var = var * sched_scaling # scale up var over time mu = mu * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate var_corr = var_corr * sched_scaling # scale up var over time mu_corr = mu_corr * sched_scaling + 1.0 * \ (1.0 - sched_scaling) # linearly interpolate def noise_lambda(tensor, param_name=nonphysical_param): params = self.dr_randomizations[param_name] corr = params.get('corr', None) if corr is None: corr = torch.randn_like(tensor) params['corr'] = corr corr = corr * params['var_corr'] + params['mu_corr'] return op( tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu']) self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr, 'noise_lambda': noise_lambda} elif dist == 'uniform': lo, hi = dr_params[nonphysical_param]["range"] lo_corr, hi_corr = dr_params[nonphysical_param].get("range_correlated", [0., 0.]) if op_type == 'additive': lo *= sched_scaling hi *= sched_scaling lo_corr *= sched_scaling hi_corr *= sched_scaling elif op_type == 'scaling': lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling) hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling) lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling) def noise_lambda(tensor, param_name=nonphysical_param): params = self.dr_randomizations[param_name] corr = params.get('corr', None) if corr is None: corr = torch.randn_like(tensor) params['corr'] = corr corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr'] return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo']) self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr, 'noise_lambda': noise_lambda} if "sim_params" in dr_params and do_nonenv_randomize: prop_attrs = dr_params["sim_params"] prop = self.gym.get_sim_params(self.sim) if self.first_randomization: self.original_props["sim_params"] = { attr: getattr(prop, attr) for attr in dir(prop)} for attr, attr_randomization_params in prop_attrs.items(): apply_random_samples( prop, self.original_props["sim_params"], attr, attr_randomization_params, self.last_step) self.gym.set_sim_params(self.sim, prop) # If self.actor_params_generator is initialized: use it to # sample actor simulation params. This gives users the # freedom to generate samples from arbitrary distributions, # e.g. use full-covariance distributions instead of the DR's # default of treating each simulation parameter independently. extern_offsets = {} if self.actor_params_generator is not None: for env_id in env_ids: self.extern_actor_params[env_id] = \ self.actor_params_generator.sample() extern_offsets[env_id] = 0 # randomise all attributes of each actor (hand, cube etc..) # actor_properties are (stiffness, damping etc..) 
        # Loop over actors, then loop over envs, then loop over their props
        # and lastly loop over the ranges of the params
        for actor, actor_properties in dr_params["actor_params"].items():

            # Loop over all envs as this part is not tensorised yet
            for env_id in env_ids:
                env = self.envs[env_id]
                handle = self.gym.find_actor_handle(env, actor)
                extern_sample = self.extern_actor_params[env_id]

                # randomise dof_properties, rigid_body_properties, rigid_shape_properties
                # all obtained from the YAML file
                # EXAMPLE: prop name: dof_properties, rigid_body_properties, rigid_shape_properties
                #          prop_attrs:
                #               {'damping': {'range': [0.3, 3.0], 'operation': 'scaling', 'distribution': 'loguniform'}}
                #               {'stiffness': {'range': [0.75, 1.5], 'operation': 'scaling', 'distribution': 'loguniform'}}
                for prop_name, prop_attrs in actor_properties.items():
                    if prop_name == 'color':
                        num_bodies = self.gym.get_actor_rigid_body_count(
                            env, handle)
                        for n in range(num_bodies):
                            self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,
                                                          gymapi.Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))
                        continue

                    if prop_name == 'scale':
                        setup_only = prop_attrs.get('setup_only', False)
                        if (setup_only and not self.sim_initialized) or not setup_only:
                            attr_randomization_params = prop_attrs
                            sample = generate_random_samples(attr_randomization_params, 1,
                                                             self.last_step, None)
                            og_scale = 1
                            if attr_randomization_params['operation'] == 'scaling':
                                new_scale = og_scale * sample
                            elif attr_randomization_params['operation'] == 'additive':
                                new_scale = og_scale + sample
                            self.gym.set_actor_scale(env, handle, new_scale)
                        continue

                    prop = param_getters_map[prop_name](env, handle)
                    set_random_properties = True

                    if isinstance(prop, list):
                        if self.first_randomization:
                            self.original_props[prop_name] = [
                                {attr: getattr(p, attr) for attr in dir(p)} for p in prop]
                        for p, og_p in zip(prop, self.original_props[prop_name]):
                            for attr, attr_randomization_params in prop_attrs.items():
                                setup_only = attr_randomization_params.get('setup_only', False)
                                if (setup_only and not self.sim_initialized) or not setup_only:
                                    smpl = None
                                    if self.actor_params_generator is not None:
                                        smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                            extern_sample, extern_offsets[env_id], p, attr)
                                    apply_random_samples(
                                        p, og_p, attr, attr_randomization_params,
                                        self.last_step, smpl)
                                else:
                                    set_random_properties = False
                    else:
                        if self.first_randomization:
                            self.original_props[prop_name] = deepcopy(prop)
                        for attr, attr_randomization_params in prop_attrs.items():
                            setup_only = attr_randomization_params.get('setup_only', False)
                            if (setup_only and not self.sim_initialized) or not setup_only:
                                smpl = None
                                if self.actor_params_generator is not None:
                                    smpl, extern_offsets[env_id] = get_attr_val_from_sample(
                                        extern_sample, extern_offsets[env_id], prop, attr)
                                apply_random_samples(
                                    prop, self.original_props[prop_name], attr,
                                    attr_randomization_params, self.last_step, smpl)
                            else:
                                set_random_properties = False

                    if set_random_properties:
                        setter = param_setters_map[prop_name]
                        default_args = param_setter_defaults_map[prop_name]
                        setter(env, handle, prop, *default_args)

        if self.actor_params_generator is not None:
            for env_id in env_ids:  # check that we used all dims in sample
                if extern_offsets[env_id] > 0:
                    extern_sample = self.extern_actor_params[env_id]
                    if extern_offsets[env_id] != extern_sample.shape[0]:
                        print('env_id', env_id,
                              'extern_offset', extern_offsets[env_id],
                              'vs extern_sample.shape', extern_sample.shape)
                        raise Exception("Invalid extern_sample size")

        self.first_randomization = False
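
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# apply_randomizations() consumes a dict shaped like a task's `task.randomize`
# YAML section. The actor name "franka" and the concrete ranges below are
# illustrative assumptions; real keys come from each task's config file.
def _example_apply_randomizations(env):
    example_dr_params = {
        "frequency": 720,               # re-randomize after 720 sim steps
        "observations": {
            "range": [0.0, 0.002],      # [mu, var] for gaussian; [lo, hi] for uniform
            "operation": "additive",
            "distribution": "gaussian",
        },
        "actor_params": {
            "franka": {
                "dof_properties": {
                    "damping": {"range": [0.3, 3.0], "operation": "scaling",
                                "distribution": "loguniform"},
                },
            },
        },
    }
    env.apply_randomizations(example_dr_params)
# ---------------------------------------------------------------------------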
37,452
Python
43.586905
160
0.569476
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/base/__init__.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1,558
Python
54.678569
80
0.784339
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: base class. Inherits Gym's VecTask class and abstract base class. Inherited by environment classes. Not directly executed. Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml. """ import hydra import math import numpy as np import os import sys import torch from gym import logger from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils from isaacgymenvs.tasks.base.vec_task import VecTask import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase from isaacgymenvs.tasks.factory.factory_schema_config_base import FactorySchemaConfigBase class FactoryBase(VecTask, FactoryABCBase): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize VecTask superclass.""" self.cfg = cfg self.cfg['headless'] = headless self._get_base_yaml_params() if self.cfg_base.mode.export_scene: sim_device = 'cpu' super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) # create_sim() is called here def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_base', node=FactorySchemaConfigBase) config_path = 'task/FactoryBase.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_base = hydra.compose(config_name=config_path) self.cfg_base = self.cfg_base['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_franka_table.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_franka_table = hydra.compose(config_name=asset_info_path) self.asset_info_franka_table = self.asset_info_franka_table['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_sim(self): """Set sim and PhysX params. Create sim object, ground plane, and envs.""" if self.cfg_base.mode.export_scene: self.sim_params.use_gpu_pipeline = False self.sim = super().create_sim(compute_device=self.device_id, graphics_device=self.graphics_device_id, physics_engine=self.physics_engine, sim_params=self.sim_params) self._create_ground_plane() self.create_envs() # defined in subclass def _create_ground_plane(self): """Set ground plane params. Add plane.""" plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.0 # default = 0.0 plane_params.static_friction = 1.0 # default = 1.0 plane_params.dynamic_friction = 1.0 # default = 1.0 plane_params.restitution = 0.0 # default = 0.0 self.gym.add_ground(self.sim, plane_params) def import_franka_assets(self): """Set Franka and table asset options. 
Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') franka_file = 'factory_franka.urdf' franka_options = gymapi.AssetOptions() franka_options.flip_visual_attachments = True franka_options.fix_base_link = True franka_options.collapse_fixed_joints = False franka_options.thickness = 0.0 # default = 0.02 franka_options.density = 1000.0 # default = 1000.0 franka_options.armature = 0.01 # default = 0.0 franka_options.use_physx_armature = True if self.cfg_base.sim.add_damping: franka_options.linear_damping = 1.0 # default = 0.0; increased to improve stability franka_options.max_linear_velocity = 1.0 # default = 1000.0; reduced to prevent CUDA errors franka_options.angular_damping = 5.0 # default = 0.5; increased to improve stability franka_options.max_angular_velocity = 2 * math.pi # default = 64.0; reduced to prevent CUDA errors else: franka_options.linear_damping = 0.0 # default = 0.0 franka_options.max_linear_velocity = 1000.0 # default = 1000.0 franka_options.angular_damping = 0.5 # default = 0.5 franka_options.max_angular_velocity = 64.0 # default = 64.0 franka_options.disable_gravity = True franka_options.enable_gyroscopic_forces = True franka_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE franka_options.use_mesh_materials = True if self.cfg_base.mode.export_scene: franka_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE table_options = gymapi.AssetOptions() table_options.flip_visual_attachments = False # default = False table_options.fix_base_link = True table_options.thickness = 0.0 # default = 0.02 table_options.density = 1000.0 # default = 1000.0 table_options.armature = 0.0 # default = 0.0 table_options.use_physx_armature = True table_options.linear_damping = 0.0 # default = 0.0 table_options.max_linear_velocity = 1000.0 # default = 1000.0 table_options.angular_damping = 0.0 # default = 0.5 table_options.max_angular_velocity = 64.0 # default = 64.0 table_options.disable_gravity = False table_options.enable_gyroscopic_forces = True table_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE table_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: table_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE franka_asset = self.gym.load_asset(self.sim, urdf_root, franka_file, franka_options) table_asset = self.gym.create_box(self.sim, self.asset_info_franka_table.table_depth, self.asset_info_franka_table.table_width, self.cfg_base.env.table_height, table_options) return franka_asset, table_asset def acquire_base_tensors(self): """Acquire and wrap tensors. 
Create views.""" _root_state = self.gym.acquire_actor_root_state_tensor(self.sim) # shape = (num_envs * num_actors, 13) _body_state = self.gym.acquire_rigid_body_state_tensor(self.sim) # shape = (num_envs * num_bodies, 13) _dof_state = self.gym.acquire_dof_state_tensor(self.sim) # shape = (num_envs * num_dofs, 2) _dof_force = self.gym.acquire_dof_force_tensor(self.sim) # shape = (num_envs * num_dofs, 1) _contact_force = self.gym.acquire_net_contact_force_tensor(self.sim) # shape = (num_envs * num_bodies, 3) _jacobian = self.gym.acquire_jacobian_tensor(self.sim, 'franka') # shape = (num envs, num_bodies, 6, num_dofs) _mass_matrix = self.gym.acquire_mass_matrix_tensor(self.sim, 'franka') # shape = (num_envs, num_dofs, num_dofs) self.root_state = gymtorch.wrap_tensor(_root_state) self.body_state = gymtorch.wrap_tensor(_body_state) self.dof_state = gymtorch.wrap_tensor(_dof_state) self.dof_force = gymtorch.wrap_tensor(_dof_force) self.contact_force = gymtorch.wrap_tensor(_contact_force) self.jacobian = gymtorch.wrap_tensor(_jacobian) self.mass_matrix = gymtorch.wrap_tensor(_mass_matrix) self.root_pos = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 0:3] self.root_quat = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 3:7] self.root_linvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 7:10] self.root_angvel = self.root_state.view(self.num_envs, self.num_actors, 13)[..., 10:13] self.body_pos = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 0:3] self.body_quat = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 3:7] self.body_linvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 7:10] self.body_angvel = self.body_state.view(self.num_envs, self.num_bodies, 13)[..., 10:13] self.dof_pos = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, self.num_dofs, 2)[..., 1] self.dof_force_view = self.dof_force.view(self.num_envs, self.num_dofs, 1)[..., 0] self.contact_force = self.contact_force.view(self.num_envs, self.num_bodies, 3)[..., 0:3] self.arm_dof_pos = self.dof_pos[:, 0:7] self.arm_mass_matrix = self.mass_matrix[:, 0:7, 0:7] # for Franka arm (not gripper) self.hand_pos = self.body_pos[:, self.hand_body_id_env, 0:3] self.hand_quat = self.body_quat[:, self.hand_body_id_env, 0:4] self.hand_linvel = self.body_linvel[:, self.hand_body_id_env, 0:3] self.hand_angvel = self.body_angvel[:, self.hand_body_id_env, 0:3] self.hand_jacobian = self.jacobian[:, self.hand_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.left_finger_pos = self.body_pos[:, self.left_finger_body_id_env, 0:3] self.left_finger_quat = self.body_quat[:, self.left_finger_body_id_env, 0:4] self.left_finger_linvel = self.body_linvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_angvel = self.body_angvel[:, self.left_finger_body_id_env, 0:3] self.left_finger_jacobian = self.jacobian[:, self.left_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.right_finger_pos = self.body_pos[:, self.right_finger_body_id_env, 0:3] self.right_finger_quat = self.body_quat[:, self.right_finger_body_id_env, 0:4] self.right_finger_linvel = self.body_linvel[:, self.right_finger_body_id_env, 0:3] self.right_finger_angvel = self.body_angvel[:, self.right_finger_body_id_env, 0:3] self.right_finger_jacobian = self.jacobian[:, self.right_finger_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.left_finger_force = self.contact_force[:, self.left_finger_body_id_env, 
0:3] self.right_finger_force = self.contact_force[:, self.right_finger_body_id_env, 0:3] self.gripper_dof_pos = self.dof_pos[:, 7:9] self.fingertip_centered_pos = self.body_pos[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_quat = self.body_quat[:, self.fingertip_centered_body_id_env, 0:4] self.fingertip_centered_linvel = self.body_linvel[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_angvel = self.body_angvel[:, self.fingertip_centered_body_id_env, 0:3] self.fingertip_centered_jacobian = self.jacobian[:, self.fingertip_centered_body_id_env - 1, 0:6, 0:7] # minus 1 because base is fixed self.fingertip_midpoint_pos = self.fingertip_centered_pos.detach().clone() # initial value self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal self.fingertip_midpoint_linvel = self.fingertip_centered_linvel.detach().clone() # initial value # From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity), # angular velocity of midpoint w.r.t. world is equal to sum of # angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world. # Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero. # Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world. self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation self.dof_torque = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device) self.ctrl_target_fingertip_midpoint_pos = torch.zeros((self.num_envs, 3), device=self.device) self.ctrl_target_fingertip_midpoint_quat = torch.zeros((self.num_envs, 4), device=self.device) self.ctrl_target_dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device) self.ctrl_target_gripper_dof_pos = torch.zeros((self.num_envs, 2), device=self.device) self.ctrl_target_fingertip_contact_wrench = torch.zeros((self.num_envs, 6), device=self.device) self.prev_actions = torch.zeros((self.num_envs, self.num_actions), device=self.device) def refresh_base_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. 
self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_net_contact_force_tensor(self.sim) self.gym.refresh_jacobian_tensors(self.sim) self.gym.refresh_mass_matrix_tensors(self.sim) self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) * 0.5 self.fingertip_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos, quat=self.hand_quat, offset=self.asset_info_franka_table.franka_finger_length, device=self.device) # TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf) self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(self.fingertip_centered_angvel, (self.fingertip_midpoint_pos - self.fingertip_centered_pos), dim=1) self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 # approximation def parse_controller_spec(self): """Parse controller specification into lower-level controller configuration.""" cfg_ctrl_keys = {'num_envs', 'jacobian_type', 'gripper_prop_gains', 'gripper_deriv_gains', 'motor_ctrl_mode', 'gain_space', 'ik_method', 'joint_prop_gains', 'joint_deriv_gains', 'do_motion_ctrl', 'task_prop_gains', 'task_deriv_gains', 'do_inertial_comp', 'motion_ctrl_axes', 'do_force_ctrl', 'force_ctrl_method', 'wrench_prop_gains', 'force_ctrl_axes'} self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys} self.cfg_ctrl['num_envs'] = self.num_envs self.cfg_ctrl['jacobian_type'] = self.cfg_task.ctrl.all.jacobian_type self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device).repeat((self.num_envs, 1)) ctrl_type = self.cfg_task.ctrl.ctrl_type if ctrl_type == 'gym_default': self.cfg_ctrl['motor_ctrl_mode'] = 'gym' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.gym_default.ik_method self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_prop_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['gripper_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'joint_space_ik': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_ik.ik_method self.cfg_ctrl['joint_prop_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = False elif ctrl_type == 'joint_space_id': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'joint' self.cfg_ctrl['ik_method'] = self.cfg_task.ctrl.joint_space_id.ik_method self.cfg_ctrl['joint_prop_gains'] = 
torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['joint_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True elif ctrl_type == 'task_space_impedance': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.task_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = False self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = False elif ctrl_type == 'operational_space_motion': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.operational_space_motion.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.task_deriv_gains, device=self.device).repeat( (self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor( self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes, device=self.device).repeat( (self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = False elif ctrl_type == 'open_loop_force': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = False self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'open' self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'closed_loop_force': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = False self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'closed' self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) elif ctrl_type == 'hybrid_force_motion': self.cfg_ctrl['motor_ctrl_mode'] = 'manual' self.cfg_ctrl['gain_space'] = 'task' self.cfg_ctrl['do_motion_ctrl'] = True self.cfg_ctrl['task_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['task_deriv_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_inertial_comp'] = True self.cfg_ctrl['motion_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes, device=self.device).repeat((self.num_envs, 1)) self.cfg_ctrl['do_force_ctrl'] = True self.cfg_ctrl['force_ctrl_method'] = 'closed' self.cfg_ctrl['wrench_prop_gains'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains, device=self.device).repeat((self.num_envs, 
                                                                                                     1))
            self.cfg_ctrl['force_ctrl_axes'] = torch.tensor(self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes,
                                                            device=self.device).repeat((self.num_envs, 1))

        if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
            prop_gains = torch.cat((self.cfg_ctrl['joint_prop_gains'],
                                    self.cfg_ctrl['gripper_prop_gains']), dim=-1).to('cpu')
            deriv_gains = torch.cat((self.cfg_ctrl['joint_deriv_gains'],
                                     self.cfg_ctrl['gripper_deriv_gains']), dim=-1).to('cpu')
            # No tensor API for getting/setting actor DOF props; thus, loop required
            for env_ptr, franka_handle, prop_gain, deriv_gain in zip(self.env_ptrs, self.franka_handles, prop_gains,
                                                                     deriv_gains):
                franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
                franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_POS
                franka_dof_props['stiffness'] = prop_gain
                franka_dof_props['damping'] = deriv_gain
                self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props)
        elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
            # No tensor API for getting/setting actor DOF props; thus, loop required
            for env_ptr, franka_handle in zip(self.env_ptrs, self.franka_handles):
                franka_dof_props = self.gym.get_actor_dof_properties(env_ptr, franka_handle)
                franka_dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT
                franka_dof_props['stiffness'][:] = 0.0  # zero passive stiffness
                franka_dof_props['damping'][:] = 0.0  # zero passive damping
                self.gym.set_actor_dof_properties(env_ptr, franka_handle, franka_dof_props)

    def generate_ctrl_signals(self):
        """Get Jacobian. Set Franka DOF position targets or DOF torques."""

        # Get desired Jacobian
        if self.cfg_ctrl['jacobian_type'] == 'geometric':
            self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
        elif self.cfg_ctrl['jacobian_type'] == 'analytic':
            self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
                fingertip_quat=self.fingertip_midpoint_quat,  # no self.fingertip_quat attribute exists; the midpoint quat is the corresponding input
                fingertip_jacobian=self.fingertip_midpoint_jacobian,
                num_envs=self.num_envs,
                device=self.device)

        # Set PD joint pos target or joint torque
        if self.cfg_ctrl['motor_ctrl_mode'] == 'gym':
            self._set_dof_pos_target()
        elif self.cfg_ctrl['motor_ctrl_mode'] == 'manual':
            self._set_dof_torque()

    def _set_dof_pos_target(self):
        """Set Franka DOF position target to move fingertips towards target pose."""

        self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
            cfg_ctrl=self.cfg_ctrl,
            arm_dof_pos=self.arm_dof_pos,
            fingertip_midpoint_pos=self.fingertip_midpoint_pos,
            fingertip_midpoint_quat=self.fingertip_midpoint_quat,
            jacobian=self.fingertip_midpoint_jacobian_tf,
            ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
            ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
            ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
            device=self.device)

        self.gym.set_dof_position_target_tensor_indexed(self.sim,
                                                        gymtorch.unwrap_tensor(self.ctrl_target_dof_pos),
                                                        gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
                                                        len(self.franka_actor_ids_sim))

    def _set_dof_torque(self):
        """Set Franka DOF torque to move fingertips towards target pose."""

        self.dof_torque = fc.compute_dof_torque(
            cfg_ctrl=self.cfg_ctrl,
            dof_pos=self.dof_pos,
            dof_vel=self.dof_vel,
            fingertip_midpoint_pos=self.fingertip_midpoint_pos,
            fingertip_midpoint_quat=self.fingertip_midpoint_quat,
            fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
            fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
            left_finger_force=self.left_finger_force,
            right_finger_force=self.right_finger_force,
            jacobian=self.fingertip_midpoint_jacobian_tf,
            arm_mass_matrix=self.arm_mass_matrix,
            ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
            ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
            ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
            ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
            device=self.device)

        self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
                                                        gymtorch.unwrap_tensor(self.dof_torque),
                                                        gymtorch.unwrap_tensor(self.franka_actor_ids_sim),
                                                        len(self.franka_actor_ids_sim))

    def print_sdf_warning(self):
        """Generate SDF warning message."""

        logger.warn('Please be patient: SDFs may be generating, which may take a few minutes. Terminating prematurely may result in a corrupted SDF cache.')

    def enable_gravity(self, gravity_mag):
        """Enable gravity."""

        sim_params = self.gym.get_sim_params(self.sim)
        sim_params.gravity.z = -gravity_mag
        self.gym.set_sim_params(self.sim, sim_params)

    def disable_gravity(self):
        """Disable gravity."""

        sim_params = self.gym.get_sim_params(self.sim)
        sim_params.gravity.z = 0.0
        self.gym.set_sim_params(self.sim, sim_params)

    def export_scene(self, label):
        """Export scene to USD."""

        usd_export_options = gymapi.UsdExportOptions()
        usd_export_options.export_physics = False

        usd_exporter = self.gym.create_usd_exporter(usd_export_options)
        self.gym.export_usd_sim(usd_exporter, self.sim, label)
        sys.exit()

    def extract_poses(self):
        """Extract poses of all bodies."""

        if not hasattr(self, 'export_pos'):
            self.export_pos = []
            self.export_rot = []
            self.frame_count = 0

        pos = self.body_pos
        rot = self.body_quat

        self.export_pos.append(pos.cpu().numpy().copy())
        self.export_rot.append(rot.cpu().numpy().copy())
        self.frame_count += 1

        if len(self.export_pos) == self.max_episode_length:
            output_dir = self.__class__.__name__
            save_dir = os.path.join('usd', output_dir)
            os.makedirs(save_dir, exist_ok=True)  # ensure 'usd/<ClassName>/' exists before saving into it

            print(f'Exporting poses to {output_dir}...')
            np.save(os.path.join(save_dir, 'body_position.npy'), np.array(self.export_pos))
            np.save(os.path.join(save_dir, 'body_rotation.npy'), np.array(self.export_rot))
            print('Export completed.')

            sys.exit()
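
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# A task subclass typically drives one control step with the methods above:
# refresh tensors once, write fingertip/gripper targets, then let
# generate_ctrl_signals() map them to DOF position targets or torques.
# The target pose and the 0.04 m gripper opening are illustrative values.
def _example_control_step(task):
    task.refresh_base_tensors()  # refresh once per step, before any setters
    task.ctrl_target_fingertip_midpoint_pos[:] = torch.tensor([0.0, 0.0, 0.5], device=task.device)
    task.ctrl_target_fingertip_midpoint_quat[:] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=task.device)
    task.ctrl_target_gripper_dof_pos[:] = 0.04
    task.generate_ctrl_signals()  # gym_default -> PD pos targets; manual modes -> torques
# ---------------------------------------------------------------------------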
32,041
Python
58.668529
156
0.601635
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_gears.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for gears env. Inherits base class and abstract environment class. Inherited by gear task class. Not directly executed. Configuration defined in FactoryEnvGears.yaml. Asset info defined in factory_asset_info_gears.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvGears(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvGears.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml' # relative to Hydra search path (cfg dir) self.asset_info_gears = hydra.compose(config_name=asset_info_path) self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() gear_small_asset, gear_medium_asset, gear_large_asset, base_asset = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset, gear_large_asset, base_asset, table_asset) def _import_env_assets(self): """Set gear and base asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') gear_small_file = 'factory_gear_small.urdf' gear_medium_file = 'factory_gear_medium.urdf' gear_large_file = 'factory_gear_large.urdf' if self.cfg_env.env.tight_or_loose == 'tight': base_file = 'factory_gear_base_tight.urdf' elif self.cfg_env.env.tight_or_loose == 'loose': base_file = 'factory_gear_base_loose.urdf' gear_options = gymapi.AssetOptions() gear_options.flip_visual_attachments = False gear_options.fix_base_link = False gear_options.thickness = 0.0 # default = 0.02 gear_options.density = self.cfg_env.env.gears_density # default = 1000.0 gear_options.armature = 0.0 # default = 0.0 gear_options.use_physx_armature = True gear_options.linear_damping = 0.0 # default = 0.0 gear_options.max_linear_velocity = 1000.0 # default = 1000.0 gear_options.angular_damping = 0.0 # default = 0.5 gear_options.max_angular_velocity = 64.0 # default = 64.0 gear_options.disable_gravity = False gear_options.enable_gyroscopic_forces = True gear_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE gear_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: gear_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE base_options = gymapi.AssetOptions() base_options.flip_visual_attachments = False base_options.fix_base_link = True base_options.thickness = 0.0 # default = 0.02 base_options.density = self.cfg_env.env.base_density # default = 1000.0 base_options.armature = 0.0 # default = 0.0 base_options.use_physx_armature = True base_options.linear_damping = 0.0 # default = 0.0 base_options.max_linear_velocity = 1000.0 # default = 1000.0 base_options.angular_damping = 0.0 # default = 0.5 base_options.max_angular_velocity = 64.0 # default = 64.0 base_options.disable_gravity = False base_options.enable_gyroscopic_forces = True base_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE base_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: base_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE gear_small_asset = self.gym.load_asset(self.sim, urdf_root, gear_small_file, gear_options) gear_medium_asset = self.gym.load_asset(self.sim, urdf_root, gear_medium_file, gear_options) gear_large_asset = self.gym.load_asset(self.sim, urdf_root, gear_large_file, gear_options) base_asset = self.gym.load_asset(self.sim, urdf_root, base_file, base_options) return gear_small_asset, gear_medium_asset, gear_large_asset, base_asset def _create_actors(self, lower, upper, num_per_row, franka_asset, gear_small_asset, gear_medium_asset, gear_large_asset, base_asset, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) gear_pose = gymapi.Transform() gear_pose.p.x = 0.0 gear_pose.p.y = self.cfg_env.env.gears_lateral_offset gear_pose.p.z = self.cfg_base.env.table_height gear_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) base_pose = gymapi.Transform() base_pose.p.x = 0.0 base_pose.p.y = 0.0 base_pose.p.z = self.cfg_base.env.table_height base_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.gear_small_handles = [] self.gear_medium_handles = [] self.gear_large_handles = [] self.base_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.gear_small_actor_ids_sim = [] # within-sim indices self.gear_medium_actor_ids_sim = [] # within-sim indices self.gear_large_actor_ids_sim = [] # within-sim indices self.base_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 gear_small_handle = self.gym.create_actor(env_ptr, gear_small_asset, gear_pose, 'gear_small', i, 0, 0) self.gear_small_actor_ids_sim.append(actor_count) actor_count += 1 gear_medium_handle = self.gym.create_actor(env_ptr, gear_medium_asset, gear_pose, 'gear_medium', i, 0, 0) self.gear_medium_actor_ids_sim.append(actor_count) actor_count += 1 gear_large_handle = self.gym.create_actor(env_ptr, gear_large_asset, gear_pose, 'gear_large', i, 0, 0) self.gear_large_actor_ids_sim.append(actor_count) actor_count += 1 base_handle = self.gym.create_actor(env_ptr, base_asset, base_pose, 'base', i, 0, 0) self.base_actor_ids_sim.append(actor_count) actor_count += 1 table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 
self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) gear_small_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_small_handle) gear_small_shape_props[0].friction = self.cfg_env.env.gears_friction gear_small_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_small_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_small_shape_props[0].restitution = 0.0 # default = 0.0 gear_small_shape_props[0].compliance = 0.0 # default = 0.0 gear_small_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_small_handle, gear_small_shape_props) gear_medium_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_medium_handle) gear_medium_shape_props[0].friction = self.cfg_env.env.gears_friction gear_medium_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_medium_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_medium_shape_props[0].restitution = 0.0 # default = 0.0 gear_medium_shape_props[0].compliance = 0.0 # default = 0.0 gear_medium_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_medium_handle, gear_medium_shape_props) gear_large_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, gear_large_handle) gear_large_shape_props[0].friction = self.cfg_env.env.gears_friction gear_large_shape_props[0].rolling_friction = 0.0 # default = 0.0 gear_large_shape_props[0].torsion_friction = 0.0 # default = 0.0 gear_large_shape_props[0].restitution = 0.0 # default = 0.0 gear_large_shape_props[0].compliance = 0.0 # default = 0.0 gear_large_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, gear_large_handle, gear_large_shape_props) base_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, base_handle) base_shape_props[0].friction = self.cfg_env.env.base_friction base_shape_props[0].rolling_friction = 0.0 # default = 0.0 base_shape_props[0].torsion_friction = 0.0 # default = 0.0 base_shape_props[0].restitution = 0.0 # default = 0.0 base_shape_props[0].compliance = 0.0 # default = 0.0 base_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, base_handle, base_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.gear_small_handles.append(gear_small_handle) self.gear_medium_handles.append(gear_medium_handle) self.gear_large_handles.append(gear_large_handle) self.base_handles.append(base_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, 
                                                  dtype=torch.int32, device=self.device)
        self.gear_small_actor_ids_sim = torch.tensor(self.gear_small_actor_ids_sim, dtype=torch.int32,
                                                     device=self.device)
        self.gear_medium_actor_ids_sim = torch.tensor(self.gear_medium_actor_ids_sim, dtype=torch.int32,
                                                      device=self.device)
        self.gear_large_actor_ids_sim = torch.tensor(self.gear_large_actor_ids_sim, dtype=torch.int32,
                                                     device=self.device)
        self.base_actor_ids_sim = torch.tensor(self.base_actor_ids_sim, dtype=torch.int32, device=self.device)

        # For extracting root pos/quat
        self.gear_small_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_small', gymapi.DOMAIN_ENV)
        self.gear_medium_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_medium', gymapi.DOMAIN_ENV)
        self.gear_large_actor_id_env = self.gym.find_actor_index(env_ptr, 'gear_large', gymapi.DOMAIN_ENV)
        self.base_actor_id_env = self.gym.find_actor_index(env_ptr, 'base', gymapi.DOMAIN_ENV)

        # For extracting body pos/quat, force, and Jacobian
        self.gear_small_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_small_handle, 'gear_small',
                                                                           gymapi.DOMAIN_ENV)
        self.gear_medium_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_medium_handle, 'gear_small',
                                                                            gymapi.DOMAIN_ENV)
        self.gear_large_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, gear_large_handle, 'gear_small',
                                                                           gymapi.DOMAIN_ENV)
        self.base_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, base_handle, 'base', gymapi.DOMAIN_ENV)
        self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand',
                                                                     gymapi.DOMAIN_ENV)
        self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger',
                                                                            gymapi.DOMAIN_ENV)
        self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
                                                                             'panda_rightfinger', gymapi.DOMAIN_ENV)
        self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle,
                                                                                   'panda_fingertip_centered',
                                                                                   gymapi.DOMAIN_ENV)

    def _acquire_env_tensors(self):
        """Acquire and wrap tensors.
Create views.""" self.gear_small_pos = self.root_pos[:, self.gear_small_actor_id_env, 0:3] self.gear_small_quat = self.root_quat[:, self.gear_small_actor_id_env, 0:4] self.gear_small_linvel = self.root_linvel[:, self.gear_small_actor_id_env, 0:3] self.gear_small_angvel = self.root_angvel[:, self.gear_small_actor_id_env, 0:3] self.gear_medium_pos = self.root_pos[:, self.gear_medium_actor_id_env, 0:3] self.gear_medium_quat = self.root_quat[:, self.gear_medium_actor_id_env, 0:4] self.gear_medium_linvel = self.root_linvel[:, self.gear_medium_actor_id_env, 0:3] self.gear_medium_angvel = self.root_angvel[:, self.gear_medium_actor_id_env, 0:3] self.gear_large_pos = self.root_pos[:, self.gear_large_actor_id_env, 0:3] self.gear_large_quat = self.root_quat[:, self.gear_large_actor_id_env, 0:4] self.gear_large_linvel = self.root_linvel[:, self.gear_large_actor_id_env, 0:3] self.gear_large_angvel = self.root_angvel[:, self.gear_large_actor_id_env, 0:3] self.base_pos = self.root_pos[:, self.base_actor_id_env, 0:3] self.base_quat = self.root_quat[:, self.base_actor_id_env, 0:4] self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos, quat=self.gear_small_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_small_com_quat = self.gear_small_quat # always equal self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel, (self.gear_small_com_pos - self.gear_small_pos), dim=1) self.gear_small_com_angvel = self.gear_small_angvel # always equal self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos, quat=self.gear_medium_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_medium_com_quat = self.gear_medium_quat # always equal self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel, (self.gear_medium_com_pos - self.gear_medium_pos), dim=1) self.gear_medium_com_angvel = self.gear_medium_angvel # always equal self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos, quat=self.gear_large_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_large_com_quat = self.gear_large_quat # always equal self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel, (self.gear_large_com_pos - self.gear_large_pos), dim=1) self.gear_large_com_angvel = self.gear_large_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. 
self.gear_small_com_pos = fc.translate_along_local_z(pos=self.gear_small_pos, quat=self.gear_small_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_small_com_linvel = self.gear_small_linvel + torch.cross(self.gear_small_angvel, (self.gear_small_com_pos - self.gear_small_pos), dim=1) self.gear_medium_com_pos = fc.translate_along_local_z(pos=self.gear_medium_pos, quat=self.gear_medium_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_medium_com_linvel = self.gear_medium_linvel + torch.cross(self.gear_medium_angvel, (self.gear_medium_com_pos - self.gear_medium_pos), dim=1) self.gear_large_com_pos = fc.translate_along_local_z(pos=self.gear_large_pos, quat=self.gear_large_quat, offset=self.asset_info_gears.gear_base_height + self.asset_info_gears.gear_height * 0.5, device=self.device) self.gear_large_com_linvel = self.gear_large_linvel + torch.cross(self.gear_large_angvel, (self.gear_large_com_pos - self.gear_large_pos), dim=1)
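
# ---------------------------------------------------------------------------
# Hedged illustration (added; not part of the original file). The COM velocity
# updates above apply the rigid-body transport relation
#     v_com = v_origin + omega x (r_com - r_origin),
# which is exactly what the torch.cross(...) terms compute per env.
# Standalone numeric check with arbitrary values:
def _example_com_velocity_check():
    omega = torch.tensor([[0.0, 0.0, 1.0]])   # spin about +z at 1 rad/s
    v_o = torch.zeros((1, 3))                 # body origin at rest
    r_o = torch.zeros((1, 3))
    r_com = torch.tensor([[1.0, 0.0, 0.0]])   # COM 1 m along +x
    v_com = v_o + torch.cross(omega, r_com - r_o, dim=1)
    assert torch.allclose(v_com, torch.tensor([[0.0, 1.0, 0.0]]))  # tangential velocity
# ---------------------------------------------------------------------------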
25,262
Python
60.617073
150
0.586731
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_task.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for task class configurations. Used by Hydra. Defines template for task class YAML files. Not enforced. """ from __future__ import annotations from dataclasses import dataclass @dataclass class Sim: use_gpu_pipeline: bool # use GPU pipeline up_axis: str # up-down axis {x, y, z} dt: float # timestep size gravity: list[float] # gravity vector disable_gravity: bool # disable gravity for all actors @dataclass class Env: numObservations: int # number of observations per env; camel case required by VecTask numActions: int # number of actions per env; camel case required by VecTask numEnvs: int # number of envs; camel case required by VecTask @dataclass class Randomize: franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7) @dataclass class RL: pos_action_scale: list[float] # scale on pos displacement targets (3), to convert [-1, 1] to +- x m rot_action_scale: list[float] # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad force_action_scale: list[float] # scale on force targets (3), to convert [-1, 1] to +- x N torque_action_scale: list[float] # scale on torque targets (3), to convert [-1, 1] to +- x Nm clamp_rot: bool # clamp small values of rotation actions to zero clamp_rot_thresh: float # smallest acceptable value max_episode_length: int # max number of timesteps in each episode @dataclass class All: jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic} gripper_prop_gains: list[float] # proportional gains on left and right Franka gripper finger DOF position (2) gripper_deriv_gains: list[float] # derivative gains on left and right Franka gripper finger DOF position (2) @dataclass class GymDefault: joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7) joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7) @dataclass class JointSpaceIK: ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped 
least squares, or adaptive SVD {pinv, trans, dls, svd}
    joint_prop_gains: list[int]  # proportional gains on Franka arm DOF position (7)
    joint_deriv_gains: list[int]  # derivative gains on Franka arm DOF position (7)


@dataclass
class JointSpaceID:
    ik_method: str  # {pinv, trans, dls, svd}
    joint_prop_gains: list[int]  # proportional gains on Franka arm DOF position (7)
    joint_deriv_gains: list[int]  # derivative gains on Franka arm DOF position (7)


@dataclass
class TaskSpaceImpedance:
    motion_ctrl_axes: list[bool]  # axes for which to enable motion control {0, 1} (6)
    task_prop_gains: list[float]  # proportional gains on Franka fingertip pose (6)
    task_deriv_gains: list[float]  # derivative gains on Franka fingertip pose (6)


@dataclass
class OperationalSpaceMotion:
    motion_ctrl_axes: list[bool]  # axes for which to enable motion control {0, 1} (6)
    task_prop_gains: list[float]  # proportional gains on Franka fingertip pose (6)
    task_deriv_gains: list[float]  # derivative gains on Franka fingertip pose (6)


@dataclass
class OpenLoopForce:
    force_ctrl_axes: list[bool]  # axes for which to enable force control {0, 1} (6)


@dataclass
class ClosedLoopForce:
    force_ctrl_axes: list[bool]  # axes for which to enable force control {0, 1} (6)
    wrench_prop_gains: list[float]  # proportional gains on Franka finger force (6)


@dataclass
class HybridForceMotion:
    motion_ctrl_axes: list[bool]  # axes for which to enable motion control {0, 1} (6)
    task_prop_gains: list[float]  # proportional gains on Franka fingertip pose (6)
    task_deriv_gains: list[float]  # derivative gains on Franka fingertip pose (6)
    force_ctrl_axes: list[bool]  # axes for which to enable force control {0, 1} (6)
    wrench_prop_gains: list[float]  # proportional gains on Franka finger force (6)


@dataclass
class Ctrl:
    ctrl_type: str  # {gym_default,
                    #  joint_space_ik,
                    #  joint_space_id,
                    #  task_space_impedance,
                    #  operational_space_motion,
                    #  open_loop_force,
                    #  closed_loop_force,
                    #  hybrid_force_motion}
    gym_default: GymDefault
    joint_space_ik: JointSpaceIK
    joint_space_id: JointSpaceID
    task_space_impedance: TaskSpaceImpedance
    operational_space_motion: OperationalSpaceMotion
    open_loop_force: OpenLoopForce
    closed_loop_force: ClosedLoopForce
    hybrid_force_motion: HybridForceMotion


@dataclass
class FactorySchemaConfigTask:
    name: str
    physics_engine: str
    sim: Sim
    env: Env
    rl: RL
    ctrl: Ctrl
5,639
Python
33.814815
130
0.715552
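The schema above is consumed through Hydra's ConfigStore: each task class registers it under the name 'factory_schema_config_task' and then composes its task YAML against it. Below is a minimal standalone sketch of that validation step using OmegaConf directly; the file name 'FactoryTaskNutBoltPick.yaml' is a hypothetical placeholder for a local copy of a task config, not a path this sketch resolves for you.

from omegaconf import OmegaConf

from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask

# Build a structured config from the dataclass; every field is typed but unset.
schema = OmegaConf.structured(FactorySchemaConfigTask)

# Merging a task YAML against the schema raises if a field has the wrong type.
cfg = OmegaConf.load('FactoryTaskNutBoltPick.yaml')  # hypothetical file name
cfg_task = OmegaConf.merge(schema, cfg)

print(cfg_task.rl.max_episode_length)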
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt place task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltPlace """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask from isaacgymenvs.utils import torch_jit_utils class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() self._acquire_task_tensors() self.parse_controller_spec() if self.cfg_task.sim.disable_gravity: self.disable_gravity() if self.viewer is not None: self._set_viewer_params() def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskNutBoltPlacePPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" # Nut-bolt tensors self.nut_base_pos_local = \ self.bolt_head_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1)) bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths self.bolt_tip_pos_local = \ bolt_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1)) # Keypoint tensors self.keypoint_offsets = \ self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale self.keypoints_nut = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device) self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device) self.identity_quat = \ torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) self.actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) def _refresh_task_tensors(self): """Refresh tensors.""" # Compute pos of keypoints on gripper, nut, and bolt in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_quat, self.nut_pos, self.identity_quat, (keypoint_offset + self.nut_base_pos_local))[1] self.keypoints_bolt[:, idx] = torch_jit_utils.tf_combine(self.bolt_quat, self.bolt_pos, self.identity_quat, (keypoint_offset + self.bolt_tip_pos_local))[1] def pre_physics_step(self, actions): """Reset environments. Apply actions from policy. Simulation step called after this method.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets(actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True) def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward. 
Reset environments."""

        self.progress_buf[:] += 1

        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()
        self.compute_observations()
        self.compute_reward()

    def compute_observations(self):
        """Compute observations."""

        # Shallow copies of tensors
        obs_tensors = [self.fingertip_midpoint_pos,
                       self.fingertip_midpoint_quat,
                       self.fingertip_midpoint_linvel,
                       self.fingertip_midpoint_angvel,
                       self.nut_pos,
                       self.nut_quat,
                       self.bolt_pos,
                       self.bolt_quat]

        if self.cfg_task.rl.add_obs_bolt_tip_pos:
            obs_tensors += [self.bolt_tip_pos_local]

        self.obs_buf = torch.cat(obs_tensors, dim=-1)  # shape = (num_envs, num_observations)

        return self.obs_buf

    def compute_reward(self):
        """Update reward and reset buffers."""

        self._update_reset_buf()
        self._update_rew_buf()

    def _update_reset_buf(self):
        """Assign environments for reset if successful or failed."""

        # If max episode length has been reached
        self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length - 1,
                                        torch.ones_like(self.reset_buf),
                                        self.reset_buf)

    def _update_rew_buf(self):
        """Compute reward at current timestep."""

        keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)  # penalty scale applied once below

        self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
                          - action_penalty * self.cfg_task.rl.action_penalty_scale

        # In this policy, episode length is constant across all envs
        is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)

        if is_last_step:
            # Check if nut is close enough to bolt
            is_nut_close_to_bolt = self._check_nut_close_to_bolt()
            self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
            self.extras['successes'] = torch.mean(is_nut_close_to_bolt.float())

    def reset_idx(self, env_ids):
        """Reset specified environments."""

        self._reset_franka(env_ids)
        self._reset_object(env_ids)

        # Close gripper onto nut
        self.disable_gravity()  # to prevent nut from falling
        for _ in range(self.cfg_task.env.num_gripper_close_sim_steps):
            self.ctrl_target_dof_pos[env_ids, 7:9] = 0.0
            delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
                                          device=self.device)  # no arm motion
            self._apply_actions_as_ctrl_targets(actions=delta_hand_pose,
                                                ctrl_target_gripper_dof_pos=0.0,
                                                do_scale=False)
            self.gym.simulate(self.sim)
            self.render()
        self.enable_gravity(gravity_mag=abs(self.cfg_base.sim.gravity[2]))

        self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)

        self._reset_buffers(env_ids)

    def _reset_franka(self, env_ids):
        """Reset DOF states and DOF targets of Franka."""

        self.dof_pos[env_ids] = \
            torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos,
                                    device=self.device).repeat((len(env_ids), 1)),
                       (self.nut_widths_max * 0.5) * 1.1,  # buffer on gripper DOF pos to prevent initial contact
                       (self.nut_widths_max * 0.5) * 1.1),  # buffer on gripper DOF pos to prevent initial contact
                      dim=-1)  # shape = (num_envs, num_dofs)
        self.dof_vel[env_ids] = 0.0  # shape = (num_envs, num_dofs)
        self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]

        multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(multi_env_ids_int32),
                                              len(multi_env_ids_int32))

    def _reset_object(self, env_ids):
        """Reset root states of nut and bolt."""

        # shape of root_pos = (num_envs, num_actors, 3)
        # shape of root_quat = (num_envs, num_actors, 4)
        # shape of root_linvel = (num_envs, num_actors, 3)
# shape of root_angvel = (num_envs, num_actors, 3) # Randomize root state of nut within gripper self.root_pos[env_ids, self.nut_actor_id_env, 0] = 0.0 self.root_pos[env_ids, self.nut_actor_id_env, 1] = 0.0 fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset nut_base_pos_local = self.bolt_head_heights.squeeze(-1) self.root_pos[env_ids, self.nut_actor_id_env, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local nut_noise_pos_in_gripper = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag( torch.tensor(self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device)) self.root_pos[env_ids, self.nut_actor_id_env, :] += nut_noise_pos_in_gripper[env_ids] nut_rot_euler = torch.tensor([0.0, 0.0, math.pi * 0.5], device=self.device).repeat(len(env_ids), 1) nut_noise_rot_in_gripper = \ 2 * (torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper nut_rot_euler[:, 2] += nut_noise_rot_in_gripper nut_rot_quat = torch_utils.quat_from_euler_xyz(nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2]) self.root_quat[env_ids, self.nut_actor_id_env] = nut_rot_quat # Randomize root state of bolt bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, dtype=torch.float32, device=self.device)) self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \ bolt_noise_xy[env_ids, 0] self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \ bolt_noise_xy[env_ids, 1] self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0 self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0 nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids], self.bolt_actor_ids_sim[env_ids]), dim=0) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim), len(nut_bolt_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers. 
""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device)) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device)) self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def _open_gripper(self, sim_steps=20): """Fully open gripper using controller. Called outside RL loop (i.e., after last step of episode).""" self._move_gripper_to_dof_pos(gripper_dof_pos=0.1, sim_steps=sim_steps) def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20): """Move gripper fingers to specified DOF position using controller.""" delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) # no arm motion self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False) # Step sim for _ in range(sim_steps): self.render() self.gym.simulate(self.sim) def _lift_gripper(self, gripper_dof_pos=0.0, lift_distance=0.3, sim_steps=20): """Lift gripper by specified distance. 
Called outside RL loop (i.e., after last step of episode).""" delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device) delta_hand_pose[:, 2] = lift_distance # lift along z # Step sim for _ in range(sim_steps): self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False) self.render() self.gym.simulate(self.sim) def _get_keypoint_offsets(self, num_keypoints): """Get uniformly-spaced keypoints along a line of unit length, centered at 0.""" keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device) keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5 return keypoint_offsets def _get_keypoint_dist(self): """Get keypoint distances.""" keypoint_dist = torch.sum(torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1) return keypoint_dist def _check_nut_close_to_bolt(self): """Check if nut is close to bolt.""" keypoint_dist = torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1) is_nut_close_to_bolt = torch.where(torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf)) return is_nut_close_to_bolt def _randomize_gripper_pose(self, env_ids, sim_steps): """Move gripper to random pose.""" # Set target pos above table self.ctrl_target_fingertip_midpoint_pos = \ torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \ + torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device) self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat( self.num_envs, 1) fingertip_midpoint_pos_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device)) self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise # Set target rot ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial, device=self.device).unsqueeze(0).repeat(self.num_envs, 1) fingertip_midpoint_rot_noise = \ 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device)) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2]) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets(actions=actions, 
ctrl_target_gripper_dof_pos=0.0, do_scale=False) self.gym.simulate(self.sim) self.render() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) # Set DOF state multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
23,304
Python
49.226293
141
0.596421
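The place task's reward above is driven entirely by keypoint geometry: keypoints are spaced along the local z-axis of the nut and bolt frames, and the reward is the negative sum of L2 distances between corresponding keypoints. A minimal standalone sketch of that computation, with random stand-in poses; the keypoint_scale value of 0.5 is an assumption (the real value comes from the task YAML).

import torch


def get_keypoint_offsets(num_keypoints, device):
    """Uniformly spaced keypoints along z on a unit segment centered at 0, as in _get_keypoint_offsets()."""
    offsets = torch.zeros((num_keypoints, 3), device=device)
    offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=device) - 0.5
    return offsets


def keypoint_dist(keypoints_a, keypoints_b):
    """Summed L2 distance over corresponding keypoints, as in _get_keypoint_dist()."""
    return torch.sum(torch.norm(keypoints_b - keypoints_a, p=2, dim=-1), dim=-1)


num_envs, num_keypoints = 8, 4
offsets = get_keypoint_offsets(num_keypoints, 'cpu') * 0.5  # keypoint_scale = 0.5 is assumed

# Stand-ins for world-frame keypoints on the nut and on the bolt tip
keypoints_nut = offsets.unsqueeze(0) + torch.randn(num_envs, 1, 3) * 0.01
keypoints_bolt = offsets.unsqueeze(0) + torch.randn(num_envs, 1, 3) * 0.01

keypoint_reward = -keypoint_dist(keypoints_nut, keypoints_bolt)  # approaches 0 as nut aligns with bolt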
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_env.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for environment class configurations. Used by Hydra. Defines template for environment class YAML files. """ from dataclasses import dataclass @dataclass class Sim: disable_franka_collisions: bool # disable collisions between Franka and objects @dataclass class Env: env_name: str # name of scene @dataclass class FactorySchemaConfigEnv: sim: Sim env: Env
1,959
Python
37.431372
84
0.776927
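As with the task schema, environment YAMLs are validated against this template after registration with Hydra's ConfigStore. A minimal sketch using OmegaConf directly; the field values are hypothetical stand-ins, and the real env YAMLs live in the repo's cfg directory.

from omegaconf import OmegaConf

from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv

schema = OmegaConf.structured(FactorySchemaConfigEnv)

cfg_env = OmegaConf.merge(schema, {'sim': {'disable_franka_collisions': False},
                                   'env': {'env_name': 'FactoryEnvNutBolt'}})
assert cfg_env.env.env_name == 'FactoryEnvNutBolt'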
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_task.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for task classes. Inherits ABC class. Inherited by task classes. Defines template for task classes. """ from abc import ABC, abstractmethod class FactoryABCTask(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize environment superclass.""" pass @abstractmethod def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def _acquire_task_tensors(self): """Acquire tensors.""" pass @abstractmethod def _refresh_task_tensors(self): """Refresh tensors.""" pass @abstractmethod def pre_physics_step(self): """Reset environments. Apply actions from policy as controller targets. Simulation step called after this method.""" pass @abstractmethod def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward.""" pass @abstractmethod def compute_observations(self): """Compute observations.""" pass @abstractmethod def compute_reward(self): """Detect successes and failures. Update reward and reset buffers.""" pass @abstractmethod def _update_rew_buf(self): """Compute reward at current timestep.""" pass @abstractmethod def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" pass @abstractmethod def reset_idx(self): """Reset specified environments.""" pass @abstractmethod def _reset_franka(self): """Reset DOF states and DOF targets of Franka.""" pass @abstractmethod def _reset_object(self): """Reset root state of object.""" pass @abstractmethod def _reset_buffers(self): """Reset buffers.""" pass @abstractmethod def _set_viewer_params(self): """Set viewer parameters.""" pass
3,598
Python
30.849557
124
0.691773
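Because the template is enforced only by Python's ABC machinery, a concrete task merely has to override every abstract hook before it can be instantiated; argument signatures are not checked. A hypothetical skeleton, assuming only this module, showing the shape of a conforming subclass:

from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask


class MyFactoryTask(FactoryABCTask):  # hypothetical; real tasks also inherit an env class
    def __init__(self):
        self._get_task_yaml_params()
        self._acquire_task_tensors()

    def _get_task_yaml_params(self): ...
    def _acquire_task_tensors(self): ...
    def _refresh_task_tensors(self): ...

    def pre_physics_step(self, actions): ...
    def post_physics_step(self): ...

    def compute_observations(self): ...

    def compute_reward(self):
        self._update_reset_buf()
        self._update_rew_buf()

    def _update_rew_buf(self): ...
    def _update_reset_buf(self): ...

    def reset_idx(self, env_ids): ...
    def _reset_franka(self, env_ids): ...
    def _reset_object(self, env_ids): ...
    def _reset_buffers(self, env_ids): ...
    def _set_viewer_params(self): ...


task = MyFactoryTask()  # instantiable because every abstract method is overridden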
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_env.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for environment classes. Inherits ABC class. Inherited by environment classes. Defines template for environment classes. """ from abc import ABC, abstractmethod class FactoryABCEnv(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize base superclass. Acquire tensors.""" pass @abstractmethod def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def create_envs(self): """Set env options. Import assets. Create actors.""" pass @abstractmethod def _import_env_assets(self): """Set asset options. Import assets.""" pass @abstractmethod def _create_actors(self): """Set initial actor poses. Create actors. Set shape and DOF properties.""" pass @abstractmethod def _acquire_env_tensors(self): """Acquire and wrap tensors. Create views.""" pass @abstractmethod def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. pass
2,760
Python
36.31081
95
0.724638
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt screw task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltScrew Initial Franka/nut states are ideal for M16 nut-and-bolt. In this example, initial state randomization is not applied; thus, policy should succeed almost instantly. """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass."""

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

        self.cfg = cfg
        self._get_task_yaml_params()
        self._acquire_task_tensors()
        self.parse_controller_spec()

        if self.cfg_task.sim.disable_gravity:
            self.disable_gravity()

        if self.viewer is not None:
            self._set_viewer_params()

    def _get_task_yaml_params(self):
        """Initialize instance variables from YAML files."""

        cs = hydra.core.config_store.ConfigStore.instance()
        cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)

        self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
        self.max_episode_length = self.cfg_task.rl.max_episode_length  # required instance var for VecTask

        asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
        self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting

        ppo_path = 'train/FactoryTaskNutBoltScrewPPO.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.cfg_ppo = hydra.compose(config_name=ppo_path)
        self.cfg_ppo = self.cfg_ppo['train']  # strip superfluous nesting

    def _acquire_task_tensors(self):
        """Acquire tensors."""

        target_heights = self.cfg_base.env.table_height + self.bolt_head_heights + self.nut_heights * 0.5
        self.target_pos = target_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))

    def _refresh_task_tensors(self):
        """Refresh tensors."""

        self.fingerpad_midpoint_pos = fc.translate_along_local_z(pos=self.finger_midpoint_pos,
                                                                 quat=self.hand_quat,
                                                                 offset=self.asset_info_franka_table.franka_finger_length - self.asset_info_franka_table.franka_fingerpad_length * 0.5,
                                                                 device=self.device)
        self.finger_nut_keypoint_dist = self._get_keypoint_dist(body='finger_nut')
        self.nut_keypoint_dist = self._get_keypoint_dist(body='nut')
        self.nut_dist_to_target = torch.norm(self.target_pos - self.nut_com_pos, p=2,
                                             dim=-1)  # distance between nut COM and target
        self.nut_dist_to_fingerpads = torch.norm(self.fingerpad_midpoint_pos - self.nut_com_pos, p=2,
                                                 dim=-1)  # distance between nut COM and midpoint between centers of fingerpads

    def pre_physics_step(self, actions):
        """Reset environments. Apply actions from policy. Simulation step called after this method."""

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self.actions = actions.clone().to(self.device)  # shape = (num_envs, num_actions); values = [-1, 1]

        self._apply_actions_as_ctrl_targets(actions=self.actions,
                                            ctrl_target_gripper_dof_pos=0.0,
                                            do_scale=True)

    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward.
Reset environments.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" # Shallow copies of tensors obs_tensors = [self.fingertip_midpoint_pos, self.fingertip_midpoint_quat, self.fingertip_midpoint_linvel, self.fingertip_midpoint_angvel, self.nut_com_pos, self.nut_com_quat, self.nut_com_linvel, self.nut_com_angvel] if self.cfg_task.rl.add_obs_finger_force: obs_tensors += [self.left_finger_force, self.right_finger_force] obs_tensors = torch.cat(obs_tensors, dim=-1) self.obs_buf[:, :obs_tensors.shape[-1]] = obs_tensors # shape = (num_envs, num_observations) return self.obs_buf def compute_reward(self): """Detect successes and failures. Update reward and reset buffers.""" # Get successful and failed envs at current timestep curr_successes = self._get_curr_successes() curr_failures = self._get_curr_failures(curr_successes) self._update_reset_buf(curr_successes, curr_failures) self._update_rew_buf(curr_successes) def _update_reset_buf(self, curr_successes, curr_failures): """Assign environments for reset if successful or failed.""" self.reset_buf[:] = torch.logical_or(curr_successes, curr_failures) def _update_rew_buf(self, curr_successes): """Compute reward at current timestep.""" keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist) action_penalty = torch.norm(self.actions, p=2, dim=-1) self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \ - action_penalty * self.cfg_task.rl.action_penalty_scale \ + curr_successes * self.cfg_task.rl.success_bonus def reset_idx(self, env_ids): """Reset specified environments. Zero buffers.""" self._reset_franka(env_ids) self._reset_object(env_ids) self._reset_buffers(env_ids) def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" self.dof_pos[env_ids] = torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device).repeat((len(env_ids), 1)), (self.nut_widths_max[env_ids] * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact (self.nut_widths_max[env_ids] * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact dim=-1) # shape = (num_envs, num_dofs) self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs) self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids] multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32)) def _reset_object(self, env_ids): """Reset root state of nut.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids] self.root_pos[env_ids, self.nut_actor_id_env] = \ nut_pos * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat(len(env_ids), 1) nut_rot = self.cfg_task.randomize.nut_rot_initial * torch.ones((len(env_ids), 1), device=self.device) * math.pi / 180.0 self.root_quat[env_ids, self.nut_actor_id_env] = torch.cat((torch.zeros((len(env_ids), 1), device=self.device), torch.zeros((len(env_ids), 1), device=self.device), torch.sin(nut_rot * 0.5), torch.cos(nut_rot * 0.5)), dim=-1) self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0 
self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0 self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(self.nut_actor_ids_sim), len(self.nut_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets or force/torque targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if self.cfg_task.rl.unidirectional_rot: rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if self.cfg_task.rl.unidirectional_force: force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0] if do_scale: force_actions = force_actions @ torch.diag( torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device)) torque_actions = actions[:, 9:12] if do_scale: torque_actions = torque_actions @ torch.diag( torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device)) self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1) self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos self.generate_ctrl_signals() def _get_keypoint_dist(self, body): """Get keypoint distances.""" axis_length = self.asset_info_franka_table.franka_hand_length + self.asset_info_franka_table.franka_finger_length if body == 'finger' or body == 'nut': # Keypoint distance between finger/nut and target if body == 'finger': self.keypoint1 = self.fingertip_midpoint_pos self.keypoint2 = fc.translate_along_local_z(pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device) elif body == 'nut': self.keypoint1 = self.nut_com_pos self.keypoint2 = fc.translate_along_local_z(pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device) self.keypoint1_targ = self.target_pos self.keypoint2_targ = self.keypoint1_targ + torch.tensor([0.0, 0.0, axis_length], device=self.device) elif body == 'finger_nut': # Keypoint distance between finger and nut self.keypoint1 = self.fingerpad_midpoint_pos self.keypoint2 = 
fc.translate_along_local_z(pos=self.keypoint1, quat=self.fingertip_midpoint_quat, offset=-axis_length, device=self.device) self.keypoint1_targ = self.nut_com_pos self.keypoint2_targ = fc.translate_along_local_z(pos=self.nut_com_pos, quat=self.nut_com_quat, offset=axis_length, device=self.device) self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0 self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0 self.keypoint3_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0 self.keypoint4_targ = self.keypoint1_targ + (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0 keypoint_dist = torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1) \ + torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1) \ + torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1) \ + torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1) return keypoint_dist def _get_curr_successes(self): """Get success mask at current timestep.""" curr_successes = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device) # If nut is close enough to target pos is_close = torch.where(self.nut_dist_to_target < self.thread_pitches.squeeze(-1), torch.ones_like(curr_successes), torch.zeros_like(curr_successes)) curr_successes = torch.logical_or(curr_successes, is_close) return curr_successes def _get_curr_failures(self, curr_successes): """Get failure mask at current timestep.""" curr_failures = torch.zeros((self.num_envs,), dtype=torch.bool, device=self.device) # If max episode length has been reached self.is_expired = torch.where(self.progress_buf[:] >= self.cfg_task.rl.max_episode_length, torch.ones_like(curr_failures), curr_failures) # If nut is too far from target pos self.is_far = torch.where(self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh, torch.ones_like(curr_failures), curr_failures) # If nut has slipped (distance-based definition) self.is_slipped = \ torch.where( self.nut_dist_to_fingerpads > self.asset_info_franka_table.franka_fingerpad_length * 0.5 + self.nut_heights.squeeze(-1) * 0.5, torch.ones_like(curr_failures), curr_failures) self.is_slipped = torch.logical_and(self.is_slipped, torch.logical_not(curr_successes)) # ignore slip if successful # If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt) self.is_fallen = torch.logical_and( torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1) > self.bolt_widths.squeeze(-1) * 0.5, self.nut_com_pos[:, 2] < self.cfg_base.env.table_height + self.bolt_head_heights.squeeze( -1) + self.bolt_shank_lengths.squeeze(-1) + self.nut_heights.squeeze(-1) * 0.5) curr_failures = torch.logical_or(curr_failures, self.is_expired) curr_failures = torch.logical_or(curr_failures, self.is_far) curr_failures = torch.logical_or(curr_failures, self.is_slipped) curr_failures = torch.logical_or(curr_failures, self.is_fallen) return curr_failures
19,807
Python
50.183462
183
0.584238
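One detail worth isolating from _apply_actions_as_ctrl_targets above: when unidirectional_rot (or unidirectional_force) is enabled, the raw action component in [-1, 1] is remapped to [-1, 0], so the policy can only command rotation (or downward force) in the screwing direction. A small numeric sketch of the remapping:

import torch

raw = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])  # raw policy outputs in [-1, 1]
remapped = -(raw + 1.0) * 0.5                    # -> [0.0, -0.25, -0.5, -0.75, -1.0]
assert remapped.min() >= -1.0 and remapped.max() <= 0.0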
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for nut-bolt pick task. Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskNutBoltPick """ import hydra import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.utils import torch_jit_utils as torch_utils import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask from isaacgymenvs.utils import torch_jit_utils class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize environment superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() self._acquire_task_tensors() self.parse_controller_spec() if self.cfg_task.sim.disable_gravity: self.disable_gravity() if self.viewer is not None: self._set_viewer_params() def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskNutBoltPickPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" # Grasp pose tensors nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat( (self.num_envs, 1)) self.nut_grasp_quat_local = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat( self.num_envs, 1) # Keypoint tensors self.keypoint_offsets = self._get_keypoint_offsets( self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale self.keypoints_gripper = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3), dtype=torch.float32, device=self.device) self.keypoints_nut = torch.zeros_like(self.keypoints_gripper, device=self.device) self.identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) def _refresh_task_tensors(self): """Refresh tensors.""" # Compute pose of nut grasping frame self.nut_grasp_quat, self.nut_grasp_pos = torch_jit_utils.tf_combine(self.nut_quat, self.nut_pos, self.nut_grasp_quat_local, self.nut_grasp_pos_local) # Compute pos of keypoints on gripper and nut in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): self.keypoints_gripper[:, idx] = torch_jit_utils.tf_combine(self.fingertip_midpoint_quat, self.fingertip_midpoint_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1))[1] self.keypoints_nut[:, idx] = torch_jit_utils.tf_combine(self.nut_grasp_quat, self.nut_grasp_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1))[1] def pre_physics_step(self, actions): """Reset environments. Apply actions from policy. Simulation step called after this method.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] self._apply_actions_as_ctrl_targets(actions=self.actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=True) def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward. 
Reset environments."""

        self.progress_buf[:] += 1

        # In this policy, episode length is constant
        is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)

        if self.cfg_task.env.close_and_lift:
            # At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
            if is_last_step:
                self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
                self._lift_gripper(sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps)

        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()
        self.compute_observations()
        self.compute_reward()

    def compute_observations(self):
        """Compute observations."""

        # Shallow copies of tensors
        obs_tensors = [self.fingertip_midpoint_pos,
                       self.fingertip_midpoint_quat,
                       self.fingertip_midpoint_linvel,
                       self.fingertip_midpoint_angvel,
                       self.nut_grasp_pos,
                       self.nut_grasp_quat]

        self.obs_buf = torch.cat(obs_tensors, dim=-1)  # shape = (num_envs, num_observations)

        return self.obs_buf

    def compute_reward(self):
        """Update reward and reset buffers."""

        self._update_reset_buf()
        self._update_rew_buf()

    def _update_reset_buf(self):
        """Assign environments for reset if successful or failed."""

        # If max episode length has been reached
        self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.max_episode_length - 1,
                                        torch.ones_like(self.reset_buf),
                                        self.reset_buf)

    def _update_rew_buf(self):
        """Compute reward at current timestep."""

        keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)  # penalty scale applied once below

        self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \
                          - action_penalty * self.cfg_task.rl.action_penalty_scale

        # In this policy, episode length is constant across all envs
        is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)

        if is_last_step:
            # Check if nut is picked up and above table
            lift_success = self._check_lift_success(height_multiple=3.0)
            self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus
            self.extras['successes'] = torch.mean(lift_success.float())

    def reset_idx(self, env_ids):
        """Reset specified environments."""

        self._reset_franka(env_ids)
        self._reset_object(env_ids)

        self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)

        self._reset_buffers(env_ids)

    def _reset_franka(self, env_ids):
        """Reset DOF states and DOF targets of Franka."""

        self.dof_pos[env_ids] = torch.cat(
            (torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, device=self.device),
             torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device),
             torch.tensor([self.asset_info_franka_table.franka_gripper_width_max], device=self.device)),
            dim=-1).unsqueeze(0).repeat((self.num_envs, 1))  # shape = (num_envs, num_dofs)
        self.dof_vel[env_ids] = 0.0  # shape = (num_envs, num_dofs)
        self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]

        multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(multi_env_ids_int32),
                                              len(multi_env_ids_int32))

    def _reset_object(self, env_ids):
        """Reset root states of nut and bolt."""

        # shape of root_pos = (num_envs, num_actors, 3)
        # shape of root_quat = (num_envs, num_actors, 4)
        # shape of root_linvel = (num_envs, num_actors, 3)
        # shape of root_angvel = (num_envs, num_actors, 3)

        # Randomize root state of nut
        nut_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
        nut_noise_xy = \
nut_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.nut_pos_xy_initial_noise, device=self.device)) self.root_pos[env_ids, self.nut_actor_id_env, 0] = self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[ env_ids, 0] self.root_pos[env_ids, self.nut_actor_id_env, 1] = self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[ env_ids, 1] self.root_pos[ env_ids, self.nut_actor_id_env, 2] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1) self.root_quat[env_ids, self.nut_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.nut_actor_id_env] = 0.0 self.root_angvel[env_ids, self.nut_actor_id_env] = 0.0 # Randomize root state of bolt bolt_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1] bolt_noise_xy = bolt_noise_xy @ torch.diag( torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device)) self.root_pos[env_ids, self.bolt_actor_id_env, 0] = self.cfg_task.randomize.bolt_pos_xy_initial[0] + \ bolt_noise_xy[env_ids, 0] self.root_pos[env_ids, self.bolt_actor_id_env, 1] = self.cfg_task.randomize.bolt_pos_xy_initial[1] + \ bolt_noise_xy[env_ids, 1] self.root_pos[env_ids, self.bolt_actor_id_env, 2] = self.cfg_base.env.table_height self.root_quat[env_ids, self.bolt_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32, device=self.device).repeat(len(env_ids), 1) self.root_linvel[env_ids, self.bolt_actor_id_env] = 0.0 self.root_angvel[env_ids, self.bolt_actor_id_env] = 0.0 nut_bolt_actor_ids_sim = torch.cat((self.nut_actor_ids_sim[env_ids], self.bolt_actor_ids_sim[env_ids]), dim=0) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(nut_bolt_actor_ids_sim), len(nut_bolt_actor_ids_sim)) def _reset_buffers(self, env_ids): """Reset buffers.""" self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale): """Apply actions from policy as position/rotation targets.""" # Interpret actions as target pos displacements and set pos target pos_actions = actions[:, 0:3] if do_scale: pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)) self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions # Interpret actions as target rot (axis-angle) displacements rot_actions = actions[:, 3:6] if do_scale: rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)) # Convert to quat and set rot target angle = torch.norm(rot_actions, p=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) if self.cfg_task.rl.clamp_rot: rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh, rot_actions_quat, torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1)) self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) if self.cfg_ctrl['do_force_ctrl']: # Interpret actions as target forces and target torques force_actions = actions[:, 6:9] if do_scale: 
force_actions = force_actions @ torch.diag(
                    torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))

            torque_actions = actions[:, 9:12]
            if do_scale:
                torque_actions = torque_actions @ torch.diag(
                    torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))

            self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)

        self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos

        self.generate_ctrl_signals()

    def _get_keypoint_offsets(self, num_keypoints):
        """Get uniformly-spaced keypoints along a line of unit length, centered at 0."""

        keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
        keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5

        return keypoint_offsets

    def _get_keypoint_dist(self):
        """Get keypoint distance (sum of L2 norms between matched nut and gripper keypoints)."""

        keypoint_dist = torch.sum(torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1)

        return keypoint_dist

    def _close_gripper(self, sim_steps=20):
        """Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""

        self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)

    def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20):
        """Move gripper fingers to specified DOF position using controller."""

        delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),
                                      device=self.device)  # No hand motion
        self._apply_actions_as_ctrl_targets(delta_hand_pose, gripper_dof_pos, do_scale=False)

        # Step sim
        for _ in range(sim_steps):
            self.render()
            self.gym.simulate(self.sim)

    def _lift_gripper(self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20):
        """Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""

        delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
        delta_hand_pose[:, 2] = lift_distance  # lift along z

        # Step sim
        for _ in range(sim_steps):
            self._apply_actions_as_ctrl_targets(delta_hand_pose, franka_gripper_width, do_scale=False)
            self.render()
            self.gym.simulate(self.sim)

    def _check_lift_success(self, height_multiple):
        """Check if the nut is above the table by more than the specified multiple of the nut height."""

        lift_success = torch.where(
            self.nut_pos[:, 2] > self.cfg_base.env.table_height + self.nut_heights.squeeze(-1) * height_multiple,
            torch.ones((self.num_envs,), device=self.device),
            torch.zeros((self.num_envs,), device=self.device))

        return lift_success

    def _randomize_gripper_pose(self, env_ids, sim_steps):
        """Move gripper to random pose."""

        # Set target pos above table
        self.ctrl_target_fingertip_midpoint_pos = \
            torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) \
            + torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device)
        self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(self.num_envs, 1)

        fingertip_midpoint_pos_noise = \
            2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5)  # [-1, 1]
        fingertip_midpoint_pos_noise = \
            fingertip_midpoint_pos_noise @ torch.diag(torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise,
                                                                   device=self.device))
        self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise

        # Set target rot
        ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial,
                                                            device=self.device).unsqueeze(0).repeat(self.num_envs, 1)
        fingertip_midpoint_rot_noise = \
            2 * (torch.rand((self.num_envs, 3), dtype=torch.float32,
device=self.device) - 0.5) # [-1, 1] fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag( torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device)) ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( ctrl_target_fingertip_midpoint_euler[:, 0], ctrl_target_fingertip_midpoint_euler[:, 1], ctrl_target_fingertip_midpoint_euler[:, 2]) # Step sim and render for _ in range(sim_steps): self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() pos_error, axis_angle_error = fc.get_pose_error( fingertip_midpoint_pos=self.fingertip_midpoint_pos, fingertip_midpoint_quat=self.fingertip_midpoint_quat, ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos, ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat, jacobian_type=self.cfg_ctrl['jacobian_type'], rot_error_type='axis_angle') delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device) actions[:, :6] = delta_hand_pose self._apply_actions_as_ctrl_targets(actions=actions, ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max, do_scale=False) self.gym.simulate(self.sim) self.render() self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids]) # Set DOF state multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten() self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(multi_env_ids_int32), len(multi_env_ids_int32))
23,069
Python
50.039823
141
0.593654
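The pick task above scores progress with a keypoint distance: matched keypoints are laid out along the gripper and nut axes, and the reward is the negative sum of pairwise L2 norms. A minimal standalone sketch of that computation, using hypothetical stand-in tensors rather than the task's real keypoint buffers:

# Standalone sketch of the keypoint-distance reward; the random tensors below are
# placeholders for the task's transformed keypoints, not the repo's API.
import torch

num_envs, num_keypoints = 4, 4

# Uniformly-spaced keypoints along the local z-axis of a unit segment, centered at 0
keypoint_offsets = torch.zeros((num_keypoints, 3))
keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints) - 0.5

# Stand-ins for gripper and nut keypoints (in the real task, the offsets are
# transformed by each body's world pose before this step)
keypoints_gripper = torch.rand((num_envs, num_keypoints, 3))
keypoints_nut = torch.rand((num_envs, num_keypoints, 3))

# Per-env distance: sum over keypoints of the L2 norm between matched pairs
keypoint_dist = torch.sum(torch.norm(keypoints_nut - keypoints_gripper, p=2, dim=-1), dim=-1)
keypoint_reward = -keypoint_dist  # shape = (num_envs,); larger is better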
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_class_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: abstract base class for base class. Inherits ABC class. Inherited by base class. Defines template for base class. """ from abc import ABC, abstractmethod class FactoryABCBase(ABC): @abstractmethod def __init__(self): """Initialize instance variables. Initialize VecTask superclass.""" pass @abstractmethod def _get_base_yaml_params(self): """Initialize instance variables from YAML files.""" pass @abstractmethod def create_sim(self): """Set sim and PhysX params. Create sim object, ground plane, and envs.""" pass @abstractmethod def _create_ground_plane(self): """Set ground plane params. Add plane.""" pass @abstractmethod def import_franka_assets(self): """Set Franka and table asset options. Import assets.""" pass @abstractmethod def acquire_base_tensors(self): """Acquire and wrap tensors. Create views.""" pass @abstractmethod def refresh_base_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. pass @abstractmethod def parse_controller_spec(self): """Parse controller specification into lower-level controller configuration.""" pass @abstractmethod def generate_ctrl_signals(self): """Get Jacobian. Set Franka DOF position targets or DOF torques.""" pass @abstractmethod def enable_gravity(self): """Enable gravity.""" pass @abstractmethod def disable_gravity(self): """Disable gravity.""" pass @abstractmethod def export_scene(self): """Export scene to USD.""" pass @abstractmethod def extract_poses(self): """Extract poses of all bodies.""" pass
3,432
Python
32.330097
88
0.697552
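The schema class above is a pure template: abc blocks instantiation until every @abstractmethod is overridden. A tiny sketch of that enforcement pattern, with hypothetical class names rather than the real FactoryBase:

# Hedged sketch of the template pattern; Base and Concrete are illustrative names.
from abc import ABC, abstractmethod

class Base(ABC):
    @abstractmethod
    def refresh_base_tensors(self):
        """Refresh tensors."""

class Concrete(Base):
    def refresh_base_tensors(self):
        print('refreshed')

Concrete().refresh_base_tensors()  # OK: all abstract methods implemented
# Base()  # would raise TypeError: can't instantiate abstract class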
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_insertion.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: Class for insertion task. Inherits insertion environment class and abstract task class (not enforced). Can be executed with python train.py task=FactoryTaskInsertion Only the environment is provided; training a successful RL policy is an open research problem left to the user. """ import hydra import math import omegaconf import os import torch from isaacgym import gymapi, gymtorch from isaacgymenvs.tasks.factory.factory_env_insertion import FactoryEnvInsertion from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask class FactoryTaskInsertion(FactoryEnvInsertion, FactoryABCTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. 
Initialize task superclass.""" super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.cfg = cfg self._get_task_yaml_params() if self.viewer != None: self._set_viewer_params() if self.cfg_base.mode.export_scene: self.export_scene(label='franka_task_insertion') def _get_task_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask) self.cfg_task = omegaconf.OmegaConf.create(self.cfg) self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_insertion = hydra.compose(config_name=asset_info_path) self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting ppo_path = 'train/FactoryTaskInsertionPPO.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_ppo = hydra.compose(config_name=ppo_path) self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting def _acquire_task_tensors(self): """Acquire tensors.""" pass def _refresh_task_tensors(self): """Refresh tensors.""" pass def pre_physics_step(self, actions): """Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains.""" env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: self.reset_idx(env_ids) self._actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1] def post_physics_step(self): """Step buffers. Refresh tensors. Compute observations and reward.""" self.progress_buf[:] += 1 self.refresh_base_tensors() self.refresh_env_tensors() self._refresh_task_tensors() self.compute_observations() self.compute_reward() def compute_observations(self): """Compute observations.""" return self.obs_buf # shape = (num_envs, num_observations) def compute_reward(self): """Detect successes and failures. 
Update reward and reset buffers.""" self._update_rew_buf() self._update_reset_buf() def _update_rew_buf(self): """Compute reward at current timestep.""" pass def _update_reset_buf(self): """Assign environments for reset if successful or failed.""" pass def reset_idx(self, env_ids): """Reset specified environments.""" self._reset_franka(env_ids) self._reset_object(env_ids) self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _reset_franka(self, env_ids): """Reset DOF states and DOF targets of Franka.""" # shape of dof_pos = (num_envs, num_dofs) # shape of dof_vel = (num_envs, num_dofs) # Initialize Franka to middle of joint limits, plus joint noise franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0], self.franka_handles[0]) # same across all envs lower_lims = franka_dof_props['lower'] upper_lims = franka_dof_props['upper'] self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \ + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180 self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0 franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids] self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(franka_actor_ids_sim_int32), len(franka_actor_ids_sim_int32)) self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos)) def _reset_object(self, env_ids): """Reset root state of plug.""" # shape of root_pos = (num_envs, num_actors, 3) # shape of root_quat = (num_envs, num_actors, 4) # shape of root_linvel = (num_envs, num_actors, 3) # shape of root_angvel = (num_envs, num_actors, 3) if self.cfg_task.randomize.initial_state == 'random': self.root_pos[env_ids, self.plug_actor_id_env] = \ torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy, self.cfg_task.randomize.plug_bias_y + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.plug_noise_xy, torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.plug_bias_z)), dim=1) elif self.cfg_task.randomize.initial_state == 'goal': self.root_pos[env_ids, self.plug_actor_id_env] = torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device) self.root_linvel[env_ids, self.plug_actor_id_env] = 0.0 self.root_angvel[env_ids, self.plug_actor_id_env] = 0.0 plug_actor_ids_sim_int32 = self.plug_actor_ids_sim.to(dtype=torch.int32, device=self.device) self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(plug_actor_ids_sim_int32[env_ids]), len(plug_actor_ids_sim_int32[env_ids])) def _reset_buffers(self, env_ids): """Reset buffers. """ self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def _set_viewer_params(self): """Set viewer parameters.""" cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0) cam_target = gymapi.Vec3(0.0, 0.0, 0.5) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
9,283
Python
45.42
170
0.636971
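The insertion task's _reset_franka initializes each arm joint to the middle of its limits, plus uniform noise of +/- joint_noise degrees. A standalone sketch of that reset rule with hypothetical joint limits (the real limits come from the actor's DOF properties):

# Sketch of the joint-limit-midpoint reset; the limit values here are made up.
import math
import torch

num_envs = 3
lower = torch.tensor([-2.9, -1.8, -2.9, -3.1, -2.9, 0.0, -2.9])
upper = torch.tensor([2.9, 1.8, 2.9, -0.1, 2.9, 3.8, 2.9])
joint_noise_deg = 5.0

mid = (lower + upper) * 0.5  # shape = (7,)
noise = (torch.rand((num_envs, 1)) * 2.0 - 1.0) * joint_noise_deg * math.pi / 180  # U(-1, 1), scaled to rad
dof_pos = mid.unsqueeze(0) + noise  # note: one noise sample per env, shared across joints (as in the code above)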
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_insertion.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for insertion env. Inherits base class and abstract environment class. Inherited by insertion task class. Not directly executed. Configuration defined in FactoryEnvInsertion.yaml. Asset info defined in factory_asset_info_insertion.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvInsertion(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvInsertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir) self.asset_info_insertion = hydra.compose(config_name=asset_info_path) self.asset_info_insertion = self.asset_info_insertion['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() plug_assets, socket_assets = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset) def _import_env_assets(self): """Set plug and socket asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') plug_options = gymapi.AssetOptions() plug_options.flip_visual_attachments = False plug_options.fix_base_link = False plug_options.thickness = 0.0 # default = 0.02 plug_options.armature = 0.0 # default = 0.0 plug_options.use_physx_armature = True plug_options.linear_damping = 0.0 # default = 0.0 plug_options.max_linear_velocity = 1000.0 # default = 1000.0 plug_options.angular_damping = 0.0 # default = 0.5 plug_options.max_angular_velocity = 64.0 # default = 64.0 plug_options.disable_gravity = False plug_options.enable_gyroscopic_forces = True plug_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE plug_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: plug_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE socket_options = gymapi.AssetOptions() socket_options.flip_visual_attachments = False socket_options.fix_base_link = True socket_options.thickness = 0.0 # default = 0.02 socket_options.armature = 0.0 # default = 0.0 socket_options.use_physx_armature = True socket_options.linear_damping = 0.0 # default = 0.0 socket_options.max_linear_velocity = 1000.0 # default = 1000.0 socket_options.angular_damping = 0.0 # default = 0.5 socket_options.max_angular_velocity = 64.0 # default = 64.0 socket_options.disable_gravity = False socket_options.enable_gyroscopic_forces = True socket_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE socket_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: socket_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE plug_assets = [] socket_assets = [] for subassembly in self.cfg_env.env.desired_subassemblies: components = list(self.asset_info_insertion[subassembly]) plug_file = self.asset_info_insertion[subassembly][components[0]]['urdf_path'] + '.urdf' socket_file = self.asset_info_insertion[subassembly][components[1]]['urdf_path'] + '.urdf' plug_options.density = self.asset_info_insertion[subassembly][components[0]]['density'] socket_options.density = self.asset_info_insertion[subassembly][components[1]]['density'] plug_asset = self.gym.load_asset(self.sim, urdf_root, plug_file, plug_options) socket_asset = self.gym.load_asset(self.sim, urdf_root, socket_file, socket_options) plug_assets.append(plug_asset) socket_assets.append(socket_asset) return plug_assets, socket_assets def _create_actors(self, lower, upper, num_per_row, franka_asset, plug_assets, socket_assets, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.plug_handles = [] self.socket_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.plug_actor_ids_sim = [] # within-sim indices self.socket_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies)) subassembly = self.cfg_env.env.desired_subassemblies[j] components = list(self.asset_info_insertion[subassembly]) plug_pose = gymapi.Transform() plug_pose.p.x = 0.0 plug_pose.p.y = self.cfg_env.env.plug_lateral_offset plug_pose.p.z = self.cfg_base.env.table_height plug_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) plug_handle = self.gym.create_actor(env_ptr, plug_assets[j], plug_pose, 'plug', i, 0, 0) self.plug_actor_ids_sim.append(actor_count) actor_count += 1 socket_pose = gymapi.Transform() socket_pose.p.x = 0.0 socket_pose.p.y = 0.0 socket_pose.p.z = self.cfg_base.env.table_height socket_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) socket_handle = self.gym.create_actor(env_ptr, socket_assets[j], socket_pose, 'socket', i, 0, 0) self.socket_actor_ids_sim.append(actor_count) actor_count += 1 table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) plug_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, plug_handle) plug_shape_props[0].friction = self.asset_info_insertion[subassembly][components[0]]['friction'] plug_shape_props[0].rolling_friction = 0.0 # default = 0.0 
plug_shape_props[0].torsion_friction = 0.0 # default = 0.0 plug_shape_props[0].restitution = 0.0 # default = 0.0 plug_shape_props[0].compliance = 0.0 # default = 0.0 plug_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, plug_handle, plug_shape_props) socket_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, socket_handle) socket_shape_props[0].friction = self.asset_info_insertion[subassembly][components[1]]['friction'] socket_shape_props[0].rolling_friction = 0.0 # default = 0.0 socket_shape_props[0].torsion_friction = 0.0 # default = 0.0 socket_shape_props[0].restitution = 0.0 # default = 0.0 socket_shape_props[0].compliance = 0.0 # default = 0.0 socket_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, socket_handle, socket_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.plug_handles.append(plug_handle) self.socket_handles.append(socket_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device) self.plug_actor_ids_sim = torch.tensor(self.plug_actor_ids_sim, dtype=torch.int32, device=self.device) self.socket_actor_ids_sim = torch.tensor(self.socket_actor_ids_sim, dtype=torch.int32, device=self.device) # For extracting root pos/quat self.plug_actor_id_env = self.gym.find_actor_index(env_ptr, 'plug', gymapi.DOMAIN_ENV) self.socket_actor_id_env = self.gym.find_actor_index(env_ptr, 'socket', gymapi.DOMAIN_ENV) # For extracting body pos/quat, force, and Jacobian self.plug_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, plug_handle, 'plug', gymapi.DOMAIN_ENV) self.socket_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, socket_handle, 'socket', gymapi.DOMAIN_ENV) self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ENV) self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ENV) self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ENV) self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_fingertip_centered', gymapi.DOMAIN_ENV) def _acquire_env_tensors(self): """Acquire and wrap tensors. 
Create views.""" self.plug_pos = self.root_pos[:, self.plug_actor_id_env, 0:3] self.plug_quat = self.root_quat[:, self.plug_actor_id_env, 0:4] self.plug_linvel = self.root_linvel[:, self.plug_actor_id_env, 0:3] self.plug_angvel = self.root_angvel[:, self.plug_actor_id_env, 0:3] self.socket_pos = self.root_pos[:, self.socket_actor_id_env, 0:3] self.socket_quat = self.root_quat[:, self.socket_actor_id_env, 0:4] # TODO: Define socket height and plug height params in asset info YAML. # self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos, # quat=self.plug_quat, # offset=self.socket_heights + self.plug_heights * 0.5, # device=self.device) self.plug_com_quat = self.plug_quat # always equal # self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel, # (self.plug_com_pos - self.plug_pos), # dim=1) self.plug_com_angvel = self.plug_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. # TODO: Define socket height and plug height params in asset info YAML. # self.plug_com_pos = self.translate_along_local_z(pos=self.plug_pos, # quat=self.plug_quat, # offset=self.socket_heights + self.plug_heights * 0.5, # device=self.device) # self.plug_com_linvel = self.plug_linvel + torch.cross(self.plug_angvel, # (self.plug_com_pos - self.plug_pos), # dim=1)
18,207
Python
55.722741
143
0.612512
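The commented-out plug_com_pos lines above rely on a translate-along-local-z helper: rotate a local z-offset into the world frame with the body quaternion and add it to the body position. A hedged pure-torch sketch of that operation, assuming the (x, y, z, w) quaternion convention used elsewhere in these files; this is an illustration, not the repo's fc.translate_along_local_z:

# Sketch of displacing a point along a body's local z-axis; an assumption-labeled
# stand-in for the helper referenced in the comments above.
import torch

def quat_rotate(quat, vec):
    """Rotate vec by quat; quat shape (N, 4) as (x, y, z, w), vec shape (N, 3)."""
    q_vec, q_w = quat[:, 0:3], quat[:, 3:4]
    t = 2.0 * torch.cross(q_vec, vec, dim=-1)
    return vec + q_w * t + torch.cross(q_vec, t, dim=-1)

def translate_along_local_z(pos, quat, offset):
    """Return pos displaced by offset along the body frame's z-axis."""
    local_z = torch.zeros_like(pos)
    local_z[:, 2] = offset
    return pos + quat_rotate(quat, local_z)

pos = torch.zeros((2, 3))
quat = torch.tensor([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]])  # identity rotations
print(translate_along_local_z(pos, quat, offset=0.05))  # each point displaced 0.05 along +z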
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_schema_config_base.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: schema for base class configuration. Used by Hydra. Defines template for base class YAML file. """ from dataclasses import dataclass @dataclass class Mode: export_scene: bool # export scene to USD export_states: bool # export states to NPY @dataclass class PhysX: solver_type: int # default = 1 (Temporal Gauss-Seidel) num_threads: int num_subscenes: int use_gpu: bool num_position_iterations: int # number of position iterations for solver (default = 4) num_velocity_iterations: int # number of velocity iterations for solver (default = 1) contact_offset: float # default = 0.02 rest_offset: float # default = 0.001 bounce_threshold_velocity: float # default = 0.01 max_depenetration_velocity: float # default = 100.0 friction_offset_threshold: float # default = 0.04 friction_correlation_distance: float # default = 0.025 max_gpu_contact_pairs: int # default = 1024 * 1024 default_buffer_size_multiplier: float contact_collection: int # 0: CC_NEVER (do not collect contact info), 1: CC_LAST_SUBSTEP (collect contact info on last substep), 2: CC_ALL_SUBSTEPS (collect contact info at all substeps) @dataclass class Sim: dt: float # timestep size (default = 1.0 / 60.0) num_substeps: int # number of substeps (default = 2) up_axis: str use_gpu_pipeline: bool gravity: list # gravitational acceleration vector add_damping: bool # add damping to stabilize gripper-object interactions physx: PhysX @dataclass class Env: env_spacing: float # lateral offset between envs franka_depth: float # depth offset of Franka base relative to env origin table_height: float # height of table franka_friction: float # coefficient of friction associated with Franka table_friction: float # coefficient of friction associated with table @dataclass class FactorySchemaConfigBase: mode: Mode sim: Sim env: Env
3,523
Python
39.505747
190
0.741981
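Schemas like the one above are consumed by Hydra/OmegaConf, which validate a composed YAML config against the dataclass fields and types. A minimal sketch of that validation step using OmegaConf directly, with a hypothetical two-field Env to stay self-contained:

# Sketch of structured-config validation; this Env is a cut-down stand-in for the
# schema above, not the full Factory config.
from dataclasses import dataclass
from omegaconf import OmegaConf

@dataclass
class Env:
    env_spacing: float
    table_height: float

schema = OmegaConf.structured(Env)
cfg = OmegaConf.merge(schema, OmegaConf.create({'env_spacing': 0.5, 'table_height': 0.4}))
print(cfg.table_height)  # 0.4; a wrong type or an unknown key would raise a validation error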
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_env_nut_bolt.py
# Copyright (c) 2021-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Factory: class for nut-bolt env. Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed. Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml. """ import hydra import numpy as np import os import torch from isaacgym import gymapi from isaacgymenvs.tasks.factory.factory_base import FactoryBase import isaacgymenvs.tasks.factory.factory_control as fc from isaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv from isaacgymenvs.tasks.factory.factory_schema_config_env import FactorySchemaConfigEnv class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): """Initialize instance variables. Initialize environment superclass. Acquire tensors.""" self._get_env_yaml_params() super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render) self.acquire_base_tensors() # defined in superclass self._acquire_env_tensors() self.refresh_base_tensors() # defined in superclass self.refresh_env_tensors() def _get_env_yaml_params(self): """Initialize instance variables from YAML files.""" cs = hydra.core.config_store.ConfigStore.instance() cs.store(name='factory_schema_config_env', node=FactorySchemaConfigEnv) config_path = 'task/FactoryEnvNutBolt.yaml' # relative to Hydra search path (cfg dir) self.cfg_env = hydra.compose(config_name=config_path) self.cfg_env = self.cfg_env['task'] # strip superfluous nesting asset_info_path = '../../assets/factory/yaml/factory_asset_info_nut_bolt.yaml' self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path) self.asset_info_nut_bolt = self.asset_info_nut_bolt['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting def create_envs(self): """Set env options. Import assets. 
Create actors.""" lower = gymapi.Vec3(-self.cfg_base.env.env_spacing, -self.cfg_base.env.env_spacing, 0.0) upper = gymapi.Vec3(self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing, self.cfg_base.env.env_spacing) num_per_row = int(np.sqrt(self.num_envs)) self.print_sdf_warning() franka_asset, table_asset = self.import_franka_assets() nut_asset, bolt_asset = self._import_env_assets() self._create_actors(lower, upper, num_per_row, franka_asset, nut_asset, bolt_asset, table_asset) def _import_env_assets(self): """Set nut and bolt asset options. Import assets.""" urdf_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'assets', 'factory', 'urdf') nut_options = gymapi.AssetOptions() nut_options.flip_visual_attachments = False nut_options.fix_base_link = False nut_options.thickness = 0.0 # default = 0.02 nut_options.armature = 0.0 # default = 0.0 nut_options.use_physx_armature = True nut_options.linear_damping = 0.0 # default = 0.0 nut_options.max_linear_velocity = 1000.0 # default = 1000.0 nut_options.angular_damping = 0.0 # default = 0.5 nut_options.max_angular_velocity = 64.0 # default = 64.0 nut_options.disable_gravity = False nut_options.enable_gyroscopic_forces = True nut_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE nut_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: nut_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE bolt_options = gymapi.AssetOptions() bolt_options.flip_visual_attachments = False bolt_options.fix_base_link = True bolt_options.thickness = 0.0 # default = 0.02 bolt_options.armature = 0.0 # default = 0.0 bolt_options.use_physx_armature = True bolt_options.linear_damping = 0.0 # default = 0.0 bolt_options.max_linear_velocity = 1000.0 # default = 1000.0 bolt_options.angular_damping = 0.0 # default = 0.5 bolt_options.max_angular_velocity = 64.0 # default = 64.0 bolt_options.disable_gravity = False bolt_options.enable_gyroscopic_forces = True bolt_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE bolt_options.use_mesh_materials = False if self.cfg_base.mode.export_scene: bolt_options.mesh_normal_mode = gymapi.COMPUTE_PER_FACE nut_assets = [] bolt_assets = [] for subassembly in self.cfg_env.env.desired_subassemblies: components = list(self.asset_info_nut_bolt[subassembly]) nut_file = self.asset_info_nut_bolt[subassembly][components[0]]['urdf_path'] + '.urdf' bolt_file = self.asset_info_nut_bolt[subassembly][components[1]]['urdf_path'] + '.urdf' nut_options.density = self.cfg_env.env.nut_bolt_density bolt_options.density = self.cfg_env.env.nut_bolt_density nut_asset = self.gym.load_asset(self.sim, urdf_root, nut_file, nut_options) bolt_asset = self.gym.load_asset(self.sim, urdf_root, bolt_file, bolt_options) nut_assets.append(nut_asset) bolt_assets.append(bolt_asset) return nut_assets, bolt_assets def _create_actors(self, lower, upper, num_per_row, franka_asset, nut_assets, bolt_assets, table_asset): """Set initial actor poses. Create actors. 
Set shape and DOF properties.""" franka_pose = gymapi.Transform() franka_pose.p.x = self.cfg_base.env.franka_depth franka_pose.p.y = 0.0 franka_pose.p.z = 0.0 franka_pose.r = gymapi.Quat(0.0, 0.0, 1.0, 0.0) table_pose = gymapi.Transform() table_pose.p.x = 0.0 table_pose.p.y = 0.0 table_pose.p.z = self.cfg_base.env.table_height * 0.5 table_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) self.env_ptrs = [] self.franka_handles = [] self.nut_handles = [] self.bolt_handles = [] self.table_handles = [] self.shape_ids = [] self.franka_actor_ids_sim = [] # within-sim indices self.nut_actor_ids_sim = [] # within-sim indices self.bolt_actor_ids_sim = [] # within-sim indices self.table_actor_ids_sim = [] # within-sim indices actor_count = 0 self.nut_heights = [] self.nut_widths_max = [] self.bolt_widths = [] self.bolt_head_heights = [] self.bolt_shank_lengths = [] self.thread_pitches = [] for i in range(self.num_envs): env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) if self.cfg_env.sim.disable_franka_collisions: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i + self.num_envs, 0, 0) else: franka_handle = self.gym.create_actor(env_ptr, franka_asset, franka_pose, 'franka', i, 0, 0) self.franka_actor_ids_sim.append(actor_count) actor_count += 1 j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies)) subassembly = self.cfg_env.env.desired_subassemblies[j] components = list(self.asset_info_nut_bolt[subassembly]) nut_pose = gymapi.Transform() nut_pose.p.x = 0.0 nut_pose.p.y = self.cfg_env.env.nut_lateral_offset nut_pose.p.z = self.cfg_base.env.table_height nut_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) nut_handle = self.gym.create_actor(env_ptr, nut_assets[j], nut_pose, 'nut', i, 0, 0) self.nut_actor_ids_sim.append(actor_count) actor_count += 1 nut_height = self.asset_info_nut_bolt[subassembly][components[0]]['height'] nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]]['width_max'] self.nut_heights.append(nut_height) self.nut_widths_max.append(nut_width_max) bolt_pose = gymapi.Transform() bolt_pose.p.x = 0.0 bolt_pose.p.y = 0.0 bolt_pose.p.z = self.cfg_base.env.table_height bolt_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) bolt_handle = self.gym.create_actor(env_ptr, bolt_assets[j], bolt_pose, 'bolt', i, 0, 0) self.bolt_actor_ids_sim.append(actor_count) actor_count += 1 bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]['width'] bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]]['head_height'] bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]]['shank_length'] self.bolt_widths.append(bolt_width) self.bolt_head_heights.append(bolt_head_height) self.bolt_shank_lengths.append(bolt_shank_length) thread_pitch = self.asset_info_nut_bolt[subassembly]['thread_pitch'] self.thread_pitches.append(thread_pitch) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, 'table', i, 0, 0) self.table_actor_ids_sim.append(actor_count) actor_count += 1 link7_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_link7', gymapi.DOMAIN_ACTOR) hand_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', gymapi.DOMAIN_ACTOR) left_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ACTOR) right_finger_id = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ACTOR) self.shape_ids = [link7_id, hand_id, left_finger_id, right_finger_id] 
franka_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, franka_handle) for shape_id in self.shape_ids: franka_shape_props[shape_id].friction = self.cfg_base.env.franka_friction franka_shape_props[shape_id].rolling_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].torsion_friction = 0.0 # default = 0.0 franka_shape_props[shape_id].restitution = 0.0 # default = 0.0 franka_shape_props[shape_id].compliance = 0.0 # default = 0.0 franka_shape_props[shape_id].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, franka_handle, franka_shape_props) nut_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, nut_handle) nut_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction nut_shape_props[0].rolling_friction = 0.0 # default = 0.0 nut_shape_props[0].torsion_friction = 0.0 # default = 0.0 nut_shape_props[0].restitution = 0.0 # default = 0.0 nut_shape_props[0].compliance = 0.0 # default = 0.0 nut_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, nut_handle, nut_shape_props) bolt_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, bolt_handle) bolt_shape_props[0].friction = self.cfg_env.env.nut_bolt_friction bolt_shape_props[0].rolling_friction = 0.0 # default = 0.0 bolt_shape_props[0].torsion_friction = 0.0 # default = 0.0 bolt_shape_props[0].restitution = 0.0 # default = 0.0 bolt_shape_props[0].compliance = 0.0 # default = 0.0 bolt_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, bolt_handle, bolt_shape_props) table_shape_props = self.gym.get_actor_rigid_shape_properties(env_ptr, table_handle) table_shape_props[0].friction = self.cfg_base.env.table_friction table_shape_props[0].rolling_friction = 0.0 # default = 0.0 table_shape_props[0].torsion_friction = 0.0 # default = 0.0 table_shape_props[0].restitution = 0.0 # default = 0.0 table_shape_props[0].compliance = 0.0 # default = 0.0 table_shape_props[0].thickness = 0.0 # default = 0.0 self.gym.set_actor_rigid_shape_properties(env_ptr, table_handle, table_shape_props) self.franka_num_dofs = self.gym.get_actor_dof_count(env_ptr, franka_handle) self.gym.enable_actor_dof_force_sensors(env_ptr, franka_handle) self.env_ptrs.append(env_ptr) self.franka_handles.append(franka_handle) self.nut_handles.append(nut_handle) self.bolt_handles.append(bolt_handle) self.table_handles.append(table_handle) self.num_actors = int(actor_count / self.num_envs) # per env self.num_bodies = self.gym.get_env_rigid_body_count(env_ptr) # per env self.num_dofs = self.gym.get_env_dof_count(env_ptr) # per env # For setting targets self.franka_actor_ids_sim = torch.tensor(self.franka_actor_ids_sim, dtype=torch.int32, device=self.device) self.nut_actor_ids_sim = torch.tensor(self.nut_actor_ids_sim, dtype=torch.int32, device=self.device) self.bolt_actor_ids_sim = torch.tensor(self.bolt_actor_ids_sim, dtype=torch.int32, device=self.device) # For extracting root pos/quat self.nut_actor_id_env = self.gym.find_actor_index(env_ptr, 'nut', gymapi.DOMAIN_ENV) self.bolt_actor_id_env = self.gym.find_actor_index(env_ptr, 'bolt', gymapi.DOMAIN_ENV) # For extracting body pos/quat, force, and Jacobian self.nut_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, nut_handle, 'nut', gymapi.DOMAIN_ENV) self.bolt_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, bolt_handle, 'bolt', gymapi.DOMAIN_ENV) self.hand_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_hand', 
gymapi.DOMAIN_ENV) self.left_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_leftfinger', gymapi.DOMAIN_ENV) self.right_finger_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_rightfinger', gymapi.DOMAIN_ENV) self.fingertip_centered_body_id_env = self.gym.find_actor_rigid_body_index(env_ptr, franka_handle, 'panda_fingertip_centered', gymapi.DOMAIN_ENV) # For computing body COM pos self.nut_heights = torch.tensor(self.nut_heights, device=self.device).unsqueeze(-1) self.bolt_head_heights = torch.tensor(self.bolt_head_heights, device=self.device).unsqueeze(-1) # For setting initial state self.nut_widths_max = torch.tensor(self.nut_widths_max, device=self.device).unsqueeze(-1) self.bolt_shank_lengths = torch.tensor(self.bolt_shank_lengths, device=self.device).unsqueeze(-1) # For defining success or failure self.bolt_widths = torch.tensor(self.bolt_widths, device=self.device).unsqueeze(-1) self.thread_pitches = torch.tensor(self.thread_pitches, device=self.device).unsqueeze(-1) def _acquire_env_tensors(self): """Acquire and wrap tensors. Create views.""" self.nut_pos = self.root_pos[:, self.nut_actor_id_env, 0:3] self.nut_quat = self.root_quat[:, self.nut_actor_id_env, 0:4] self.nut_linvel = self.root_linvel[:, self.nut_actor_id_env, 0:3] self.nut_angvel = self.root_angvel[:, self.nut_actor_id_env, 0:3] self.bolt_pos = self.root_pos[:, self.bolt_actor_id_env, 0:3] self.bolt_quat = self.root_quat[:, self.bolt_actor_id_env, 0:4] self.nut_force = self.contact_force[:, self.nut_body_id_env, 0:3] self.bolt_force = self.contact_force[:, self.bolt_body_id_env, 0:3] self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos, quat=self.nut_quat, offset=self.bolt_head_heights + self.nut_heights * 0.5, device=self.device) self.nut_com_quat = self.nut_quat # always equal self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1) self.nut_com_angvel = self.nut_angvel # always equal def refresh_env_tensors(self): """Refresh tensors.""" # NOTE: Tensor refresh functions should be called once per step, before setters. self.nut_com_pos = fc.translate_along_local_z(pos=self.nut_pos, quat=self.nut_quat, offset=self.bolt_head_heights + self.nut_heights * 0.5, device=self.device) self.nut_com_linvel = self.nut_linvel + torch.cross(self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1)
19,505
Python
53.486033
141
0.613176
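The nut_com_linvel expression above applies the rigid-body identity v_p = v_o + omega x (p - o): the velocity of any body-fixed point equals the root velocity plus the angular velocity crossed with the lever arm. A standalone numeric check of that identity in torch, with made-up values:

# Numeric check of the COM-velocity formula used above; all values are made up.
import torch

nut_linvel = torch.tensor([[0.1, 0.0, 0.0]])   # velocity of the root origin o
nut_angvel = torch.tensor([[0.0, 0.0, 1.0]])   # angular velocity omega
nut_pos = torch.tensor([[0.0, 0.0, 0.0]])      # root origin o
nut_com_pos = torch.tensor([[0.0, 0.1, 0.0]])  # body-fixed point p (here, the COM)

nut_com_linvel = nut_linvel + torch.cross(nut_angvel, nut_com_pos - nut_pos, dim=1)
print(nut_com_linvel)  # tensor([[0., 0., 0.]]): the rotation exactly cancels the translation here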
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_control.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: control module.

Imported by base, environment, and task classes. Not directly executed.
"""

import math

import torch

from isaacgymenvs.utils import torch_jit_utils as torch_utils


def compute_dof_pos_target(cfg_ctrl,
                           arm_dof_pos,
                           fingertip_midpoint_pos,
                           fingertip_midpoint_quat,
                           jacobian,
                           ctrl_target_fingertip_midpoint_pos,
                           ctrl_target_fingertip_midpoint_quat,
                           ctrl_target_gripper_dof_pos,
                           device):
    """Compute Franka DOF position target to move fingertips towards target pose."""

    ctrl_target_dof_pos = torch.zeros((cfg_ctrl['num_envs'], 9), device=device)

    pos_error, axis_angle_error = get_pose_error(
        fingertip_midpoint_pos=fingertip_midpoint_pos,
        fingertip_midpoint_quat=fingertip_midpoint_quat,
        ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
        ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
        jacobian_type=cfg_ctrl['jacobian_type'],
        rot_error_type='axis_angle')

    delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
    delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose,
                                           ik_method=cfg_ctrl['ik_method'],
                                           jacobian=jacobian,
                                           device=device)

    ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos
    ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos  # gripper finger joints

    return ctrl_target_dof_pos


def compute_dof_torque(cfg_ctrl,
                       dof_pos,
                       dof_vel,
                       fingertip_midpoint_pos,
                       fingertip_midpoint_quat,
                       fingertip_midpoint_linvel,
                       fingertip_midpoint_angvel,
                       left_finger_force,
                       right_finger_force,
                       jacobian,
                       arm_mass_matrix,
                       ctrl_target_gripper_dof_pos,
                       ctrl_target_fingertip_midpoint_pos,
                       ctrl_target_fingertip_midpoint_quat,
                       ctrl_target_fingertip_contact_wrench,
                       device):
    """Compute Franka DOF torque to move fingertips towards target pose."""
    # References:
    # 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
    # 2) Modern Robotics

    dof_torque = torch.zeros((cfg_ctrl['num_envs'], 9), device=device)

    if cfg_ctrl['gain_space'] == 'joint':
        pos_error, axis_angle_error = get_pose_error(
            fingertip_midpoint_pos=fingertip_midpoint_pos,
            fingertip_midpoint_quat=fingertip_midpoint_quat,
            ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
            ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
            jacobian_type=cfg_ctrl['jacobian_type'],
            rot_error_type='axis_angle')
        delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)

        # Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72)
        delta_arm_dof_pos = _get_delta_dof_pos(delta_pose=delta_fingertip_pose,
                                               ik_method=cfg_ctrl['ik_method'],
                                               jacobian=jacobian,
                                               device=device)
        dof_torque[:, 0:7] = cfg_ctrl['joint_prop_gains'] * delta_arm_dof_pos + \
                             cfg_ctrl['joint_deriv_gains'] * (0.0 - dof_vel[:, 0:7])

        if cfg_ctrl['do_inertial_comp']:
            # Set tau = M * tau, where M is the joint-space mass matrix
            arm_mass_matrix_joint = arm_mass_matrix
            dof_torque[:, 0:7] = (arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)).squeeze(-1)

    elif cfg_ctrl['gain_space'] == 'task':
        task_wrench = torch.zeros((cfg_ctrl['num_envs'], 6), device=device)

        if cfg_ctrl['do_motion_ctrl']:
            pos_error, axis_angle_error = get_pose_error(
                fingertip_midpoint_pos=fingertip_midpoint_pos,
                fingertip_midpoint_quat=fingertip_midpoint_quat,
                ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
                ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
                jacobian_type=cfg_ctrl['jacobian_type'],
                rot_error_type='axis_angle')
            delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)

            # Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98)
            task_wrench_motion = _apply_task_space_gains(delta_fingertip_pose=delta_fingertip_pose,
                                                         fingertip_midpoint_linvel=fingertip_midpoint_linvel,
                                                         fingertip_midpoint_angvel=fingertip_midpoint_angvel,
                                                         task_prop_gains=cfg_ctrl['task_prop_gains'],
                                                         task_deriv_gains=cfg_ctrl['task_deriv_gains'])

            if cfg_ctrl['do_inertial_comp']:
                # Set tau = Lambda * tau, where Lambda is the task-space mass matrix
                jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
                arm_mass_matrix_task = torch.inverse(
                    jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T)  # ETH eq. 3.86; geometric Jacobian is assumed
                task_wrench_motion = (arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)).squeeze(-1)

            task_wrench = task_wrench + torch.tensor(cfg_ctrl['motion_ctrl_axes'], device=device).unsqueeze(0) * task_wrench_motion

        if cfg_ctrl['do_force_ctrl']:
            # Set tau = tau + F_t, where F_t is the target contact wrench
            task_wrench_force = torch.zeros((cfg_ctrl['num_envs'], 6), device=device)
            task_wrench_force = task_wrench_force + ctrl_target_fingertip_contact_wrench  # open-loop force control (building towards ETH eq. 3.96-3.98)

            if cfg_ctrl['force_ctrl_method'] == 'closed':
                force_error, torque_error = _get_wrench_error(
                    left_finger_force=left_finger_force,
                    right_finger_force=right_finger_force,
                    ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench,
                    num_envs=cfg_ctrl['num_envs'],
                    device=device)

                # Set tau = tau + k_p * contact_wrench_error
                task_wrench_force = task_wrench_force + cfg_ctrl['wrench_prop_gains'] * torch.cat(
                    (force_error, torque_error), dim=1)  # part of Modern Robotics eq. 11.61

            task_wrench = task_wrench + torch.tensor(cfg_ctrl['force_ctrl_axes'], device=device).unsqueeze(0) * task_wrench_force

        # Set tau = J^T * tau, i.e., map tau into joint space as desired
        jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
        dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1)

    dof_torque[:, 7:9] = cfg_ctrl['gripper_prop_gains'] * (ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]) + \
                         cfg_ctrl['gripper_deriv_gains'] * (0.0 - dof_vel[:, 7:9])  # gripper finger joints

    dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0)

    return dof_torque


def get_pose_error(fingertip_midpoint_pos,
                   fingertip_midpoint_quat,
                   ctrl_target_fingertip_midpoint_pos,
                   ctrl_target_fingertip_midpoint_quat,
                   jacobian_type,
                   rot_error_type):
    """Compute task-space error between target Franka fingertip pose and current pose."""
    # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf

    # Compute pos error
    pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos

    # Compute rot error
    if jacobian_type == 'geometric':  # See example 2.9.8; note use of J_g and transformation between rotation vectors
        # Compute quat error (i.e., difference quat)
        # Reference: https://personal.utdallas.edu/~sxb027100/dock/quat.html
        fingertip_midpoint_quat_norm = torch_utils.quat_mul(fingertip_midpoint_quat,
                                                            torch_utils.quat_conjugate(fingertip_midpoint_quat))[:, 3]  # scalar component
        fingertip_midpoint_quat_inv = torch_utils.quat_conjugate(
            fingertip_midpoint_quat) / fingertip_midpoint_quat_norm.unsqueeze(-1)
        quat_error = torch_utils.quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv)

        # Convert to axis-angle error
        axis_angle_error = axis_angle_from_quat(quat_error)

    elif jacobian_type == 'analytic':  # See example 2.9.7; note use of J_a and difference of rotation vectors
        # Compute axis-angle error
        axis_angle_error = axis_angle_from_quat(ctrl_target_fingertip_midpoint_quat) \
                           - axis_angle_from_quat(fingertip_midpoint_quat)

    if rot_error_type == 'quat':
        return pos_error, quat_error
    elif rot_error_type == 'axis_angle':
        return pos_error, axis_angle_error


def _get_wrench_error(left_finger_force,
                      right_finger_force,
                      ctrl_target_fingertip_contact_wrench,
                      num_envs,
                      device):
    """Compute task-space error between target Franka fingertip contact wrench and current wrench."""

    fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device)

    fingertip_contact_wrench[:, 0:3] = left_finger_force + right_finger_force  # net contact force on fingers
    # Cols 3 to 6 are all zeros, as we do not have enough information

    force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (-fingertip_contact_wrench[:, 0:3])
    torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (-fingertip_contact_wrench[:, 3:6])

    return force_error, torque_error


def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device):
    """Get delta Franka DOF position from delta pose using specified IK method."""
    # References:
    # 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
    # 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47)

    if ik_method == 'pinv':  # Jacobian pseudoinverse
        k_val = 1.0
        jacobian_pinv = torch.linalg.pinv(jacobian)
        delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
        delta_dof_pos = delta_dof_pos.squeeze(-1)

    elif ik_method == 'trans':  # Jacobian transpose
        k_val = 1.0
        jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
        delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
        delta_dof_pos = delta_dof_pos.squeeze(-1)

    elif ik_method == 'dls':  # damped least squares (Levenberg-Marquardt)
        lambda_val = 0.1
        jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
        lambda_matrix = (lambda_val ** 2) * torch.eye(n=jacobian.shape[1], device=device)
        delta_dof_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
        delta_dof_pos = delta_dof_pos.squeeze(-1)

    elif ik_method == 'svd':  # adaptive SVD
        k_val = 1.0
        U, S, Vh = torch.linalg.svd(jacobian)
        S_inv = 1. / S
        min_singular_value = 1.0e-5
        S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
        jacobian_pinv = torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6] @ torch.diag_embed(S_inv) @ torch.transpose(U, dim0=1, dim1=2)
        delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
        delta_dof_pos = delta_dof_pos.squeeze(-1)

    return delta_dof_pos


def _apply_task_space_gains(delta_fingertip_pose,
                            fingertip_midpoint_linvel,
                            fingertip_midpoint_angvel,
                            task_prop_gains,
                            task_deriv_gains):
    """Interpret PD gains as task-space gains. Apply to task-space error."""

    task_wrench = torch.zeros_like(delta_fingertip_pose)

    # Apply gains to lin error components
    lin_error = delta_fingertip_pose[:, 0:3]
    task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + \
                          task_deriv_gains[:, 0:3] * (0.0 - fingertip_midpoint_linvel)

    # Apply gains to rot error components
    rot_error = delta_fingertip_pose[:, 3:6]
    task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + \
                          task_deriv_gains[:, 3:6] * (0.0 - fingertip_midpoint_angvel)

    return task_wrench


def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device):
    """Convert geometric Jacobian to analytic Jacobian."""
    # Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
    # NOTE: Gym returns world-space geometric Jacobians by default

    batch = num_envs

    # Overview:
    # x = [x_p; x_r]
    # From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot
    # From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv)
    # Eq. 2.12 gives an expression for E_p_inv
    # Eq. 2.107 gives an expression for E_r_inv

    # Compute E_inv_top (i.e., [E_p_inv, 0])
    I = torch.eye(3, device=device)
    E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3)
    E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2)

    # Compute E_inv_bottom (i.e., [0, E_r_inv])
    fingertip_axis_angle = axis_angle_from_quat(fingertip_quat)
    fingertip_axis_angle_cross = get_skew_symm_matrix(fingertip_axis_angle, device=device)
    fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1)
    factor_1 = 1 / (fingertip_angle ** 2)
    factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (1 - torch.cos(fingertip_angle))
    factor_3 = factor_1 * factor_2
    E_r_inv = I \
              - 1 * 0.5 * fingertip_axis_angle_cross \
              + (fingertip_axis_angle_cross @ fingertip_axis_angle_cross) * factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3))
    E_inv_bottom = torch.cat((torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2)

    E_inv = torch.cat((E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))),
                      dim=1).reshape((batch, 6, 6))

    J_a = E_inv @ fingertip_jacobian

    return J_a


def get_skew_symm_matrix(vec, device):
    """Convert vector to skew-symmetric matrix."""
    # Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication

    batch = vec.shape[0]
    I = torch.eye(3, device=device)
    skew_symm = torch.transpose(torch.cross(vec.repeat((1, 3)).reshape((batch * 3, 3)),
                                            I.repeat((batch, 1))).reshape(batch, 3, 3),
                                dim0=1,
                                dim1=2)

    return skew_symm


def translate_along_local_z(pos, quat, offset, device):
    """Translate global body position along local Z-axis and express in global coordinates."""

    num_vecs = pos.shape[0]
    offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1))
    _, translated_pos = torch_utils.tf_combine(q1=quat,
                                               t1=pos,
                                               q2=torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).repeat((num_vecs, 1)),
                                               t2=offset_vec)

    return translated_pos


def axis_angle_from_euler(euler):
    """Convert tensor of Euler angles to tensor of axis-angles."""

    quat = torch_utils.quat_from_euler_xyz(roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2])
    quat = quat * torch.sign(quat[:, 3]).unsqueeze(-1)  # smaller rotation
    axis_angle = axis_angle_from_quat(quat)

    return axis_angle


def axis_angle_from_quat(quat, eps=1.0e-6):
    """Convert tensor of quaternions to tensor of axis-angles."""
    # Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544

    mag = torch.linalg.norm(quat[:, 0:3], dim=1)
    half_angle = torch.atan2(mag, quat[:, 3])
    angle = 2.0 * half_angle
    sin_half_angle_over_angle = torch.where(torch.abs(angle) > eps,
                                            torch.sin(half_angle) / angle,
                                            1 / 2 - angle ** 2.0 / 48)
    axis_angle = quat[:, 0:3] / sin_half_angle_over_angle.unsqueeze(-1)

    return axis_angle


def axis_angle_from_quat_naive(quat):
    """Convert tensor of quaternions to tensor of axis-angles."""
    # Reference: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation
    # NOTE: Susceptible to undesirable behavior due to divide-by-zero

    mag = torch.linalg.vector_norm(quat[:, 0:3], dim=1)  # zero when quat = [0, 0, 0, 1]
    axis = quat[:, 0:3] / mag.unsqueeze(-1)
    angle = 2.0 * torch.atan2(mag, quat[:, 3])
    axis_angle = axis * angle.unsqueeze(-1)

    return axis_angle


def get_rand_quat(num_quats, device):
    """Generate tensor of random quaternions."""
    # Reference: http://planning.cs.uiuc.edu/node198.html

    u = torch.rand((num_quats, 3), device=device)
    quat = torch.zeros((num_quats, 4), device=device)
    quat[:, 0] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
    quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
    quat[:, 2] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
    quat[:, 3] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])

    return quat


def get_nonrand_quat(num_quats, rot_perturbation, device):
    """Generate tensor of non-random quaternions by composing random Euler rotations."""

    quat = torch_utils.quat_from_euler_xyz(
        torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation,
        torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation,
        torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0 - rot_perturbation)

    return quat
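

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal sanity check of
# the helpers above. The module docstring says it is not directly executed, so
# the check is kept behind a __main__ guard and does not affect imports. It
# assumes only a CPU build of PyTorch; `theta` is a local name introduced here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    device = 'cpu'

    # A known rotation of 0.5 rad about z: quat = [0, 0, sin(theta/2), cos(theta/2)].
    theta = 0.5
    quat = torch.tensor([[0.0, 0.0, math.sin(theta / 2), math.cos(theta / 2)]], device=device)
    assert torch.allclose(axis_angle_from_quat(quat),
                          torch.tensor([[0.0, 0.0, theta]], device=device), atol=1.0e-6)

    # Shape check for the damped-least-squares IK step: a hypothetical batch of
    # 2 envs, each with a 6x7 geometric Jacobian, maps a 6-DOF pose delta to a
    # 7-DOF joint delta.
    jacobian = torch.rand((2, 6, 7), device=device)
    delta_pose = torch.rand((2, 6), device=device)
    delta_dof_pos = _get_delta_dof_pos(delta_pose, ik_method='dls', jacobian=jacobian, device=device)
    assert delta_dof_pos.shape == (2, 7)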
20,557
Python
47.947619
163
0.608357
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/factory/factory_task_gears.py
# Copyright (c) 2021-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Factory: Class for gears task.

Inherits gears environment class and abstract task class (not enforced). Can be executed with
python train.py task=FactoryTaskGears

Only the environment is provided; training a successful RL policy is an open research problem left to the user.
"""

import hydra
import math
import omegaconf
import os
import torch

from isaacgym import gymapi, gymtorch

from isaacgymenvs.tasks.factory.factory_env_gears import FactoryEnvGears
from isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask


class FactoryTaskGears(FactoryEnvGears, FactoryABCTask):

    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        """Initialize instance variables. Initialize task superclass."""

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

        self.cfg = cfg
        self._get_task_yaml_params()

        if self.viewer is not None:
            self._set_viewer_params()
        if self.cfg_base.mode.export_scene:
            self.export_scene(label='factory_task_gears')

    def _get_task_yaml_params(self):
        """Initialize instance variables from YAML files."""

        cs = hydra.core.config_store.ConfigStore.instance()
        cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)

        self.cfg_task = omegaconf.OmegaConf.create(self.cfg)
        self.max_episode_length = self.cfg_task.rl.max_episode_length  # required instance var for VecTask

        asset_info_path = '../../assets/factory/yaml/factory_asset_info_gears.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.asset_info_gears = hydra.compose(config_name=asset_info_path)
        self.asset_info_gears = self.asset_info_gears['']['']['']['']['']['']['assets']['factory']['yaml']  # strip superfluous nesting

        ppo_path = 'train/FactoryTaskGearsPPO.yaml'  # relative to Gym's Hydra search path (cfg dir)
        self.cfg_ppo = hydra.compose(config_name=ppo_path)
        self.cfg_ppo = self.cfg_ppo['train']  # strip superfluous nesting

    def _acquire_task_tensors(self):
        """Acquire tensors."""
        pass

    def _refresh_task_tensors(self):
        """Refresh tensors."""
        pass

    def pre_physics_step(self, actions):
        """Reset environments. Apply actions from policy as position/rotation targets, force/torque targets, and/or PD gains."""

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)

        self._actions = actions.clone().to(self.device)  # shape = (num_envs, num_actions); values = [-1, 1]

    def post_physics_step(self):
        """Step buffers. Refresh tensors. Compute observations and reward."""

        self.progress_buf[:] += 1

        self.refresh_base_tensors()
        self.refresh_env_tensors()
        self._refresh_task_tensors()
        self.compute_observations()
        self.compute_reward()

    def compute_observations(self):
        """Compute observations."""

        return self.obs_buf  # shape = (num_envs, num_observations)

    def compute_reward(self):
        """Detect successes and failures. Update reward and reset buffers."""

        self._update_rew_buf()
        self._update_reset_buf()

    def _update_rew_buf(self):
        """Compute reward at current timestep."""
        pass

    def _update_reset_buf(self):
        """Assign environments for reset if successful or failed."""
        pass

    def reset_idx(self, env_ids):
        """Reset specified environments."""

        self._reset_franka(env_ids)
        self._reset_object(env_ids)

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def _reset_franka(self, env_ids):
        """Reset DOF states and DOF targets of Franka."""

        # shape of dof_pos = (num_envs, num_dofs)
        # shape of dof_vel = (num_envs, num_dofs)

        # Initialize Franka to middle of joint limits, plus joint noise
        franka_dof_props = self.gym.get_actor_dof_properties(self.env_ptrs[0], self.franka_handles[0])  # same across all envs
        lower_lims = franka_dof_props['lower']
        upper_lims = franka_dof_props['upper']
        self.dof_pos[:, 0:self.franka_num_dofs] = torch.tensor((lower_lims + upper_lims) * 0.5, device=self.device) \
            + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.joint_noise * math.pi / 180

        self.dof_vel[env_ids, 0:self.franka_num_dofs] = 0.0

        franka_actor_ids_sim_int32 = self.franka_actor_ids_sim.to(dtype=torch.int32, device=self.device)[env_ids]
        self.gym.set_dof_state_tensor_indexed(self.sim,
                                              gymtorch.unwrap_tensor(self.dof_state),
                                              gymtorch.unwrap_tensor(franka_actor_ids_sim_int32),
                                              len(franka_actor_ids_sim_int32))

        self.ctrl_target_dof_pos[env_ids, 0:self.franka_num_dofs] = self.dof_pos[env_ids, 0:self.franka_num_dofs]
        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.ctrl_target_dof_pos))

    def _reset_object(self, env_ids):
        """Reset root state of gears."""

        # shape of root_pos = (num_envs, num_actors, 3)
        # shape of root_quat = (num_envs, num_actors, 4)
        # shape of root_linvel = (num_envs, num_actors, 3)
        # shape of root_angvel = (num_envs, num_actors, 3)

        if self.cfg_task.randomize.initial_state == 'random':
            self.root_pos[env_ids, self.gear_small_actor_id_env] = \
                torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           - self.cfg_task.randomize.gears_bias_y
                           + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)
                           ), dim=1)
            self.root_pos[env_ids, self.gear_medium_actor_id_env] = \
                torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           self.cfg_task.randomize.gears_bias_y
                           + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)
                           ), dim=1)
            self.root_pos[env_ids, self.gear_large_actor_id_env] = \
                torch.cat(((torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           - self.cfg_task.randomize.gears_bias_y
                           + (torch.rand((self.num_envs, 1), device=self.device) * 2.0 - 1.0) * self.cfg_task.randomize.gears_noise_xy,
                           torch.ones((self.num_envs, 1), device=self.device) * (self.cfg_base.env.table_height + self.cfg_task.randomize.gears_bias_z)
                           ), dim=1)

        elif self.cfg_task.randomize.initial_state == 'goal':
            self.root_pos[env_ids, self.gear_small_actor_id_env] = torch.tensor(
                [0.0, 0.0, self.cfg_base.env.table_height], device=self.device)
            self.root_pos[env_ids, self.gear_medium_actor_id_env] = torch.tensor(
                [0.0, 0.0, self.cfg_base.env.table_height], device=self.device)
            self.root_pos[env_ids, self.gear_large_actor_id_env] = torch.tensor(
                [0.0, 0.0, self.cfg_base.env.table_height], device=self.device)

        self.root_linvel[env_ids, self.gear_small_actor_id_env] = 0.0
        self.root_angvel[env_ids, self.gear_small_actor_id_env] = 0.0
        self.root_linvel[env_ids, self.gear_medium_actor_id_env] = 0.0
        self.root_angvel[env_ids, self.gear_medium_actor_id_env] = 0.0
        self.root_linvel[env_ids, self.gear_large_actor_id_env] = 0.0
        self.root_angvel[env_ids, self.gear_large_actor_id_env] = 0.0

        gear_small_actor_ids_sim_int32 = self.gear_small_actor_ids_sim.to(dtype=torch.int32, device=self.device)
        gear_medium_actor_ids_sim_int32 = self.gear_medium_actor_ids_sim.to(dtype=torch.int32, device=self.device)
        gear_large_actor_ids_sim_int32 = self.gear_large_actor_ids_sim.to(dtype=torch.int32, device=self.device)
        gears_actor_ids_sim_int32 = torch.cat((gear_small_actor_ids_sim_int32[env_ids],
                                               gear_medium_actor_ids_sim_int32[env_ids],
                                               gear_large_actor_ids_sim_int32[env_ids]))
        self.gym.set_actor_root_state_tensor_indexed(self.sim,
                                                     gymtorch.unwrap_tensor(self.root_state),
                                                     gymtorch.unwrap_tensor(gears_actor_ids_sim_int32),
                                                     len(gear_small_actor_ids_sim_int32[env_ids])
                                                     + len(gear_medium_actor_ids_sim_int32[env_ids])
                                                     + len(gear_large_actor_ids_sim_int32[env_ids])
                                                     )

    def _reset_buffers(self, env_ids):
        """Reset buffers."""

        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def _set_viewer_params(self):
        """Set viewer parameters."""

        cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)
        cam_target = gymapi.Vec3(0.0, 0.0, 0.5)
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
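

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the reward/reset hooks above
# are intentionally empty, since training a successful policy for this task is
# left open. A user-defined task would subclass FactoryTaskGears and fill them
# in; this minimal example only assumes the standard VecTask buffers (rew_buf,
# reset_buf, progress_buf) and the episode length set in _get_task_yaml_params().
# ---------------------------------------------------------------------------
class ExampleFactoryTaskGears(FactoryTaskGears):

    def _update_rew_buf(self):
        """Placeholder dense reward; a real task would add shaping terms here."""
        self.rew_buf[:] = 0.0  # shape = (num_envs,)

    def _update_reset_buf(self):
        """Reset environments that have reached the episode length limit."""
        self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.max_episode_length - 1,
                                        torch.ones_like(self.reset_buf),
                                        self.reset_buf)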
11,642
Python
50.290749
174
0.624549
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/generate_cuboids.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
from os.path import join
from typing import Callable, List

from jinja2 import Environment, FileSystemLoader, select_autoescape

FilterFunc = Callable[[List[int]], bool]


def generate_assets(
    scales, min_volume, max_volume, generated_assets_dir, base_mesh, base_cube_size_m, filter_funcs: List[FilterFunc]
):
    template_dir = join(os.path.dirname(os.path.abspath(__file__)), "../../../assets/asset_templates")
    print(f"Assets template dir: {template_dir}")
    env = Environment(
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(),
    )
    template = env.get_template("cube_multicolor_allegro.urdf.template")  # <-- pass as function parameter?

    idx = 0
    for x_scale in scales:
        for y_scale in scales:
            for z_scale in scales:
                volume = x_scale * y_scale * z_scale / (100 * 100 * 100)
                if volume > max_volume:
                    continue
                if volume < min_volume:
                    continue

                curr_scales = [x_scale, y_scale, z_scale]
                curr_scales.sort()

                filtered = False
                for filter_func in filter_funcs:
                    if filter_func(curr_scales):
                        filtered = True
                if filtered:
                    continue

                asset = template.render(
                    base_mesh=base_mesh,
                    x_scale=base_cube_size_m * (x_scale / 100),
                    y_scale=base_cube_size_m * (y_scale / 100),
                    z_scale=base_cube_size_m * (z_scale / 100),
                )
                fname = f"{idx:03d}_cube_{x_scale}_{y_scale}_{z_scale}.urdf"
                idx += 1
                with open(join(generated_assets_dir, fname), "w") as fobj:
                    fobj.write(asset)


def filter_thin_plates(scales: List[int]) -> bool:
    """
    Skip cuboids where one dimension is much smaller than the other two - these are very hard to grasp.
    We return True if the object needs to be skipped.
    """
    scales = sorted(scales)
    return scales[0] * 3 <= scales[1]


def generate_default_cube(assets_dir, base_mesh, base_cube_size_m):
    scales = [100]
    min_volume = max_volume = 1.0
    generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])


def generate_small_cuboids(assets_dir, base_mesh, base_cube_size_m):
    scales = [100, 50, 66, 75, 90, 110, 125, 150, 175, 200, 250, 300]
    min_volume = 1.0
    max_volume = 2.5
    generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [])


def generate_big_cuboids(assets_dir, base_mesh, base_cube_size_m):
    scales = [100, 125, 150, 200, 250, 300, 350]
    min_volume = 2.5
    max_volume = 15.0
    generate_assets(scales, min_volume, max_volume, assets_dir, base_mesh, base_cube_size_m, [filter_thin_plates])


def filter_non_elongated(scales: List[int]) -> bool:
    """
    Skip cuboids that are not elongated. One dimension should be significantly larger than the other two.
    We return True if the object needs to be skipped.
    """
    scales = sorted(scales)
    return scales[2] <= scales[0] * 3 or scales[2] <= scales[1] * 3


def generate_sticks(assets_dir, base_mesh, base_cube_size_m):
    scales = [100, 50, 75, 200, 300, 400, 500, 600]
    min_volume = 2.5
    max_volume = 6.0
    generate_assets(
        scales,
        min_volume,
        max_volume,
        assets_dir,
        base_mesh,
        base_cube_size_m,
        [filter_thin_plates, filter_non_elongated],
    )
5,157
Python
37.492537
117
0.645143
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_regrasping.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List, Tuple

import torch
from isaacgym import gymapi
from torch import Tensor

from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective


class AllegroKukaTwoArmsRegrasping(AllegroKukaTwoArmsBase):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.goal_object_indices = []
        self.goal_asset = None

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

    def _object_keypoint_offsets(self):
        """Regrasping task uses only a single object keypoint since we do not care about object orientation."""
        return [[0, 0, 0]]

    def _load_additional_assets(self, object_asset_root, arm_y_offset: float):
        goal_asset_options = gymapi.AssetOptions()
        goal_asset_options.disable_gravity = True
        self.goal_asset = self.gym.load_asset(
            self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
        )
        goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
        goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
        return goal_rb_count, goal_shapes_count

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        goal_start_pose = gymapi.Transform()

        goal_asset = self.goal_asset
        goal_handle = self.gym.create_actor(
            env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
        )
        self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
        self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
        goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
        self.goal_object_indices.append(goal_object_idx)

    def _after_envs_created(self):
        self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)

    def _reset_target(self, env_ids: Tensor) -> None:
        # sample random target location in some volume
        target_volume_origin = self.target_volume_origin
        target_volume_extent = self.target_volume_extent

        target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
        target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
        target_volume_size = target_volume_max_coord - target_volume_min_coord

        rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
        target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size

        # let the target be close to 1st or 2nd arm, randomly
        left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
        x_ofs = 0.75
        x_pos = torch.where(
            left_right_random > 0,
            x_ofs * torch.ones_like(left_right_random),
            -x_ofs * torch.ones_like(left_right_random),
        )
        target_coords[:, 0] += x_pos.squeeze(dim=1)

        self.goal_states[env_ids, 0:3] = target_coords

        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]

        # we also reset the object to its initial position
        self.reset_object_pose(env_ids)

        # since we put the object back on the table, also reset the lifting reward
        self.lifted_object[env_ids] = False

        self.deferred_set_actor_root_state_tensor_indexed(
            [self.object_indices[env_ids], self.goal_object_indices[env_ids]]
        )

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return [self.goal_object_indices[env_ids]]

    def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
        rew_buf, is_success = super().compute_kuka_reward()
        return rew_buf, is_success

    def _true_objective(self) -> Tensor:
        true_objective = tolerance_successes_objective(
            self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
        )
        return true_objective

    def _extra_curriculum(self):
        self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
            self.last_curriculum_update,
            self.frame_since_restart,
            self.tolerance_curriculum_interval,
            self.prev_episode_successes,
            self.success_tolerance,
            self.initial_tolerance,
            self.target_tolerance,
            self.tolerance_curriculum_increment,
        )
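

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the target-sampling scheme
# from _reset_target() above, reproduced standalone with plain torch. The
# volume origin/extent and the x-offset of 0.75 m mirror the values used in
# AllegroKukaTwoArmsBase and _reset_target(), but are hard-coded here purely
# for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    device = "cpu"
    num_envs = 4
    origin = torch.tensor([0.0, 0.0, 0.8], device=device)
    extent = torch.tensor([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], device=device)

    # Uniform sample inside the axis-aligned target volume.
    min_coord = origin + extent[:, 0]
    max_coord = origin + extent[:, 1]
    target_coords = min_coord + torch.rand((num_envs, 3), device=device) * (max_coord - min_coord)

    # Bias each target towards one of the two arms at x = +/-0.75, as in _reset_target().
    left_right_random = torch.rand((num_envs, 1), device=device) * 2.0 - 1.0
    x_ofs = 0.75
    x_pos = torch.where(
        left_right_random > 0,
        x_ofs * torch.ones_like(left_right_random),
        -x_ofs * torch.ones_like(left_right_random),
    )
    target_coords[:, 0] += x_pos.squeeze(dim=1)
    print(target_coords)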
6,376
Python
45.889706
120
0.692597
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import os import tempfile from copy import copy from os.path import join from typing import List, Tuple from isaacgym import gymapi, gymtorch, gymutil from torch import Tensor from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties from isaacgymenvs.tasks.base.vec_task import VecTask from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import ( generate_big_cuboids, generate_default_cube, generate_small_cuboids, generate_sticks, ) from isaacgymenvs.utils.torch_jit_utils import * class AllegroKukaTwoArmsBase(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.frame_since_restart: int = 0 # number of control steps since last restart across all actors self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"] self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"] self.num_arms = self.cfg["env"]["numArms"] assert self.num_arms == 2, f"Only two arms supported, got {self.num_arms}" self.arm_x_ofs = self.cfg["env"]["armXOfs"] self.arm_y_ofs = self.cfg["env"]["armYOfs"] # 4 joints for index, middle, ring, and thumb and 7 for kuka arm self.num_arm_dofs = 7 self.num_finger_dofs = 4 self.num_allegro_fingertips = 4 self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs self.num_allegro_kuka_actions = self.num_hand_arm_dofs * self.num_arms self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"] self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"] self.lifting_bonus = self.cfg["env"]["liftingBonus"] self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"] self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"] # not used in 2-arm task for now # to fix: add to config # 
self.kuka_actions_penalty_scale = self.cfg["env"]["kukaActionsPenaltyScale"] # self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"] self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg) self.initial_tolerance = self.cfg["env"]["successTolerance"] self.success_tolerance = self.initial_tolerance self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"] self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"] self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"] self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"] self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"] self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"] self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) # currently not used in 2-hand env # self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.success_steps: int = self.cfg["env"]["successSteps"] # 1.0 means keypoints correspond to the corners of the object # larger values help the agent to prioritize rotation matching self.keypoint_scale = self.cfg["env"]["keypointScale"] # size of the object (i.e. 
cube) before scaling self.object_base_size = self.cfg["env"]["objectBaseSize"] # whether to sample random object dimensions self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"] self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"] self.with_big_cuboids = self.cfg["env"]["withBigCuboids"] self.with_sticks = self.cfg["env"]["withSticks"] if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", # 0.05m box "table": "urdf/table_wide.urdf", "bucket": "urdf/objects/bucket.urdf", "lightbulb": "lightbulb/A60_E27_SI.urdf", "socket": "E27SocketSimple.urdf", "ball": "urdf/objects/ball.urdf", } self.keypoints_offsets = self._object_keypoint_offsets() self.num_keypoints = len(self.keypoints_offsets) self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] self.fingertip_offsets = np.array( [[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32 ) palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32) self.num_fingertips = len(self.allegro_fingertips) # can be only "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_state"]): raise Exception("Unknown type of observations!") print("Obs type:", self.obs_type) num_dof_pos = num_dof_vel = self.num_hand_arm_dofs * self.num_arms palm_pos_size = 3 * self.num_arms palm_rot_vel_angvel_size = 10 * self.num_arms obj_rot_vel_angvel_size = 10 fingertip_rel_pos_size = 3 * self.num_fingertips * self.num_arms keypoints_rel_palm_size = self.num_keypoints * 3 * self.num_arms keypoints_rel_goal_size = self.num_keypoints * 3 object_scales_size = 3 max_keypoint_dist_size = 1 lifted_object_flag_size = 1 progress_obs_size = 1 + 1 # commented out for now - not used in 2-hand env # closest_fingertip_distance_size = self.num_fingertips * self.num_arms reward_obs_size = 1 self.full_state_size = ( num_dof_pos + num_dof_vel + palm_pos_size + palm_rot_vel_angvel_size + obj_rot_vel_angvel_size + fingertip_rel_pos_size + keypoints_rel_palm_size + keypoints_rel_goal_size + object_scales_size + max_keypoint_dist_size + lifted_object_flag_size + progress_obs_size + reward_obs_size ) num_states = self.full_state_size self.num_obs_dict = { "full_state": self.full_state_size, } self.up_axis = "z" self.fingertip_obs = True self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions self.cfg["device_type"] = sim_device.split(":")[0] self.cfg["device_id"] = int(sim_device.split(":")[1]) self.cfg["headless"] = headless super().__init__( config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if self.viewer is not None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # volume to sample target position from target_volume_origin = np.array([0, 0.0, 0.8], dtype=np.float32) target_volume_extent = np.array([[-0.2, 0.2], [-0.5, 0.5], [-0.12, 0.25]], dtype=np.float32) 
self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float() self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float() # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.hand_arm_default_dof_pos = torch.zeros( [self.num_arms, self.num_hand_arm_dofs], dtype=torch.float, device=self.device ) desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[0, :7] = desired_kuka_pos desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.6, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[1, :7] = desired_kuka_pos self.pos_noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device) self.pos_noise_coeff[:, 0:7] = self.reset_dof_pos_noise_arm self.pos_noise_coeff[:, 7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers self.pos_noise_coeff = self.pos_noise_coeff.flatten() self.hand_arm_default_dof_pos = self.hand_arm_default_dof_pos.flatten() self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs * self.num_arms] # this will have dimensions [num_envs, num_arms * num_hand_arm_dofs] self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0] self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.palm_center_offset = torch.from_numpy(palm_offset).to(self.device).repeat((self.num_envs, 1)) self.palm_center_pos = torch.zeros((self.num_envs, self.num_arms, 3), dtype=torch.float, device=self.device) self.fingertip_offsets = torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1)) self.set_actor_root_state_object_indices: List[Tensor] = [] self.prev_targets = torch.zeros( (self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device ) self.cur_targets = torch.zeros( (self.num_envs, self.num_arms * self.num_hand_arm_dofs), dtype=torch.float, device=self.device ) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view( self.num_envs, -1 ) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_successes = torch.zeros_like(self.successes) # true objective value for the whole episode, plus saving values for the previous episode 
self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_true_objective = torch.zeros_like(self.true_objective) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log(self.force_prob_range[1]) ) self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.obj_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) self.goal_keypoint_pos = torch.zeros( (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device ) # how many steps we were within the goal tolerance self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device) self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device) self.closest_fingertip_dist = -torch.ones( [self.num_envs, self.num_arms, self.num_fingertips], dtype=torch.float, device=self.device ) reward_keys = [ "raw_fingertip_delta_rew", "raw_lifting_rew", "raw_keypoint_rew", "fingertip_delta_rew", "lifting_rew", "lift_bonus_rew", "keypoint_rew", "bonus_rew", ] self.rewards_episode = { key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys } self.last_curriculum_update = 0 self.episode_root_state_tensors = [[] for _ in range(self.num_envs)] self.episode_dof_states = [[] for _ in range(self.num_envs)] self.eval_stats: bool = self.cfg["env"]["evalStats"] if self.eval_stats: self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.successes_count = torch.zeros( self.max_consecutive_successes + 1, dtype=torch.float, device=self.device ) from tensorboardX import SummaryWriter self.eval_summary_dir = "./eval_summaries" # remove the old directory if it exists if os.path.exists(self.eval_summary_dir): import shutil shutil.rmtree(self.eval_summary_dir) self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3) # AllegroKukaBase abstract interface - to be overriden in derived classes def _object_keypoint_offsets(self): raise NotImplementedError() def _object_start_pose(self, arms_y_ofs: float, table_pose_dy: float, table_pose_dz: float): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0.0 pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25 object_start_pose.p.y = arms_y_ofs + pose_dy object_start_pose.p.z = pose_dz return object_start_pose def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir): object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir) if not self.randomize_object_dimensions: object_asset_files = object_asset_files[:1] object_asset_scales = object_asset_scales[:1] # randomize 
order files_and_scales = list(zip(object_asset_files, object_asset_scales)) # use fixed seed here to make sure when we restart from checkpoint the distribution of object types is the same rng = np.random.default_rng(42) rng.shuffle(files_and_scales) object_asset_files, object_asset_scales = zip(*files_and_scales) return object_asset_files, object_asset_scales def _load_main_object_asset(self): """Load manipulated object and goal assets.""" object_asset_options = gymapi.AssetOptions() object_assets = [] for object_asset_file in self.object_asset_files: object_asset_dir = os.path.dirname(object_asset_file) object_asset_fname = os.path.basename(object_asset_file) object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options) object_assets.append(object_asset_) object_rb_count = self.gym.get_asset_rigid_body_count( object_assets[0] ) # assuming all of them have the same rb count object_shapes_count = self.gym.get_asset_rigid_shape_count( object_assets[0] ) # assuming all of them have the same rb count return object_assets, object_rb_count, object_shapes_count def _load_additional_assets(self, object_asset_root, arm_y_offset: float) -> Tuple[int, int]: """ returns: tuple (num_rigid_bodies, num_shapes) """ return 0, 0 def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx): pass def _after_envs_created(self): pass def _extra_reset_rules(self, resets): return resets def _reset_target(self, env_ids: Tensor) -> None: raise NotImplementedError() def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]: return [] def _extra_curriculum(self): pass # AllegroKukaBase implementation def get_env_state(self): """ Return serializable environment state to be saved to checkpoint. Can be used for stateful training sessions, i.e. with adaptive curriculums. 
""" return dict( success_tolerance=self.success_tolerance, ) def set_env_state(self, env_state): if env_state is None: return for key in self.get_env_state().keys(): value = env_state.get(key, None) if value is None: continue self.__dict__[key] = value print(f"Loaded env state value {key}:{value}") print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}") # noinspection PyMethodOverriding def create_sim(self): self.dt = self.sim_params.dt self.up_axis_idx = 2 # index of up axis: Y=1, Z=2 (same as in allegro_hand.py) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir): files = [] scales = [] try: filenames = os.listdir(generated_assets_dir) for fname in filenames: if fname.endswith(".urdf"): os.remove(join(generated_assets_dir, fname)) except Exception as exc: print(f"Exception {exc} while removing older procedurally-generated urdf assets") objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type]) objects_dir = join(object_assets_root, objects_rel_path) base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj") generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size) if self.with_small_cuboids: generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_big_cuboids: generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size) if self.with_sticks: generate_sticks(generated_assets_dir, base_mesh, self.object_base_size) filenames = os.listdir(generated_assets_dir) filenames = sorted(filenames) for fname in filenames: if fname.endswith(".urdf"): scale_tokens = os.path.splitext(fname)[0].split("_")[2:] files.append(join(generated_assets_dir, fname)) scales.append([float(scale_token) / 100 for scale_token in scale_tokens]) return files, scales def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets") object_asset_root = asset_root tmp_assets_dir = tempfile.TemporaryDirectory() self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales( object_asset_root, tmp_assets_dir.name ) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}") allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options) print(f"Loaded asset {allegro_kuka_asset}") num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset) num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset) num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset) assert ( 
self.num_hand_arm_dofs == num_hand_arm_dofs ), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected" max_agg_bodies = all_arms_bodies = num_hand_arm_bodies * self.num_arms max_agg_shapes = all_arms_shapes = num_hand_arm_shapes * self.num_arms allegro_rigid_body_names = [ self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(num_hand_arm_bodies) ] print(f"Allegro num rigid bodies: {num_hand_arm_bodies}") print(f"Allegro rigid bodies: {allegro_rigid_body_names}") # allegro_actuated_dof_names = [self.gym.get_asset_actuator_joint_name(allegro_asset, i) for i in range(self.num_allegro_dofs)] # self.allegro_actuated_dof_indices = [self.gym.find_asset_dof_index(allegro_asset, name) for name in allegro_actuated_dof_names] hand_arm_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset) arm_hand_dof_lower_limits = [] arm_hand_dof_upper_limits = [] for arm_idx in range(self.num_arms): for i in range(self.num_hand_arm_dofs): arm_hand_dof_lower_limits.append(hand_arm_dof_props["lower"][i]) arm_hand_dof_upper_limits.append(hand_arm_dof_props["upper"][i]) # self.allegro_actuated_dof_indices = to_torch(self.allegro_actuated_dof_indices, dtype=torch.long, device=self.device) self.arm_hand_dof_lower_limits = to_torch(arm_hand_dof_lower_limits, device=self.device) self.arm_hand_dof_upper_limits = to_torch(arm_hand_dof_upper_limits, device=self.device) arm_poses = [gymapi.Transform() for _ in range(self.num_arms)] arm_x_ofs, arm_y_ofs = self.arm_x_ofs, self.arm_y_ofs for arm_idx, arm_pose in enumerate(arm_poses): x_ofs = arm_x_ofs * (-1 if arm_idx == 0 else 1) arm_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(x_ofs, arm_y_ofs, 0) # arm_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) if arm_idx == 0: # rotate 1st arm 90 degrees to the left arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), math.pi / 2) else: # rotate 2nd arm 90 degrees to the right arm_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), -math.pi / 2) object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset() max_agg_bodies += object_rb_count max_agg_shapes += object_shapes_count # load auxiliary objects table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = False table_asset_options.fix_base_link = True table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options) table_pose = gymapi.Transform() table_pose.p = gymapi.Vec3() table_pose.p.x = 0.0 # table_pose_dy, table_pose_dz = -0.8, 0.38 table_pose_dy, table_pose_dz = 0.0, 0.38 table_pose.p.y = arm_y_ofs + table_pose_dy table_pose.p.z = table_pose_dz table_rb_count = self.gym.get_asset_rigid_body_count(table_asset) table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset) max_agg_bodies += table_rb_count max_agg_shapes += table_shapes_count additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, arm_y_ofs) max_agg_bodies += additional_rb max_agg_shapes += additional_shapes # set up object and goal positions self.object_start_pose = self._object_start_pose(arm_y_ofs, table_pose_dy, table_pose_dz) self.envs = [] object_init_state = [] object_scales = [] object_keypoint_offsets = [] allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7") fingertip_handles = [ self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips ] self.allegro_palm_handles = [] 
self.allegro_fingertip_handles = [] for arm_idx in range(self.num_arms): self.allegro_palm_handles.append(allegro_palm_handle + arm_idx * num_hand_arm_bodies) self.allegro_fingertip_handles.extend([h + arm_idx * num_hand_arm_bodies for h in fingertip_handles]) # does this rely on the fact that objects are added right after the arms in terms of create_actor()? self.object_rb_handles = list(range(all_arms_bodies, all_arms_bodies + object_rb_count)) self.arm_indices = torch.empty([self.num_envs, self.num_arms], dtype=torch.long, device=self.device) self.object_indices = torch.empty(self.num_envs, dtype=torch.long, device=self.device) assert self.num_envs >= 1 for i in range(self.num_envs): # create env instance env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row) self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) # add arms for arm_idx in range(self.num_arms): arm = self.gym.create_actor(env_ptr, allegro_kuka_asset, arm_poses[arm_idx], f"arm{arm_idx}", i, -1, 0) populate_dof_properties(hand_arm_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs) self.gym.set_actor_dof_properties(env_ptr, arm, hand_arm_dof_props) allegro_hand_idx = self.gym.get_actor_index(env_ptr, arm, gymapi.DOMAIN_SIM) self.arm_indices[i, arm_idx] = allegro_hand_idx # add object object_asset_idx = i % len(object_assets) object_asset = object_assets[object_asset_idx] obj_pose = self.object_start_pose object_handle = self.gym.create_actor(env_ptr, object_asset, obj_pose, "object", i, 0, 0) pos, rot = obj_pose.p, obj_pose.r object_init_state.append([pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices[i] = object_idx object_scale = self.object_asset_scales[object_asset_idx] object_scales.append(object_scale) object_offsets = [] for keypoint in self.keypoints_offsets: keypoint = copy(keypoint) for coord_idx in range(3): keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2 object_offsets.append(keypoint) object_keypoint_offsets.append(object_offsets) # table object table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0) _table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM) # task-specific objects (i.e. 
goal object for reorientation task) self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx) self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) # we are not using new mass values after DR when calculating random forces applied to an object, # which should be ok as long as the randomization range is not too big # noinspection PyUnboundLocalVariable object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13 ) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device) self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device) self._after_envs_created() try: # by this point we don't need the temporary folder for procedurally generated assets tmp_assets_dir.cleanup() except Exception: pass def _distance_delta_rewards(self, lifted_object: Tensor) -> Tensor: """Rewards for fingertips approaching the object or penalty for hand getting further away from the object.""" # this is positive if we got closer, negative if we're further away than the closest we've gotten fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances # update the values if finger tips got closer to the object self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances) # clip between zero and +inf to turn deltas into rewards fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10) fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1) fingertip_delta_rew = torch.sum(fingertip_delta_rew, dim=-1) # sum over all arms # vvvv this is commented out for 2 arms: we want the 2nd arm to be relatively close at all times # add this reward only before the object is lifted off the table # after this, we should be guided only by keypoint and bonus rewards # fingertip_delta_rew *= ~lifted_object return fingertip_delta_rew def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]: """Reward for lifting the object off the table.""" z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2] lifting_rew = torch.clip(z_lift, 0, 0.5) # this flag tells us if we lifted an object above a certain height compared to the initial position lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object # Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward # to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold. # This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor # (i.e. 
the effective future horizon for the agent) # For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps) # a value of 300 for the bonus reward seems reasonable just_lifted_above_threshold = lifted_object & ~self.lifted_object lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold # stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the # keypoint reward lifting_rew *= ~lifted_object # update the flag that describes whether we lifted an object above the table or not self.lifted_object = lifted_object return lifting_rew, lift_bonus_rew, lifted_object def _keypoint_reward(self, lifted_object: Tensor) -> Tensor: # this is positive if we got closer, negative if we're further away max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist # update the values if we got closer to the target self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist) # clip between zero and +inf to turn deltas into rewards max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100) # administer reward only when we already lifted an object from the table # to prevent the situation where the agent just rolls it around the table keypoint_rew = max_keypoint_deltas * lifted_object return keypoint_rew def _compute_resets(self, is_success): resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall if self.max_consecutive_successes > 0: # Reset progress buffer if max_consecutive_successes > 0 self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf) resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets) resets = self._extra_reset_rules(resets) return resets def _true_objective(self): raise NotImplementedError() def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]: lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward() fingertip_delta_rew = self._distance_delta_rewards(lifted_object) keypoint_rew = self._keypoint_reward(lifted_object) keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale # noinspection PyTypeChecker near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance self.near_goal_steps += near_goal is_success = self.near_goal_steps >= self.success_steps goal_resets = is_success self.successes += is_success self.reset_goal_buf[:] = goal_resets self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew self.rewards_episode["raw_lifting_rew"] += lifting_rew self.rewards_episode["raw_keypoint_rew"] += keypoint_rew fingertip_delta_rew *= self.distance_delta_rew_scale lifting_rew *= self.lifting_rew_scale keypoint_rew *= self.keypoint_rew_scale # Success bonus: orientation is within `success_tolerance` of goal orientation # We spread out the reward over "success_steps" bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps) reward = fingertip_delta_rew + lifting_rew + lift_bonus_rew + keypoint_rew + bonus_rew self.rew_buf[:] = reward resets = self._compute_resets(is_success) self.reset_buf[:] = resets self.extras["successes"] = self.prev_episode_successes.mean() self.true_objective = self._true_objective() self.extras["true_objective"] = self.true_objective # scalars for logging self.extras["true_objective_mean"] = self.true_objective.mean() 
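        # Editor's worked example (illustrative, not upstream code), expanding on the
        # lifting-bonus comment in _lifting_reward() above: the one-time bonus roughly
        # compensates the per-step lifting reward that is switched off after the lift.
        # With the values mentioned there (coeff 3, threshold 0.15, effective horizon
        # ~500 steps), the foregone reward is about 3 * 0.15 * 500 = 225, so a bonus
        # on the order of 300 is in the right ballpark.
        def _foregone_lifting_reward(rew_coeff: float, threshold: float, horizon: float) -> float:
            """Rough estimate of lifting reward lost after crossing the threshold."""
            return rew_coeff * threshold * horizon
        # e.g. _foregone_lifting_reward(3.0, 0.15, 500.0) == 225.0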
self.extras["true_objective_min"] = self.true_objective.min() self.extras["true_objective_max"] = self.true_objective.max() rewards = [ (fingertip_delta_rew, "fingertip_delta_rew"), (lifting_rew, "lifting_rew"), (lift_bonus_rew, "lift_bonus_rew"), (keypoint_rew, "keypoint_rew"), (bonus_rew, "bonus_rew"), ] episode_cumulative = dict() for rew_value, rew_name in rewards: self.rewards_episode[rew_name] += rew_value episode_cumulative[rew_name] = rew_value self.extras["rewards_episode"] = self.rewards_episode self.extras["episode_cumulative"] = episode_cumulative return self.rew_buf, is_success def _eval_stats(self, is_success: Tensor) -> None: if self.eval_stats: frame: int = self.frame_since_restart n_frames = torch.empty_like(self.last_success_step).fill_(frame) self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time) self.last_success_step = torch.where(is_success, n_frames, self.last_success_step) mask_ = self.success_time > 0 if any(mask_): avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item() else: avg_time_mean = math.nan self.total_resets = self.total_resets + self.reset_buf.sum() self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum() self.total_num_resets += self.reset_buf reset_ids = self.reset_buf.nonzero().squeeze() last_successes = self.successes[reset_ids].long() self.successes_count[last_successes] += 1 if frame % 100 == 0: # The direct average shows the overall result more quickly, but slightly undershoots long term # policy performance. print(f"Max num successes: {self.successes.max().item()}") print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}") print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}") print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}") print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}") self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame) self.eval_summaries.add_scalar( "last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame ) self.eval_summaries.add_scalar( "reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame ) self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame) self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame) frame_time = self.control_freq_inv * self.dt self.eval_summaries.add_scalar( "policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame ) self.eval_summaries.add_scalar( "policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame ) print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}") # create a matplotlib bar chart of the self.successes_count import matplotlib.pyplot as plt plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy()) plt.title("Successes histogram") plt.xlabel("Successes") plt.ylabel("Frequency") plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png") plt.clf() def compute_observations(self) -> Tuple[Tensor, int]: self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.object_state = 
self.root_state_tensor[self.object_indices, 0:13] self.object_pose = self.root_state_tensor[self.object_indices, 0:7] self.object_pos = self.root_state_tensor[self.object_indices, 0:3] self.object_rot = self.root_state_tensor[self.object_indices, 3:7] self.object_linvel = self.root_state_tensor[self.object_indices, 7:10] self.object_angvel = self.root_state_tensor[self.object_indices, 10:13] self.goal_pose = self.goal_states[:, 0:7] self.goal_pos = self.goal_states[:, 0:3] self.goal_rot = self.goal_states[:, 3:7] self._palm_state = self.rigid_body_states[:, self.allegro_palm_handles] palm_pos = self._palm_state[..., 0:3] # [num_envs, num_arms, 3] self._palm_rot = self._palm_state[..., 3:7] # [num_envs, num_arms, 4] for arm_idx in range(self.num_arms): self.palm_center_pos[:, arm_idx] = palm_pos[:, arm_idx] + quat_rotate( self._palm_rot[:, arm_idx], self.palm_center_offset ) self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13] self.fingertip_pos = self.fingertip_state[:, :, 0:3] self.fingertip_rot = self.fingertip_state[:, :, 3:7] if hasattr(self, "fingertip_pos_rel_object"): self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object else: self.fingertip_pos_rel_object_prev = None self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device) for arm_idx in range(self.num_arms): for i in range(self.num_fingertips): finger_idx = arm_idx * self.num_fingertips + i self.fingertip_pos_offset[:, finger_idx] = self.fingertip_pos[:, finger_idx] + quat_rotate( self.fingertip_rot[:, finger_idx], self.fingertip_offsets[:, i] ) obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_arms * self.num_fingertips, 1) self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat self.curr_fingertip_distances = torch.norm( self.fingertip_pos_rel_object.view(self.num_envs, self.num_arms, self.num_fingertips, -1), dim=-1 ) # when episode ends or target changes we reset this to -1, this will initialize it to the actual distance on the 1st frame of the episode self.closest_fingertip_dist = torch.where( self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist ) palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat( 1, 1, self.num_fingertips, 1 ) # [num_envs, num_arms, num_fingertips, 3] == [num_envs, 2, 4, 3] self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat.view( self.num_envs, self.num_arms * self.num_fingertips, 3 ) # [num_envs, num_arms * num_fingertips, 3] == [num_envs, 8, 3] if self.fingertip_pos_rel_object_prev is None: self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone() for i in range(self.num_keypoints): self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate( self.object_rot, self.object_keypoint_offsets[:, i] ) self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate( self.goal_rot, self.object_keypoint_offsets[:, i] ) self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos palm_center_repeat = self.palm_center_pos.unsqueeze(2).repeat(1, 1, self.num_keypoints, 1) obj_kp_pos_repeat = self.obj_keypoint_pos.unsqueeze(1).repeat(1, self.num_arms, 1, 1) self.keypoints_rel_palm = obj_kp_pos_repeat - palm_center_repeat self.keypoints_rel_palm = self.keypoints_rel_palm.view(self.num_envs, self.num_arms * self.num_keypoints, 3) # self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat.view( # self.num_envs, self.num_arms * self.num_keypoints, 3 # ) self.keypoint_distances_l2 = 
torch.norm(self.keypoints_rel_goal, dim=-1)

        # furthest keypoint from the goal
        self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values

        # this is the closest the keypoint had been to the target in the current episode (for the furthest keypoint of all)
        # make sure we initialize this value before using it for obs or rewards
        self.closest_keypoint_max_dist = torch.where(
            self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist
        )

        if self.obs_type == "full_state":
            full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf)
            assert (
                full_state_size == self.full_state_size
            ), f"Expected full state size {self.full_state_size}, actual: {full_state_size}"

            return self.obs_buf, reward_obs_ofs
        else:
            raise ValueError("Unknown observations type!")

    def compute_full_state(self, buf: Tensor) -> Tuple[int, int]:
        num_dofs = self.num_hand_arm_dofs * self.num_arms
        ofs: int = 0

        # dof positions
        buf[:, ofs : ofs + num_dofs] = unscale(
            self.arm_hand_dof_pos[:, :num_dofs],
            self.arm_hand_dof_lower_limits[:num_dofs],
            self.arm_hand_dof_upper_limits[:num_dofs],
        )
        ofs += num_dofs

        # dof velocities
        buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs]
        ofs += num_dofs

        # palm pos
        num_palm_coords = 3 * self.num_arms
        buf[:, ofs : ofs + num_palm_coords] = self.palm_center_pos.view(self.num_envs, num_palm_coords)
        ofs += num_palm_coords

        # palm rot, linvel, ang vel
        num_palm_rot_vel_angvel = 10 * self.num_arms
        buf[:, ofs : ofs + num_palm_rot_vel_angvel] = self._palm_state[..., 3:13].reshape(
            self.num_envs, num_palm_rot_vel_angvel
        )
        ofs += num_palm_rot_vel_angvel

        # object rot, linvel, ang vel
        buf[:, ofs : ofs + 10] = self.object_state[:, 3:13]
        ofs += 10

        # fingertip pos relative to the palm of the hand
        fingertip_rel_pos_size = 3 * self.num_arms * self.num_fingertips
        buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape(
            self.num_envs, fingertip_rel_pos_size
        )
        ofs += fingertip_rel_pos_size

        # keypoint distances relative to the palm of the hand
        keypoint_rel_palm_size = 3 * self.num_arms * self.num_keypoints
        buf[:, ofs : ofs + keypoint_rel_palm_size] = self.keypoints_rel_palm.reshape(
            self.num_envs, keypoint_rel_palm_size
        )
        ofs += keypoint_rel_palm_size

        # keypoint distances relative to the goal
        keypoint_rel_pos_size = 3 * self.num_keypoints
        buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape(
            self.num_envs, keypoint_rel_pos_size
        )
        ofs += keypoint_rel_pos_size

        # object scales
        buf[:, ofs : ofs + 3] = self.object_scales
        ofs += 3

        # closest distance to the furthest of all keypoints achieved so far in this episode
        buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1)
        # print(f"closest_keypoint_max_dist: {self.closest_keypoint_max_dist[0]}")
        ofs += 1

        # commented out for 2-hand version to minimize the number of observations
        # closest distance between a fingertip and an object achieved since last target reset
        # this should help the critic predict the anticipated fingertip reward
        # buf[:, ofs : ofs + self.num_fingertips] = self.closest_fingertip_dist
        # print(f"closest_fingertip_dist: {self.closest_fingertip_dist[0]}")
        # ofs += self.num_fingertips

        # indicates whether we already lifted the object from the table or not, should help the critic be more accurate
        buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1)
        # print(f"Lifted object: {self.lifted_object[0]}")
        ofs += 1

        # this should help the critic predict the future rewards better and anticipate the episode termination
        buf[:, ofs : ofs + 1] = 
torch.log(self.progress_buf / 10 + 1).unsqueeze(-1) ofs += 1 buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1) ofs += 1 # actions # buf[:, ofs : ofs + self.num_actions] = self.actions # ofs += self.num_actions # state_str = [f"{state.item():.3f}" for state in buf[0, : self.full_state_size]] # print(' '.join(state_str)) # this is where we will add the reward observation reward_obs_ofs = ofs ofs += 1 assert ofs == self.full_state_size return ofs, reward_obs_ofs def clamp_obs(self, obs_buf: Tensor) -> None: if self.clamp_abs_observations > 0: obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations) def get_random_quat(self, env_ids): # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261 uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device) q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1])) q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1])) q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2])) q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2])) new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1) return new_rot def reset_target_pose(self, env_ids: Tensor) -> None: self._reset_target(env_ids) self.reset_goal_buf[env_ids] = 0 self.near_goal_steps[env_ids] = 0 self.closest_keypoint_max_dist[env_ids] = -1 def reset_object_pose(self, env_ids): obj_indices = self.object_indices[env_ids] # reset object table_width = 1.1 obj_x_ofs = table_width / 2 - 0.2 left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device) x_pos = torch.where( left_right_random > 0, obj_x_ofs * torch.ones_like(left_right_random), -obj_x_ofs * torch.ones_like(left_right_random), ) rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device) self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone() # indices 0..2 correspond to the object position self.root_state_tensor[obj_indices, 0:1] = x_pos + self.reset_position_noise_x * rand_pos_floats[:, 0:1] self.root_state_tensor[obj_indices, 1:2] = ( self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2] ) self.root_state_tensor[obj_indices, 2:3] = ( self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3] ) new_object_rot = self.get_random_quat(env_ids) # indices 3,4,5,6 correspond to the rotation quaternion self.root_state_tensor[obj_indices, 3:7] = new_object_rot self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13]) # since we reset the object, we also should update distances between fingers and the object self.closest_fingertip_dist[env_ids] = -1 def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None: self.set_actor_root_state_object_indices.extend(obj_indices) def set_actor_root_state_tensor_indexed(self) -> None: object_indices: List[Tensor] = self.set_actor_root_state_object_indices if not object_indices: # nothing to set return unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32)) self.gym.set_actor_root_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.root_state_tensor), gymtorch.unwrap_tensor(unique_object_indices), len(unique_object_indices), ) self.set_actor_root_state_object_indices = [] def reset_idx(self, env_ids: Tensor) -> 
None: # randomization can happen only at reset time, since it can reset actor positions on GPU if self.randomize: self.apply_randomizations(self.randomization_params) # randomize start object poses self.reset_target_pose(env_ids) # reset rigid body forces self.rb_forces[env_ids, :, :] = 0.0 # reset object self.reset_object_pose(env_ids) # flattened list of arm actors that we need to reset arm_indices = self.arm_indices[env_ids].to(torch.int32).flatten() # reset random force probabilities self.random_force_prob[env_ids] = torch.exp( (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]) ) # reset allegro hand delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos rand_dof_floats = torch_rand_float( 0.0, 1.0, (len(env_ids), self.num_arms * self.num_hand_arm_dofs), device=self.device ) rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats allegro_pos = self.hand_arm_default_dof_pos + self.pos_noise_coeff * rand_delta self.arm_hand_dof_pos[env_ids, ...] = allegro_pos self.prev_targets[env_ids, ...] = allegro_pos self.cur_targets[env_ids, ...] = allegro_pos rand_vel_floats = torch_rand_float( -1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs * self.num_arms), device=self.device ) self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats arm_indices_gym = gymtorch.unwrap_tensor(arm_indices) num_arm_indices: int = len(arm_indices) self.gym.set_dof_position_target_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.prev_targets), arm_indices_gym, num_arm_indices ) self.gym.set_dof_state_tensor_indexed( self.sim, gymtorch.unwrap_tensor(self.dof_state), arm_indices_gym, num_arm_indices ) object_indices = [self.object_indices[env_ids]] object_indices.extend(self._extra_object_indices(env_ids)) self.deferred_set_actor_root_state_tensor_indexed(object_indices) self.progress_buf[env_ids] = 0 self.reset_buf[env_ids] = 0 self.prev_episode_successes[env_ids] = self.successes[env_ids] self.successes[env_ids] = 0 self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids] self.true_objective[env_ids] = 0 self.lifted_object[env_ids] = False # -1 here indicates that the value is not initialized self.closest_keypoint_max_dist[env_ids] = -1 self.closest_fingertip_dist[env_ids] = -1 self.near_goal_steps[env_ids] = 0 for key in self.rewards_episode.keys(): # print(f"{env_ids}: {key}: {self.rewards_episode[key][env_ids]}") self.rewards_episode[key][env_ids] = 0 self.extras["scalars"] = dict() self.extras["scalars"]["success_tolerance"] = self.success_tolerance def pre_physics_step(self, actions): self.actions = actions.clone().to(self.device) reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1) self.reset_target_pose(reset_goal_env_ids) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.set_actor_root_state_tensor_indexed() if self.use_relative_control: raise NotImplementedError("Use relative control False for now") else: # TODO: this uses simplified finger control compared to the original code of 1-hand env num_dofs: int = self.num_hand_arm_dofs * self.num_arms # target position control for the hand DOFs self.cur_targets[..., :num_dofs] = scale( actions[..., :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) 
self.cur_targets[..., :num_dofs] = ( self.act_moving_average * self.cur_targets[..., :num_dofs] + (1.0 - self.act_moving_average) * self.prev_targets[..., :num_dofs] ) self.cur_targets[..., :num_dofs] = tensor_clamp( self.cur_targets[..., :num_dofs], self.arm_hand_dof_lower_limits[:num_dofs], self.arm_hand_dof_upper_limits[:num_dofs], ) self.prev_targets[...] = self.cur_targets[...] self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets)) if self.force_scale > 0.0: self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval) # apply new forces force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero() self.rb_forces[force_indices, self.object_rb_handles, :] = ( torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device) * self.object_rb_masses * self.force_scale ) self.gym.apply_rigid_body_force_tensors( self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE ) def post_physics_step(self): self.frame_since_restart += 1 self.progress_buf += 1 self.randomize_buf += 1 self._extra_curriculum() obs_buf, reward_obs_ofs = self.compute_observations() rewards, is_success = self.compute_kuka_reward() # add rewards to observations reward_obs_scale = 0.01 obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale self.clamp_obs(obs_buf) self._eval_stats(is_success) if self.viewer and self.debug_viz: # draw axes on target object self.gym.clear_lines(self.viewer) self.gym.refresh_rigid_body_state_tensor(self.sim) axes_geom = gymutil.AxesGeometry(0.1) sphere_pose = gymapi.Transform() sphere_pose.r = gymapi.Quat(0, 0, 0, 1) sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0)) sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1)) palm_center_pos_cpu = self.palm_center_pos.cpu().numpy() palm_rot_cpu = self._palm_rot.cpu().numpy() for i in range(self.num_envs): palm_center_transform = gymapi.Transform() palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i]) palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i]) gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform) for j in range(self.num_fingertips): fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy() fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy() for i in range(self.num_envs): fingertip_transform = gymapi.Transform() fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i]) fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform) for j in range(self.num_keypoints): keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy() goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy() for i in range(self.num_envs): keypoint_transform = gymapi.Transform() keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform) goal_keypoint_transform = gymapi.Transform() goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i]) gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)
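
# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream file): post_physics_step()
# above writes the scaled per-step reward back into the observation vector at
# reward_obs_ofs, so the policy and critic can observe the recent reward signal.
# A minimal standalone version of that bookkeeping (the helper name is hypothetical):
def _sketch_add_reward_obs(obs_buf, rewards, reward_obs_ofs, reward_obs_scale=0.01):
    """Write rewards * reward_obs_scale into a single observation column (torch tensors)."""
    obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale
    return obs_buf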
65956
Python
45.579802
145
0.626099
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_base.py
# Copyright (c) 2018-2023, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import io import math import os import random import tempfile from copy import copy from os.path import join from typing import List, Tuple from isaacgym import gymapi, gymtorch, gymutil from torch import Tensor from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import DofParameters, populate_dof_properties from isaacgymenvs.tasks.base.vec_task import VecTask from isaacgymenvs.tasks.allegro_kuka.generate_cuboids import ( generate_big_cuboids, generate_default_cube, generate_small_cuboids, generate_sticks, ) from isaacgymenvs.utils.torch_jit_utils import * class AllegroKukaBase(VecTask): def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render): self.cfg = cfg self.frame_since_restart: int = 0 # number of control steps since last restart across all actors self.hand_arm_asset_file: str = self.cfg["env"]["asset"]["kukaAllegro"] self.clamp_abs_observations: float = self.cfg["env"]["clampAbsObservations"] self.privileged_actions = self.cfg["env"]["privilegedActions"] self.privileged_actions_torque = self.cfg["env"]["privilegedActionsTorque"] # 4 joints for index, middle, ring, and thumb and 7 for kuka arm self.num_arm_dofs = 7 self.num_finger_dofs = 4 self.num_allegro_fingertips = 4 self.num_hand_dofs = self.num_finger_dofs * self.num_allegro_fingertips self.num_hand_arm_dofs = self.num_hand_dofs + self.num_arm_dofs self.num_allegro_kuka_actions = self.num_hand_arm_dofs if self.privileged_actions: self.num_allegro_kuka_actions += 3 self.randomize = self.cfg["task"]["randomize"] self.randomization_params = self.cfg["task"]["randomization_params"] self.distance_delta_rew_scale = self.cfg["env"]["distanceDeltaRewScale"] self.lifting_rew_scale = self.cfg["env"]["liftingRewScale"] self.lifting_bonus = self.cfg["env"]["liftingBonus"] self.lifting_bonus_threshold = self.cfg["env"]["liftingBonusThreshold"] self.keypoint_rew_scale = self.cfg["env"]["keypointRewScale"] self.kuka_actions_penalty_scale = 
self.cfg["env"]["kukaActionsPenaltyScale"] self.allegro_actions_penalty_scale = self.cfg["env"]["allegroActionsPenaltyScale"] self.dof_params: DofParameters = DofParameters.from_cfg(self.cfg) self.initial_tolerance = self.cfg["env"]["successTolerance"] self.success_tolerance = self.initial_tolerance self.target_tolerance = self.cfg["env"]["targetSuccessTolerance"] self.tolerance_curriculum_increment = self.cfg["env"]["toleranceCurriculumIncrement"] self.tolerance_curriculum_interval = self.cfg["env"]["toleranceCurriculumInterval"] self.save_states = self.cfg["env"]["saveStates"] self.save_states_filename = self.cfg["env"]["saveStatesFile"] self.should_load_initial_states = self.cfg["env"]["loadInitialStates"] self.load_states_filename = self.cfg["env"]["loadStatesFile"] self.initial_root_state_tensors = self.initial_dof_state_tensors = None self.initial_state_idx = self.num_initial_states = 0 self.reach_goal_bonus = self.cfg["env"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["fallDistance"] self.fall_penalty = self.cfg["env"]["fallPenalty"] self.reset_position_noise_x = self.cfg["env"]["resetPositionNoiseX"] self.reset_position_noise_y = self.cfg["env"]["resetPositionNoiseY"] self.reset_position_noise_z = self.cfg["env"]["resetPositionNoiseZ"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise_fingers = self.cfg["env"]["resetDofPosRandomIntervalFingers"] self.reset_dof_pos_noise_arm = self.cfg["env"]["resetDofPosRandomIntervalArm"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.hand_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] self.use_relative_control = self.cfg["env"]["useRelativeControl"] self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.success_steps: int = self.cfg["env"]["successSteps"] # 1.0 means keypoints correspond to the corners of the object # larger values help the agent to prioritize rotation matching self.keypoint_scale = self.cfg["env"]["keypointScale"] # size of the object (i.e. 
cube) before scaling self.object_base_size = self.cfg["env"]["objectBaseSize"] # whether to sample random object dimensions self.randomize_object_dimensions = self.cfg["env"]["randomizeObjectDimensions"] self.with_small_cuboids = self.cfg["env"]["withSmallCuboids"] self.with_big_cuboids = self.cfg["env"]["withBigCuboids"] self.with_sticks = self.cfg["env"]["withSticks"] self.with_dof_force_sensors = False # create fingertip force-torque sensors self.with_fingertip_force_sensors = False if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (self.control_freq_inv * self.sim_params.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) self.object_type = self.cfg["env"]["objectType"] assert self.object_type in ["block"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", # 0.05m box "table": "urdf/table_narrow.urdf", "bucket": "urdf/objects/bucket.urdf", "lightbulb": "lightbulb/A60_E27_SI.urdf", "socket": "E27SocketSimple.urdf", "ball": "urdf/objects/ball.urdf", } self.keypoints_offsets = self._object_keypoint_offsets() self.num_keypoints = len(self.keypoints_offsets) self.allegro_fingertips = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] self.fingertip_offsets = np.array( [[0.05, 0.005, 0], [0.05, 0.005, 0], [0.05, 0.005, 0], [0.06, 0.005, 0]], dtype=np.float32 ) self.palm_offset = np.array([-0.00, -0.02, 0.16], dtype=np.float32) assert self.num_allegro_fingertips == len(self.allegro_fingertips) # can be only "full_state" self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_state"]): raise Exception("Unknown type of observations!") print("Obs type:", self.obs_type) num_dof_pos = self.num_hand_arm_dofs num_dof_vel = self.num_hand_arm_dofs num_dof_forces = self.num_hand_arm_dofs if self.with_dof_force_sensors else 0 palm_pos_size = 3 palm_rot_vel_angvel_size = 10 obj_rot_vel_angvel_size = 10 fingertip_rel_pos_size = 3 * self.num_allegro_fingertips keypoint_info_size = self.num_keypoints * 3 + self.num_keypoints * 3 object_scales_size = 3 max_keypoint_dist_size = 1 lifted_object_flag_size = 1 progress_obs_size = 1 + 1 closest_fingertip_distance_size = self.num_allegro_fingertips reward_obs_size = 1 self.full_state_size = ( num_dof_pos + num_dof_vel + num_dof_forces + palm_pos_size + palm_rot_vel_angvel_size + obj_rot_vel_angvel_size + fingertip_rel_pos_size + keypoint_info_size + object_scales_size + max_keypoint_dist_size + lifted_object_flag_size + progress_obs_size + closest_fingertip_distance_size + reward_obs_size # + self.num_allegro_actions ) num_states = self.full_state_size self.num_obs_dict = { "full_state": self.full_state_size, } self.up_axis = "z" self.fingertip_obs = True self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = self.num_allegro_kuka_actions self.cfg["device_type"] = sim_device.split(":")[0] self.cfg["device_id"] = int(sim_device.split(":")[1]) self.cfg["headless"] = headless super().__init__( config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render, ) if self.viewer is not None: cam_pos = gymapi.Vec3(10.0, 5.0, 1.0) cam_target = gymapi.Vec3(6.0, 5.0, 0.0) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) # volume to sample target position from target_volume_origin = np.array([0, 0.05, 
0.8], dtype=np.float32) target_volume_extent = np.array([[-0.4, 0.4], [-0.05, 0.3], [-0.12, 0.25]], dtype=np.float32) self.target_volume_origin = torch.from_numpy(target_volume_origin).to(self.device).float() self.target_volume_extent = torch.from_numpy(target_volume_extent).to(self.device).float() # get gym GPU state tensors actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) if self.obs_type == "full_state": if self.with_fingertip_force_sensors: sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view( self.num_envs, self.num_allegro_fingertips * 6 ) if self.with_dof_force_sensors: dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view( self.num_envs, self.num_hand_arm_dofs ) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) # create some wrapper tensors for different slices self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.hand_arm_default_dof_pos = torch.zeros(self.num_hand_arm_dofs, dtype=torch.float, device=self.device) desired_kuka_pos = torch.tensor([-1.571, 1.571, -0.000, 1.376, -0.000, 1.485, 2.358]) # pose v1 # desired_kuka_pos = torch.tensor([-2.135, 0.843, 1.786, -0.903, -2.262, 1.301, -2.791]) # pose v2 self.hand_arm_default_dof_pos[:7] = desired_kuka_pos self.arm_hand_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, : self.num_hand_arm_dofs] self.arm_hand_dof_pos = self.arm_hand_dof_state[..., 0] self.arm_hand_dof_vel = self.arm_hand_dof_state[..., 1] self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) self.set_actor_root_state_object_indices: List[Tensor] = [] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view( self.num_envs, -1 ) self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_successes = torch.zeros_like(self.successes) # true objective value for the whole episode, plus saving values for the previous episode self.true_objective = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.prev_episode_true_objective = torch.zeros_like(self.true_objective) self.total_successes = 0 self.total_resets = 0 # object apply random forces parameters self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = 
torch.exp(
            (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
            * torch.rand(self.num_envs, device=self.device)
            + torch.log(self.force_prob_range[1])
        )

        self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
        self.action_torques = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)

        self.obj_keypoint_pos = torch.zeros(
            (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
        )
        self.goal_keypoint_pos = torch.zeros(
            (self.num_envs, self.num_keypoints, 3), dtype=torch.float, device=self.device
        )

        # how many steps we were within the goal tolerance
        self.near_goal_steps = torch.zeros(self.num_envs, dtype=torch.int, device=self.device)

        self.lifted_object = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
        self.closest_keypoint_max_dist = -torch.ones(self.num_envs, dtype=torch.float, device=self.device)
        self.closest_fingertip_dist = -torch.ones(
            [self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device
        )
        self.furthest_hand_dist = -torch.ones([self.num_envs], dtype=torch.float, device=self.device)

        self.finger_rew_coeffs = torch.ones(
            [self.num_envs, self.num_allegro_fingertips], dtype=torch.float, device=self.device
        )

        reward_keys = [
            "raw_fingertip_delta_rew",
            "raw_hand_delta_penalty",
            "raw_lifting_rew",
            "raw_keypoint_rew",
            "fingertip_delta_rew",
            "hand_delta_penalty",
            "lifting_rew",
            "lift_bonus_rew",
            "keypoint_rew",
            "bonus_rew",
            "kuka_actions_penalty",
            "allegro_actions_penalty",
        ]

        self.rewards_episode = {
            key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) for key in reward_keys
        }

        self.last_curriculum_update = 0

        self.episode_root_state_tensors = [[] for _ in range(self.num_envs)]
        self.episode_dof_states = [[] for _ in range(self.num_envs)]

        self.eval_stats: bool = self.cfg["env"]["evalStats"]
        if self.eval_stats:
            self.last_success_step = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.success_time = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.total_num_resets = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
            self.successes_count = torch.zeros(
                self.max_consecutive_successes + 1, dtype=torch.float, device=self.device
            )
            from tensorboardX import SummaryWriter

            self.eval_summary_dir = "./eval_summaries"
            # remove the old directory if it exists
            if os.path.exists(self.eval_summary_dir):
                import shutil

                shutil.rmtree(self.eval_summary_dir)
            self.eval_summaries = SummaryWriter(self.eval_summary_dir, flush_secs=3)

    # AllegroKukaBase abstract interface - to be overridden in derived classes

    def _object_keypoint_offsets(self):
        raise NotImplementedError()

    def _object_start_pose(self, allegro_pose, table_pose_dy, table_pose_dz):
        object_start_pose = gymapi.Transform()
        object_start_pose.p = gymapi.Vec3()

        object_start_pose.p.x = allegro_pose.p.x
        pose_dy, pose_dz = table_pose_dy, table_pose_dz + 0.25

        object_start_pose.p.y = allegro_pose.p.y + pose_dy
        object_start_pose.p.z = allegro_pose.p.z + pose_dz

        return object_start_pose

    def _main_object_assets_and_scales(self, object_asset_root, tmp_assets_dir):
        object_asset_files, object_asset_scales = self._box_asset_files_and_scales(object_asset_root, tmp_assets_dir)
        if not self.randomize_object_dimensions:
            object_asset_files = object_asset_files[:1]
            object_asset_scales = object_asset_scales[:1]

        # randomize order
        files_and_scales = list(zip(object_asset_files, object_asset_scales))

        # use fixed seed here to make sure when we restart
        # from checkpoint the distribution of object types is the same
        rng = np.random.default_rng(42)
        rng.shuffle(files_and_scales)

        object_asset_files, object_asset_scales = zip(*files_and_scales)
        return object_asset_files, object_asset_scales

    def _load_main_object_asset(self):
        """Load manipulated object and goal assets."""
        object_asset_options = gymapi.AssetOptions()
        object_assets = []
        for object_asset_file in self.object_asset_files:
            object_asset_dir = os.path.dirname(object_asset_file)
            object_asset_fname = os.path.basename(object_asset_file)
            object_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
            object_assets.append(object_asset_)
        object_rb_count = self.gym.get_asset_rigid_body_count(
            object_assets[0]
        )  # assuming all of them have the same rb count
        object_shapes_count = self.gym.get_asset_rigid_shape_count(
            object_assets[0]
        )  # assuming all of them have the same shape count
        return object_assets, object_rb_count, object_shapes_count

    def _load_additional_assets(self, object_asset_root, arm_pose):
        """
        returns: tuple (num_rigid_bodies, num_shapes)
        """
        return 0, 0

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        pass

    def _after_envs_created(self):
        pass

    def _extra_reset_rules(self, resets):
        return resets

    def _reset_target(self, env_ids: Tensor) -> None:
        raise NotImplementedError()

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return []

    def _extra_curriculum(self):
        pass

    # AllegroKukaBase implementation

    def get_env_state(self):
        """
        Return serializable environment state to be saved to checkpoint.
        Can be used for stateful training sessions, i.e. with adaptive curriculums.
        """
        return dict(
            success_tolerance=self.success_tolerance,
        )

    def set_env_state(self, env_state):
        if env_state is None:
            return

        for key in self.get_env_state().keys():
            value = env_state.get(key, None)
            if value is None:
                continue

            self.__dict__[key] = value
            print(f"Loaded env state value {key}:{value}")

        print(f"Success tolerance value after loading from checkpoint: {self.success_tolerance}")

    def create_sim(self):
        self.dt = self.sim_params.dt
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2 (same as in allegro_hand.py)

        self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
        self._create_ground_plane()
        self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))

    def _create_ground_plane(self):
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        self.gym.add_ground(self.sim, plane_params)

    def _box_asset_files_and_scales(self, object_assets_root, generated_assets_dir):
        files = []
        scales = []

        try:
            filenames = os.listdir(generated_assets_dir)
            for fname in filenames:
                if fname.endswith(".urdf"):
                    os.remove(join(generated_assets_dir, fname))
        except Exception as exc:
            print(f"Exception {exc} while removing older procedurally-generated urdf assets")

        objects_rel_path = os.path.dirname(self.asset_files_dict[self.object_type])
        objects_dir = join(object_assets_root, objects_rel_path)
        base_mesh = join(objects_dir, "meshes", "cube_multicolor.obj")

        generate_default_cube(generated_assets_dir, base_mesh, self.object_base_size)
        if self.with_small_cuboids:
            generate_small_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
        if self.with_big_cuboids:
            generate_big_cuboids(generated_assets_dir, base_mesh, self.object_base_size)
        if self.with_sticks:
            generate_sticks(generated_assets_dir, base_mesh, self.object_base_size)

        filenames = 
os.listdir(generated_assets_dir) filenames = sorted(filenames) for fname in filenames: if fname.endswith(".urdf"): scale_tokens = os.path.splitext(fname)[0].split("_")[2:] files.append(join(generated_assets_dir, fname)) scales.append([float(scale_token) / 100 for scale_token in scale_tokens]) return files, scales def _create_envs(self, num_envs, spacing, num_per_row): if self.should_load_initial_states: self.load_initial_states() lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../assets") object_asset_root = asset_root tmp_assets_dir = tempfile.TemporaryDirectory() self.object_asset_files, self.object_asset_scales = self._main_object_assets_and_scales( object_asset_root, tmp_assets_dir.name ) asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.flip_visual_attachments = False asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS print(f"Loading asset {self.hand_arm_asset_file} from {asset_root}") allegro_kuka_asset = self.gym.load_asset(self.sim, asset_root, self.hand_arm_asset_file, asset_options) print(f"Loaded asset {allegro_kuka_asset}") self.num_hand_arm_bodies = self.gym.get_asset_rigid_body_count(allegro_kuka_asset) self.num_hand_arm_shapes = self.gym.get_asset_rigid_shape_count(allegro_kuka_asset) num_hand_arm_dofs = self.gym.get_asset_dof_count(allegro_kuka_asset) assert ( self.num_hand_arm_dofs == num_hand_arm_dofs ), f"Number of DOFs in asset {allegro_kuka_asset} is {num_hand_arm_dofs}, but {self.num_hand_arm_dofs} was expected" max_agg_bodies = self.num_hand_arm_bodies max_agg_shapes = self.num_hand_arm_shapes allegro_rigid_body_names = [ self.gym.get_asset_rigid_body_name(allegro_kuka_asset, i) for i in range(self.num_hand_arm_bodies) ] print(f"Allegro num rigid bodies: {self.num_hand_arm_bodies}") print(f"Allegro rigid bodies: {allegro_rigid_body_names}") allegro_hand_dof_props = self.gym.get_asset_dof_properties(allegro_kuka_asset) self.arm_hand_dof_lower_limits = [] self.arm_hand_dof_upper_limits = [] self.allegro_sensors = [] allegro_sensor_pose = gymapi.Transform() for i in range(self.num_hand_arm_dofs): self.arm_hand_dof_lower_limits.append(allegro_hand_dof_props["lower"][i]) self.arm_hand_dof_upper_limits.append(allegro_hand_dof_props["upper"][i]) self.arm_hand_dof_lower_limits = to_torch(self.arm_hand_dof_lower_limits, device=self.device) self.arm_hand_dof_upper_limits = to_torch(self.arm_hand_dof_upper_limits, device=self.device) allegro_pose = gymapi.Transform() allegro_pose.p = gymapi.Vec3(*get_axis_params(0.0, self.up_axis_idx)) + gymapi.Vec3(0.0, 0.8, 0) allegro_pose.r = gymapi.Quat(0, 0, 0, 1) object_assets, object_rb_count, object_shapes_count = self._load_main_object_asset() max_agg_bodies += object_rb_count max_agg_shapes += object_shapes_count # load auxiliary objects table_asset_options = gymapi.AssetOptions() table_asset_options.disable_gravity = False table_asset_options.fix_base_link = True table_asset = self.gym.load_asset(self.sim, asset_root, self.asset_files_dict["table"], table_asset_options) table_pose = gymapi.Transform() table_pose.p = gymapi.Vec3() table_pose.p.x = allegro_pose.p.x table_pose_dy, table_pose_dz = -0.8, 0.38 
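        # Editor's worked example (illustrative, not upstream code): _object_start_pose()
        # above places the object 0.25 m above the table offset, so with the value on
        # the previous line (dz = 0.38) and an arm base at z = 0, the object spawns at
        # roughly z = 0.38 + 0.25 = 0.63 m.
        def _object_spawn_z(base_z: float, table_dz: float = 0.38, lift: float = 0.25) -> float:
            return base_z + table_dz + lift
        # e.g. _object_spawn_z(0.0) == 0.63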
        table_pose.p.y = allegro_pose.p.y + table_pose_dy
        table_pose.p.z = allegro_pose.p.z + table_pose_dz

        table_rb_count = self.gym.get_asset_rigid_body_count(table_asset)
        table_shapes_count = self.gym.get_asset_rigid_shape_count(table_asset)
        max_agg_bodies += table_rb_count
        max_agg_shapes += table_shapes_count

        additional_rb, additional_shapes = self._load_additional_assets(object_asset_root, allegro_pose)
        max_agg_bodies += additional_rb
        max_agg_shapes += additional_shapes

        # set up object and goal positions
        self.object_start_pose = self._object_start_pose(allegro_pose, table_pose_dy, table_pose_dz)

        self.allegro_hands = []
        self.envs = []

        object_init_state = []

        self.allegro_hand_indices = []
        object_indices = []
        object_scales = []
        object_keypoint_offsets = []

        self.allegro_fingertip_handles = [
            self.gym.find_asset_rigid_body_index(allegro_kuka_asset, name) for name in self.allegro_fingertips
        ]

        self.allegro_palm_handle = self.gym.find_asset_rigid_body_index(allegro_kuka_asset, "iiwa7_link_7")

        # this relies on the fact that objects are added right after the arms in terms of create_actor()
        self.object_rb_handles = list(range(self.num_hand_arm_bodies, self.num_hand_arm_bodies + object_rb_count))

        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
            self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            allegro_actor = self.gym.create_actor(env_ptr, allegro_kuka_asset, allegro_pose, "allegro", i, -1, 0)

            populate_dof_properties(allegro_hand_dof_props, self.dof_params, self.num_arm_dofs, self.num_hand_dofs)

            self.gym.set_actor_dof_properties(env_ptr, allegro_actor, allegro_hand_dof_props)
            allegro_hand_idx = self.gym.get_actor_index(env_ptr, allegro_actor, gymapi.DOMAIN_SIM)
            self.allegro_hand_indices.append(allegro_hand_idx)

            if self.obs_type == "full_state":
                if self.with_fingertip_force_sensors:
                    for ft_handle in self.allegro_fingertip_handles:
                        env_sensors = [self.gym.create_force_sensor(env_ptr, ft_handle, allegro_sensor_pose)]
                        self.allegro_sensors.append(env_sensors)

                if self.with_dof_force_sensors:
                    self.gym.enable_actor_dof_force_sensors(env_ptr, allegro_actor)

            # add object
            object_asset_idx = i % len(object_assets)
            object_asset = object_assets[object_asset_idx]

            object_handle = self.gym.create_actor(env_ptr, object_asset, self.object_start_pose, "object", i, 0, 0)
            object_init_state.append(
                [
                    self.object_start_pose.p.x,
                    self.object_start_pose.p.y,
                    self.object_start_pose.p.z,
                    self.object_start_pose.r.x,
                    self.object_start_pose.r.y,
                    self.object_start_pose.r.z,
                    self.object_start_pose.r.w,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
            )
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            object_indices.append(object_idx)

            object_scale = self.object_asset_scales[object_asset_idx]
            object_scales.append(object_scale)
            object_offsets = []
            for keypoint in self.keypoints_offsets:
                keypoint = copy(keypoint)
                for coord_idx in range(3):
                    keypoint[coord_idx] *= object_scale[coord_idx] * self.object_base_size * self.keypoint_scale / 2
                object_offsets.append(keypoint)

            object_keypoint_offsets.append(object_offsets)

            # table object
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table_object", i, 0, 0)
            table_object_idx = self.gym.get_actor_index(env_ptr, table_handle, gymapi.DOMAIN_SIM)

            # task-specific objects (i.e.
goal object for reorientation task) self._create_additional_objects(env_ptr, env_idx=i, object_asset_idx=object_asset_idx) self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.allegro_hands.append(allegro_actor) # we are not using new mass values after DR when calculating random forces applied to an object, # which should be ok as long as the randomization range is not too big object_rb_props = self.gym.get_actor_rigid_body_properties(self.envs[0], object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.object_init_state = to_torch(object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13 ) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.allegro_fingertip_handles = to_torch(self.allegro_fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = to_torch(self.object_rb_masses, dtype=torch.float, device=self.device) self.allegro_hand_indices = to_torch(self.allegro_hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(object_indices, dtype=torch.long, device=self.device) self.object_scales = to_torch(object_scales, dtype=torch.float, device=self.device) self.object_keypoint_offsets = to_torch(object_keypoint_offsets, dtype=torch.float, device=self.device) self._after_envs_created() try: # by this point we don't need the temporary folder for procedurally generated assets tmp_assets_dir.cleanup() except Exception: pass def _distance_delta_rewards(self, lifted_object: Tensor) -> Tuple[Tensor, Tensor]: """Rewards for fingertips approaching the object or penalty for hand getting further away from the object.""" # this is positive if we got closer, negative if we're further away than the closest we've gotten fingertip_deltas_closest = self.closest_fingertip_dist - self.curr_fingertip_distances # update the values if finger tips got closer to the object self.closest_fingertip_dist = torch.minimum(self.closest_fingertip_dist, self.curr_fingertip_distances) # again, positive is closer, negative is further away # here we use index of the 1st finger, when the distance is large it doesn't matter which one we use hand_deltas_furthest = self.furthest_hand_dist - self.curr_fingertip_distances[:, 0] # update the values if finger tips got further away from the object self.furthest_hand_dist = torch.maximum(self.furthest_hand_dist, self.curr_fingertip_distances[:, 0]) # clip between zero and +inf to turn deltas into rewards fingertip_deltas = torch.clip(fingertip_deltas_closest, 0, 10) fingertip_deltas *= self.finger_rew_coeffs fingertip_delta_rew = torch.sum(fingertip_deltas, dim=-1) # add this reward only before the object is lifted off the table # after this, we should be guided only by keypoint and bonus rewards fingertip_delta_rew *= ~lifted_object # clip between zero and -inf to turn deltas into penalties hand_delta_penalty = torch.clip(hand_deltas_furthest, -10, 0) hand_delta_penalty *= ~lifted_object # multiply by the number of fingers so two rewards are on the same scale hand_delta_penalty *= self.num_allegro_fingertips return fingertip_delta_rew, hand_delta_penalty def _lifting_reward(self) -> Tuple[Tensor, Tensor, Tensor]: """Reward for lifting the object off the table.""" z_lift = 0.05 + self.object_pos[:, 2] - self.object_init_state[:, 2] lifting_rew = torch.clip(z_lift, 0, 0.5) # this flag 
tells us if we lifted an object above a certain height compared to the initial position lifted_object = (z_lift > self.lifting_bonus_threshold) | self.lifted_object # Since we stop rewarding the agent for height after the object is lifted, we should give it large positive reward # to compensate for "lost" opportunity to get more lifting reward for sitting just below the threshold. # This bonus depends on the max lifting reward (lifting reward coeff * threshold) and the discount factor # (i.e. the effective future horizon for the agent) # For threshold 0.15, lifting reward coeff = 3 and gamma 0.995 (effective horizon ~500 steps) # a value of 300 for the bonus reward seems reasonable just_lifted_above_threshold = lifted_object & ~self.lifted_object lift_bonus_rew = self.lifting_bonus * just_lifted_above_threshold # stop giving lifting reward once we crossed the threshold - now the agent can focus entirely on the # keypoint reward lifting_rew *= ~lifted_object # update the flag that describes whether we lifted an object above the table or not self.lifted_object = lifted_object return lifting_rew, lift_bonus_rew, lifted_object def _keypoint_reward(self, lifted_object: Tensor) -> Tensor: # this is positive if we got closer, negative if we're further away max_keypoint_deltas = self.closest_keypoint_max_dist - self.keypoints_max_dist # update the values if we got closer to the target self.closest_keypoint_max_dist = torch.minimum(self.closest_keypoint_max_dist, self.keypoints_max_dist) # clip between zero and +inf to turn deltas into rewards max_keypoint_deltas = torch.clip(max_keypoint_deltas, 0, 100) # administer reward only when we already lifted an object from the table # to prevent the situation where the agent just rolls it around the table keypoint_rew = max_keypoint_deltas * lifted_object return keypoint_rew def _action_penalties(self) -> Tuple[Tensor, Tensor]: kuka_actions_penalty = ( torch.sum(torch.abs(self.arm_hand_dof_vel[..., 0:7]), dim=-1) * self.kuka_actions_penalty_scale ) allegro_actions_penalty = ( torch.sum(torch.abs(self.arm_hand_dof_vel[..., 7 : self.num_hand_arm_dofs]), dim=-1) * self.allegro_actions_penalty_scale ) return -1 * kuka_actions_penalty, -1 * allegro_actions_penalty def _compute_resets(self, is_success): resets = torch.where(self.object_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # fall if self.max_consecutive_successes > 0: # Reset progress buffer if max_consecutive_successes > 0 self.progress_buf = torch.where(is_success > 0, torch.zeros_like(self.progress_buf), self.progress_buf) resets = torch.where(self.successes >= self.max_consecutive_successes, torch.ones_like(resets), resets) resets = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(resets), resets) resets = self._extra_reset_rules(resets) return resets def _true_objective(self): raise NotImplementedError() def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]: lifting_rew, lift_bonus_rew, lifted_object = self._lifting_reward() fingertip_delta_rew, hand_delta_penalty = self._distance_delta_rewards(lifted_object) keypoint_rew = self._keypoint_reward(lifted_object) keypoint_success_tolerance = self.success_tolerance * self.keypoint_scale # noinspection PyTypeChecker near_goal: Tensor = self.keypoints_max_dist <= keypoint_success_tolerance self.near_goal_steps += near_goal is_success = self.near_goal_steps >= self.success_steps goal_resets = is_success self.successes += is_success self.reset_goal_buf[:] = goal_resets 
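        # Worked example with hypothetical numbers (not taken from any config): with
        # success_tolerance = 0.1 and keypoint_scale = 1.5, all keypoints must be within
        # 0.15 m of their goal positions; once near_goal_steps has accumulated success_steps
        # such steps (the counter is only cleared when the target is reset), the episode
        # registers a success and the goal is resampled.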
        self.rewards_episode["raw_fingertip_delta_rew"] += fingertip_delta_rew
        self.rewards_episode["raw_hand_delta_penalty"] += hand_delta_penalty
        self.rewards_episode["raw_lifting_rew"] += lifting_rew
        self.rewards_episode["raw_keypoint_rew"] += keypoint_rew

        fingertip_delta_rew *= self.distance_delta_rew_scale
        hand_delta_penalty *= self.distance_delta_rew_scale * 0  # currently disabled
        lifting_rew *= self.lifting_rew_scale
        keypoint_rew *= self.keypoint_rew_scale

        kuka_actions_penalty, allegro_actions_penalty = self._action_penalties()

        # success bonus: all keypoints are within `success_tolerance` of their goal positions
        # we spread the bonus out over "success_steps" steps
        bonus_rew = near_goal * (self.reach_goal_bonus / self.success_steps)

        reward = (
            fingertip_delta_rew
            + hand_delta_penalty  # + sign here because hand_delta_penalty is negative
            + lifting_rew
            + lift_bonus_rew
            + keypoint_rew
            + kuka_actions_penalty
            + allegro_actions_penalty
            + bonus_rew
        )

        self.rew_buf[:] = reward

        resets = self._compute_resets(is_success)
        self.reset_buf[:] = resets

        self.extras["successes"] = self.prev_episode_successes.mean()
        self.true_objective = self._true_objective()
        self.extras["true_objective"] = self.true_objective

        # scalars for logging
        self.extras["true_objective_mean"] = self.true_objective.mean()
        self.extras["true_objective_min"] = self.true_objective.min()
        self.extras["true_objective_max"] = self.true_objective.max()

        rewards = [
            (fingertip_delta_rew, "fingertip_delta_rew"),
            (hand_delta_penalty, "hand_delta_penalty"),
            (lifting_rew, "lifting_rew"),
            (lift_bonus_rew, "lift_bonus_rew"),
            (keypoint_rew, "keypoint_rew"),
            (kuka_actions_penalty, "kuka_actions_penalty"),
            (allegro_actions_penalty, "allegro_actions_penalty"),
            (bonus_rew, "bonus_rew"),
        ]

        episode_cumulative = dict()
        for rew_value, rew_name in rewards:
            self.rewards_episode[rew_name] += rew_value
            episode_cumulative[rew_name] = rew_value
        self.extras["rewards_episode"] = self.rewards_episode
        self.extras["episode_cumulative"] = episode_cumulative

        return self.rew_buf, is_success

    def _eval_stats(self, is_success: Tensor) -> None:
        if self.eval_stats:
            frame: int = self.frame_since_restart
            n_frames = torch.empty_like(self.last_success_step).fill_(frame)
            self.success_time = torch.where(is_success, n_frames - self.last_success_step, self.success_time)
            self.last_success_step = torch.where(is_success, n_frames, self.last_success_step)
            mask_ = self.success_time > 0
            if mask_.any():
                avg_time_mean = ((self.success_time * mask_).sum(dim=0) / mask_.sum(dim=0)).item()
            else:
                avg_time_mean = math.nan

            self.total_resets = self.total_resets + self.reset_buf.sum()
            self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()

            self.total_num_resets += self.reset_buf

            reset_ids = self.reset_buf.nonzero().squeeze()
            last_successes = self.successes[reset_ids].long()
            self.successes_count[last_successes] += 1

            if frame % 100 == 0:
                # The direct average shows the overall result more quickly, but slightly undershoots long-term
                # policy performance.
                print(f"Max num successes: {self.successes.max().item()}")
                print(f"Average consecutive successes: {self.prev_episode_successes.mean().item():.2f}")
                print(f"Total num resets: {self.total_num_resets.sum().item()} --> {self.total_num_resets}")
                print(f"Reset percentage: {(self.total_num_resets > 0).sum() / self.num_envs:.2%}")
                print(f"Last ep successes: {self.prev_episode_successes.mean().item():.2f}")
                print(f"Last ep true objective: {self.prev_episode_true_objective.mean().item():.2f}")

                self.eval_summaries.add_scalar("last_ep_successes", self.prev_episode_successes.mean().item(), frame)
                self.eval_summaries.add_scalar(
                    "last_ep_true_objective", self.prev_episode_true_objective.mean().item(), frame
                )
                self.eval_summaries.add_scalar(
                    "reset_stats/reset_percentage", (self.total_num_resets > 0).sum() / self.num_envs, frame
                )
                self.eval_summaries.add_scalar("reset_stats/min_num_resets", self.total_num_resets.min().item(), frame)

                self.eval_summaries.add_scalar("policy_speed/avg_success_time_frames", avg_time_mean, frame)
                frame_time = self.control_freq_inv * self.dt
                self.eval_summaries.add_scalar(
                    "policy_speed/avg_success_time_seconds", avg_time_mean * frame_time, frame
                )
                self.eval_summaries.add_scalar(
                    "policy_speed/avg_success_per_minute", 60.0 / (avg_time_mean * frame_time), frame
                )
                print(f"Policy speed (successes per minute): {60.0 / (avg_time_mean * frame_time):.2f}")

                # create a matplotlib bar chart of self.successes_count
                import matplotlib.pyplot as plt

                plt.bar(list(range(self.max_consecutive_successes + 1)), self.successes_count.cpu().numpy())
                plt.title("Successes histogram")
                plt.xlabel("Successes")
                plt.ylabel("Frequency")
                plt.savefig(f"{self.eval_summary_dir}/successes_histogram.png")
                plt.clf()

    def compute_observations(self) -> Tuple[Tensor, int]:
        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)

        if self.obs_type == "full_state":
            if self.with_fingertip_force_sensors:
                self.gym.refresh_force_sensor_tensor(self.sim)
            if self.with_dof_force_sensors:
                self.gym.refresh_dof_force_tensor(self.sim)

        self.object_state = self.root_state_tensor[self.object_indices, 0:13]
        self.object_pose = self.root_state_tensor[self.object_indices, 0:7]
        self.object_pos = self.root_state_tensor[self.object_indices, 0:3]
        self.object_rot = self.root_state_tensor[self.object_indices, 3:7]
        self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]
        self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]

        self.goal_pose = self.goal_states[:, 0:7]
        self.goal_pos = self.goal_states[:, 0:3]
        self.goal_rot = self.goal_states[:, 3:7]

        self.palm_center_offset = torch.from_numpy(self.palm_offset).to(self.device).repeat((self.num_envs, 1))
        self._palm_state = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:13]
        self._palm_pos = self.rigid_body_states[:, self.allegro_palm_handle][:, 0:3]
        self._palm_rot = self.rigid_body_states[:, self.allegro_palm_handle][:, 3:7]
        self.palm_center_pos = self._palm_pos + quat_rotate(self._palm_rot, self.palm_center_offset)

        self.fingertip_state = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:13]
        self.fingertip_pos = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 0:3]
        self.fingertip_rot = self.rigid_body_states[:, self.allegro_fingertip_handles][:, :, 3:7]

        if not isinstance(self.fingertip_offsets, torch.Tensor):
            self.fingertip_offsets = (
                torch.from_numpy(self.fingertip_offsets).to(self.device).repeat((self.num_envs, 1, 1))
            )

        if hasattr(self, "fingertip_pos_rel_object"):
            self.fingertip_pos_rel_object_prev[:, :, :] = self.fingertip_pos_rel_object
        else:
            self.fingertip_pos_rel_object_prev = None

        self.fingertip_pos_offset = torch.zeros_like(self.fingertip_pos).to(self.device)
        for i in range(self.num_allegro_fingertips):
            self.fingertip_pos_offset[:, i] = self.fingertip_pos[:, i] + quat_rotate(
                self.fingertip_rot[:, i], self.fingertip_offsets[:, i]
            )

        obj_pos_repeat = self.object_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1)
        self.fingertip_pos_rel_object = self.fingertip_pos_offset - obj_pos_repeat
        self.curr_fingertip_distances = torch.norm(self.fingertip_pos_rel_object, dim=-1)

        # when the episode ends or the target changes we reset this to -1, so on the 1st frame of the episode
        # it gets initialized with the actual distance
        self.closest_fingertip_dist = torch.where(
            self.closest_fingertip_dist < 0.0, self.curr_fingertip_distances, self.closest_fingertip_dist
        )
        self.furthest_hand_dist = torch.where(
            self.furthest_hand_dist < 0.0, self.curr_fingertip_distances[:, 0], self.furthest_hand_dist
        )

        palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_allegro_fingertips, 1)
        self.fingertip_pos_rel_palm = self.fingertip_pos_offset - palm_center_repeat

        if self.fingertip_pos_rel_object_prev is None:
            self.fingertip_pos_rel_object_prev = self.fingertip_pos_rel_object.clone()

        for i in range(self.num_keypoints):
            self.obj_keypoint_pos[:, i] = self.object_pos + quat_rotate(
                self.object_rot, self.object_keypoint_offsets[:, i]
            )
            self.goal_keypoint_pos[:, i] = self.goal_pos + quat_rotate(
                self.goal_rot, self.object_keypoint_offsets[:, i]
            )

        self.keypoints_rel_goal = self.obj_keypoint_pos - self.goal_keypoint_pos

        palm_center_repeat = self.palm_center_pos.unsqueeze(1).repeat(1, self.num_keypoints, 1)
        self.keypoints_rel_palm = self.obj_keypoint_pos - palm_center_repeat

        self.keypoint_distances_l2 = torch.norm(self.keypoints_rel_goal, dim=-1)

        # furthest keypoint from the goal
        self.keypoints_max_dist = self.keypoint_distances_l2.max(dim=-1).values

        # this is the closest the furthest keypoint has been to its target in the current episode
        # make sure we initialize this value before using it for obs or rewards
        self.closest_keypoint_max_dist = torch.where(
            self.closest_keypoint_max_dist < 0.0, self.keypoints_max_dist, self.closest_keypoint_max_dist
        )

        if self.obs_type == "full_state":
            full_state_size, reward_obs_ofs = self.compute_full_state(self.obs_buf)
            assert (
                full_state_size == self.full_state_size
            ), f"Expected full state size {self.full_state_size}, actual: {full_state_size}"

            return self.obs_buf, reward_obs_ofs
        else:
            raise ValueError("Unknown observations type!")

    def compute_full_state(self, buf: Tensor) -> Tuple[int, int]:
        num_dofs = self.num_hand_arm_dofs
        ofs = 0

        # dof positions
        buf[:, ofs : ofs + num_dofs] = unscale(
            self.arm_hand_dof_pos[:, :num_dofs],
            self.arm_hand_dof_lower_limits[:num_dofs],
            self.arm_hand_dof_upper_limits[:num_dofs],
        )
        ofs += num_dofs

        # dof velocities
        buf[:, ofs : ofs + num_dofs] = self.arm_hand_dof_vel[:, :num_dofs]
        ofs += num_dofs

        if self.with_dof_force_sensors:
            # dof forces
            buf[:, ofs : ofs + num_dofs] = self.dof_force_tensor[:, :num_dofs]
            ofs += num_dofs

        # palm pos
        buf[:, ofs : ofs + 3] = self.palm_center_pos
        ofs += 3

        # palm rot, linvel, ang vel
        buf[:, ofs : ofs + 10] = self._palm_state[:, 3:13]
        ofs += 10

        # object rot, linvel, ang vel
        buf[:, ofs : ofs + 10] = self.object_state[:, 3:13]
        ofs += 10

        # fingertip pos relative to the palm of the hand
        fingertip_rel_pos_size = 3 * self.num_allegro_fingertips
        buf[:, ofs : ofs + fingertip_rel_pos_size] = self.fingertip_pos_rel_palm.reshape(
            self.num_envs, fingertip_rel_pos_size
        )
        ofs += fingertip_rel_pos_size

        # keypoint distances relative to the palm of the hand
        keypoint_rel_pos_size = 3 * self.num_keypoints
        buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_palm.reshape(
            self.num_envs, keypoint_rel_pos_size
        )
        ofs += keypoint_rel_pos_size

        # keypoint distances relative to the goal
        buf[:, ofs : ofs + keypoint_rel_pos_size] = self.keypoints_rel_goal.reshape(
            self.num_envs, keypoint_rel_pos_size
        )
        ofs += keypoint_rel_pos_size

        # object scales
        buf[:, ofs : ofs + 3] = self.object_scales
        ofs += 3

        # closest distance to the furthest keypoint achieved so far in this episode
        buf[:, ofs : ofs + 1] = self.closest_keypoint_max_dist.unsqueeze(-1)
        ofs += 1

        # closest distance between a fingertip and the object achieved since the last target reset
        # this should help the critic predict the anticipated fingertip reward
        buf[:, ofs : ofs + self.num_allegro_fingertips] = self.closest_fingertip_dist
        ofs += self.num_allegro_fingertips

        # indicates whether we already lifted the object from the table or not, should help the critic be more accurate
        buf[:, ofs : ofs + 1] = self.lifted_object.unsqueeze(-1)
        ofs += 1

        # this should help the critic predict future rewards better and anticipate the episode termination
        buf[:, ofs : ofs + 1] = torch.log(self.progress_buf / 10 + 1).unsqueeze(-1)
        ofs += 1
        buf[:, ofs : ofs + 1] = torch.log(self.successes + 1).unsqueeze(-1)
        ofs += 1

        # this is where we will add the reward observation
        reward_obs_ofs = ofs
        ofs += 1

        assert ofs == self.full_state_size
        return ofs, reward_obs_ofs

    def clamp_obs(self, obs_buf: Tensor) -> None:
        if self.clamp_abs_observations > 0:
            obs_buf.clamp_(-self.clamp_abs_observations, self.clamp_abs_observations)

    def get_random_quat(self, env_ids):
        # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
        # https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L261

        uvw = torch_rand_float(0, 1.0, (len(env_ids), 3), device=self.device)
        q_w = torch.sqrt(1.0 - uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 1]))
        q_x = torch.sqrt(1.0 - uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 1]))
        q_y = torch.sqrt(uvw[:, 0]) * (torch.sin(2 * np.pi * uvw[:, 2]))
        q_z = torch.sqrt(uvw[:, 0]) * (torch.cos(2 * np.pi * uvw[:, 2]))
        new_rot = torch.cat((q_x.unsqueeze(-1), q_y.unsqueeze(-1), q_z.unsqueeze(-1), q_w.unsqueeze(-1)), dim=-1)

        return new_rot

    def reset_target_pose(self, env_ids: Tensor) -> None:
        self._reset_target(env_ids)

        self.reset_goal_buf[env_ids] = 0
        self.near_goal_steps[env_ids] = 0
        self.closest_keypoint_max_dist[env_ids] = -1

    def reset_object_pose(self, env_ids):
        obj_indices = self.object_indices[env_ids]

        # reset the object
        rand_pos_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 3), device=self.device)
        self.root_state_tensor[obj_indices] = self.object_init_state[env_ids].clone()

        # indices 0..2 correspond to the object position
        self.root_state_tensor[obj_indices, 0:1] = (
            self.object_init_state[env_ids, 0:1] + self.reset_position_noise_x * rand_pos_floats[:, 0:1]
        )
        self.root_state_tensor[obj_indices, 1:2] = (
            self.object_init_state[env_ids, 1:2] + self.reset_position_noise_y * rand_pos_floats[:, 1:2]
        )
        self.root_state_tensor[obj_indices, 2:3] = (
            self.object_init_state[env_ids, 2:3] + self.reset_position_noise_z * rand_pos_floats[:, 2:3]
        )

        new_object_rot = self.get_random_quat(env_ids)

        # indices 3, 4, 5, 6 correspond to the rotation quaternion
        self.root_state_tensor[obj_indices, 3:7] = new_object_rot

        self.root_state_tensor[obj_indices, 7:13] = torch.zeros_like(self.root_state_tensor[obj_indices, 7:13])

        # since we reset the object, we should also update the distances between the fingers and the object
        self.closest_fingertip_dist[env_ids] = -1
        self.furthest_hand_dist[env_ids] = -1

    def deferred_set_actor_root_state_tensor_indexed(self, obj_indices: List[Tensor]) -> None:
        self.set_actor_root_state_object_indices.extend(obj_indices)

    def set_actor_root_state_tensor_indexed(self) -> None:
        object_indices: List[Tensor] = self.set_actor_root_state_object_indices
        if not object_indices:
            # nothing to set
            return

        unique_object_indices = torch.unique(torch.cat(object_indices).to(torch.int32))

        self.gym.set_actor_root_state_tensor_indexed(
            self.sim,
            gymtorch.unwrap_tensor(self.root_state_tensor),
            gymtorch.unwrap_tensor(unique_object_indices),
            len(unique_object_indices),
        )

        self.set_actor_root_state_object_indices = []

    def reset_idx(self, env_ids: Tensor) -> None:
        # randomization can happen only at reset time, since it can reset actor positions on GPU
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

        # randomize start object poses
        self.reset_target_pose(env_ids)

        # reset rigid body forces
        self.rb_forces[env_ids, :, :] = 0.0

        # reset the object
        self.reset_object_pose(env_ids)

        hand_indices = self.allegro_hand_indices[env_ids].to(torch.int32)

        # reset random force probabilities
        self.random_force_prob[env_ids] = torch.exp(
            (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
            * torch.rand(len(env_ids), device=self.device)
            + torch.log(self.force_prob_range[1])
        )

        # reset allegro hand
        delta_max = self.arm_hand_dof_upper_limits - self.hand_arm_default_dof_pos
        delta_min = self.arm_hand_dof_lower_limits - self.hand_arm_default_dof_pos

        rand_dof_floats = torch_rand_float(0.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device)

        rand_delta = delta_min + (delta_max - delta_min) * rand_dof_floats

        noise_coeff = torch.zeros_like(self.hand_arm_default_dof_pos, device=self.device)
        noise_coeff[0:7] = self.reset_dof_pos_noise_arm
        noise_coeff[7 : self.num_hand_arm_dofs] = self.reset_dof_pos_noise_fingers

        allegro_pos = self.hand_arm_default_dof_pos + noise_coeff * rand_delta

        self.arm_hand_dof_pos[env_ids, :] = allegro_pos

        rand_vel_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_hand_arm_dofs), device=self.device)
        self.arm_hand_dof_vel[env_ids, :] = self.reset_dof_vel_noise * rand_vel_floats

        self.prev_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos
        self.cur_targets[env_ids, : self.num_hand_arm_dofs] = allegro_pos

        if self.should_load_initial_states:
            if len(env_ids) > self.num_initial_states:
                print(f"Not enough initial states to load {len(env_ids)}/{self.num_initial_states}...")
            else:
                if self.initial_state_idx + len(env_ids) > self.num_initial_states:
                    self.initial_state_idx = 0

                dof_states_to_load = self.initial_dof_state_tensors[
                    self.initial_state_idx : self.initial_state_idx + len(env_ids)
                ]
                self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]])[
                    env_ids
                ] = dof_states_to_load.clone()
                root_state_tensors_to_load = self.initial_root_state_tensors[
                    self.initial_state_idx : self.initial_state_idx + len(env_ids)
                ]
                cube_object_idx = self.object_indices[0]
                self.root_state_tensor.reshape([self.num_envs, -1, *self.root_state_tensor.shape[1:]])[
                    env_ids, cube_object_idx
                ] = root_state_tensors_to_load[:, cube_object_idx].clone()

                self.initial_state_idx += len(env_ids)
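                # note: initial_state_idx advances by len(env_ids) per reset and wraps back to 0
                # once the next batch would overrun the end, so the saved snapshots are consumed
                # as a ring buffer; e.g. (hypothetical numbers) with num_initial_states = 1000 and
                # 64 envs resetting at a time, the index wraps after reaching 960.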
        self.gym.set_dof_position_target_tensor_indexed(
            self.sim, gymtorch.unwrap_tensor(self.prev_targets), gymtorch.unwrap_tensor(hand_indices), len(env_ids)
        )
        self.gym.set_dof_state_tensor_indexed(
            self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(hand_indices), len(env_ids)
        )

        object_indices = [self.object_indices[env_ids]]
        object_indices.extend(self._extra_object_indices(env_ids))
        self.deferred_set_actor_root_state_tensor_indexed(object_indices)

        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 0

        self.prev_episode_successes[env_ids] = self.successes[env_ids]
        self.successes[env_ids] = 0

        self.prev_episode_true_objective[env_ids] = self.true_objective[env_ids]
        self.true_objective[env_ids] = 0

        self.lifted_object[env_ids] = False

        # -1 here indicates that the value is not initialized
        self.closest_keypoint_max_dist[env_ids] = -1
        self.closest_fingertip_dist[env_ids] = -1
        self.furthest_hand_dist[env_ids] = -1

        self.near_goal_steps[env_ids] = 0

        for key in self.rewards_episode.keys():
            self.rewards_episode[key][env_ids] = 0

        if self.save_states:
            self.dump_env_states(env_ids)

        self.extras["scalars"] = dict()
        self.extras["scalars"]["success_tolerance"] = self.success_tolerance

    def pre_physics_step(self, actions):
        self.actions = actions.clone().to(self.device)

        if self.privileged_actions:
            torque_actions = actions[:, :3]
            actions = actions[:, 3:]

        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        reset_goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)

        self.reset_target_pose(reset_goal_env_ids)

        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)

        self.set_actor_root_state_tensor_indexed()

        if self.use_relative_control:
            raise NotImplementedError("Relative control is not implemented; set use_relative_control=False for now")
        else:
            # target position control for the hand DOFs
            self.cur_targets[:, 7 : self.num_hand_arm_dofs] = scale(
                actions[:, 7 : self.num_hand_arm_dofs],
                self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs],
                self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs],
            )
            self.cur_targets[:, 7 : self.num_hand_arm_dofs] = (
                self.act_moving_average * self.cur_targets[:, 7 : self.num_hand_arm_dofs]
                + (1.0 - self.act_moving_average) * self.prev_targets[:, 7 : self.num_hand_arm_dofs]
            )
            self.cur_targets[:, 7 : self.num_hand_arm_dofs] = tensor_clamp(
                self.cur_targets[:, 7 : self.num_hand_arm_dofs],
                self.arm_hand_dof_lower_limits[7 : self.num_hand_arm_dofs],
                self.arm_hand_dof_upper_limits[7 : self.num_hand_arm_dofs],
            )

            targets = self.prev_targets[:, :7] + self.hand_dof_speed_scale * self.dt * self.actions[:, :7]
            self.cur_targets[:, :7] = tensor_clamp(
                targets, self.arm_hand_dof_lower_limits[:7], self.arm_hand_dof_upper_limits[:7]
            )

        self.prev_targets[:, :] = self.cur_targets[:, :]

        self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))

        if self.force_scale > 0.0:
            self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)

            # apply new forces
            force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()
            self.rb_forces[force_indices, self.object_rb_handles, :] = (
                torch.randn(self.rb_forces[force_indices, self.object_rb_handles, :].shape, device=self.device)
                * self.object_rb_masses
                * self.force_scale
            )

            self.gym.apply_rigid_body_force_tensors(
                self.sim, gymtorch.unwrap_tensor(self.rb_forces), None, gymapi.LOCAL_SPACE
            )

        # apply torques
        if self.privileged_actions:
            torque_actions = torque_actions.unsqueeze(1)
            torque_amount = self.privileged_actions_torque
            torque_actions *= torque_amount
            self.action_torques[:, self.object_rb_handles, :] = torque_actions
            self.gym.apply_rigid_body_force_tensors(
                self.sim, None, gymtorch.unwrap_tensor(self.action_torques), gymapi.ENV_SPACE
            )

    def post_physics_step(self):
        self.frame_since_restart += 1

        self.progress_buf += 1
        self.randomize_buf += 1

        self._extra_curriculum()

        obs_buf, reward_obs_ofs = self.compute_observations()
        rewards, is_success = self.compute_kuka_reward()

        # add rewards to observations
        reward_obs_scale = 0.01
        obs_buf[:, reward_obs_ofs : reward_obs_ofs + 1] = rewards.unsqueeze(-1) * reward_obs_scale

        self.clamp_obs(obs_buf)

        self._eval_stats(is_success)

        if self.save_states:
            self.accumulate_env_states()

        if self.viewer and self.debug_viz:
            # draw axes on the target object
            self.gym.clear_lines(self.viewer)
            self.gym.refresh_rigid_body_state_tensor(self.sim)

            axes_geom = gymutil.AxesGeometry(0.1)

            sphere_pose = gymapi.Transform()
            sphere_pose.r = gymapi.Quat(0, 0, 0, 1)
            sphere_geom = gymutil.WireframeSphereGeometry(0.01, 8, 8, sphere_pose, color=(1, 1, 0))
            sphere_geom_white = gymutil.WireframeSphereGeometry(0.02, 8, 8, sphere_pose, color=(1, 1, 1))

            palm_center_pos_cpu = self.palm_center_pos.cpu().numpy()
            palm_rot_cpu = self._palm_rot.cpu().numpy()

            for i in range(self.num_envs):
                palm_center_transform = gymapi.Transform()
                palm_center_transform.p = gymapi.Vec3(*palm_center_pos_cpu[i])
                palm_center_transform.r = gymapi.Quat(*palm_rot_cpu[i])
                gymutil.draw_lines(sphere_geom_white, self.gym, self.viewer, self.envs[i], palm_center_transform)

            for j in range(self.num_allegro_fingertips):
                fingertip_pos_cpu = self.fingertip_pos_offset[:, j].cpu().numpy()
                fingertip_rot_cpu = self.fingertip_rot[:, j].cpu().numpy()

                for i in range(self.num_envs):
                    fingertip_transform = gymapi.Transform()
                    fingertip_transform.p = gymapi.Vec3(*fingertip_pos_cpu[i])
                    fingertip_transform.r = gymapi.Quat(*fingertip_rot_cpu[i])
                    gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], fingertip_transform)

            for j in range(self.num_keypoints):
                keypoint_pos_cpu = self.obj_keypoint_pos[:, j].cpu().numpy()
                goal_keypoint_pos_cpu = self.goal_keypoint_pos[:, j].cpu().numpy()

                for i in range(self.num_envs):
                    keypoint_transform = gymapi.Transform()
                    keypoint_transform.p = gymapi.Vec3(*keypoint_pos_cpu[i])
                    gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], keypoint_transform)

                    goal_keypoint_transform = gymapi.Transform()
                    goal_keypoint_transform.p = gymapi.Vec3(*goal_keypoint_pos_cpu[i])
                    gymutil.draw_lines(sphere_geom, self.gym, self.viewer, self.envs[i], goal_keypoint_transform)

    def accumulate_env_states(self):
        root_state_tensor = self.root_state_tensor.reshape(
            [self.num_envs, -1, *self.root_state_tensor.shape[1:]]
        ).clone()
        dof_state = self.dof_state.reshape([self.num_envs, -1, *self.dof_state.shape[1:]]).clone()

        for env_idx in range(self.num_envs):
            env_root_state_tensor = root_state_tensor[env_idx]
            self.episode_root_state_tensors[env_idx].append(env_root_state_tensor)

            env_dof_state = dof_state[env_idx]
            self.episode_dof_states[env_idx].append(env_dof_state)

    def dump_env_states(self, env_ids):
        def write_tensor_to_bin_stream(tensor, stream):
            bin_buff = io.BytesIO()
            torch.save(tensor, bin_buff)
            bin_buff = bin_buff.getbuffer()
            stream.write(int(len(bin_buff)).to_bytes(4, "big"))
            stream.write(bin_buff)

        with open(self.save_states_filename, "ab") as save_states_file:
            bin_stream = io.BytesIO()

            for env_idx in env_ids:
                ep_len = len(self.episode_root_state_tensors[env_idx])
                if ep_len <= 20:
                    continue

                states_to_save = min(ep_len // 10, 50)
                state_indices = random.sample(range(ep_len), states_to_save)

                print(f"Adding {states_to_save} states {state_indices}")
                bin_stream.write(int(states_to_save).to_bytes(4, "big"))

                root_states = [self.episode_root_state_tensors[env_idx][si] for si in state_indices]
                dof_states = [self.episode_dof_states[env_idx][si] for si in state_indices]

                root_states = torch.stack(root_states)
                dof_states = torch.stack(dof_states)

                write_tensor_to_bin_stream(root_states, bin_stream)
                write_tensor_to_bin_stream(dof_states, bin_stream)

                self.episode_root_state_tensors[env_idx] = []
                self.episode_dof_states[env_idx] = []

            bin_data = bin_stream.getbuffer()
            if bin_data.nbytes > 0:
                print(f"Writing {len(bin_data)} bytes to file {self.save_states_filename}")
                save_states_file.write(bin_data)

    def load_initial_states(self):
        loaded_root_states = []
        loaded_dof_states = []

        with open(self.load_states_filename, "rb") as states_file:

            def read_nbytes(n_):
                res = states_file.read(n_)
                if len(res) < n_:
                    raise RuntimeError(
                        f"Could not read {n_} bytes from the binary file. Perhaps reached the end of the file"
                    )
                return res

            while True:
                try:
                    num_states = int.from_bytes(read_nbytes(4), byteorder="big")
                    print(f"num_states_chunk {num_states}")

                    root_states_len = int.from_bytes(read_nbytes(4), byteorder="big")
                    print(f"root tensors len {root_states_len}")
                    root_states_bytes = read_nbytes(root_states_len)

                    dof_states_len = int.from_bytes(read_nbytes(4), byteorder="big")
                    print(f"dof_states_len {dof_states_len}")
                    dof_states_bytes = read_nbytes(dof_states_len)
                except Exception as exc:
                    print(exc)
                    break
                else:
                    # parse the binary buffers only when all of the reads above succeeded
                    def parse_tensors(bin_data):
                        with io.BytesIO(bin_data) as buffer:
                            tensors = torch.load(buffer)
                            return tensors

                    root_state_tensors = parse_tensors(root_states_bytes)
                    dof_state_tensors = parse_tensors(dof_states_bytes)
                    loaded_root_states.append(root_state_tensors)
                    loaded_dof_states.append(dof_state_tensors)

        self.initial_root_state_tensors = torch.cat(loaded_root_states)
        self.initial_dof_state_tensors = torch.cat(loaded_dof_states)

        assert self.initial_dof_state_tensors.shape[0] == self.initial_root_state_tensors.shape[0]
        self.num_initial_states = len(self.initial_root_state_tensors)

        print(f"{self.num_initial_states} states loaded from file {self.load_states_filename}!")
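# The sketch below is a minimal, self-contained illustration of the length-prefixed binary framing
# used by dump_env_states() / load_initial_states() above: each tensor is serialized with torch.save
# into an in-memory buffer and prefixed with its byte length as a 4-byte big-endian integer.
# The tensor shapes here are hypothetical; this is an illustration, not part of the original file.

import io

import torch


def write_framed_tensor(tensor: torch.Tensor, stream) -> None:
    buf = io.BytesIO()
    torch.save(tensor, buf)
    data = buf.getbuffer()
    stream.write(int(len(data)).to_bytes(4, "big"))  # 4-byte big-endian length prefix
    stream.write(data)


def read_framed_tensor(stream) -> torch.Tensor:
    length = int.from_bytes(stream.read(4), byteorder="big")
    return torch.load(io.BytesIO(stream.read(length)))


# round trip through an in-memory stream
stream = io.BytesIO()
write_framed_tensor(torch.arange(13, dtype=torch.float), stream)
write_framed_tensor(torch.zeros(2, 7), stream)
stream.seek(0)
assert read_framed_tensor(stream).shape == (13,)
assert read_framed_tensor(stream).shape == (2, 7)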
73,269
Python
44.994978
145
0.619785
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_two_arms_reorientation.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
from typing import List

import torch
from isaacgym import gymapi
from torch import Tensor

from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float

from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_two_arms import AllegroKukaTwoArmsBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective


class AllegroKukaTwoArmsReorientation(AllegroKukaTwoArmsBase):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.goal_object_indices = []
        self.goal_assets = []

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

    def _object_keypoint_offsets(self):
        return [
            [1, 1, 1],
            [1, 1, -1],
            [-1, -1, 1],
            [-1, -1, -1],
        ]

    def _load_additional_assets(self, object_asset_root, arm_pose):
        object_asset_options = gymapi.AssetOptions()
        object_asset_options.disable_gravity = True

        self.goal_assets = []
        for object_asset_file in self.object_asset_files:
            object_asset_dir = os.path.dirname(object_asset_file)
            object_asset_fname = os.path.basename(object_asset_file)

            goal_asset_ = self.gym.load_asset(self.sim, object_asset_dir, object_asset_fname, object_asset_options)
            self.goal_assets.append(goal_asset_)

        goal_rb_count = self.gym.get_asset_rigid_body_count(
            self.goal_assets[0]
        )  # assuming all of them have the same rb count
        goal_shapes_count = self.gym.get_asset_rigid_shape_count(
            self.goal_assets[0]
        )  # assuming all of them have the same shape count
        return goal_rb_count, goal_shapes_count

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        self.goal_displacement = gymapi.Vec3(-0.35, -0.06, 0.12)
        self.goal_displacement_tensor = to_torch(
            [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device
        )
        goal_start_pose = gymapi.Transform()
        goal_start_pose.p = self.object_start_pose.p + self.goal_displacement
        goal_start_pose.p.z -= 0.04

        goal_asset = self.goal_assets[object_asset_idx]
        goal_handle = self.gym.create_actor(
            env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
        )
        goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
        self.goal_object_indices.append(goal_object_idx)

        if self.object_type != "block":
            self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))

    def _after_envs_created(self):
        self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)

    def _reset_target(self, env_ids: Tensor) -> None:
        # sample a random target location in some volume
        target_volume_origin = self.target_volume_origin
        target_volume_extent = self.target_volume_extent

        target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
        target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
        target_volume_size = target_volume_max_coord - target_volume_min_coord

        rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
        target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size

        # let the target be close to the 1st or the 2nd arm, randomly
        left_right_random = torch_rand_float(-1.0, 1.0, (len(env_ids), 1), device=self.device)
        x_ofs = 0.75
        x_pos = torch.where(
            left_right_random > 0,
            x_ofs * torch.ones_like(left_right_random),
            -x_ofs * torch.ones_like(left_right_random),
        )
        target_coords[:, 0] += x_pos.squeeze(dim=1)

        self.goal_states[env_ids, 0:3] = target_coords

        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]

        # new_rot = randomize_rotation(
        #     rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
        # )

        # new implementation by Ankur:
        new_rot = self.get_random_quat(env_ids)

        self.goal_states[env_ids, 3:7] = new_rot
        self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]

        self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(
            self.root_state_tensor[self.goal_object_indices[env_ids], 7:13]
        )

        object_indices_to_reset = [self.goal_object_indices[env_ids]]
        self.deferred_set_actor_root_state_tensor_indexed(object_indices_to_reset)

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return [self.goal_object_indices[env_ids]]

    def _extra_curriculum(self):
        self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
            self.last_curriculum_update,
            self.frame_since_restart,
            self.tolerance_curriculum_interval,
            self.prev_episode_successes,
            self.success_tolerance,
            self.initial_tolerance,
            self.target_tolerance,
            self.tolerance_curriculum_increment,
        )

    def _true_objective(self) -> Tensor:
        true_objective = tolerance_successes_objective(
            self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
        )
        return true_objective
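# A minimal sketch (torch only; names are illustrative) of the uniform random orientation
# sampling that _reset_target() relies on via get_random_quat(): Shoemake's method maps three
# uniform variables u, v, w to a quaternion uniformly distributed over rotations.

import math

import torch


def random_quat(n: int) -> torch.Tensor:
    u, v, w = torch.rand(n), torch.rand(n), torch.rand(n)
    two_pi = 2 * math.pi
    q_x = torch.sqrt(1 - u) * torch.cos(two_pi * v)
    q_y = torch.sqrt(u) * torch.sin(two_pi * w)
    q_z = torch.sqrt(u) * torch.cos(two_pi * w)
    q_w = torch.sqrt(1 - u) * torch.sin(two_pi * v)
    return torch.stack([q_x, q_y, q_z, q_w], dim=-1)  # xyzw convention, as in Isaac Gym


q = random_quat(1024)
assert torch.allclose(q.norm(dim=-1), torch.ones(1024), atol=1e-6)  # all unit quaternions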
7,306
Python
44.955975
120
0.673008
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_utils.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import annotations

from dataclasses import dataclass
from typing import Tuple, Dict, List

from torch import Tensor


@dataclass
class DofParameters:
    """Joint/dof parameters."""

    allegro_stiffness: float
    kuka_stiffness: float
    allegro_effort: float
    kuka_effort: List[float]  # separate per DOF
    allegro_damping: float
    kuka_damping: float
    dof_friction: float
    allegro_armature: float
    kuka_armature: float

    @staticmethod
    def from_cfg(cfg: Dict) -> DofParameters:
        return DofParameters(
            allegro_stiffness=cfg["env"]["allegroStiffness"],
            kuka_stiffness=cfg["env"]["kukaStiffness"],
            allegro_effort=cfg["env"]["allegroEffort"],
            kuka_effort=cfg["env"]["kukaEffort"],
            allegro_damping=cfg["env"]["allegroDamping"],
            kuka_damping=cfg["env"]["kukaDamping"],
            dof_friction=cfg["env"]["dofFriction"],
            allegro_armature=cfg["env"]["allegroArmature"],
            kuka_armature=cfg["env"]["kukaArmature"],
        )


def populate_dof_properties(hand_arm_dof_props, params: DofParameters, arm_dofs: int, hand_dofs: int) -> None:
    assert len(hand_arm_dof_props["stiffness"]) == arm_dofs + hand_dofs

    hand_arm_dof_props["stiffness"][0:arm_dofs].fill(params.kuka_stiffness)
    hand_arm_dof_props["stiffness"][arm_dofs:].fill(params.allegro_stiffness)

    assert len(params.kuka_effort) == arm_dofs
    hand_arm_dof_props["effort"][0:arm_dofs] = params.kuka_effort
    hand_arm_dof_props["effort"][arm_dofs:].fill(params.allegro_effort)

    hand_arm_dof_props["damping"][0:arm_dofs].fill(params.kuka_damping)
    hand_arm_dof_props["damping"][arm_dofs:].fill(params.allegro_damping)

    if params.dof_friction >= 0:
        hand_arm_dof_props["friction"].fill(params.dof_friction)

    hand_arm_dof_props["armature"][0:arm_dofs].fill(params.kuka_armature)
    hand_arm_dof_props["armature"][arm_dofs:].fill(params.allegro_armature)


def tolerance_curriculum(
    last_curriculum_update: int,
    frames_since_restart: int,
    curriculum_interval: int,
    prev_episode_successes: Tensor,
    success_tolerance: float,
    initial_tolerance: float,
    target_tolerance: float,
    tolerance_curriculum_increment: float,
) -> Tuple[float, int]:
    """
    Returns: new tolerance, new last_curriculum_update
    """
    if frames_since_restart - last_curriculum_update < curriculum_interval:
        return success_tolerance, last_curriculum_update

    mean_successes_per_episode = prev_episode_successes.mean()
    if mean_successes_per_episode < 3.0:
        # the policy is not yet good enough with the current tolerance value, keep training for now...
        return success_tolerance, last_curriculum_update

    # decrease the tolerance now
    success_tolerance *= tolerance_curriculum_increment
    success_tolerance = min(success_tolerance, initial_tolerance)
    success_tolerance = max(success_tolerance, target_tolerance)

    print(f"Prev episode successes: {mean_successes_per_episode}, success tolerance: {success_tolerance}")

    last_curriculum_update = frames_since_restart
    return success_tolerance, last_curriculum_update


def interp_0_1(x_curr: float, x_initial: float, x_target: float) -> float:
    """
    Outputs 1 when x_curr == x_target (curriculum completed)
    Outputs 0 when x_curr == x_initial (just started training)
    Interpolates values in between.
    """
    span = x_initial - x_target
    return (x_initial - x_curr) / span


def tolerance_successes_objective(
    success_tolerance: float, initial_tolerance: float, target_tolerance: float, successes: Tensor
) -> Tensor:
    """
    Objective for PBT. This basically prioritizes tolerance over everything else while we execute the
    curriculum; after that it's just #successes.
    """
    # this grows from 0 to 1 as we reach the target tolerance
    if initial_tolerance > target_tolerance:
        # makeshift unit tests:
        eps = 1e-5
        assert abs(interp_0_1(initial_tolerance, initial_tolerance, target_tolerance)) < eps
        assert abs(interp_0_1(target_tolerance, initial_tolerance, target_tolerance) - 1.0) < eps
        mid_tolerance = (initial_tolerance + target_tolerance) / 2
        assert abs(interp_0_1(mid_tolerance, initial_tolerance, target_tolerance) - 0.5) < eps

        tolerance_objective = interp_0_1(success_tolerance, initial_tolerance, target_tolerance)
    else:
        tolerance_objective = 1.0

    if success_tolerance > target_tolerance:
        # add successes with a small coefficient to differentiate between policies at the beginning of training;
        # an increment in tolerance improvement should always yield a higher value than more successes with the
        # previous tolerance, which is why this coefficient is very small
        true_objective = (successes * 0.01) + tolerance_objective
    else:
        # basically just the successes + tolerance objective, so that true_objective never decreases when we
        # cross the threshold
        true_objective = successes + tolerance_objective

    return true_objective
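# A small usage sketch (hypothetical tolerances, not from any config) showing how the PBT
# objective above behaves: while the curriculum is still running, lowering the tolerance
# dominates the objective; once the target tolerance is reached, successes take over.

import torch

initial, target = 0.1, 0.01
for tol in (0.1, 0.055, 0.01):
    successes = torch.tensor([5.0])
    obj = tolerance_successes_objective(tol, initial, target, successes)
    print(f"tolerance={tol}: true_objective={obj.item():.3f}")
# tolerance=0.1:   0.050  (0.0 from the curriculum term, 5 * 0.01 from successes)
# tolerance=0.055: 0.550  (0.5 from the curriculum term, 5 * 0.01 from successes)
# tolerance=0.01:  6.000  (1.0 from the completed curriculum + 5 successes)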
6,689
Python
41.075471
113
0.712214
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/tasks/allegro_kuka/allegro_kuka_regrasping.py
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List, Tuple

import torch
from isaacgym import gymapi
from torch import Tensor

from isaacgymenvs.utils.torch_jit_utils import to_torch, torch_rand_float

from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_base import AllegroKukaBase
from isaacgymenvs.tasks.allegro_kuka.allegro_kuka_utils import tolerance_curriculum, tolerance_successes_objective


class AllegroKukaRegrasping(AllegroKukaBase):
    def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
        self.goal_object_indices = []
        self.goal_asset = None

        super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)

    def _object_keypoint_offsets(self):
        """Regrasping task uses only a single object keypoint since we do not care about object orientation."""
        return [[0, 0, 0]]

    def _load_additional_assets(self, object_asset_root, arm_pose):
        goal_asset_options = gymapi.AssetOptions()
        goal_asset_options.disable_gravity = True
        self.goal_asset = self.gym.load_asset(
            self.sim, object_asset_root, self.asset_files_dict["ball"], goal_asset_options
        )
        goal_rb_count = self.gym.get_asset_rigid_body_count(self.goal_asset)
        goal_shapes_count = self.gym.get_asset_rigid_shape_count(self.goal_asset)
        return goal_rb_count, goal_shapes_count

    def _create_additional_objects(self, env_ptr, env_idx, object_asset_idx):
        goal_start_pose = gymapi.Transform()

        goal_asset = self.goal_asset
        goal_handle = self.gym.create_actor(
            env_ptr, goal_asset, goal_start_pose, "goal_object", env_idx + self.num_envs, 0, 0
        )
        self.gym.set_actor_scale(env_ptr, goal_handle, 0.5)
        self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
        goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
        self.goal_object_indices.append(goal_object_idx)

    def _after_envs_created(self):
        self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)

    def _reset_target(self, env_ids: Tensor) -> None:
        target_volume_origin = self.target_volume_origin
        target_volume_extent = self.target_volume_extent

        target_volume_min_coord = target_volume_origin + target_volume_extent[:, 0]
        target_volume_max_coord = target_volume_origin + target_volume_extent[:, 1]
        target_volume_size = target_volume_max_coord - target_volume_min_coord

        rand_pos_floats = torch_rand_float(0.0, 1.0, (len(env_ids), 3), device=self.device)
        target_coords = target_volume_min_coord + rand_pos_floats * target_volume_size

        self.goal_states[env_ids, 0:3] = target_coords

        self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3]

        # we also reset the object to its initial position
        self.reset_object_pose(env_ids)

        # since we put the object back on the table, also reset the lifting reward
        self.lifted_object[env_ids] = False

        self.deferred_set_actor_root_state_tensor_indexed(
            [self.object_indices[env_ids], self.goal_object_indices[env_ids]]
        )

    def _extra_object_indices(self, env_ids: Tensor) -> List[Tensor]:
        return [self.goal_object_indices[env_ids]]

    def compute_kuka_reward(self) -> Tuple[Tensor, Tensor]:
        rew_buf, is_success = super().compute_kuka_reward()
        # TODO: customize reward?
        return rew_buf, is_success

    def _true_objective(self) -> Tensor:
        true_objective = tolerance_successes_objective(
            self.success_tolerance, self.initial_tolerance, self.target_tolerance, self.successes
        )
        return true_objective

    def _extra_curriculum(self):
        self.success_tolerance, self.last_curriculum_update = tolerance_curriculum(
            self.last_curriculum_update,
            self.frame_since_restart,
            self.tolerance_curriculum_interval,
            self.prev_episode_successes,
            self.success_tolerance,
            self.initial_tolerance,
            self.target_tolerance,
            self.tolerance_curriculum_increment,
        )
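# Minimal sketch (with synthetic origin/extent values) of the target sampling scheme used by
# _reset_target() above: a uniform sample inside an axis-aligned box described by an origin
# and a per-axis [min, max] extent.

import torch

target_volume_origin = torch.tensor([0.0, 0.0, 0.8])  # hypothetical numbers
target_volume_extent = torch.tensor([[-0.4, 0.4], [-0.05, 0.3], [-0.12, 0.25]])  # per-axis [min, max]

min_coord = target_volume_origin + target_volume_extent[:, 0]
max_coord = target_volume_origin + target_volume_extent[:, 1]
rand01 = torch.rand(16, 3)  # 16 environments
target_coords = min_coord + rand01 * (max_coord - min_coord)
assert bool(((target_coords >= min_coord) & (target_coords <= max_coord)).all())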
5,893
Python
46.532258
120
0.702019