Ngochuy2137/omni_isaac_examples/tests/test_hello_world.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import omni.kit
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
from omni.isaac.core.utils.stage import create_new_stage_async, is_stage_loading, update_stage_async
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
from omni.isaac.examples.hello_world import HelloWorld
class TestHelloWorldExampleExtension(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
await create_new_stage_async()
await update_stage_async()
self._sample = HelloWorld()
self._sample.set_world_settings(physics_dt=1.0 / 60.0, stage_units_in_meters=1.0)
await self._sample.load_world_async()
await update_stage_async()
while is_stage_loading():
await update_stage_async()
return
# After running each test
async def tearDown(self):
# In some cases the test will end before the asset is loaded, in this case wait for assets to load
while is_stage_loading():
print("tearDown, assets still loading, waiting to finish...")
await asyncio.sleep(1.0)
await self._sample.clear_async()
await update_stage_async()
self._sample = None
pass
async def test_reset(self):
await self._sample.reset_async()
await update_stage_async()
await update_stage_async()
await self._sample.reset_async()
await update_stage_async()
await update_stage_async()
pass
Ngochuy2137/omni_isaac_examples/tests/test_omnigraph_keyboard.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import omni.kit
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
from omni.isaac.core.utils.stage import create_new_stage_async, is_stage_loading, update_stage_async
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
from omni.isaac.examples.omnigraph_keyboard import OmnigraphKeyboard
class TestOmnigraphKeyboardExampleExtension(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
await create_new_stage_async()
await update_stage_async()
self._sample = OmnigraphKeyboard()
self._sample.set_world_settings(physics_dt=1.0 / 60.0, stage_units_in_meters=1.0)
await self._sample.load_world_async()
await update_stage_async()
while is_stage_loading():
await update_stage_async()
return
# After running each test
async def tearDown(self):
# In some cases the test will end before the asset is loaded, in this case wait for assets to load
while is_stage_loading():
print("tearDown, assets still loading, waiting to finish...")
await asyncio.sleep(1.0)
await self._sample.clear_async()
await update_stage_async()
self._sample = None
pass
async def test_reset(self):
await self._sample.reset_async()
await update_stage_async()
await update_stage_async()
await self._sample.reset_async()
await update_stage_async()
await update_stage_async()
pass
Ngochuy2137/omni_isaac_examples/hello_world/__init__.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.hello_world.hello_world import HelloWorld
from omni.isaac.examples.hello_world.hello_world_extension import HelloWorldExtension
Ngochuy2137/omni_isaac_examples/hello_world/hello_world.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.base_sample import BaseSample
import numpy as np
# Can be used to create a new cube or to point to an already existing cube in stage.
from omni.isaac.core.objects import DynamicCuboid
# Note: check out the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
class HelloWorld(BaseSample):
def __init__(self) -> None:
super().__init__()
return
def setup_scene(self):
world = self.get_world()
world.scene.add_default_ground_plane()
fancy_cube = world.scene.add(
DynamicCuboid(
prim_path="/World/my_fancy_cube", # The prim path of the cube in the USD stage
name="fancy_cube", # The unique name used to retrieve the object from the scene later on
position=np.array([0, 0, 1.0]), # Using the current stage units which is in meters by default.
scale=np.array([0.5015, 0.5015, 0.5015]), # most arguments accept mainly numpy arrays.
color=np.array([0, 0, 1.0]), # RGB channels, going from 0-1
))
return
async def setup_post_load(self):
self._world = self.get_world()
self._cube = self._world.scene.get_object("fancy_cube")
# "sim_step" is the name of the event
# "callback_fn" is the callback function
self._world.add_physics_callback("sim_step", callback_fn=self.print_cube_info) #callback names have to be unique
return
    # Here we define the physics callback to be called before each physics step; all physics callbacks must take
    # step_size as an argument.
    # step_size is the time step of each physics step in the simulation.
    # However, in this example, step_size is not used directly inside the function.
def print_cube_info(self, step_size):
position, orientation = self._cube.get_world_pose()
linear_velocity = self._cube.get_linear_velocity()
# will be shown on terminal
print("Cube position is : " + str(position))
print("Cube's orientation is : " + str(orientation))
print("Cube's linear velocity is : " + str(linear_velocity))
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
def world_cleanup(self):
return
Ngochuy2137/omni_isaac_examples/hello_world/hello_world_extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.hello_world import HelloWorld
class HelloWorldExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="",
submenu_name="",
name="Hello World",
title="Hello World Example",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/core_api_tutorials/tutorial_core_hello_world.html",
overview="This Example introduces the user on how to do cool stuff with Isaac Sim through scripting in asynchronous mode.",
file_path=os.path.abspath(__file__),
sample=HelloWorld(),
)
return
Ngochuy2137/omni_isaac_examples/simple_stack/simple_stack_extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import os
import omni.ui as ui
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.simple_stack import SimpleStack
from omni.isaac.ui.ui_utils import btn_builder
class SimpleStackExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="Manipulation",
submenu_name="",
name="Simple Stack",
title="Stack Two Cubes",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/core_api_tutorials/tutorial_core_adding_manipulator.html",
overview="This Example shows how to stack two cubes using Franka robot in Isaac Sim.\n\nPress the 'Open in IDE' button to view the source code.",
sample=SimpleStack(),
file_path=os.path.abspath(__file__),
number_of_extra_frames=1,
)
self.task_ui_elements = {}
frame = self.get_frame(index=0)
self.build_task_controls_ui(frame)
return
def _on_stacking_button_event(self):
asyncio.ensure_future(self.sample._on_stacking_event_async())
self.task_ui_elements["Start Stacking"].enabled = False
return
def post_reset_button_event(self):
self.task_ui_elements["Start Stacking"].enabled = True
return
def post_load_button_event(self):
self.task_ui_elements["Start Stacking"].enabled = True
return
def post_clear_button_event(self):
self.task_ui_elements["Start Stacking"].enabled = False
return
def build_task_controls_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Task Controls"
frame.visible = True
dict = {
"label": "Start Stacking",
"type": "button",
"text": "Start Stacking",
"tooltip": "Start Stacking",
"on_clicked_fn": self._on_stacking_button_event,
}
self.task_ui_elements["Start Stacking"] = btn_builder(**dict)
self.task_ui_elements["Start Stacking"].enabled = False
Ngochuy2137/omni_isaac_examples/simple_stack/simple_stack.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.franka.controllers.stacking_controller import StackingController
from omni.isaac.franka.tasks import Stacking
class SimpleStack(BaseSample):
def __init__(self) -> None:
super().__init__()
self._controller = None
self._articulation_controller = None
def setup_scene(self):
world = self.get_world()
world.add_task(Stacking(name="stacking_task"))
return
async def setup_post_load(self):
self._franka_task = self._world.get_task(name="stacking_task")
self._task_params = self._franka_task.get_params()
my_franka = self._world.scene.get_object(self._task_params["robot_name"]["value"])
self._controller = StackingController(
name="stacking_controller",
gripper=my_franka.gripper,
robot_articulation=my_franka,
picking_order_cube_names=self._franka_task.get_cube_names(),
robot_observation_name=my_franka.name,
)
self._articulation_controller = my_franka.get_articulation_controller()
return
def _on_stacking_physics_step(self, step_size):
observations = self._world.get_observations()
actions = self._controller.forward(observations=observations)
self._articulation_controller.apply_action(actions)
if self._controller.is_done():
self._world.pause()
return
async def _on_stacking_event_async(self):
world = self.get_world()
world.add_physics_callback("sim_step", self._on_stacking_physics_step)
await world.play_async()
return
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
self._controller.reset()
return
def world_cleanup(self):
self._controller = None
return
Ngochuy2137/omni_isaac_examples/simple_stack/__init__.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.simple_stack.simple_stack import SimpleStack
from omni.isaac.examples.simple_stack.simple_stack_extension import SimpleStackExtension
Ngochuy2137/omni_isaac_examples/kaya_gamepad/kaya_gamepad_extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.kaya_gamepad import KayaGamepad
class KayaGamepadExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
overview = "This Example shows how to drive a NVIDIA Kaya robot using a Gamepad in Isaac Sim."
overview += "\n\nConnect a gamepad to the robot, and the press PLAY to begin simulating."
overview += "\n\nPress the 'Open in IDE' button to view the source code."
super().start_extension(
menu_name="Input Devices",
submenu_name="",
name="Kaya Gamepad",
title="NVIDIA Kaya Gamepad Example",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/gui_tutorials/tutorial_advanced_input_devices.html",
overview=overview,
file_path=os.path.abspath(__file__),
sample=KayaGamepad(),
)
return
Ngochuy2137/omni_isaac_examples/kaya_gamepad/__init__.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.kaya_gamepad.kaya_gamepad import KayaGamepad
from omni.isaac.examples.kaya_gamepad.kaya_gamepad_extension import KayaGamepadExtension
Ngochuy2137/omni_isaac_examples/kaya_gamepad/kaya_gamepad.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import omni.graph.core as og
import omni.usd
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.examples.base_sample import BaseSample
class KayaGamepad(BaseSample):
def __init__(self) -> None:
super().__init__()
def setup_scene(self):
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
kaya_usd = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
kaya_ogn_usd = assets_root_path + "/Isaac/Robots/Kaya/kaya_ogn_gamepad.usd"
stage = omni.usd.get_context().get_stage()
graph_prim = stage.DefinePrim("/World", "Xform")
graph_prim.GetReferences().AddReference(kaya_ogn_usd)
def world_cleanup(self):
pass
Ngochuy2137/omni_isaac_examples/cortex/__init__.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
Ngochuy2137/omni_isaac_examples/cortex/cortex_base.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import gc
from abc import abstractmethod
from omni.isaac.core import World
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.stage import create_new_stage_async, update_stage_async
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.examples import base_sample
class CortexBase(base_sample.BaseSample):
async def load_world_async(self):
"""
        Function called when clicking the load button.
The difference between this class and Base Sample is that we initialize a CortexWorld specialization.
"""
if CortexWorld.instance() is None:
await create_new_stage_async()
self._world = CortexWorld(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = CortexWorld.instance()
self._current_tasks = self._world.get_current_tasks()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load()
if len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
Ngochuy2137/omni_isaac_examples/franka_cortex/franka_cortex.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import numpy as np
import omni
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import Behavior, CortexWorld, LogicalStateMonitor
from omni.isaac.cortex.dfb import DfDiagnosticsMonitor
from omni.isaac.cortex.robot import CortexFranka, add_franka_to_stage
from omni.isaac.cortex.tools import SteadyRate
from omni.isaac.examples.cortex.cortex_base import CortexBase
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
class ContextStateMonitor(DfDiagnosticsMonitor):
"""
State monitor to read the context and pass it to the UI.
For these behaviors, the context has a `diagnostic_message` that contains the text to be displayed, and each
behavior implements its own monitor to update that.
"""
def __init__(self, print_dt, diagnostic_fn=None):
super().__init__(print_dt=print_dt)
self.diagnostic_fn = diagnostic_fn
def print_diagnostics(self, context):
if self.diagnostic_fn:
self.diagnostic_fn(context)
class FrankaCortex(CortexBase):
def __init__(self, monitor_fn=None):
super().__init__()
self._monitor_fn = monitor_fn
self.behavior = None
self.robot = None
self.context_monitor = ContextStateMonitor(print_dt=0.25, diagnostic_fn=self._on_monitor_update)
def setup_scene(self):
world = self.get_world()
self.robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
position=np.array([x, -0.4, width / 2]),
)
)
self.robot.register_obstacle(obj)
world.scene.add_default_ground_plane()
async def load_behavior(self, behavior):
world = self.get_world()
self.behavior = behavior
self.decider_network = load_behavior_module(self.behavior).make_decider_network(self.robot)
self.decider_network.context.add_monitor(self.context_monitor.monitor)
world.add_decider_network(self.decider_network)
def clear_behavior(self):
world = self.get_world()
world._logical_state_monitors.clear()
world._behaviors.clear()
async def setup_post_load(self, soft=False):
world = self.get_world()
prim_path = "/World/Franka"
if not self.robot:
self.robot = world._robots["franka"]
self.decider_network = load_behavior_module(self.behavior).make_decider_network(self.robot)
self.decider_network.context.add_monitor(self.context_monitor.monitor)
world.add_decider_network(self.decider_network)
await omni.kit.app.get_app().next_update_async()
def _on_monitor_update(self, context):
diagnostic = ""
decision_stack = ""
if hasattr(context, "diagnostics_message"):
diagnostic = context.diagnostics_message
if self.decider_network._decider_state.stack:
decision_stack = "\n".join(
[
"{0}{1}".format(" " * i, element)
for i, element in enumerate(str(i) for i in self.decider_network._decider_state.stack)
]
)
if self._monitor_fn:
self._monitor_fn(diagnostic, decision_stack)
def _on_physics_step(self, step_size):
world = self.get_world()
world.step(False, False)
async def on_event_async(self):
world = self.get_world()
await omni.kit.app.get_app().next_update_async()
world.reset_cortex()
world.add_physics_callback("sim_step", self._on_physics_step)
await world.play_async()
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
def world_cleanup(self):
pass
Ngochuy2137/omni_isaac_examples/franka_cortex/franka_cortex_extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# from omni.isaac.examples.ur10_palletizing.ur10_palletizing import BinStacking
import asyncio
import os
import omni
import omni.ui as ui
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.franka_cortex.franka_cortex import FrankaCortex
from omni.isaac.ui.ui_utils import btn_builder, cb_builder, dropdown_builder, get_style, str_builder
class FrankaCortexExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
ext_manager = omni.kit.app.get_app().get_extension_manager()
sample_behaviors_id = ext_manager.get_enabled_extension_id("omni.isaac.cortex.sample_behaviors")
behavior_path = (
omni.kit.app.get_app().get_extension_manager().get_extension_path(sample_behaviors_id)
+ "/omni/isaac/cortex/sample_behaviors/franka"
)
self.behavior_map = {
"Block Stacking": f"{behavior_path}/block_stacking_behavior.py",
"Simple State Machine": f"{behavior_path}/simple/simple_state_machine.py",
"Simple Decider Network": f"{behavior_path}/simple/simple_decider_network.py",
"Peck State Machine": f"{behavior_path}/peck_state_machine.py",
"Peck Decider Network": f"{behavior_path}/peck_decider_network.py",
"Peck Game": f"{behavior_path}/peck_game.py",
}
self.selected_behavior = "Block Stacking"
super().start_extension(
menu_name="Cortex",
submenu_name="",
name="Franka Cortex Examples",
title="Franka Cortex Examples",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/cortex_tutorials/tutorial_cortex_4_franka_block_stacking.html#isaac-sim-app-tutorial-cortex-4-franka-block-stacking",
overview="This Example shows how to Use Cortex for multiple behaviors robot and Cortex behaviors in Isaac Sim.\n\nPress the 'Open in IDE' button to view the source code.",
sample=FrankaCortex(self.on_diagnostics),
file_path=os.path.abspath(__file__),
number_of_extra_frames=2,
)
self.task_ui_elements = {}
frame = self.get_frame(index=0)
self.build_task_controls_ui(frame)
self.loaded = False
return
def _on_load_world(self):
self._sample.behavior = self.get_behavior()
self.loaded = True
super()._on_load_world()
def on_diagnostics(self, diagnostic, decision_stack):
if diagnostic:
self.diagostic_model.set_value(diagnostic)
self.state_model.set_value(decision_stack)
self.diagnostics_panel.visible = bool(diagnostic)
def get_world(self):
return CortexWorld.instance()
def get_behavior(self):
return self.behavior_map[self.selected_behavior]
def _on_start_button_event(self):
asyncio.ensure_future(self.sample.on_event_async())
self.task_ui_elements["Start"].enabled = False
return
def post_reset_button_event(self):
self.task_ui_elements["Start"].enabled = True
return
def post_load_button_event(self):
self.task_ui_elements["Start"].enabled = True
return
def post_clear_button_event(self):
self.task_ui_elements["Start"].enabled = False
return
def __on_selected_behavior_changed(self, selected_index):
self.selected_behavior = selected_index
if self.loaded:
asyncio.ensure_future(self._sample.load_behavior(self.get_behavior()))
self.on_diagnostics("", "")
def build_task_controls_ui(self, frame):
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self.task_ui_elements["Selected Behavior"] = dropdown_builder(
"Selected Behavior",
items=[
"Block Stacking",
"Simple State Machine",
"Simple Decider Network",
"Peck State Machine",
"Peck Decider Network",
"Peck Game",
],
on_clicked_fn=self.__on_selected_behavior_changed,
)
dict = {
"label": "Load World",
"type": "button",
"text": "Load",
"tooltip": "Load World and Task",
"on_clicked_fn": self._on_load_world,
}
self._buttons["Load World"] = btn_builder(**dict)
self._buttons["Load World"].enabled = True
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._buttons["Reset"] = btn_builder(**dict)
self._buttons["Reset"].enabled = False
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Task Controls"
frame.visible = True
dict = {
"label": "Start",
"type": "button",
"text": "Start",
"tooltip": "Start",
"on_clicked_fn": self._on_start_button_event,
}
self.task_ui_elements["Start"] = btn_builder(**dict)
self.task_ui_elements["Start"].enabled = False
with self.get_frame(index=1):
self.get_frame(index=1).title = "Diagnostics"
self.get_frame(index=1).visible = True
self._diagnostics = ui.VStack(spacing=5)
# self._diagnostics.enabled = False
with self._diagnostics:
ui.Label("Decision Stack", height=20)
self.state_model = ui.SimpleStringModel()
ui.StringField(self.state_model, multiline=True, height=120)
self.diagnostics_panel = ui.VStack(spacing=5)
with self.diagnostics_panel:
ui.Label("Diagnostic message", height=20)
self.diagostic_model = ui.SimpleStringModel()
ui.StringField(self.diagostic_model, multiline=True, height=200)
Ngochuy2137/omni_isaac_examples/franka_cortex/__init__.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.franka_cortex.franka_cortex_extension import FrankaCortexExtension
JJGIV2010/goBilda-extension/README.md
## NVIDIA Omniverse GoBilda Extension
The NVIDIA Omniverse GoBilda Extension is a work-in-progress open source extension that aims to provide universal scene description components for the GoBilda platform. The extension is designed to assist students, engineers, and educators in building prototypes of robots by offering a physics environment and useful metrics such as cost.

### Features
- Universal scene description components for the GoBilda platform
- Import GoBilda parts into your Omniverse scene
- Get information about the assembly and useful metrics such as cost
- Simulate the assembly using the physics environment in NVIDIA Omniverse
- Save time and money by ensuring part compatibility through simulation
### Usage
1. Enable the extension by following the instructions in the README.
2. Use the file menu bar to navigate to "goBilda > *part category* " and select a part to add it to the scene.
3. After you have set up your assembly, use the file menu bar to navigate to "goBilda > tools > stage info window" to view useful information about the assembly, such as total cost or weight (a scripting sketch for querying the same metrics follows this list).
4. Explore the GoBilda menu to access additional information about the scene such as enabling viewport widgets.
5. Customize individual parts by adding physics, materials, and attributes that might be useful for your particular project.
6. Explore the Python classes included in the repo that are currently used to author variants and variant sets for the GoBilda parts.
8. Update or import a new STEP file for a part by using the import STEP file option in the GoBilda menu.
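The stage metrics shown by the stage-info window (step 3) can also be queried from the Script Editor. A minimal sketch, assuming parts are loaded under `/World/Components` and expose `cost` and `weight` attributes as authored by this extension:

```python
import omni.usd

# Sum the cost/weight attributes authored on each goBilda part in the stage.
stage = omni.usd.get_context().get_stage()
components = stage.GetPrimAtPath("/World/Components").GetChildren()

total_cost = sum(prim.GetAttribute("cost").Get() or 0.0 for prim in components)
total_weight = sum(prim.GetAttribute("weight").Get() or 0.0 for prim in components)
print(f"Total cost: {total_cost}, total weight: {total_weight}")
```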
### Requirements
- NVIDIA Omniverse
### Installation
The installation steps will be provided in the completed README once the extension is finished.
**Note:** This extension is currently a work-in-progress, and additional features and documentation will be added in future updates.
---
**Note to developers:** Please check out the repo to see source code, examples or if you would like to contribute.
JJGIV2010/goBilda-extension/tools/scripts/link_app.py
import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
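    # Query the locally running Omniverse Launcher (REST endpoint on port 33480)
    # for installed Kit apps; returns a dict of {slug: (name, install_root)}.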
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
JJGIV2010/goBilda-extension/tools/packman/config.packman.xml
<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
JJGIV2010/goBilda-extension/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(
package_src_path, allowZip64=True
) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning(
"Directory %s already present, packaged installation aborted" % package_dst_path
)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
JJGIV2010/goBilda-extension/exts/goBilda/goBilda/extension.py
import omni.ext
import omni.ui as ui
import asyncio
import carb.input
import omni.kit.menu.utils
import omni.kit.undo
import omni.kit.commands
import omni.usd
from omni.kit.menu.utils import MenuItemDescription
from pxr import Sdf
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class GoBildaExtension(omni.ext.IExt):
def __init__(self):
self.extensionID = None
def on_startup(self, ext_id):
print("[goBilda] GoBilda startup")
# Register a menu item under the "Extensions" menu
self.extensionID = ext_id
# self.aboutWindow()
self.init_menu(ext_id)
self.stage = omni.usd.get_context().get_stage()
_menu_list = None
_sub_menu_list = None
# Menu name.
_menu_name = "goBilda"
def comingSoon(self):
self._window = ui.Window("goBilda Extension", width=500, textwrap=True)
with self._window.frame:
with ui.VStack():
####### Image : Omniverse logo ########
with ui.HStack():
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
img = ui.Image(alignment=ui.Alignment.CENTER)
img.source_url = ext_path + "/data/goBildaLogo.png"
ui.Label("""
Coming soon!
""", textwrap=True)
def aboutWindow(self):
self._window = ui.Window("goBilda Extension", width=500, textwrap=True)
with self._window.frame:
with ui.VStack():
####### Image : Omniverse logo ########
with ui.HStack():
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
img = ui.Image(alignment=ui.Alignment.CENTER)
img.source_url = ext_path + "/data/goBildaLogo.png"
ui.Label("""
Welcome to the unofficial extension for goBilda parts! If you're unfamiliar, goBilda is an open-source robotics prototyping platform perfect for designing robots, machines, and much more.
With our extension, you can seamlessly integrate goBilda parts into your Omniverse scene. This tool provides valuable insight into your assembly, aids in simulating your design, and even ensures part compatibility via simulation. The result? A smoother design process that saves you both time and money.
Check out the official goBilda website for more information: https://www.gobilda.com/
Ready to start building? Follow these simple steps:
Congratulations, you've already enabled the extension! If necessary, resetting the extension is as easy as disabling and enabling it again.
To select a part, navigate through the file menu bar: goBilda > parts > select a part.
Your chosen part will be incorporated into the scene.
For additional details about the scene, feel free to explore the goBilda menu.
Customize each part to fit your project needs by adding physics, materials, and attributes as needed.
To update or import a new step file for a fresh part, simply head over to the goBilda menu and choose the import step file option.
Thanks for checking out the extension. Now, let the fun begin. Happy building!
""", textwrap=True)
    def stageInfoWindow(self):
        # Keep the window handle on a separate attribute so it does not shadow this method
        # (assigning to self.stageInfoWindow would overwrite the bound method on the instance).
        self._stage_info_window = ui.Window("Stage Info", width=500, textwrap=True)
        with self._stage_info_window.frame:
            ui.Label("Stage Info")
def viewportOverlay(self):
print("viewport overlay place holder")
def addToStage(self, component):
"""
This function adds a component to the stage
:param component:
:return:
"""
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
path = f"{ext_path}/data/models/{component}/_"
primSourceLocation = path + component + "_allVariants.usda"
omni.kit.commands.execute("CreatePayload",
path_to=Sdf.Path(f"/World/Components/_{component}"),
# Prim path for where to create the reference
asset_path=primSourceLocation,
# The file path to reference. Relative paths are accepted too.
usd_context=omni.usd.get_context()
)
def getCost(self):
"""
This function gets the cost of the parts in the scene
:return:
"""
totalCost = 0
print("analyzing components in scene for cost")
# get all the components in the scene
components = self.stage.GetPrimAtPath("/World/Components").GetChildren()
# get the cost of each component
for component in components:
cost = component.GetAttribute("cost").Get()
totalCost = totalCost + cost
print("total cost of components in scene: " + str(totalCost))
return totalCost
def getWeight(self):
"""
This function gets the total weight of the parts in the scene
:return:
"""
totalWeight = 0
print("analyzing components in scene for weight")
# get all the components in the scene
components = self.stage.GetPrimAtPath("/World/Components").GetChildren()
# get the weight of each component
for component in components:
weight = component.GetAttribute("weight").Get()
totalWeight = totalWeight + weight
print("total weight of components in scene: " + str(totalWeight))
return totalWeight
def init_menu(self, ext_id):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
self.channelSubMenu = [
MenuItemDescription(name="UChannel", onclick_fn=lambda: self.addToStage("1120")),
MenuItemDescription(name="LowUChannel", onclick_fn=lambda: self.addToStage("1121")),
]
self.goRailSubMenu = [
MenuItemDescription(name="GoRailClosed", onclick_fn=lambda: self.addToStage("1109")),
MenuItemDescription(name="GoRailOpen", onclick_fn=lambda: self.addToStage("1118")),
]
self.beamsSubMenu = [
MenuItemDescription(name="U Beams", onclick_fn=lambda: self.addToStage("1101")),
MenuItemDescription(name="L Beams", onclick_fn=lambda: self.addToStage("1103")),
MenuItemDescription(name="Flat Beams", onclick_fn=lambda: self.addToStage("1102")),
MenuItemDescription(name="Square Beams", onclick_fn=lambda: self.addToStage("1106")),
MenuItemDescription(name="Shaft Beams", onclick_fn=lambda: self.addToStage("1119")),
]
self.shaftsAndTubingSubMenu = [
MenuItemDescription(name="Steel Round", onclick_fn=lambda: self.addToStage("2100")),
MenuItemDescription(name="Steel D", onclick_fn=lambda: self.addToStage("2101")),
MenuItemDescription(name="Steel Rex", onclick_fn=lambda: self.addToStage("2102")),
MenuItemDescription(name="Aluminum Rex", onclick_fn=lambda: self.addToStage("2104")),
MenuItemDescription(name="Hub Shafts", onclick_fn=lambda: self.addToStage("2110")),
MenuItemDescription(name="Aluminum Tubing", onclick_fn=lambda: self.addToStage("4100")),
MenuItemDescription(name="goTube", onclick_fn=lambda: self.addToStage("4103")),
MenuItemDescription(name="goRail", sub_menu=self.goRailSubMenu)
]
self.mountsSubMenu = [
MenuItemDescription(name="Block Mounts", onclick_fn=lambda: self.addToStage("1203")),
MenuItemDescription(name="Dual Block Mounts", onclick_fn=lambda: self.addToStage("1205")),
MenuItemDescription(name="One Side Two Post Pattern", onclick_fn=lambda: self.addToStage("1400")),
MenuItemDescription(name="Two Side Two Post Pattern", onclick_fn=lambda: self.addToStage("1401")),
MenuItemDescription(name="Gusseted Angle Pattern", onclick_fn=lambda: self.addToStage("1204"))
]
self.structureSubMenu = [
MenuItemDescription(name="Channel", sub_menu=self.channelSubMenu),
MenuItemDescription(name="goRail", sub_menu=self.goRailSubMenu),
MenuItemDescription(name="Beams", sub_menu=self.beamsSubMenu),
MenuItemDescription(name="Shafting & Tubing", sub_menu=self.shaftsAndTubingSubMenu),
MenuItemDescription(name="Mounts", sub_menu=self.mountsSubMenu)
]
self.motionSubMenu = [
MenuItemDescription(name="Servos", onclick_fn=lambda: self.addToStage("2000"))
]
self.electronicsSubMenu = [
# MenuItemDescription(name="Motor Controllers", onclick_fn=lambda: self.addToStage("motorControllers")),
# MenuItemDescription(name="Servo Electronics", onclick_fn=lambda: self.addToStage("servoElectronics")),
# MenuItemDescription(name="Signal Mixers", onclick_fn=lambda: self.addToStage("signalMixers")),
# MenuItemDescription(name="Batteries", onclick_fn=lambda: self.addToStage("batteries")),
# MenuItemDescription(name="Voltage Regulators", onclick_fn=lambda: self.addToStage("voltageRegulators")),
# MenuItemDescription(name="Power Distribution Boards", onclick_fn=lambda: self.addToStage("powerDistributionBoards")),
# MenuItemDescription(name="Wiring", onclick_fn=lambda: self.addToStage("wiring")),
# MenuItemDescription(name="Switches", onclick_fn=lambda: self.addToStage("switches")),
# MenuItemDescription(name="Lights", onclick_fn=lambda: self.addToStage("lights")),
]
self.hardwareSubMenu = [
# MenuItemDescription(name="Screws", onclick_fn=lambda: self.addToStage("screws")),
# MenuItemDescription(name="M4 Threaded Rods", onclick_fn=lambda: self.addToStage("threadedRods")),
# MenuItemDescription(name="Washers", onclick_fn=lambda: self.addToStage("washers")),
# MenuItemDescription(name="Shaft Spacers & Shims", onclick_fn=lambda: self.addToStage("sahftSpacersAndShims")),
# MenuItemDescription(name="Hole Reducers", onclick_fn=lambda: self.addToStage("holeReducers")),
# MenuItemDescription(name="Nuts", onclick_fn=lambda: self.addToStage("nuts")),
# MenuItemDescription(name="Springs", onclick_fn=lambda: self.addToStage("springs")),
# MenuItemDescription(name="Threaded Plates", onclick_fn=lambda: self.addToStage("threadedPlates")),
# MenuItemDescription(name="Standoffs & Spacers", onclick_fn=lambda: self.addToStage("standoffsAndSpacers")),
# MenuItemDescription(name="Collars", onclick_fn=lambda: self.addToStage("collars")),
# MenuItemDescription(name="Hinges", onclick_fn=lambda: self.addToStage("hinges")),
# MenuItemDescription(name="Tools", onclick_fn=lambda: self.addToStage("tools")),
# MenuItemDescription(name="Flexible Tubing", onclick_fn=lambda: self.addToStage("flexibleTubing")),
# MenuItemDescription(name="Cable", onclick_fn=lambda: self.addToStage("cable")),
# MenuItemDescription(name="Wire Management", onclick_fn=lambda: self.addToStage("wireManagement")),
# MenuItemDescription(name="Grommets", onclick_fn=lambda: self.addToStage("grommets")),
# MenuItemDescription(name="Rubber Feet", onclick_fn=lambda: self.addToStage("rubberFeet")),
# MenuItemDescription(name="Magnets", onclick_fn=lambda: self.addToStage("magnets"))
]
self.stageToolsSubMenu = [
MenuItemDescription(name="Stage Info Window", onclick_fn=lambda: self.stageInfoWindow()),
MenuItemDescription(name="Viewport Overlay", onclick_fn=lambda: self.viewportOverlay())
]
self._menu_list = [
MenuItemDescription(name="Tools", sub_menu=self.stageToolsSubMenu),
MenuItemDescription(),
MenuItemDescription(name="Structure", sub_menu=self.structureSubMenu),
MenuItemDescription(name="Motion", sub_menu=self.motionSubMenu),
MenuItemDescription(name="Electronics", sub_menu=self.electronicsSubMenu),
MenuItemDescription(name="Hardware", sub_menu=self.hardwareSubMenu),
MenuItemDescription(),
MenuItemDescription(name="About",
onclick_fn=lambda: self.aboutWindow()),
]
# Rebuild with additional menu items.
omni.kit.menu.utils.add_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
def on_standards_option_select(self):
enabled = True
def on_standards_option_checked(self):
enabled = False
return enabled
def on_standards_normally_open_option_select(self):
enabled = False
def on_standards_normally_closed_option_checked(self):
enabled = True
return enabled
def term_menu(self):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
# Remove and rebuild the added menu items.
omni.kit.menu.utils.remove_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
def on_shutdown(self):
print("[goBilda] GoBilda shutdown")
JJGIV2010/goBilda-extension/exts/goBilda/goBilda/__init__.py
from .extension import *
JJGIV2010/goBilda-extension/exts/goBilda/config/extension.toml
[package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "goBilda extension"
description="An extension to help students, artists, educators and engineers model, simulate and analyze goBilda assemblies using universal scene description and nvidia omniverse."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/JJGIV2010/goBilda-extension.git"
# One of categories for UI.
category = "Simulation"
# Keywords for the extension
keywords = ["robotic", "goBilda", "motor", "servo", "simulation", "bracket", "wheel"]
# Icon to show in the extension manager
icon = "data/icon.png"
# Preview to show in the extension manager
preview_image = "data/preview.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import goBilda".
[[python.module]]
name = "goBilda"
JJGIV2010/goBilda-extension/exts/goBilda/docs/readme.md
## NVIDIA Omniverse GoBilda Extension
The NVIDIA Omniverse GoBilda Extension is a work-in-progress open source extension that aims to provide universal scene description components for the GoBilda platform. The extension is designed to assist students, engineers, and educators in building prototypes of robots by offering a physics environment and useful metrics such as cost.
### Features
- Universal scene description components for the GoBilda platform
- Import GoBilda parts into your Omniverse scene
- Get information about the assembly and useful metrics such as cost
- Simulate the assembly using the physics environment in NVIDIA Omniverse
- Save time and money by ensuring part compatibility through simulation
### Usage
1. Enable the extension by following the instructions in the README.
2. Use the file menu bar to navigate to "goBilda > *part category* " and select a part to add it to the scene.
3. After you have set up your assembly, use the file menu bar to navigate to "goBilda > tools > stage info window" to view useful information about the assembly, such as total cost or weight.
4. Explore the GoBilda menu to access additional information about the scene such as enabling viewport widgets.
5. Customize individual parts by adding physics, materials, and attributes that might be useful for your particular project.
6. Explore the Python classes included in the repo that are currently used to author variants and variant sets for the GoBilda parts.
8. Update or import a new STEP file for a part by using the import STEP file option in the GoBilda menu.
### Requirements
- NVIDIA Omniverse
### Installation
The installation steps will be provided in the completed README once the extension is finished.
**Note:** This extension is currently a work-in-progress, and additional features and documentation will be added in future updates.
---
**Note to developers:** Please check out the repo to see source code, examples or if you would like to contribute.
leggedrobotics/viplanner/pyproject.toml
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "viplanner"
version = "0.1.0"
description = "Visual Imperative Planner for Legged Robots"
authors = [{name = "Pascal Roth", email = "[email protected]"}]
license = {file = "LICENSE.txt"}
readme = "README.md"
requires-python = ">=3.7"
keywords = ["robotics", "planning", "legged-robots"]
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
dependencies = [
"torch",
"torchvision",
"PyYAML==6.0",
"tqdm",
"matplotlib",
"networkx",
"scipy",
"open3d==0.17.0",
"wandb==0.14.0",
"opencv-python-headless",
]
[project.optional-dependencies]
inference = [
"mmcv==2.0.0",
"mmengine",
"mmdet",
]
standard = [
"pypose",
]
jetson = [
"torch==1.11",
]
[project.urls]
homepage = "https://github.com/pascal-roth/viplanner"
repository = "https://github.com/pascal-roth/viplanner.git"
[tool.setuptools.packages]
find = {}
leggedrobotics/viplanner/TRAINING.md
# Training and Evaluation
Here an overview of the steps involved in training the policy is provided.
## Cost-Map Building
Cost-Map building is an essential step in guiding optimization and representing the environment.
Cost-Maps can be built from either depth and semantic images (i.e., data generated in simulation) or (semantically annotated) point clouds (i.e., real-world data).
If depth and semantic images from the simulation are available, a 3D reconstruction has to be performed first, following the steps described in Point 1. If (semantically annotated) point clouds are already available, the cost map can be built directly from the point cloud, following the steps described in Point 2.
1. **Simulation: Depth Reconstruction** <br>
The reconstruction is executed in two steps, controlled by the config parameter defined in [ReconstructionCfg Class](viplanner/config/costmap_cfg.py):
1. Generate a colored point cloud by warping each semantic image onto the depth image (accounting for cameras in different frames)
2. Projection into 3D space and voxelization
The process expects the following data structure:
``` graphql
env_name
├── camera_extrinsic.txt # format: x y z qx qy qz qw
├── intrinsics.txt # expects ROS CameraInfo format --> P-Matrix
├── depth # either png and/ or npy, if both npy is used
| ├── xxxx.png # images saved with 4 digits, e.g. 0000.png
| ├── xxxx.npy # arrays saved with 4 digits, e.g. 0000.npy
├── semantics # optional
├── xxxx.png # images saved with 4 digits, e.g. 0000.png
```
When both depth and semantic images are available, define sem_suffix and depth_suffix in ReconstructionCfg to differentiate between the two, with the following structure:
``` graphql
env_name
├── camera_extrinsic{depth_suffix}.txt # format: x y z qx qy qz qw
├── camera_extrinsic{sem_suffix}.txt # format: x y z qx qy qz qw
├── intrinsics.txt # P-Matrix for intrinsics of depth and semantic images
├── depth # either png and/ or npy, if both npy is used
| ├── xxxx{depth_suffix}.png # images saved with 4 digits, e.g. 0000.png
| ├── xxxx{depth_suffix}.npy # arrays saved with 4 digits, e.g. 0000.npy
├── semantics # optional
├── xxxx{sem_suffix}.png # images saved with 4 digits, e.g. 0000.png
```
2. **Real-World: Open3D-Slam**
To create an annotated 3D Point-Cloud from real-world data, i.e., LiDAR scans and semantics generated from the RGB camera stream, use tools such as [Open3D Slam](https://github.com/leggedrobotics/open3d_slam).
3. **Cost-Building** <br>
Either a geometric or a semantic cost map can be generated fully automatically by running the following command:
```
python viplanner/cost_builder.py
```
The configuration is set in [CostMapConfig](viplanner/config/costmap_cfg.py). Standard values are provided; however, before running the script, please adjust the config to your needs and local environment paths.
Cost-Maps will be saved within the environment folder, with the following structure:
``` graphql
maps
├── cloud
│ ├── cost_{map_name}.txt # 3d visualization of cost map
├── data
│ ├── cost_{map_name}_map.txt # cost map
│ ├── cost_{map_name}_ground.txt # ground height estimated from pointcloud
└── params
├── config_cost_{map_name}.yaml # CostMapConfig used to generate cost map
```
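To sanity-check a generated map, the files can be loaded directly. A minimal sketch, assuming the `.txt` files are plain numeric arrays readable by `numpy.loadtxt` and that the environment folder matches the tree above (adjust the paths and `map_name` to your own `cost_builder` output):

``` python
import numpy as np
import yaml

env_dir = "path/to/env_name"   # illustrative environment folder
map_name = "sem"               # illustrative {map_name}

cost_map = np.loadtxt(f"{env_dir}/maps/data/cost_{map_name}_map.txt")
ground = np.loadtxt(f"{env_dir}/maps/data/cost_{map_name}_ground.txt")
with open(f"{env_dir}/maps/params/config_cost_{map_name}.yaml") as f:
    cfg = yaml.safe_load(f)   # CostMapConfig used to generate this map

print("cost map shape:", cost_map.shape, "ground shape:", ground.shape)
```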
## Training
The training configuration is given in [TrainCfg](viplanner/config/learning_cfg.py). Training can be started using the example training script [train.py](viplanner/train.py).
``` bash
python viplanner/train.py
```
For training, the following directory structure is expected / will be created:
``` graphql
file_path # TrainCfg.file_path or env variable EXPERIMENT_DIRECTORY
├── data
│ ├── env_name # structure as defined in Cost-Map Building
├── models
│ ├── model_name
│ | ├── model.pth # trained model
│ | ├── model.yaml # TrainCfg used to train model
├── logs
│ ├── model_name
```
It is important that the model name is unique; otherwise, the previous training will be overwritten.
Also, always copy both `model.pth` and `model.yaml`, because the config is necessary to reload the model.
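For example, a small helper that snapshots both files together; a minimal sketch with illustrative paths, assuming the layout above:

``` python
import shutil
from pathlib import Path

def snapshot_model(models_dir: str, model_name: str, backup_dir: str) -> None:
    """Copy the trained weights together with the config needed to reload them."""
    src = Path(models_dir) / model_name
    dst = Path(backup_dir) / model_name
    dst.mkdir(parents=True, exist_ok=True)
    shutil.copy2(src / "model.pth", dst / "model.pth")
    shutil.copy2(src / "model.yaml", dst / "model.yaml")

snapshot_model("path/to/experiments/models", "my_model", "backups")  # illustrative arguments
```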
leggedrobotics/viplanner/README.md
# ViPlanner: Visual Semantic Imperative Learning for Local Navigation
<p align="center">
<a href="https://leggedrobotics.github.io/viplanner.github.io/">Project Page</a> •
<a href="https://arxiv.org/abs/2310.00982">arXiv</a> •
<a href="https://youtu.be/8KO4NoDw6CM">Video</a> •
<a href="#citing-viplanner">BibTeX</a>
Click on image for demo video!
[](https://youtu.be/8KO4NoDw6CM)
</p>
ViPlanner is a robust learning-based local path planner based on semantic and depth images.
Fully trained in simulation, the planner can be applied in dynamic indoor as well as outdoor environments.
We provide it as an extension for [NVIDIA Isaac-Sim](https://developer.nvidia.com/isaac-sim) within the [Orbit](https://isaac-orbit.github.io/) project (details [here](./omniverse/README.md)).
Furthermore, a ready to use [ROS Noetic](http://wiki.ros.org/noetic) package is available within this repo for direct integration on any robot (tested and developed on ANYmal C and D).
**Keywords:** Visual Navigation, Local Planning, Imperative Learning
## Install
- Install the package (defined in `pyproject.toml`) with pip by running:
```bash
pip install .
```
or
```bash
pip install -e .[standard]
```
if you want to edit the code. To apply the planner in the ROS-Node, install it with the inference setting:
```bash
pip install -e .[standard,inference]
```
Make sure the CUDA toolkit has the same version as the one used to compile torch. We assume 11.7. If you are using a different version, adjust the mmcv install string accordingly (see the Known Issue below). If the toolkit is not found, set the `CUDA_HOME` environment variable, as follows:
```
export CUDA_HOME=/usr/local/cuda
```
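To quickly verify that the installed toolkit matches the CUDA version torch was compiled against, a check along these lines can be used (the 11.7 value is only the version assumed above):
```python
# Quick sanity check: the toolkit reported by nvcc should match torch's CUDA version.
import subprocess

import torch

print("torch built with CUDA:", torch.version.cuda)        # e.g. "11.7" in the assumed setup
print("CUDA available at runtime:", torch.cuda.is_available())

# nvcc reports the locally installed toolkit; its version should agree with the one above.
print(subprocess.run(["nvcc", "--version"], capture_output=True, text=True).stdout)
```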
On the Jetson, please use
```bash
pip install -e .[inference,jetson]
```
as `mmdet` requires `torch.distributed`, which is only built up to version 1.11 on the Jetson and is not compatible with pypose. See the [Dockerfile](./Dockerfile) for a workaround.
**Known Issue**
- mmcv build wheel does not finish:
- fix by installing with defined CUDA version, as detailed [here](https://mmcv.readthedocs.io/en/latest/get_started/installation.html#install-with-pip). For CUDA Version 11.7 and torch==2.0.x use
```
pip install mmcv==2.0.0 -f https://download.openmmlab.com/mmcv/dist/cu117/torch2.0/index.html
```
**Extension**
This work includes the switch from semantic to direct RGB input for the training pipeline, to facilitate further research. For RGB input, an option exists to employ a backbone with mask2former pre-trained weights. For this option, include the GitHub submodule, install the requirements included there, and build the necessary CUDA operators. These steps are not necessary for the published planner!
```bash
pip install git+https://github.com/facebookresearch/detectron2.git
git submodule update --init
pip install -r third_party/mask2former/requirements.txt
cd third_party/mask2former/mask2former/modeling/pixel_decoder/ops && \
sh make.sh
```
**Remark**
Note that for an editable install of packages without a setup.py, PEP 660 has to be fulfilled. This requires the following versions (as described in detail [here](https://stackoverflow.com/questions/69711606/how-to-install-a-package-using-pip-in-editable-mode-with-pyproject-toml)):
- [pip >= 21.3](https://pip.pypa.io/en/stable/news/#v21-3)
```
python3 -m pip install --upgrade pip
```
- [setuptools >= 64.0.0](https://github.com/pypa/setuptools/blob/main/CHANGES.rst#v6400)
```
python3 -m pip install --upgrade setuptools
```
## Training
Here is an overview of the steps involved in training the policy.
For more detailed instructions, please refer to [TRAINING.md](TRAINING.md).
0. Training Data Generation <br>
Training data is generated from the [Matterport 3D](https://github.com/niessner/Matterport), [Carla](https://carla.org/) and [NVIDIA Warehouse](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_static_assets.html) environments using custom-developed Isaac Sim extensions that are open-sourced. Currently, the extensions are being updated to the latest `Orbit` version and will be available soon; an intermediate solution is given [here](https://github.com/pascal-roth/orbit_envs).
1. Build Cost-Map <br>
The first step in training the policy is to build a cost-map from the available depth and semantic data. A cost-map is a representation of the environment where each cell is assigned a cost value indicating its traversability. The cost-map guides the optimization and is therefore required to be differentiable. Cost-maps are built using the [cost-builder](viplanner/cost_builder.py) with configs [here](viplanner/config/costmap_cfg.py), given a pointcloud of the environment with semantic information (either from simulation or from real-world data).
2. Training <br>
Once the cost-map is constructed, the next step is to train the policy. The policy is a machine learning model that learns to make decisions based on the depth and semantic measurements. An example training script can be found [here](viplanner/train.py) with configs [here](viplanner/config/learning_cfg.py)
3. Evaluation <br>
Performance assessment can be performed on simulation and real-world data. The policy is evaluated with respect to multiple metrics such as distance to goal, average and maximum cost, and path length; a minimal sketch of such metrics is given after this list. In order to execute the policy on ANYmal in simulation, please refer to the [Omniverse Extension](./omniverse/README.md).
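As a rough illustration of the metrics mentioned in step 3, the sketch below computes distance to goal, path length, and average/maximum cost for a single rolled-out path; the arrays are placeholders, not output of the repository's evaluation scripts.
```python
# Minimal metric sketch (placeholder data, not the repository's evaluation code).
import numpy as np

path = np.array([[0.0, 0.0], [0.5, 0.1], [1.1, 0.4], [1.8, 0.9]])  # planned x/y waypoints [m]
goal = np.array([2.0, 1.0])
costs = np.array([0.1, 0.3, 0.2, 0.4])       # cost-map values sampled along the path

distance_to_goal = np.linalg.norm(path[-1] - goal)
path_length = np.linalg.norm(np.diff(path, axis=0), axis=1).sum()

print(f"distance to goal: {distance_to_goal:.2f} m")
print(f"path length:      {path_length:.2f} m")
print(f"avg / max cost:   {costs.mean():.2f} / {costs.max():.2f}")
```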
## Inference
1. Real-World <br>
A ROS node is provided to run the planner on the legged robot ANYmal; for details, please see the [ROS-Node-README](ros/README.md).
2. NVIDIA Isaac-Sim <br>
The planner can be executed within NVIDIA Isaac Sim. It is implemented as part of the [Orbit Framework](https://isaac-orbit.github.io/) with its own extension. For details, please see the [Omniverse Extension](./omniverse/README.md).
### Model Download
The latest model is available to download: [[checkpoint](https://drive.google.com/file/d/1PY7XBkyIGESjdh1cMSiJgwwaIT0WaxIc/view?usp=sharing)] [[config](https://drive.google.com/file/d/1r1yhNQAJnjpn9-xpAQWGaQedwma5zokr/view?usp=sharing)]
## <a name="CitingViPlanner"></a>Citing ViPlanner
```
@article{roth2023viplanner,
title ={ViPlanner: Visual Semantic Imperative Learning for Local Navigation},
author ={Pascal Roth and Julian Nubert and Fan Yang and Mayank Mittal and Marco Hutter},
journal = {2024 IEEE International Conference on Robotics and Automation (ICRA)},
year = {2023},
month = {May},
}
```
### License
This code belongs to the Robotic Systems Lab, ETH Zurich.
All rights reserved.
**Authors: [Pascal Roth](https://github.com/pascal-roth), [Julian Nubert](https://juliannubert.com/), [Fan Yang](https://github.com/MichaelFYang), [Mayank Mittal](https://mayankm96.github.io/), and [Marco Hutter](https://rsl.ethz.ch/the-lab/people/person-detail.MTIxOTEx.TGlzdC8yNDQxLC0xNDI1MTk1NzM1.html)<br />
Maintainer: Pascal Roth, [email protected]**
The ViPlanner package has been tested under ROS Noetic on Ubuntu 20.04.
This is research code, expect that it changes often and any fitness for a particular purpose is disclaimed.
| 7,098 | Markdown | 54.031007 | 550 | 0.75796 |
leggedrobotics/viplanner/ros/INSTALL.md | # INSTALL
It is strongly recommended to use the provided docker images for the NVIDIA Jetson Orin (L4T r35.1.0) due to special version requirements on the Jetson!
## Models
Place both the ViPlanner and the Mask2Former model in the `ros/planner/models` folder.
For the semantics, we use the Mask2Former implementation of [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former), as it improves inference speed on the Jetson compared to the code version published by the authors. For inference-speed reasons, we use the smallest network with a ResNet-50 backbone pre-trained on the COCO dataset, which can be downloaded [here](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth).
## Nvidia Jetson Docker
Before building the docker images, the Docker default runtime has to be set to NVIDIA in order to allow access to the CUDA compiler (nvcc) during `docker build` operations. Therefore, add `"default-runtime": "nvidia"` to your `/etc/docker/daemon.json` configuration file before attempting to build the containers:
```json
{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"default-runtime": "nvidia"
}
```
You will then want to restart the Docker service or reboot your system before proceeding. This can be done by running:
> service docker restart
In order to build the docker container on a NVIDIA Jetson Orin, execute the following steps:
1. On the Jetson, locate the files `nvidia-l4t-apt-source.list` (usually at `/etc/apt/sources.list.d/nvidia-l4t-apt-source.list`) and `nvidia-l4t-apt-source.clean.list` (if it is not on the device, just create an empty file). Then copy both into `bin/packages`.
2. The Docker context of the image is the parent directory of viplanner, so make sure to put the repo under a dedicated folder, e.g., `git`, and not directly under your home directory, as otherwise all files under home are copied into the context. The container can then be built as follows:
```bash
./bin/build.sh
```
3. To run the container, we assume that `$HOME/catkin_ws` and `$HOME/git` exist. The former contains the catkin workspace with the `src` directory (don't build the packages yet), and the latter contains any git repositories that should be included. If both are present, the docker container can be started with:
```bash
./bin/run.sh
```
4. The viplanner repo should be linked into `$HOME/catkin_ws/src`. The planner's ROS node can then be built as follows:
```bash
catkin build viplanner_pkgs
```
Similarly, add all other robot-specific repositories and build the corresponding packages.
**Remark**: If additional development environments such as TensorRT are required, you can add them in the docker file following the examples given in the [jetson-container repo](https://github.com/dusty-nv/jetson-containers).
## Manual Installation
- Requires a ROS Noetic installation (http://wiki.ros.org/noetic/Installation/Ubuntu)
- Requires the CUDA toolkit (same version as the one used to compile torch! This is crucial, otherwise the segmentation network cannot run!)
- Dependency for JoyStick Planner:
```bash
sudo apt install libusb-dev
```
- Installation of VIPlanner: follow the instructions in [README.md](../README.md) and install with the inference flag. This installs mmdetection for Mask2Former; detailed installation instructions are given in the official documentation, [here](https://mmdetection.readthedocs.io/en/latest/).
- Build all ros packages
```bash
catkin build viplanner_pkgs
```
## Known Issues
### ROS numpy
- `ros_numpy` still references `np.float`, which has been removed in recent numpy versions.
- FIX:
in '/opt/ros/noetic/lib/python3/dist-packages/ros_numpy/point_cloud2.py' change all occurrences of 'np.float' to 'float'
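Alternatively, instead of patching the installed file, the removed alias can be restored at import time. This is a stop-gap workaround, not part of the package:
```python
# Alternative workaround: restore the removed alias before ros_numpy is imported.
import numpy as np

# `np.float` was removed in numpy >= 1.24 but is still referenced by ros_numpy.
if not hasattr(np, "float"):
    np.float = float

import ros_numpy  # noqa: E402  (imported after the shim on purpose)
```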
### General
- Setuptools version during install: VIPlanner requires a rather recent version of setuptools (>= 64.0.0), which can lead to problems with the mask2former install. It is recommended to always install mask2former first and then upgrade setuptools to the version needed for VIPlanner. Otherwise, the following errors can be observed:
- ERROR:
```bash
Invalid version: '0.23ubuntu1'
```
- FIX:
> pip install --upgrade --user setuptools==58.3.0
- ERROR:
```
File "/usr/local/lib/python3.8/dist-packages/pkg_resources/_vendor/packaging/version.py", line 264, in __init__
match = self._regex.search(version)
TypeError: expected string or bytes-like object
```
- FIX:
manually editing `site-packages/pkg_resources/_vendor/packaging/version.py` with `str()`
### Within the Docker
- SSL Issue when running `pip install` within the docker when trying to manually install additional packages (description [here](https://stackoverflow.com/questions/50692816/pip-install-ssl-issue))
- ERROR:
```bash
python -m pip install torch
Collecting zeep
Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLError("bad handshake: Error([('SSL routines', 'ssl3_get_server_certificate', 'certificate verify failed')],)",),)': /simple/torch/
```
- FIX:
> python3 -m pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org --index-url=https://pypi.org/simple/ torch
- PyYAML upgrade error (described [here](https://clay-atlas.com/us/blog/2022/07/23/solved-cannot-uninstall-pyyaml-it-is-a-distutils-installed-project-and-thus-we-cannot-accurately-determine-which-files-belong-to-it-which-would-lead-to-only-a-partial-uninstall/))
- ERROR:
```
Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
```
- FIX:
> pip install --ignore-installed PyYAML
| 6,013 | Markdown | 48.295082 | 558 | 0.736903 |
leggedrobotics/viplanner/ros/README.md | # ViPlanner ROS Node
## Overview
ROS Node to run ViPlanner on the LeggedRobot Platform ANYmal.
The implementation consists of
- the `planner` itself, running a semantic segmentation network and ViPlanner in parallel
- a `visualizer` to project the path in the RGB and depth camera stream of the robot
- a `pathFollower` to translate the path into twist commands that can be executed by the robot (a minimal sketch of such a command is given after this list)
- an RViz plugin to set the waypoints for the planner
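As referenced above, the `pathFollower` output is a standard velocity command. The sketch below illustrates publishing such a twist command towards the next path point; the topic name, frame convention, and gains are placeholders and do not reflect the actual node's configuration.
```python
#!/usr/bin/env python
# Minimal sketch (not the actual pathFollower): turn the next path point, given in the
# robot frame, into a geometry_msgs/Twist command. Topic name and gains are placeholders.
import math

import rospy
from geometry_msgs.msg import Twist

rospy.init_node("twist_sketch")
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)  # placeholder topic name
rospy.sleep(0.5)                                        # give the publisher time to connect

next_point = (1.0, 0.5)  # next path point in the robot frame: x forward, y left [m]

cmd = Twist()
cmd.linear.x = 0.5 * next_point[0]                              # simple proportional gain
cmd.angular.z = 1.0 * math.atan2(next_point[1], next_point[0])  # turn towards the point
pub.publish(cmd)
```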
## Installation
Please refer to [Installation Instructions](./INSTALL.md) where details about the included docker and a manual install is given.
## Usage
For the legged platform ANYmal, we provide configuration files for the C and D versions as well as a more robot-independent configuration based on a mounted RGB-D camera. The configuration files are located in the [config](./planner/config/) folder. Before running the planner, make sure to adjust the configuration file to your needs and to select the correct one in the `viplanner.launch` file.
After launching the ANYmal software stack, run the VIPlanner without visualization:
```bash
roslaunch viplanner_node viplanner.launch
```
By enabling the `WaypointTool` in RViz, you can set waypoints for the planner. The planner will track these waypoints.
It is recommended to visualize the path and the waypoints in RViz to verify the correct behavior.
## SmartJoystick
Press the **LB** button on the joystick when you see the output `Switch to Smart Joystick mode ...` on the screen.
Now the SmartJoystick feature is enabled. It takes the joystick command as a motion intention and runs the VIPlanner in the background for low-level obstacle avoidance.
| 1,668 | Markdown | 44.108107 | 392 | 0.788969 |
leggedrobotics/viplanner/ros/joystick_drivers/README.md | # ROS Joystick Drivers Stack #
[](https://github.com/ros-drivers/joystick_drivers/actions)
A simple set of nodes for supporting various types of joystick inputs and producing ROS messages from the underlying OS messages.
| 334 | Markdown | 54.833324 | 170 | 0.799401 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/CHANGELOG.rst | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package ps3joy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.15.0 (2020-10-12)
-------------------
1.14.0 (2020-07-07)
-------------------
* Fixing linter errors for Noetic. (`#174 <https://github.com/ros-drivers/joystick_drivers/issues/174>`_)
* Make sure to import struct where it is used. (`#162 <https://github.com/ros-drivers/joystick_drivers/issues/162>`_)
* roslint and Generic Clean-Up (`#161 <https://github.com/ros-drivers/joystick_drivers/issues/161>`_)
* Contributors: Chris Lalancette, Joshua Whitley
1.13.0 (2019-06-24)
-------------------
* Merge pull request `#128 <https://github.com/ros-drivers/joystick_drivers/issues/128>`_ from ros-drivers/fix/tab_errors
* Cleaning up Python indentation.
* Merge pull request `#123 <https://github.com/ros-drivers/joystick_drivers/issues/123>`_ from cclauss/modernize-python2-code
* Modernize Python 2 code to get ready for Python 3
* Merge branch 'master' into indigo-devel
* Contributors: Joshua Whitley, Matthew, cclauss
1.12.0 (2018-06-11)
-------------------
* Addressed numerous outstanding PRs.
* Created bluetooth_devices.md
* Created testing guide for ps3joy.
* Create procedure_test.md
* Let ps3joy_node not quit on inactivity-timeout.
* Refine diagnostics message usage in ps3joy_node
* Improve ps3joy_node with rospy.init_node and .is_shutdown
* Remove quit on failed root level check, part one of issue `#53 <https://github.com/ros-drivers/joystick_drivers/issues/53>`_
* Create README
* Changed package xml to format 2
* Contributors: Alenso Labady, Felix Kolbe, Jonathan Bohren, alab288, jprod123
1.11.0 (2017-02-10)
-------------------
* Update dependencies to remove warnings
* Contributors: Mark D Horn
1.10.1 (2015-05-24)
-------------------
* Remove stray architechture_independent flags
* Contributors: Jonathan Bohren, Scott K Logan
1.10.0 (2014-06-26)
-------------------
* First indigo release
* Update ps3joy/package.xml URLs with github user ros to ros-drivers
* Prompt for sudo password when required
* Contributors: Felix Kolbe, Jonathan Bohren, dawonn
| 2,071 | reStructuredText | 37.37037 | 126 | 0.688073 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/diagnostics.yaml | type: AnalyzerGroup
pub_rate: 1.0 # Optional
base_path: '' # Optional, prepended to all diagnostic output
analyzers:
PS3State:
type: diagnostic_aggregator/GenericAnalyzer
path: 'PS3 State'
timeout: 5.0
startswith: ['Battery', 'Charging State', 'Connection', 'ps3_joy']
remove_prefix: 'ps3_joy'
| 316 | YAML | 27.818179 | 70 | 0.699367 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/package.xml | <package format="2">
<name>ps3joy</name>
<version>1.15.0</version>
<license>BSD</license>
<description>
Playstation 3 SIXAXIS or DUAL SHOCK 3 joystick driver.
Driver for the Sony PlayStation 3 SIXAXIS or DUAL SHOCK 3
joysticks. In its current state, this driver is not compatible
with the use of other Bluetooth HID devices. The driver listens
for a connection on the HID ports, starts the joystick
streaming data, and passes the data to the Linux uinput device
so that it shows up as a normal joystick.
</description>
<maintainer email="[email protected]">Jonathan Bohren</maintainer>
<author>Blaise Gassend</author>
<author>@pabr.org</author>
<author>Melonee Wise</author>
<url type="website">http://www.ros.org/wiki/ps3joy</url>
<url type="development">https://github.com/ros-drivers/joystick_drivers</url>
<url type="bugtracker">https://github.com/ros-drivers/joystick_drivers/issues</url>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roslint</build_depend>
<depend>bluez</depend>
<depend>diagnostic_msgs</depend>
<depend>joystick</depend>
<depend>libusb-dev</depend>
<depend>python3-bluez</depend>
<depend>rosgraph</depend>
<depend>rospy</depend>
<depend>sensor_msgs</depend>
</package>
| 1,277 | XML | 32.631578 | 85 | 0.725137 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/README.md | # PlayStation 3 Joystick Driver for ROS
This package provides a driver for the PS3 (SIXAXIS or DUALSHOCK3) bluetooth joystick.
This driver provides a more reliable connection and gives access to the joystick's accelerometers and gyroscope. Linux's native support for the PS3 joystick lacks this functionality.
Additional documentation:
* [Troubleshooting](doc/troubleshooting.md)
* [Testing Instructions](doc/testing.md)
* [Bluetooth Device Compatibility](doc/bluetooth_devices.md)
## Dependencies
* joystick
* libusb-dev
* bluez-5.37
## Pairing instructions
1. If you can connect the joystick and the bluetooth dongle to the same computer, connect the joystick to the computer using a USB cable.
2. Load the bluetooth dongle's MAC address into the ps3 joystick using:
```
sudo bash
rosrun ps3joy sixpair
```
If you cannot connect the joystick to the same computer as the dongle,
find out the bluetooth dongle's MAC address by running (on the computer
that has the bluetooth dongle):
```
hciconfig
```
If this does not work, you may need to do
```
sudo hciconfig hci0 up
```
and retry
```
hciconfig
```
3. Plug the PS3 joystick into some other computer using a USB cable.
4. Replace the bluetooth dongle's MAC address in the following command:
```
sudo rosrun ps3joy sixpair 01:23:45:67:89:ab
```
## Starting the PS3 joystick
5. Run the following command
```
rosrun ps3joy ps3joy.py
```
6. Open a new terminal and reboot bluez and run joy with:
```
sudo systemctl restart bluetooth
rosrun joy joy_node
```
7. Open a new terminal and echo the joy topic
```
rostopic echo joy
```
8. This should make a joystick appear at `/dev/input/js?`
9. You can check that it is working with `jstest /dev/input/js?` (replace `?` with the number of your joystick device)
## Command-line Options
### ps3joy.py
```
usage: ps3joy.py [--inactivity-timeout=<n>] [--no-disable-bluetoothd] [--redirect-output] [--continuous-output]=<f>
<n>: inactivity timeout in seconds (saves battery life).
<f>: file name to redirect output to.
```
`--inactivity-timeout`
This may be useful for saving battery life and reducing contention on the 2.4 GHz band. Your PS3 controller
will shut down after the given amount of inactivity.
`--no-disable-bluetoothd`
ps3joy.py will not take down bluetoothd. bluetoothd must be configured not to handle input devices, otherwise
you will receive an error saying "Error binding to socket".
`--redirect-output`
This can be helpful when ps3joy.py is running in the background. It allows the standard output
and error messages to be redirected to a file.
`--continuous-output`
This will output continuous motion streams and, as a result, will no longer leave extended periods without
messages on the /joy topic. (This only works for ps3joy.py; passing this parameter to ps3joy_node.py will
result in it being ignored.)
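Once `joy_node` is running (see the steps above), the joystick data can be consumed from any ROS node. The minimal subscriber sketch below only prints the axes and buttons of the incoming `sensor_msgs/Joy` messages:
```python
#!/usr/bin/env python
# Minimal sketch: consume the /joy topic published by joy_node.
import rospy
from sensor_msgs.msg import Joy


def on_joy(msg):
    # axes are floats in [-1, 1]; buttons are 0/1 integers
    rospy.loginfo("axes=%s buttons=%s", list(msg.axes), list(msg.buttons))


rospy.init_node("joy_listener")
rospy.Subscriber("joy", Joy, on_joy)
rospy.spin()
```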
## Limitations
This driver will not coexist with any other bluetooth device. In future releases, we plan to allow first non-HID and later any bluetooth device to coexist with this driver. The following devices do coexist:
* Non-HID devices using a userland driver, such as one written using pybluez.
* Keyboards or mice running in HID proxy mode, which appear to the kernel as USB devices.
| 3,290 | Markdown | 30.64423 | 206 | 0.755319 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/src/sixpair.c | /*
* Copyright (c) 2007, 2008 [email protected]
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <usb.h>
#define VENDOR 0x054c
#define PRODUCT 0x0268
#define USB_DIR_IN 0x80
#define USB_DIR_OUT 0
void fatal(char *msg) { perror(msg); exit(1); }
void show_master(usb_dev_handle *devh, int itfnum) {
printf("Current Bluetooth master: ");
unsigned char msg[8];
int res = usb_control_msg
(devh, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x01, 0x03f5, itfnum, (void*)msg, sizeof(msg), 5000);
if ( res < 0 ) { perror("USB_REQ_GET_CONFIGURATION"); return; }
printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
msg[2], msg[3], msg[4], msg[5], msg[6], msg[7]);
}
void set_master(usb_dev_handle *devh, int itfnum, int mac[6]) {
printf("Setting master bd_addr to %02x:%02x:%02x:%02x:%02x:%02x\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
char msg[8]= { 0x01, 0x00, mac[0],mac[1],mac[2],mac[3],mac[4],mac[5] };
int res = usb_control_msg
(devh,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x09,
0x03f5, itfnum, msg, sizeof(msg),
5000);
if ( res < 0 ) fatal("USB_REQ_SET_CONFIGURATION");
}
void process_device(int argc, char **argv, struct usb_device *dev,
struct usb_config_descriptor *cfg, int itfnum) {
int mac[6];
usb_dev_handle *devh = usb_open(dev);
if ( ! devh ) fatal("usb_open");
usb_detach_kernel_driver_np(devh, itfnum);
int res = usb_claim_interface(devh, itfnum);
if ( res < 0 ) fatal("usb_claim_interface");
show_master(devh, itfnum);
if ( argc >= 2 ) {
if ( sscanf(argv[1], "%x:%x:%x:%x:%x:%x",
&mac[0],&mac[1],&mac[2],&mac[3],&mac[4],&mac[5]) != 6 ) {
printf("usage: %s [<bd_addr of master>]\n", argv[0]);
exit(1);
}
} else {
FILE *f = popen("hcitool dev", "r");
if ( !f ||
fscanf(f, "%*s\n%*s %x:%x:%x:%x:%x:%x",
&mac[0],&mac[1],&mac[2],&mac[3],&mac[4],&mac[5]) != 6 ) {
printf("Unable to retrieve local bd_addr from `hcitool dev`.\n");
printf("Please enable Bluetooth or specify an address manually.\n");
exit(1);
}
pclose(f);
}
set_master(devh, itfnum, mac);
usb_close(devh);
}
int main(int argc, char *argv[]) {
usb_init();
if ( usb_find_busses() < 0 ) fatal("usb_find_busses");
if ( usb_find_devices() < 0 ) fatal("usb_find_devices");
struct usb_bus *busses = usb_get_busses();
if ( ! busses ) fatal("usb_get_busses");
int found = 0;
struct usb_bus *bus;
for ( bus=busses; bus; bus=bus->next ) {
struct usb_device *dev;
for ( dev=bus->devices; dev; dev=dev->next) {
struct usb_config_descriptor *cfg;
for ( cfg = dev->config;
cfg < dev->config + dev->descriptor.bNumConfigurations;
++cfg ) {
int itfnum;
for ( itfnum=0; itfnum<cfg->bNumInterfaces; ++itfnum ) {
struct usb_interface *itf = &cfg->interface[itfnum];
struct usb_interface_descriptor *alt;
for ( alt = itf->altsetting;
alt < itf->altsetting + itf->num_altsetting;
++alt ) {
if ( dev->descriptor.idVendor == VENDOR &&
dev->descriptor.idProduct == PRODUCT &&
alt->bInterfaceClass == 3 ) {
process_device(argc, argv, dev, cfg, itfnum);
++found;
}
}
}
}
}
}
if ( ! found ) {
printf("No controller found on USB busses. Please connect your joystick via USB.\n");
return 1;
}
return 0;
}
| 4,935 | C | 31.906666 | 89 | 0.645593 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/scripts/ps3joy_node.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import fcntl
import os
import select
import struct
import subprocess
import sys
import time
import traceback
from array import array
import rosgraph.masterapi
import roslib
import rospy
import sensor_msgs.msg
from bluetooth import *
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
roslib.load_manifest("ps3joy")
L2CAP_PSM_HIDP_CTRL = 17
L2CAP_PSM_HIDP_INTR = 19
inactivity_timout_string = "--inactivity-timeout"
no_disable_bluetoothd_string = "--no-disable-bluetoothd"
redirect_output_string = "--redirect-output"
class uinput:
EV_KEY = 1
EV_REL = 2
EV_ABS = 3
BUS_USB = 3
ABS_MAX = 0x3F
class uinputjoy:
def open_uinput(self):
for name in ["/dev/input/uinput", "/dev/misc/uinput", "/dev/uinput"]:
try:
return os.open(name, os.O_WRONLY)
break
except Exception as e:
pass
return None
def __init__(self, buttons, axes, axmin, axmax, axfuzz, axflat):
self.file = self.open_uinput()
if self.file is None:
print("Trying to modprobe uinput.", file=sys.stderr)
os.system("modprobe uinput > /dev/null 2>&1")
time.sleep(1) # uinput isn't ready to go right away.
self.file = self.open_uinput()
if self.file is None:
print(
("Can't open uinput device. Is it accessible by this" " user? Did you mean to run as root?"),
file=sys.stderr,
)
raise OSError
UI_SET_EVBIT = 0x40045564
UI_SET_KEYBIT = 0x40045565
UI_SET_RELBIT = 0x40045566
UI_DEV_CREATE = 0x5501
UI_SET_RELBIT = 0x40045566
UI_SET_ABSBIT = 0x40045567
uinput_user_dev = "80sHHHHi" + (uinput.ABS_MAX + 1) * 4 * "i"
if len(axes) != len(axmin) or len(axes) != len(axmax):
raise Exception("uinputjoy.__init__: axes, axmin and axmax should have same" " length")
absmin = [0] * (uinput.ABS_MAX + 1)
absmax = [0] * (uinput.ABS_MAX + 1)
absfuzz = [2] * (uinput.ABS_MAX + 1)
absflat = [4] * (uinput.ABS_MAX + 1)
for i in range(0, len(axes)):
absmin[axes[i]] = axmin[i]
absmax[axes[i]] = axmax[i]
absfuzz[axes[i]] = axfuzz[i]
absflat[axes[i]] = axflat[i]
os.write(
self.file,
struct.pack(
uinput_user_dev,
"Sony Playstation SixAxis/DS3",
uinput.BUS_USB,
0x054C,
0x0268,
0,
0,
*(absmax + absmin + absfuzz + absflat),
),
)
fcntl.ioctl(self.file, UI_SET_EVBIT, uinput.EV_KEY)
for b in buttons:
fcntl.ioctl(self.file, UI_SET_KEYBIT, b)
for a in axes:
fcntl.ioctl(self.file, UI_SET_EVBIT, uinput.EV_ABS)
fcntl.ioctl(self.file, UI_SET_ABSBIT, a)
fcntl.ioctl(self.file, UI_DEV_CREATE)
self.value = [None] * (len(buttons) + len(axes))
self.type = [uinput.EV_KEY] * len(buttons) + [uinput.EV_ABS] * len(axes)
self.code = buttons + axes
def update(self, value):
input_event = "LLHHi"
t = time.time()
th = int(t)
tl = int((t - th) * 1000000)
if len(value) != len(self.value):
print(
"Unexpected length for value in update (%i instead of %i)."
" This is a bug." % (len(value), len(self.value)),
file=sys.stderr,
)
for i in range(0, len(value)):
if value[i] != self.value[i]:
os.write(
self.file,
struct.pack(
input_event,
th,
tl,
self.type[i],
self.code[i],
value[i],
),
)
self.value = list(value)
class BadJoystickException(Exception):
def __init__(self):
Exception.__init__(self, "Unsupported joystick.")
class decoder:
def __init__(self, daemon, inactivity_timeout=float(1e3000)):
# buttons=[uinput.BTN_SELECT, uinput.BTN_THUMBL, uinput.BTN_THUMBR, uinput.BTN_START,
# uinput.BTN_FORWARD, uinput.BTN_RIGHT, uinput.BTN_BACK, uinput.BTN_LEFT,
# uinput.BTN_TL, uinput.BTN_TR, uinput.BTN_TL2, uinput.BTN_TR2,
# uinput.BTN_X, uinput.BTN_A, uinput.BTN_B, uinput.BTN_Y,
# uinput.BTN_MODE]
# axes=[uinput.ABS_X, uinput.ABS_Y, uinput.ABS_Z, uinput.ABS_RX,
# uinput.ABS_RX, uinput.ABS_RY, uinput.ABS_PRESSURE, uinput.ABS_DISTANCE,
# uinput.ABS_THROTTLE, uinput.ABS_RUDDER, uinput.ABS_WHEEL, uinput.ABS_GAS,
# uinput.ABS_HAT0Y, uinput.ABS_HAT1Y, uinput.ABS_HAT2Y, uinput.ABS_HAT3Y,
# uinput.ABS_TILT_X, uinput.ABS_TILT_Y, uinput.ABS_MISC, uinput.ABS_RZ]
buttons = range(0x100, 0x111)
axes = range(0, 20)
axmin = [0] * 20
axmax = [255] * 20
axfuzz = [2] * 20
axflat = [4] * 20
for i in range(-4, 0): # Gyros have more bits than other axes
axmax[i] = 1023
axfuzz[i] = 4
axflat[i] = 4
for i in range(4, len(axmin) - 4): # Buttons should be zero when not pressed
axmin[i] = -axmax[i]
self.joy = uinputjoy(buttons, axes, axmin, axmax, axfuzz, axflat)
self.axmid = [sum(pair) / 2 for pair in zip(axmin, axmax)]
self.fullstop() # Probably useless because of uinput startup bug
self.outlen = len(buttons) + len(axes)
self.inactivity_timeout = inactivity_timeout
self.daemon = daemon
self.init_ros()
step_active = 1
step_idle = 2
step_error = 3
def init_ros(self):
rospy.init_node("ps3joy", anonymous=True, disable_signals=True)
rospy.Subscriber(
"joy/set_feedback",
sensor_msgs.msg.JoyFeedbackArray,
self.set_feedback,
)
self.diagnostics = Diagnostics()
self.led_values = [1, 0, 0, 0]
self.rumble_cmd = [0, 255]
self.led_cmd = 2
self.core_down = False
# ********************************************************************************
# Raw Data Format
# unsigned char ReportType; //Report Type 01
# unsigned char Reserved1; // Unknown
# unsigned int ButtonState; // Main buttons
# unsigned char PSButtonState; // PS button
# unsigned char Reserved2; // Unknown
# unsigned char LeftStickX; // left Joystick X axis 0 - 255, 128 is mid
# unsigned char LeftStickY; // left Joystick Y axis 0 - 255, 128 is mid
# unsigned char RightStickX; // right Joystick X axis 0 - 255, 128 is mid
# unsigned char RightStickY; // right Joystick Y axis 0 - 255, 128 is mid
# unsigned char Reserved3[4]; // Unknown
# unsigned char PressureUp; // digital Pad Up button Pressure 0 - 255
# unsigned char PressureRight; // digital Pad Right button Pressure 0 - 255
# unsigned char PressureDown; // digital Pad Down button Pressure 0 - 255
# unsigned char PressureLeft; // digital Pad Left button Pressure 0 - 255
# unsigned char PressureL2; // digital Pad L2 button Pressure 0 - 255
# unsigned char PressureR2; // digital Pad R2 button Pressure 0 - 255
# unsigned char PressureL1; // digital Pad L1 button Pressure 0 - 255
# unsigned char PressureR1; // digital Pad R1 button Pressure 0 - 255
# unsigned char PressureTriangle; // digital Pad Triangle button Pressure 0 - 255
# unsigned char PressureCircle; // digital Pad Circle button Pressure 0 - 255
# unsigned char PressureCross; // digital Pad Cross button Pressure 0 - 255
# unsigned char PressureSquare; // digital Pad Square button Pressure 0 - 255
# unsigned char Reserved4[3]; // Unknown
# unsigned char Charge; // charging status ? 02 = charge, 03 = normal
# unsigned char Power; // Battery status
# unsigned char Connection; // Connection Type
# unsigned char Reserved5[9]; // Unknown
# unsigned int AccelerometerX; // X axis accelerometer Big Endian 0 - 1023
    # unsigned int AccelerometerY;              // Y axis accelerometer Big Endian 0 - 1023
# unsigned int AccelerometerZ; // Z axis accelerometer Big Endian 0 - 1023
# unsigned int GyrometerX; // Z axis Gyro Big Endian 0 - 1023
# *********************************************************************************
def step(self, rawdata): # Returns true if the packet was legal
if len(rawdata) == 50:
joy_coding = "!1B2x3B1x4B4x12B3x1B1B1B9x4H"
all_data = list(struct.unpack(joy_coding, rawdata)) # removing power data
state_data = all_data[20:23]
data = all_data[0:20] + all_data[23:]
prefix = data.pop(0)
self.diagnostics.publish(state_data)
if prefix != 161:
print(
"Unexpected prefix (%i). Is this a PS3 Dual Shock or Six" " Axis?" % prefix,
file=sys.stderr,
)
return self.step_error
out = []
for j in range(0, 2): # Split out the buttons.
curbyte = data.pop(0)
for k in range(0, 8):
out.append(int((curbyte & (1 << k)) != 0))
out = out + data
self.joy.update(out)
axis_motion = [
abs(out[17:][i] - self.axmid[i]) > 20 for i in range(0, len(out) - 17 - 4)
] # 17 buttons, 4 inertial sensors
if any(out[0:17]) or any(axis_motion):
return self.step_active
return self.step_idle
elif len(rawdata) == 13:
print(
("Your bluetooth adapter is not supported. Does it support" " Bluetooth 2.0?"),
file=sys.stderr,
)
raise BadJoystickException()
else:
print(
"Unexpected packet length (%i). Is this a PS3 Dual Shock or" " Six Axis?" % len(rawdata),
file=sys.stderr,
)
return self.step_error
def fullstop(self):
self.joy.update([0] * 17 + self.axmid)
def set_feedback(self, msg):
for feedback in msg.array:
if feedback.type == sensor_msgs.msg.JoyFeedback.TYPE_LED and feedback.id < 4:
self.led_values[feedback.id] = int(round(feedback.intensity))
elif feedback.type == sensor_msgs.msg.JoyFeedback.TYPE_RUMBLE and feedback.id < 2:
self.rumble_cmd[feedback.id] = int(feedback.intensity * 255)
else:
rospy.logwarn(
"Feedback %s of type %s does not exist for this joystick.",
feedback.id,
feedback.type,
)
self.led_cmd = self.led_values[0] * pow(2, 1) + self.led_values[1] * pow(2, 2)
self.led_cmd = self.led_cmd + self.led_values[2] * pow(2, 3) + self.led_values[3] * pow(2, 4)
self.new_msg = True
def send_cmd(self, ctrl):
command = [
0x52,
0x01,
0x00,
0xFE,
self.rumble_cmd[1],
0xFE,
self.rumble_cmd[0], # rumble values
0x00,
0x00,
0x00,
0x00,
self.led_cmd,
0xFF,
0x27,
0x10,
0x00,
0x32, # LED 4
0xFF,
0x27,
0x10,
0x00,
0x32, # LED 3
0xFF,
0x27,
0x10,
0x00,
0x32, # LED 2
0xFF,
0x27,
0x10,
0x00,
0x32, # LED 1
0x00,
0x00,
0x00,
0x00,
0x00,
]
ctrl.send(array("B", command).tostring())
self.new_msg = False
def run(self, intr, ctrl):
activated = False
try:
self.fullstop()
lastactivitytime = lastvalidtime = time.time()
while not rospy.is_shutdown():
(rd, wr, err) = select.select([intr], [], [], 0.1)
curtime = time.time()
if len(rd) + len(wr) + len(err) == 0: # Timeout
ctrl.send("\x53\xf4\x42\x03\x00\x00") # Try activating the stream.
else: # Got a frame.
if not activated:
self.send_cmd(ctrl)
time.sleep(0.5)
self.rumble_cmd[1] = 0
self.send_cmd(ctrl)
print("Connection activated")
activated = True
try:
if self.new_msg:
self.send_cmd(ctrl)
rawdata = intr.recv(128)
except BluetoothError as s:
print("Got Bluetooth error %s. Disconnecting." % s)
return
if len(rawdata) == 0: # Orderly shutdown of socket
print("Joystick shut down the connection, battery may be" " discharged.")
return
if not rosgraph.masterapi.is_online():
print("The roscore or node shutdown, ps3joy shutting" " down.")
return
stepout = self.step(rawdata)
if stepout != self.step_error:
lastvalidtime = curtime
if stepout == self.step_active:
lastactivitytime = curtime
if curtime - lastactivitytime > self.inactivity_timeout:
print(
"Joystick inactive for %.0f seconds. Disconnecting to"
" save battery." % self.inactivity_timeout
)
return
if curtime - lastvalidtime >= 0.1:
# Zero all outputs if we don't hear a valid frame for 0.1 to 0.2 seconds
self.fullstop()
if curtime - lastvalidtime >= 5:
# Disconnect if we don't hear a valid frame for 5 seconds
print("No valid data for 5 seconds. Disconnecting. This" " should not happen, please report it.")
return
time.sleep(0.005) # No need to blaze through the loop when there is an error
finally:
self.fullstop()
class Diagnostics:
def __init__(self):
self.STATE_TEXTS_CHARGING = {0: "Charging", 1: "Not Charging"}
self.STATE_TEXTS_CONNECTION = {
18: "USB Connection",
20: "Rumbling",
22: "Bluetooth Connection",
}
self.STATE_TEXTS_BATTERY = {
0: "No Charge",
1: "20% Charge",
2: "40% Charge",
3: "60% Charge",
4: "80% Charge",
5: "100% Charge",
238: "Charging",
}
self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray)
self.last_diagnostics_time = rospy.get_rostime()
def publish(self, state):
STATE_INDEX_CHARGING = 0
STATE_INDEX_BATTERY = 1
STATE_INDEX_CONNECTION = 2
# timed gate: limit to 1 Hz
curr_time = rospy.get_rostime()
if (curr_time - self.last_diagnostics_time).to_sec() < 1.0:
return
self.last_diagnostics_time = curr_time
# compose diagnostics message
diag = DiagnosticArray()
diag.header.stamp = curr_time
# battery info
stat = DiagnosticStatus(name="Battery", level=DiagnosticStatus.OK, message="OK")
try:
battery_state_code = state[STATE_INDEX_BATTERY]
stat.message = self.STATE_TEXTS_BATTERY[battery_state_code]
if battery_state_code < 3:
stat.level = DiagnosticStatus.WARN
if battery_state_code < 1:
stat.level = DiagnosticStatus.ERROR
stat.message = "Please Recharge Battery (%s)." % self.STATE_TEXTS_BATTERY[battery_state_code]
except KeyError as ex:
stat.message = "Invalid Battery State %s" % ex
rospy.logwarn("Invalid Battery State %s" % ex)
stat.level = DiagnosticStatus.ERROR
diag.status.append(stat)
# connection info
stat = DiagnosticStatus(
name="ps3joy: Connection Type",
level=DiagnosticStatus.OK,
message="OK",
)
try:
stat.message = self.STATE_TEXTS_CONNECTION[state[STATE_INDEX_CONNECTION]]
except KeyError as ex:
stat.message = "Invalid Connection State %s" % ex
rospy.logwarn("Invalid Connection State %s" % ex)
stat.level = DiagnosticStatus.ERROR
diag.status.append(stat)
# charging info
stat = DiagnosticStatus(
name="ps3joy: Charging State",
level=DiagnosticStatus.OK,
message="OK",
)
try:
stat.message = self.STATE_TEXTS_CHARGING[state[STATE_INDEX_CHARGING]]
except KeyError as ex:
stat.message = "Invalid Charging State %s" % ex
rospy.logwarn("Invalid Charging State %s" % ex)
stat.level = DiagnosticStatus.ERROR
diag.status.append(stat)
# publish message
self.diag_pub.publish(diag)
class Quit(Exception):
def __init__(self, errorcode):
Exception.__init__(self)
self.errorcode = errorcode
def check_hci_status():
# Check if hci0 is up and pscanning, take action as necessary.
proc = subprocess.Popen(["hciconfig"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
if out.find("UP") == -1:
os.system("hciconfig hci0 up > /dev/null 2>&1")
if out.find("PSCAN") == -1:
os.system("hciconfig hci0 pscan > /dev/null 2>&1")
class connection_manager:
def __init__(self, decoder):
self.decoder = decoder
def prepare_bluetooth_socket(self, port):
sock = BluetoothSocket(L2CAP)
return self.prepare_socket(sock, port)
def prepare_net_socket(self, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return self.prepare_socket(sock, port)
def prepare_socket(self, sock, port):
first_loop = True
while True:
try:
sock.bind(("", port))
except Exception as e:
print(repr(e))
if first_loop:
print(
(
"Error binding to socket, will retry every 5"
" seconds. Do you have another ps3joy.py running?"
" This error occurs on some distributions. Please"
" read"
" http://www.ros.org/wiki/ps3joy/Troubleshooting"
" for solutions."
),
file=sys.stderr,
)
first_loop = False
time.sleep(0.5)
continue
sock.listen(1)
return sock
def listen_net(self, intr_port, ctrl_port):
intr_sock = self.prepare_net_socket(intr_port)
ctrl_sock = self.prepare_net_socket(ctrl_port)
self.listen(intr_sock, ctrl_sock)
def listen_bluetooth(self):
intr_sock = self.prepare_bluetooth_socket(L2CAP_PSM_HIDP_INTR)
ctrl_sock = self.prepare_bluetooth_socket(L2CAP_PSM_HIDP_CTRL)
self.listen(intr_sock, ctrl_sock)
def listen(self, intr_sock, ctrl_sock):
self.n = 0
while not rospy.is_shutdown():
print("Waiting for connection. Disconnect your PS3 joystick from USB" " and press the pairing button.")
try:
intr_sock.settimeout(5)
ctrl_sock.settimeout(1)
while True:
try:
(intr, (idev, iport)) = intr_sock.accept()
break
except Exception as e:
if str(e) == "timed out":
check_hci_status()
else:
raise
try:
try:
(ctrl, (cdev, cport)) = ctrl_sock.accept()
except Exception as e:
print(
("Got interrupt connection without control" " connection. Giving up on it."),
file=sys.stderr,
)
continue
try:
if idev == cdev:
self.decoder.run(intr, ctrl)
print("Connection terminated.")
quit(0)
else:
print(
("Simultaneous connection from two" " different devices. Ignoring both."),
file=sys.stderr,
)
finally:
ctrl.close()
finally:
intr.close()
except BadJoystickException:
pass
except KeyboardInterrupt:
print("\nCTRL+C detected. Exiting.")
rospy.signal_shutdown("\nCTRL+C detected. Exiting.")
quit(0)
except Exception as e:
traceback.print_exc()
print("Caught exception: %s" % str(e), file=sys.stderr)
time.sleep(1)
def usage(errcode):
print(
"usage: ps3joy.py ["
+ inactivity_timout_string
+ "=<n>] ["
+ no_disable_bluetoothd_string
+ "] ["
+ redirect_output_string
+ "]=<f>"
)
print("<n>: inactivity timeout in seconds (saves battery life).")
print("<f>: file name to redirect output to.")
print("Unless " + no_disable_bluetoothd_string + " is specified, bluetoothd will be stopped.")
raise Quit(errcode)
def is_arg_with_param(arg, prefix):
if not arg.startswith(prefix):
return False
if not arg.startswith(prefix + "="):
print("Expected '=' after " + prefix)
print()
usage(1)
return True
if __name__ == "__main__":
errorcode = 0
try:
inactivity_timeout = float(1e3000)
disable_bluetoothd = True
daemon = False
for arg in sys.argv[1:]: # Be very tolerant in case we are roslaunched.
if arg == "--help":
usage(0)
elif is_arg_with_param(arg, inactivity_timout_string):
str_value = arg[len(inactivity_timout_string) + 1 :]
try:
inactivity_timeout = float(str_value)
if inactivity_timeout < 0:
print("Inactivity timeout must be positive.")
print()
usage(1)
except ValueError:
print("Error parsing inactivity timeout: " + str_value)
print()
usage(1)
elif arg == no_disable_bluetoothd_string:
disable_bluetoothd = False
elif is_arg_with_param(arg, redirect_output_string):
str_value = arg[len(redirect_output_string) + 1 :]
try:
print("Redirecting output to:", str_value)
sys.stdout = open(str_value, "a", 1)
except OSError as e:
print("Error opening file to redirect output:", str_value)
raise Quit(1)
sys.stderr = sys.stdout
else:
print("Ignoring parameter: '%s'" % arg)
# If the user does not have HW permissions indicate that ps3joy must be run as root
if os.getuid() != 0:
print("ps3joy.py must be run as root.", file=sys.stderr)
quit(1)
if disable_bluetoothd:
os.system("/etc/init.d/bluetooth stop > /dev/null 2>&1")
time.sleep(1) # Give the socket time to be available.
try:
while os.system("hciconfig hci0 > /dev/null 2>&1") != 0:
print(
("No bluetooth dongle found or bluez rosdep not" " installed. Will retry in 5 seconds."),
file=sys.stderr,
)
time.sleep(5)
if inactivity_timeout == float(1e3000):
print("No inactivity timeout was set. (Run with --help for" " details.)")
else:
print("Inactivity timeout set to %.0f seconds." % inactivity_timeout)
cm = connection_manager(decoder(daemon, inactivity_timeout=inactivity_timeout))
cm.listen_bluetooth()
finally:
if disable_bluetoothd:
os.system("/etc/init.d/bluetooth start > /dev/null 2>&1")
except Quit as e:
errorcode = e.errorcode
except KeyboardInterrupt:
print("\nCTRL+C detected. Exiting.")
rospy.signal_shutdown("\nCTRL+C detected. Exiting.")
exit(errorcode)
| 27,503 | Python | 38.347639 | 117 | 0.523834 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/scripts/ps3joysim.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import fcntl
import os
import select
import signal
import socket
import struct
import sys
import threading
import time
import traceback
import ps3joy
from bluetooth import *
def mk_in_socket():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
sock.listen(1)
return sock, sock.getsockname()[1]
# Class to spawn the ps3joy.py infrastructure in its own thread
class driversim(threading.Thread):
def __init__(self, intr, ctrl):
threading.Thread.__init__(self)
self.intr = intr
self.ctrl = ctrl
self.start()
def run(self):
self.cm = ps3joy.connection_manager(ps3joy.decoder())
self.cm.listen(self.intr, self.ctrl)
print("driversim exiting")
def shutdown(self):
self.cm.shutdown = True
class joysim(threading.Thread):
def __init__(self, intr, ctrl):
threading.Thread.__init__(self)
print("Starting joystick simulator on ports", intr, "and", ctrl)
self.intr = socket.socket()
self.intr.connect(("127.0.0.1", intr))
if self.intr == -1:
raise "Error creating interrupt socket."
self.ctrl = socket.socket()
self.ctrl.connect(("127.0.0.1", ctrl))
if self.ctrl == -1:
raise "Error creating control socket."
self.active = False
self.shutdown = False
self.start()
def run(self):
while not self.active and not self.shutdown:
(rd, wr, err) = select.select([self.ctrl], [], [], 1)
if len(rd) == 1:
cmd = self.ctrl.recv(128)
if cmd == "\x53\xf4\x42\x03\x00\x00":
self.active = True
print("Got activate command")
else:
print("Got unknown command (len=%i)" % len(cmd), end=" ")
time.sleep(1)
for c in cmd:
print("%x" % ord(c), end=" ")
print()
print("joyactivate exiting")
def publishstate(self, ax, butt):
if self.active:
ranges = [255] * 17 + [8191] * 20
axval = [int((v + 1) * s / 2) for (v, s) in zip(ax, ranges)]
buttout = []
for i in range(0, 2):
newval = 0
for j in range(0, 8):
newval = newval << 1
if butt[i * 8 + j]:
newval = newval + 1
buttout.append(newval)
joy_coding = "!1B2x3B1x4B4x12B15x4H"
self.intr.send(struct.pack(joy_coding, 161, *(buttout + [0] + axval)))
else:
print("Tried to publish while inactive")
if __name__ == "__main__":
def stop_all_threads(a, b):
exit(0)
signal.signal(signal.SIGINT, stop_all_threads)
# Create sockets for the driver side and pass them to the driver
(intr_in, intr_port) = mk_in_socket()
(ctrl_in, ctrl_port) = mk_in_socket()
ds = driversim(intr_in, ctrl_in)
# Give the simulator a chance to get going
time.sleep(2)
# Call up the simulator telling it which ports to connect to.
js = joysim(intr_port, ctrl_port)
buttons1 = [True] * 16
axes1 = [1, 0, -1, 0.5] * 5
buttons2 = [False] * 16
axes2 = [-1] * 20
buttons3 = [False] * 16
axes3 = [0] * 20
shutdown = False
while not js.active and not shutdown:
time.sleep(0.2)
time.sleep(0.01)
time.sleep(0.01)
while not shutdown:
js.publishstate(axes1, buttons2)
time.sleep(0.01)
axes1[0] = -axes1[0]
js.publishstate(axes2, buttons2)
time.sleep(0.01)
print("main exiting")
| 5,356 | Python | 32.273292 | 82 | 0.609597 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/scripts/ps3joy.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import fcntl
import os
import select
import subprocess
import sys
import time
import traceback
from bluetooth import *
L2CAP_PSM_HIDP_CTRL = 17
L2CAP_PSM_HIDP_INTR = 19
inactivity_timout_string = "--inactivity-timeout"
no_disable_bluetoothd_string = "--no-disable-bluetoothd"
redirect_output_string = "--redirect-output"
continuous_motion_output_string = "--continuous-output"
class uinput:
EV_KEY = 1
EV_REL = 2
EV_ABS = 3
BUS_USB = 3
ABS_MAX = 0x3F
class uinputjoy:
def open_uinput(self):
for name in ["/dev/input/uinput", "/dev/misc/uinput", "/dev/uinput"]:
try:
return os.open(name, os.O_WRONLY)
break
except Exception as e:
pass
return None
def __init__(self, buttons, axes, axmin, axmax, axfuzz, axflat):
self.file = self.open_uinput()
if self.file is None:
print("Trying to modprobe uinput.", file=sys.stderr)
os.system("modprobe uinput > /dev/null 2>&1")
time.sleep(1) # uinput isn't ready to go right away.
self.file = self.open_uinput()
if self.file is None:
print(
("Can't open uinput device. Is it accessible by this" " user? Did you mean to run as root?"),
file=sys.stderr,
)
raise OSError
UI_SET_EVBIT = 0x40045564
UI_SET_KEYBIT = 0x40045565
UI_SET_RELBIT = 0x40045566
UI_DEV_CREATE = 0x5501
UI_SET_RELBIT = 0x40045566
UI_SET_ABSBIT = 0x40045567
uinput_user_dev = "80sHHHHi" + (uinput.ABS_MAX + 1) * 4 * "i"
if len(axes) != len(axmin) or len(axes) != len(axmax):
raise Exception("uinputjoy.__init__: axes, axmin and axmax should have same" " length")
absmin = [0] * (uinput.ABS_MAX + 1)
absmax = [0] * (uinput.ABS_MAX + 1)
absfuzz = [2] * (uinput.ABS_MAX + 1)
absflat = [4] * (uinput.ABS_MAX + 1)
for i in range(0, len(axes)):
absmin[axes[i]] = axmin[i]
absmax[axes[i]] = axmax[i]
absfuzz[axes[i]] = axfuzz[i]
absflat[axes[i]] = axflat[i]
os.write(
self.file,
struct.pack(
uinput_user_dev,
"Sony Playstation SixAxis/DS3",
uinput.BUS_USB,
0x054C,
0x0268,
0,
0,
*(absmax + absmin + absfuzz + absflat),
),
)
fcntl.ioctl(self.file, UI_SET_EVBIT, uinput.EV_KEY)
for b in buttons:
fcntl.ioctl(self.file, UI_SET_KEYBIT, b)
for a in axes:
fcntl.ioctl(self.file, UI_SET_EVBIT, uinput.EV_ABS)
fcntl.ioctl(self.file, UI_SET_ABSBIT, a)
fcntl.ioctl(self.file, UI_DEV_CREATE)
self.value = [None] * (len(buttons) + len(axes))
self.type = [uinput.EV_KEY] * len(buttons) + [uinput.EV_ABS] * len(axes)
self.code = buttons + axes
def update(self, value):
input_event = "LLHHi"
t = time.time()
th = int(t)
tl = int((t - th) * 1000000)
if len(value) != len(self.value):
print(
"Unexpected length for value in update (%i instead of %i). "
"This is a bug." % (len(value), len(self.value)),
file=sys.stderr,
)
for i in range(0, len(value)):
if value[i] != self.value[i]:
os.write(
self.file,
struct.pack(
input_event,
th,
tl,
self.type[i],
self.code[i],
value[i],
),
)
self.value = list(value)
class BadJoystickException(Exception):
def __init__(self):
Exception.__init__(self, "Unsupported joystick.")
class decoder:
def __init__(self, inactivity_timeout=float(1e3000), continuous_motion_output=False):
# buttons=[uinput.BTN_SELECT, uinput.BTN_THUMBL, uinput.BTN_THUMBR, uinput.BTN_START,
# uinput.BTN_FORWARD, uinput.BTN_RIGHT, uinput.BTN_BACK, uinput.BTN_LEFT,
# uinput.BTN_TL, uinput.BTN_TR, uinput.BTN_TL2, uinput.BTN_TR2,
# uinput.BTN_X, uinput.BTN_A, uinput.BTN_B, uinput.BTN_Y,
# uinput.BTN_MODE]
# axes=[uinput.ABS_X, uinput.ABS_Y, uinput.ABS_Z, uinput.ABS_RX,
# uinput.ABS_RX, uinput.ABS_RY, uinput.ABS_PRESSURE, uinput.ABS_DISTANCE,
# uinput.ABS_THROTTLE, uinput.ABS_RUDDER, uinput.ABS_WHEEL, uinput.ABS_GAS,
# uinput.ABS_HAT0Y, uinput.ABS_HAT1Y, uinput.ABS_HAT2Y, uinput.ABS_HAT3Y,
# uinput.ABS_TILT_X, uinput.ABS_TILT_Y, uinput.ABS_MISC, uinput.ABS_RZ,
# ]
buttons = range(0x100, 0x111)
axes = range(0, 20)
axmin = [0] * 20
axmax = [255] * 20
axfuzz = [2] * 20
axflat = [4] * 20
for i in range(-4, 0): # Gyros have more bits than other axes
axmax[i] = 1023
axfuzz[i] = 4
axflat[i] = 4
if continuous_motion_output:
axfuzz[i] = 0
axflat[i] = 0
for i in range(4, len(axmin) - 4): # Buttons should be zero when not pressed
axmin[i] = -axmax[i]
self.joy = uinputjoy(buttons, axes, axmin, axmax, axfuzz, axflat)
self.axmid = [sum(pair) / 2 for pair in zip(axmin, axmax)]
self.fullstop() # Probably useless because of uinput startup bug
self.outlen = len(buttons) + len(axes)
self.inactivity_timeout = inactivity_timeout
step_active = 1
step_idle = 2
step_error = 3
def step(self, rawdata): # Returns true if the packet was legal
if len(rawdata) == 50:
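            # 50-byte HID input report: 0xA1 prefix, two button bitmask bytes,
            # the remaining stick/pressure bytes, and four big-endian 16-bit
            # words for the inertial sensors (decoded by joy_coding below).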
joy_coding = "!1B2x3B1x4B4x12B15x4H"
data = list(struct.unpack(joy_coding, rawdata))
prefix = data.pop(0)
if prefix != 161:
print(
"Unexpected prefix (%i). Is this a PS3 Dual Shock or Six" " Axis?" % prefix,
file=sys.stderr,
)
return self.step_error
out = []
for j in range(0, 2): # Split out the buttons.
curbyte = data.pop(0)
for k in range(0, 8):
out.append(int((curbyte & (1 << k)) != 0))
out = out + data
self.joy.update(out)
axis_motion = [
abs(out[17:][i] - self.axmid[i]) > 20 for i in range(0, len(out) - 17 - 4)
] # 17 buttons, 4 inertial sensors
if any(out[0:17]) or any(axis_motion):
return self.step_active
return self.step_idle
elif len(rawdata) == 13:
print(
("Your bluetooth adapter is not supported. " "Does it support Bluetooth 2.0?"),
file=sys.stderr,
)
raise BadJoystickException()
else:
print(
"Unexpected packet length (%i). " "Is this a PS3 Dual Shock or Six Axis?" % len(rawdata),
file=sys.stderr,
)
return self.step_error
def fullstop(self):
self.joy.update([0] * 17 + self.axmid)
def run(self, intr, ctrl):
activated = False
try:
self.fullstop()
lastactivitytime = lastvalidtime = time.time()
while True:
(rd, wr, err) = select.select([intr], [], [], 0.1)
curtime = time.time()
if len(rd) + len(wr) + len(err) == 0: # Timeout
ctrl.send("\x53\xf4\x42\x03\x00\x00") # Try activating the stream.
else: # Got a frame.
if not activated:
print("Connection activated")
activated = True
try:
rawdata = intr.recv(128)
except BluetoothError as s:
print("Got Bluetooth error %s. Disconnecting." % s)
return
if len(rawdata) == 0: # Orderly shutdown of socket
print("Joystick shut down the connection, battery may be" " discharged.")
return
stepout = self.step(rawdata)
if stepout != self.step_error:
lastvalidtime = curtime
if stepout == self.step_active:
lastactivitytime = curtime
if curtime - lastactivitytime > self.inactivity_timeout:
print(
"Joystick inactive for %.0f seconds. "
"Disconnecting to save battery." % self.inactivity_timeout
)
return
if curtime - lastvalidtime >= 0.1:
# Zero all outputs if we don't hear a valid frame for 0.1 to 0.2 seconds
self.fullstop()
if curtime - lastvalidtime >= 5:
# Disconnect if we don't hear a valid frame for 5 seconds
print("No valid data for 5 seconds. Disconnecting. " "This should not happen, please report it.")
return
time.sleep(0.005) # No need to blaze through the loop when there is an error
finally:
self.fullstop()
class Quit(Exception):
def __init__(self, errorcode):
Exception.__init__(self)
self.errorcode = errorcode
def check_hci_status():
# Check if hci0 is up and pscanning, take action as necessary.
proc = subprocess.Popen(["hciconfig"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
if out.find("UP") == -1:
os.system("hciconfig hci0 up > /dev/null 2>&1")
if out.find("PSCAN") == -1:
os.system("hciconfig hci0 pscan > /dev/null 2>&1")
class connection_manager:
def __init__(self, decoder):
self.decoder = decoder
self.shutdown = False
def prepare_bluetooth_socket(self, port):
sock = BluetoothSocket(L2CAP)
return self.prepare_socket(sock, port)
def prepare_net_socket(self, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return self.prepare_socket(sock, port)
def prepare_socket(self, sock, port):
first_loop = True
while True:
try:
sock.bind(("", port))
except Exception as e:
print(repr(e))
if first_loop:
print(
(
"Error binding to socket, will retry every 5"
" seconds. Do you have another ps3joy.py running?"
" This error occurs on some distributions (such as"
" Ubuntu Karmic). Please read"
" http://www.ros.org/wiki/ps3joy/Troubleshooting"
" for solutions."
),
file=sys.stderr,
)
first_loop = False
time.sleep(0.5)
continue
sock.listen(1)
return sock
def listen_net(self, intr_port, ctrl_port):
intr_sock = self.prepare_net_socket(intr_port)
ctrl_sock = self.prepare_net_socket(ctrl_port)
self.listen(intr_sock, ctrl_sock)
def listen_bluetooth(self):
intr_sock = self.prepare_bluetooth_socket(L2CAP_PSM_HIDP_INTR)
ctrl_sock = self.prepare_bluetooth_socket(L2CAP_PSM_HIDP_CTRL)
self.listen(intr_sock, ctrl_sock)
def listen(self, intr_sock, ctrl_sock):
self.n = 0
while not self.shutdown:
print("Waiting for connection. Disconnect your PS3 joystick from USB" " and press the pairing button.")
try:
intr_sock.settimeout(5)
ctrl_sock.settimeout(1)
while True:
try:
(intr, (idev, iport)) = intr_sock.accept()
break
except Exception as e:
if str(e) == "timed out":
check_hci_status()
else:
raise
try:
try:
(ctrl, (cdev, cport)) = ctrl_sock.accept()
except Exception as e:
print(
("Got interrupt connection without control" " connection. Giving up on it."),
file=sys.stderr,
)
continue
try:
if idev == cdev:
self.decoder.run(intr, ctrl)
print("Connection terminated.")
else:
print(
("Simultaneous connection from two" " different devices. Ignoring both."),
file=sys.stderr,
)
finally:
ctrl.close()
finally:
intr.close()
except BadJoystickException:
pass
except KeyboardInterrupt:
print("CTRL+C detected. Exiting.")
quit(0)
except Exception as e:
traceback.print_exc()
print("Caught exception: %s" % str(e), file=sys.stderr)
time.sleep(1)
print()
def usage(errcode):
print(
"usage: ps3joy.py ["
+ inactivity_timout_string
+ "=<n>] ["
+ no_disable_bluetoothd_string
+ "] ["
+ redirect_output_string
+ "] ["
+ continuous_motion_output_string
+ "]=<f>"
)
print("<n>: inactivity timeout in seconds (saves battery life).")
print("<f>: file name to redirect output to.")
print("Unless " + no_disable_bluetoothd_string + " is specified, bluetoothd will be stopped.")
raise Quit(errcode)
def is_arg_with_param(arg, prefix):
if not arg.startswith(prefix):
return False
if not arg.startswith(prefix + "="):
print("Expected '=' after " + prefix)
print()
usage(1)
return True
if __name__ == "__main__":
errorcode = 0
try:
# Get Root Privileges
euid = os.geteuid()
if euid != 0:
args = ["sudo", sys.executable] + sys.argv + [os.environ]
os.execlpe("sudo", *args)
if euid != 0:
raise SystemExit("Root Privlages Required.")
inactivity_timeout = float(1e3000)
disable_bluetoothd = True
continuous_output = False
for arg in sys.argv[1:]: # Be very tolerant in case we are roslaunched.
if arg == "--help":
usage(0)
elif is_arg_with_param(arg, inactivity_timout_string):
str_value = arg[len(inactivity_timout_string) + 1 :]
try:
inactivity_timeout = float(str_value)
if inactivity_timeout < 0:
print("Inactivity timeout must be positive.")
print()
usage(1)
except ValueError:
print("Error parsing inactivity timeout: " + str_value)
print()
usage(1)
elif arg == no_disable_bluetoothd_string:
disable_bluetoothd = False
elif arg == continuous_motion_output_string:
continuous_output = True
elif is_arg_with_param(arg, redirect_output_string):
str_value = arg[len(redirect_output_string) + 1 :]
try:
print("Redirecting output to:", str_value)
sys.stdout = open(str_value, "a", 1)
except OSError as e:
print("Error opening file to redirect output:", str_value)
raise Quit(1)
sys.stderr = sys.stdout
else:
print("Ignoring parameter: '%s'" % arg)
if disable_bluetoothd:
os.system("/etc/init.d/bluetooth stop > /dev/null 2>&1")
time.sleep(1) # Give the socket time to be available.
try:
while os.system("hciconfig hci0 > /dev/null 2>&1") != 0:
print(
("No bluetooth dongle found or bluez rosdep not" " installed. Will retry in 5 seconds."),
file=sys.stderr,
)
time.sleep(5)
if inactivity_timeout == float(1e3000):
print("No inactivity timeout was set. (Run with --help for" " details.)")
else:
print("Inactivity timeout set to %.0f seconds." % inactivity_timeout)
cm = connection_manager(
decoder(
inactivity_timeout=inactivity_timeout,
continuous_motion_output=continuous_output,
)
)
cm.listen_bluetooth()
finally:
if disable_bluetoothd:
os.system("/etc/init.d/bluetooth start > /dev/null 2>&1")
except Quit as e:
errorcode = e.errorcode
except KeyboardInterrupt:
print("CTRL+C detected. Exiting.")
exit(errorcode)
| 19,430 | Python | 37.25 | 117 | 0.516264 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/doc/bluetooth_devices.md |
## Bluetooth Device Compatibility
This driver works with most Bluetooth 2.x adapters and should also work with Bluetooth 3.0 and 4.0 adapters.
Please report other compatible and incompatible Bluetooth adapters through a pull request to this page.
### Adapters that are known to work
* Cambridge Silicon Radio, Ltd Bluetooth Dongle (Bluetooth 4.0)
### Adapters that are known not to work
* Linksys USBBT100 version 2 (Bluetooth 1.1)
* USB device 0a12:0x0001
| 471 | Markdown | 30.466665 | 116 | 0.781316 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/doc/testing.md | # Testing procedures for the ps3joy package #
## Pairing the ps3 controller via bluetooth ##
If the controller is not paired to the bluetooth dongle, connect
the controller via USB.
Enter the following commands:
```
sudo bash
rosrun ps3joy sixpair
```
The current bluetooth master address and the master address being set should be the same:
```
Current Bluetooth master: 00:15:83:ed:3f:21
Setting master bd_addr to 00:15:83:ed:3f:21
```
## Running the ps3joy nodes ##
The package consists of the following nodes:
* ps3joy.py
* ps3joy_node.py
Running ps3joy_node.py will **require** root privileges if the user does not have
permission to access the hardware.
Enter the following commands in the terminal:
```
sudo bash
rosrun ps3joy ps3joy_node.py
```
The following message should be displayed afterwards:
```
Waiting for connection. Disconnect your PS3 joystick from USB and press the pairing button.
```
If your joystick does not pair, open a new terminal and restart bluez by
entering the following command:
```
sudo systemctl restart bluetooth
```
The terminal where ps3joy_node.py was launched should display the following message:
```
Connection activated
```
## ps3joy Diagnostics test ##
Open a new terminal and enter the following command:
```
rostopic list
```
You should see the following:
```
/diagnostics
/joy/set_feedback
/rosout
/rosout_agg
```
Enter the following command to diagnose the current state of the joystick
```
rostopic echo /diagnostics
```
You will see the charging state, battery percentage, and connection type in your terminal:
```
header:
seq: 1
stamp:
secs: 1498667204
nsecs: 603754043
frame_id: ''
status:
-
level: 0
name: Battery
message: 100% Charge
hardware_id: ''
values: []
-
level: 0
name: Connection
message: Bluetooth Connection
hardware_id: ''
values: []
-
level: 0
name: Charging State
message: Not Charging
hardware_id: ''
values: []
```
If you plug in the USB cable, both the connection type and charging state will change:
```
header:
seq: 8
stamp:
secs: 1498667507
nsecs: 798973083
frame_id: ''
status:
-
level: 0
name: Battery
message: Charging
hardware_id: ''
values: []
-
level: 0
name: Connection
message: USB Connection
hardware_id: ''
values: []
-
level: 0
name: Charging State
message: Charging
hardware_id: ''
values: []
```
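The same fields can also be read programmatically. The snippet below is a minimal
sketch of a listener (assuming a sourced ROS environment with `rospy` and the
`diagnostic_msgs` package available; the node name is arbitrary):
```
#!/usr/bin/env python
import rospy
from diagnostic_msgs.msg import DiagnosticArray
def callback(msg):
    # Each status entry carries a name ("Battery", "Connection", "Charging State")
    # and a human-readable message, matching the rostopic output above.
    for status in msg.status:
        rospy.loginfo("%s: %s", status.name, status.message)
if __name__ == "__main__":
    rospy.init_node("ps3joy_diag_listener")
    rospy.Subscriber("/diagnostics", DiagnosticArray, callback)
    rospy.spin()
```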
## Confirming the ps3 joystick input ##
Check to see if your joystick is recognized by your computer.
```
ls /dev/input/
```
Tell the joy node which device is the ps3 joystick:
```
rosparam set joy_node/dev "/dev/input/jsX"
```
X is the number your device was given.
You can now start the joy node:
```
rosrun joy joy_node
```
You should see something that looks like this:
```
[ INFO] [1498673536.447541090]: Opened joystick: /dev/input/js0. deadzone_: 0.050000.
```
Open a new terminal and use rostopic to observe the data from the controller.
```
rostopic echo joy
```
You should see the input data displayed on your terminal.
## Recharging the PS3 joystick
1. You need an available USB port on a computer, and the computer must be on while
the joystick is charging.
2. Connect the PS3 joystick to a computer using an A to mini-B USB cable.
3. The LEDs on the joystick should blink at about 1 Hz to indicate that the joystick is charging.
## Shutting down the ps3 joystick
There are two ways to turn off the ps3 joystick:
1. Press and hold the pairing button on the joystick for approximately 10 seconds
2. You can also shut the controller down by killing the ps3joy_node.py process:
   press Ctrl-C on your keyboard to kill the process and the joystick will turn off
   as well.
| 3,758 | Markdown | 21.644578 | 95 | 0.714742 |
leggedrobotics/viplanner/ros/joystick_drivers/ps3joy/doc/troubleshooting.md | ## Troubleshooting ##
-------------------------
#### Issues pairing the PS3 joystick ####
When pairing your joystick via bluetooth, you may receive the following message on
your terminal:
```
Current Bluetooth master: 00:15:83:ed:3f:21
Unable to retrieve local bd_addr from `hcitool dev`.
Please enable Bluetooth or specify an address manually.
```
This would indicate that your bluetooth is disabled. To enable your bluetooth, try the
following:
1. Check the status of your bluetooth by entering the following:
```
sudo systemctl status bluetooth
```
You may see something like this:
```
● bluetooth.service - Bluetooth service
Loaded: loaded (/lib/systemd/system/bluetooth.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:bluetoothd(8)
```
If you do, that means your bluetooth service is disabled. To enable it, enter:
```
sudo systemctl start bluetooth
sudo systemctl status bluetooth
```
After running these commands your bluetooth service should be up and running:
```
● bluetooth.service - Bluetooth service
Loaded: loaded (/lib/systemd/system/bluetooth.service; disabled; vendor preset: enabled)
Active: active (running) since Thu 2017-06-29 16:21:43 EDT; 16s ago
Docs: man:bluetoothd(8)
Main PID: 27362 (bluetoothd)
Status: "Running"
CGroup: /system.slice/bluetooth.service
└─27362 /usr/local/libexec/bluetooth/bluetoothd
```
Once the bluetooth service is running, retry the pairing commands (`sudo bash`, then `rosrun ps3joy sixpair`).
2. Run the following command:
```
hciconfig hci0 reset
```
followed by:
```
sudo bash
rosrun ps3joy sixpair
```
| 1,598 | Markdown | 28.072727 | 91 | 0.734043 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/CHANGELOG.rst | ^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package joy
^^^^^^^^^^^^^^^^^^^^^^^^^
1.15.1 (2021-06-07)
-------------------
1.15.0 (2020-10-12)
-------------------
* Added autodetection for force-feedback devices. (`#169 <https://github.com/ros-drivers/joystick_drivers/issues/169>`_)
* Added autodetection for force-feedback devices.
* RAII for closedir
* joy: Little fixes for force feedback. (`#167 <https://github.com/ros-drivers/joystick_drivers/issues/167>`_)
This commit increases the maximum magnitude of the FF effects to double the previous maximum.
* Print out joystick name on initialization. (`#168 <https://github.com/ros-drivers/joystick_drivers/issues/168>`_)
This helps figuring out what string to give to the `dev_name` parameter.
* Contributors: Martin Pecka
1.14.0 (2020-07-07)
-------------------
* frame_id in the header of the joystick msg (`#166 <https://github.com/ros-drivers/joystick_drivers/issues/166>`_)
* roslint and Generic Clean-Up (`#161 <https://github.com/ros-drivers/joystick_drivers/issues/161>`_)
* Merge pull request `#158 <https://github.com/ros-drivers/joystick_drivers/issues/158>`_ from clalancette/ros1-cleanups
ROS1 joy cleanups
* Greatly simplify the sticky_buttons support.
* Small fixes to rumble support.
* Use C++ style casts.
* Use empty instead of length.
* joy_def_ff -> joy_dev_ff
* Cleanup header includes.
* Use size_t appropriately.
* NULL -> nullptr everywhere.
* Style cleanup in joy_node.cpp.
* Merge pull request `#154 <https://github.com/ros-drivers/joystick_drivers/issues/154>`_ from zchen24/master
Minor: moved default to right indent level
* Contributors: Chris Lalancette, Joshua Whitley, Mamoun Gharbi, Zihan Chen
1.13.0 (2019-06-24)
-------------------
* Merge pull request `#120 <https://github.com/ros-drivers/joystick_drivers/issues/120>`_ from furushchev/remap
add joy_remap and its sample
* Merge pull request `#128 <https://github.com/ros-drivers/joystick_drivers/issues/128>`_ from ros-drivers/fix/tab_errors
Cleaning up Python indentation.
* Merge pull request `#111 <https://github.com/ros-drivers/joystick_drivers/issues/111>`_ from matt-attack/indigo-devel
Add Basic Force Feedback Support
* Merge pull request `#126 <https://github.com/ros-drivers/joystick_drivers/issues/126>`_ from clalancette/minor-formatting
* Put brackets around ROS\_* macros.
In some circumstances they may be defined to empty, so we need
to have brackets to ensure that they are syntactically valid.
Signed-off-by: Chris Lalancette <[email protected]>
* Merge pull request `#122 <https://github.com/ros-drivers/joystick_drivers/issues/122>`_ from lbucklandAS/fix-publish-timestamp
Add timestamp to all joy messages
* Change error messages and set ps3 as default controller
* Better handle device loss
Allow for loss and redetection of device with force feedback
* Add basic force feedback over usb
Addresses `#89 <https://github.com/ros-drivers/joystick_drivers/issues/89>`_
* Contributors: Chris Lalancette, Furushchev, Joshua Whitley, Lucas Buckland, Matthew, Matthew Bries
1.12.0 (2018-06-11)
-------------------
* Update timestamp when using autorepeat_rate
* Added dev_name parameter to select joystick by name
* Added Readme for joy package with description of new device name parameter
* Fixed numerous outstanding PRs.
* Added sticky buttons
* Changed package xml to format 2
* Fixed issue when the joystick data did not got send until changed.
* Changed messaging to better reflect what the script is doing
* Contributors: Dino Hüllmann, Jonathan Bohren, Joshua Whitley, Miklos Marton, Naoki Mizuno, jprod123, psimona
1.11.0 (2017-02-10)
-------------------
* fixed joy/Cmakelists for osx
* Update dependencies to remove warnings
* Contributors: Marynel Vazquez, Mark D Horn
1.10.1 (2015-05-24)
-------------------
* Remove stray architechture_independent flags
* Contributors: Jonathan Bohren, Scott K Logan
1.10.0 (2014-06-26)
-------------------
* First indigo release
| 3,981 | reStructuredText | 45.302325 | 128 | 0.72419 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/package.xml | <package format="2">
<name>joy</name>
<version>1.15.1</version>
<license>BSD</license>
<description>
ROS driver for a generic Linux joystick.
The joy package contains joy_node, a node that interfaces a
generic Linux joystick to ROS. This node publishes a "Joy"
message, which contains the current state of each one of the
joystick's buttons and axes.
</description>
<maintainer email="[email protected]">Jonathan Bohren</maintainer>
<author>Morgan Quigley</author>
<author>Brian Gerkey</author>
<author>Kevin Watts</author>
<author>Blaise Gassend</author>
<url type="website">http://www.ros.org/wiki/joy</url>
<url type="development">https://github.com/ros-drivers/joystick_drivers</url>
<url type="bugtracker">https://github.com/ros-drivers/joystick_drivers/issues</url>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roslint</build_depend>
<depend>diagnostic_updater</depend>
<depend>joystick</depend>
<depend>roscpp</depend>
<depend>sensor_msgs</depend>
<test_depend>rosbag</test_depend>
<export>
<rosbag migration_rule_file="migration_rules/Joy.bmr"/>
</export>
</package>
| 1,157 | XML | 29.473683 | 85 | 0.715644 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/README.md | # ROS Driver for Generic Linux Joysticks
The joy package contains joy_node, a node that interfaces a generic Linux joystick to ROS. This node publishes a "Joy" message, which contains the current state of each one of the joystick's buttons and axes.
## Supported Hardware
This node should work with any joystick that is supported by Linux.
## Published Topics
* joy ([sensor_msgs/Joy](http://docs.ros.org/api/sensor_msgs/html/msg/Joy.html)): outputs the joystick state.
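For reference, a minimal subscriber to this topic could look like the sketch below (assuming a sourced ROS environment with `rospy` installed; the node and callback names are arbitrary):
```
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Joy
def callback(msg):
    # axes are floats normalized to [-1, 1], buttons are 0/1 integers
    rospy.loginfo("axes: %s buttons: %s", msg.axes, msg.buttons)
if __name__ == "__main__":
    rospy.init_node("joy_listener")
    rospy.Subscriber("joy", Joy, callback, queue_size=10)
    rospy.spin()
```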
## Device Selection
There are two parameters controlling which device should be used:
* ~dev (string, default: "/dev/input/js0")
* ~dev_name (string, default: "" (empty = disabled))
If ~dev_name is empty, ~dev defines the Linux joystick device from which to read joystick events.
If ~dev_name is defined, the node enumerates all available joysticks, i.e. /dev/input/js*. The first joystick matching ~dev_name is opened. If no joystick matches the desired device name, the device specified by ~dev is used as a fallback.
To get a list of the names of all connected joysticks, an invalid ~dev_name can be specified. For example:
`rosrun joy joy_node _dev_name:="*"`
The output might look like this:
```
[ INFO]: Found joystick: ST LIS3LV02DL Accelerometer (/dev/input/js1).
[ INFO]: Found joystick: Microsoft X-Box 360 pad (/dev/input/js0).
[ERROR]: Couldn't find a joystick with name *. Falling back to default device.
```
Then the node can be started with the device name given in the list. For example:
`rosrun joy joy_node _dev_name:="Microsoft X-Box 360 pad"`
## Advanced Parameters
* ~deadzone (double, default: 0.05)
* Amount by which the joystick has to move before it is considered to be off-center. This parameter is specified relative to an axis normalized between -1 and 1. Thus, 0.1 means that the joystick has to move 10% of the way to the edge of an axis's range before that axis will output a non-zero value. Linux does its own deadzone processing, so in many cases this value can be set to zero.
* ~autorepeat_rate (double, default: 0.0 (disabled))
* Rate in Hz at which a joystick that has a non-changing state will resend the previously sent message.
* ~coalesce_interval (double, default: 0.001)
* Axis events that are received within coalesce_interval (seconds) of each other are sent out in a single ROS message. Since the kernel sends each axis motion as a separate event, coalescing greatly reduces the rate at which messages are sent. This option can also be used to limit the rate of outgoing messages. Button events are always sent out immediately to avoid missing button presses.
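To illustrate the deadzone semantics, the sketch below mirrors the arithmetic used in `joy_node.cpp` for converting a raw kernel axis value (in [-32767, 32767]) into the published value; Python is used here only for illustration:
```
def normalize_axis(raw, deadzone=0.05):
    # Values inside the deadzone collapse to 0; the rest is rescaled so the
    # output still spans [-1, 1]. The negative scale mirrors the sign flip
    # applied by joy_node.
    unscaled_deadzone = 32767.0 * deadzone
    scale = -1.0 / (1.0 - deadzone) / 32767.0
    if raw > unscaled_deadzone:
        raw -= unscaled_deadzone
    elif raw < -unscaled_deadzone:
        raw += unscaled_deadzone
    else:
        raw = 0.0
    return raw * scale
print(normalize_axis(32767))  # -> -1.0
print(normalize_axis(1000))   # inside the deadzone -> 0.0
```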
## Further Information
For further information have a look at the [Wiki page](http://wiki.ros.org/joy).
| 2,708 | Markdown | 49.166666 | 393 | 0.755908 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/test/test_joy_msg_migration.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import roslib
roslib.load_manifest("joy")
import os
import re
import struct
import sys
import unittest
import rosbag
import rosbagmigration
import rospy
import rostest
from io import BytesIO
migrator = rosbagmigration.MessageMigrator()
def repack(x):
return struct.unpack("<f", struct.pack("<f", x))[0]
class TestJoyMsgsMigration(unittest.TestCase):
# (*) Joy.saved
########### Joy ###############
def get_old_joy(self):
joy_classes = self.load_saved_classes("Joy.saved")
joy = joy_classes["joy/Joy"]
return joy([0.1, 0.2, 0.3, 0.4, 0.5], [0, 1, 0, 1, 0])
def get_new_joy(self):
from roslib.msg import Header
from sensor_msgs.msg import Joy
return Joy(Header(), [0.1, 0.2, 0.3, 0.4, 0.5], [0, 1, 0, 1, 0])
def test_joy(self):
self.do_test("joy", self.get_old_joy, self.get_new_joy)
########### Helper functions ###########
def setUp(self):
self.pkg_dir = roslib.packages.get_pkg_dir("joy")
def load_saved_classes(self, saved_msg):
f = open(f"{self.pkg_dir}/test/saved/{saved_msg}")
type_line = f.readline()
pat = re.compile(r"\[(.*)]:")
type_match = pat.match(type_line)
self.assertTrue(
type_match is not None,
("Full definition file malformed. First line should be:" " '[my_package/my_msg]:'"),
)
saved_type = type_match.groups()[0]
saved_full_text = f.read()
saved_classes = roslib.genpy.generate_dynamic(saved_type, saved_full_text)
self.assertTrue(
saved_classes is not None,
"Could not generate class from full definition file.",
)
self.assertTrue(
            saved_type in saved_classes,
"Could not generate class from full definition file.",
)
return saved_classes
def do_test(self, name, old_msg, new_msg):
# Name the bags
oldbag = f"{self.pkg_dir}/test/{name}_old.bag"
newbag = f"{self.pkg_dir}/test/{name}_new.bag"
# Create an old message
bag = rosbag.Bag(oldbag, "w")
bag.write("topic", old_msg(), roslib.rostime.Time())
bag.close()
# Check and migrate
res = rosbagmigration.checkbag(migrator, oldbag)
self.assertTrue(
not False in [m[1] == [] for m in res],
"Bag not ready to be migrated",
)
res = rosbagmigration.fixbag(migrator, oldbag, newbag)
self.assertTrue(res, "Bag not converted successfully")
# Pull the first message out of the bag
        topic, msg, t = next(rosbag.Bag(newbag).read_messages())
# Reserialize the new message so that floats get screwed up, etc.
m = new_msg()
        buff = BytesIO()
m.serialize(buff)
m.deserialize(buff.getvalue())
# Strifying them helps make the comparison easier until I figure out why the equality operator is failing
self.assertTrue(roslib.message.strify_message(msg) == roslib.message.strify_message(m))
# self.assertTrue(msgs[0][1] == m)
# Cleanup
os.remove(oldbag)
os.remove(newbag)
if __name__ == "__main__":
rostest.unitrun(
"test_joy_msg",
"test_joy_msg_migration",
TestJoyMsgsMigration,
sys.argv,
)
| 4,982 | Python | 31.357143 | 113 | 0.645323 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/src/joy_node.cpp | /*
* joy_node
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <ORGANIZATION> nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
// \author: Blaise Gassend
#include <memory>
#include <string>
#include <dirent.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/input.h>
#include <linux/joystick.h>
#include <math.h>
#include <sys/stat.h>
#include <unistd.h>
#include <diagnostic_updater/diagnostic_updater.h>
#include <ros/ros.h>
#include <sensor_msgs/Joy.h>
#include <sensor_msgs/JoyFeedbackArray.h>
int closedir_cb(DIR *dir)
{
if (dir)
return closedir(dir);
return 0;
}
/// \brief Opens, reads from and publishes joystick events
class Joystick
{
private:
ros::NodeHandle nh_;
bool open_;
bool sticky_buttons_;
bool default_trig_val_;
std::string joy_dev_;
std::string joy_dev_name_;
std::string joy_dev_ff_;
double deadzone_;
double autorepeat_rate_; // in Hz. 0 for no repeat.
double coalesce_interval_; // Defaults to 100 Hz rate limit.
int event_count_;
int pub_count_;
ros::Publisher pub_;
double lastDiagTime_;
int ff_fd_;
struct ff_effect joy_effect_;
bool update_feedback_;
diagnostic_updater::Updater diagnostic_;
typedef std::unique_ptr<DIR, decltype(&closedir)> dir_ptr;
/// \brief Publishes diagnostics and status
void diagnostics(diagnostic_updater::DiagnosticStatusWrapper& stat)
{
double now = ros::Time::now().toSec();
double interval = now - lastDiagTime_;
if (open_)
{
stat.summary(0, "OK");
}
else
{
stat.summary(2, "Joystick not open.");
}
stat.add("topic", pub_.getTopic());
stat.add("device", joy_dev_);
stat.add("device name", joy_dev_name_);
stat.add("dead zone", deadzone_);
stat.add("autorepeat rate (Hz)", autorepeat_rate_);
stat.add("coalesce interval (s)", coalesce_interval_);
stat.add("recent joystick event rate (Hz)", event_count_ / interval);
stat.add("recent publication rate (Hz)", pub_count_ / interval);
stat.add("subscribers", pub_.getNumSubscribers());
stat.add("default trig val", default_trig_val_);
stat.add("sticky buttons", sticky_buttons_);
event_count_ = 0;
pub_count_ = 0;
lastDiagTime_ = now;
}
/*! \brief Returns the device path of the first joystick that matches joy_name.
* If no match is found, an empty string is returned.
*/
std::string get_dev_by_joy_name(const std::string& joy_name)
{
const char path[] = "/dev/input"; // no trailing / here
struct dirent *entry;
struct stat stat_buf;
dir_ptr dev_dir(opendir(path), &closedir_cb);
if (dev_dir == nullptr)
{
ROS_ERROR("Couldn't open %s. Error %i: %s.", path, errno, strerror(errno));
return "";
}
while ((entry = readdir(dev_dir.get())) != nullptr)
{
// filter entries
if (strncmp(entry->d_name, "js", 2) != 0) // skip device if it's not a joystick
{
continue;
}
std::string current_path = std::string(path) + "/" + entry->d_name;
if (stat(current_path.c_str(), &stat_buf) == -1)
{
continue;
}
if (!S_ISCHR(stat_buf.st_mode)) // input devices are character devices, skip other
{
continue;
}
// get joystick name
int joy_fd = open(current_path.c_str(), O_RDONLY);
if (joy_fd == -1)
{
continue;
}
char current_joy_name[128];
if (ioctl(joy_fd, JSIOCGNAME(sizeof(current_joy_name)), current_joy_name) < 0)
{
strncpy(current_joy_name, "Unknown", sizeof(current_joy_name));
}
close(joy_fd);
ROS_INFO("Found joystick: %s (%s).", current_joy_name, current_path.c_str());
if (strcmp(current_joy_name, joy_name.c_str()) == 0)
{
return current_path;
}
}
return "";
}
/*! \brief Autodetection of the force feedback device. If autodetection fails,
* returns empty string.
* \param joy_dev A nonempty path to the joy device we search force feedback for.
*/
std::string get_ff_dev(const std::string& joy_dev)
{
const char path[] = "/dev/input/by-id"; // no trailing / here
struct dirent *entry;
// the selected joy can be a symlink, but we want the canonical /dev/input/jsX
char realpath_buf[PATH_MAX];
char *res = realpath(joy_dev.c_str(), realpath_buf);
if (res == nullptr)
{
return "";
}
dir_ptr dev_dir(opendir(path), &closedir_cb);
if (dev_dir == nullptr)
{
ROS_ERROR("Couldn't open %s. Error %i: %s.", path, errno, strerror(errno));
return "";
}
const std::string joy_dev_real(realpath_buf);
std::string joy_dev_id;
// first, find the device in /dev/input/by-id that corresponds to the selected joy,
// i.e. its realpath is the same as the selected joy's one
while ((entry = readdir(dev_dir.get())) != nullptr)
{
res = strstr(entry->d_name, "-joystick");
// filter entries
if (res == nullptr) // skip device if it's not a joystick
{
continue;
}
const auto current_path = std::string(path) + "/" + entry->d_name;
res = realpath(current_path.c_str(), realpath_buf);
if (res == nullptr)
{
continue;
}
const std::string dev_real(realpath_buf);
if (dev_real == joy_dev_real)
{
// we found the ID device which maps to the selected joy
joy_dev_id = current_path;
break;
}
}
// if no corresponding ID device was found, the autodetection won't work
if (joy_dev_id.empty())
{
return "";
}
const auto joy_dev_id_prefix = joy_dev_id.substr(0, joy_dev_id.length() - strlen("-joystick"));
std::string event_dev;
// iterate through the by-id dir once more, this time finding the -event-joystick file with the
// same prefix as the ID device we've already found
dev_dir = dir_ptr(opendir(path), &closedir_cb);
while ((entry = readdir(dev_dir.get())) != nullptr)
{
res = strstr(entry->d_name, "-event-joystick");
if (res == nullptr) // skip device if it's not an event joystick
{
continue;
}
const auto current_path = std::string(path) + "/" + entry->d_name;
if (current_path.find(joy_dev_id_prefix) != std::string::npos)
{
ROS_INFO("Found force feedback event device %s", current_path.c_str());
event_dev = current_path;
break;
}
}
return event_dev;
}
public:
Joystick() : nh_(), diagnostic_(), ff_fd_(-1)
{}
void set_feedback(const sensor_msgs::JoyFeedbackArray::ConstPtr& msg)
{
if (ff_fd_ == -1)
{
return; // we aren't ready yet
}
size_t size = msg->array.size();
for (size_t i = 0; i < size; i++)
{
// process each feedback
if (msg->array[i].type == 1 && ff_fd_ != -1) // TYPE_RUMBLE
{
// if id is zero, that's low freq, 1 is high
joy_effect_.direction = 0; // down
joy_effect_.type = FF_RUMBLE;
if (msg->array[i].id == 0)
{
joy_effect_.u.rumble.strong_magnitude = (static_cast<float>(0xFFFFU))*msg->array[i].intensity;
}
else
{
joy_effect_.u.rumble.weak_magnitude = (static_cast<float>(0xFFFFU))*msg->array[i].intensity;
}
joy_effect_.replay.length = 1000;
joy_effect_.replay.delay = 0;
update_feedback_ = true;
}
}
}
/// \brief Opens joystick port, reads from port and publishes while node is active
int main(int argc, char **argv)
{
diagnostic_.add("Joystick Driver Status", this, &Joystick::diagnostics);
diagnostic_.setHardwareID("none");
// Parameters
ros::NodeHandle nh_param("~");
pub_ = nh_.advertise<sensor_msgs::Joy>("joy", 1);
ros::Subscriber sub = nh_.subscribe("joy/set_feedback", 10, &Joystick::set_feedback, this);
nh_param.param<std::string>("dev", joy_dev_, "/dev/input/js0");
nh_param.param<std::string>("dev_ff", joy_dev_ff_, "/dev/input/event0");
nh_param.param<std::string>("dev_name", joy_dev_name_, "");
nh_param.param<double>("deadzone", deadzone_, 0.05);
nh_param.param<double>("autorepeat_rate", autorepeat_rate_, 0);
nh_param.param<double>("coalesce_interval", coalesce_interval_, 0.001);
nh_param.param<bool>("default_trig_val", default_trig_val_, false);
nh_param.param<bool>("sticky_buttons", sticky_buttons_, false);
// Checks on parameters
if (!joy_dev_name_.empty())
{
std::string joy_dev_path = get_dev_by_joy_name(joy_dev_name_);
if (joy_dev_path.empty())
{
ROS_ERROR("Couldn't find a joystick with name %s. Falling back to default device.", joy_dev_name_.c_str());
}
else
{
ROS_INFO("Using %s as joystick device.", joy_dev_path.c_str());
joy_dev_ = joy_dev_path;
}
}
if (autorepeat_rate_ > 1 / coalesce_interval_)
{
ROS_WARN("joy_node: autorepeat_rate (%f Hz) > 1/coalesce_interval (%f Hz) "
"does not make sense. Timing behavior is not well defined.", autorepeat_rate_, 1/coalesce_interval_);
}
if (deadzone_ >= 1)
{
ROS_WARN("joy_node: deadzone greater than 1 was requested. The semantics of deadzone have changed. "
"It is now related to the range [-1:1] instead of [-32767:32767]. For now I am dividing your deadzone "
"by 32767, but this behavior is deprecated so you need to update your launch file.");
deadzone_ /= 32767;
}
if (deadzone_ > 0.9)
{
ROS_WARN("joy_node: deadzone (%f) greater than 0.9, setting it to 0.9", deadzone_);
deadzone_ = 0.9;
}
if (deadzone_ < 0)
{
ROS_WARN("joy_node: deadzone_ (%f) less than 0, setting to 0.", deadzone_);
deadzone_ = 0;
}
if (autorepeat_rate_ < 0)
{
ROS_WARN("joy_node: autorepeat_rate (%f) less than 0, setting to 0.", autorepeat_rate_);
autorepeat_rate_ = 0;
}
if (coalesce_interval_ < 0)
{
ROS_WARN("joy_node: coalesce_interval (%f) less than 0, setting to 0.", coalesce_interval_);
coalesce_interval_ = 0;
}
// Parameter conversions
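    // Map raw kernel axis values in [-32767, 32767] to [-1, 1]: values inside the
    // deadzone collapse to zero, the remainder is rescaled, and the negative scale
    // flips the reported axis direction.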
double autorepeat_interval = 1 / autorepeat_rate_;
double scale = -1. / (1. - deadzone_) / 32767.;
double unscaled_deadzone = 32767. * deadzone_;
js_event event;
struct timeval tv;
fd_set set;
int joy_fd;
event_count_ = 0;
pub_count_ = 0;
lastDiagTime_ = ros::Time::now().toSec();
// Big while loop opens, publishes
while (nh_.ok())
{
open_ = false;
diagnostic_.force_update();
bool first_fault = true;
while (true)
{
ros::spinOnce();
if (!nh_.ok())
{
goto cleanup;
}
joy_fd = open(joy_dev_.c_str(), O_RDONLY);
if (joy_fd != -1)
{
// There seems to be a bug in the driver or something where the
// initial events that are to define the initial state of the
// joystick are not the values of the joystick when it was opened
// but rather the values of the joystick when it was last closed.
// Opening then closing and opening again is a hack to get more
// accurate initial state data.
close(joy_fd);
joy_fd = open(joy_dev_.c_str(), O_RDONLY);
}
if (joy_fd != -1)
{
break;
}
if (first_fault)
{
ROS_ERROR("Couldn't open joystick %s. Will retry every second.", joy_dev_.c_str());
first_fault = false;
}
sleep(1.0);
diagnostic_.update();
}
auto dev_ff = joy_dev_ff_;
if (joy_dev_ff_.empty())
{
dev_ff = get_ff_dev(joy_dev_);
}
if (!dev_ff.empty())
{
ff_fd_ = open(dev_ff.c_str(), O_RDWR);
/* Set the gain of the device*/
int gain = 100; /* between 0 and 100 */
struct input_event ie; /* structure used to communicate with the driver */
ie.type = EV_FF;
ie.code = FF_GAIN;
ie.value = 0xFFFFUL * gain / 100;
if (write(ff_fd_, &ie, sizeof(ie)) == -1)
{
ROS_WARN("Couldn't set gain on joystick force feedback: %s", strerror(errno));
}
memset(&joy_effect_, 0, sizeof(joy_effect_));
joy_effect_.id = -1;
joy_effect_.direction = 0; // down
joy_effect_.type = FF_RUMBLE;
joy_effect_.u.rumble.strong_magnitude = 0;
joy_effect_.u.rumble.weak_magnitude = 0;
joy_effect_.replay.length = 1000;
joy_effect_.replay.delay = 0;
// upload the effect
int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_);
}
char current_joy_name[128];
if (ioctl(joy_fd, JSIOCGNAME(sizeof(current_joy_name)), current_joy_name) < 0)
{
strncpy(current_joy_name, "Unknown", sizeof(current_joy_name));
}
ROS_INFO("Opened joystick: %s (%s). deadzone_: %f.", joy_dev_.c_str(), current_joy_name, deadzone_);
open_ = true;
diagnostic_.force_update();
bool tv_set = false;
bool publication_pending = false;
tv.tv_sec = 1;
tv.tv_usec = 0;
sensor_msgs::Joy joy_msg; // Here because we want to reset it on device close.
double val; // Temporary variable to hold event values
while (nh_.ok())
{
ros::spinOnce();
bool publish_now = false;
bool publish_soon = false;
FD_ZERO(&set);
FD_SET(joy_fd, &set);
int select_out = select(joy_fd+1, &set, nullptr, nullptr, &tv);
if (select_out == -1)
{
tv.tv_sec = 0;
tv.tv_usec = 0;
continue;
}
// play the rumble effect (can probably do this at lower rate later)
if (ff_fd_ != -1)
{
struct input_event start;
start.type = EV_FF;
start.code = joy_effect_.id;
start.value = 1;
if (write(ff_fd_, (const void*) &start, sizeof(start)) == -1)
{
break; // fd closed
}
// upload the effect
if (update_feedback_ == true)
{
int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_);
update_feedback_ = false;
}
}
if (FD_ISSET(joy_fd, &set))
{
if (read(joy_fd, &event, sizeof(js_event)) == -1 && errno != EAGAIN)
{
break; // Joystick is probably closed. Definitely occurs.
}
joy_msg.header.stamp = ros::Time().now();
event_count_++;
switch (event.type)
{
case JS_EVENT_BUTTON:
case JS_EVENT_BUTTON | JS_EVENT_INIT:
if (event.number >= joy_msg.buttons.size())
{
size_t old_size = joy_msg.buttons.size();
joy_msg.buttons.resize(event.number+1);
for (size_t i = old_size; i < joy_msg.buttons.size(); i++)
{
joy_msg.buttons[i] = 0.0;
}
}
if (sticky_buttons_)
{
if (event.value == 1)
{
joy_msg.buttons[event.number] = 1 - joy_msg.buttons[event.number];
}
}
else
{
joy_msg.buttons[event.number] = (event.value ? 1 : 0);
}
// For initial events, wait a bit before sending to try to catch
// all the initial events.
if (!(event.type & JS_EVENT_INIT))
{
publish_now = true;
}
else
{
publish_soon = true;
}
break;
case JS_EVENT_AXIS:
case JS_EVENT_AXIS | JS_EVENT_INIT:
val = event.value;
if (event.number >= joy_msg.axes.size())
{
size_t old_size = joy_msg.axes.size();
joy_msg.axes.resize(event.number+1);
for (size_t i = old_size; i < joy_msg.axes.size(); i++)
{
joy_msg.axes[i] = 0.0;
}
}
if (default_trig_val_)
{
// Allows deadzone to be "smooth"
if (val > unscaled_deadzone)
{
val -= unscaled_deadzone;
}
else if (val < -unscaled_deadzone)
{
val += unscaled_deadzone;
}
else
{
val = 0;
}
joy_msg.axes[event.number] = val * scale;
// Will wait a bit before sending to try to combine events.
publish_soon = true;
break;
}
else
{
if (!(event.type & JS_EVENT_INIT))
{
val = event.value;
if (val > unscaled_deadzone)
{
val -= unscaled_deadzone;
}
else if (val < -unscaled_deadzone)
{
val += unscaled_deadzone;
}
else
{
val = 0;
}
joy_msg.axes[event.number] = val * scale;
}
publish_soon = true;
break;
}
default:
ROS_WARN("joy_node: Unknown event type. Please file a ticket. "
"time=%u, value=%d, type=%Xh, number=%d", event.time, event.value, event.type, event.number);
break;
}
}
else if (tv_set) // Assume that the timer has expired.
{
joy_msg.header.stamp = ros::Time().now();
publish_now = true;
}
if (publish_now)
{
// Assume that all the JS_EVENT_INIT messages have arrived already.
// This should be the case as the kernel sends them along as soon as
// the device opens.
joy_msg.header.stamp = ros::Time().now();
joy_msg.header.frame_id = joy_dev_.c_str();
pub_.publish(joy_msg);
publish_now = false;
tv_set = false;
publication_pending = false;
publish_soon = false;
pub_count_++;
}
// If an axis event occurred, start a timer to combine with other
// events.
if (!publication_pending && publish_soon)
{
tv.tv_sec = trunc(coalesce_interval_);
tv.tv_usec = (coalesce_interval_ - tv.tv_sec) * 1e6;
publication_pending = true;
tv_set = true;
}
// If nothing is going on, start a timer to do autorepeat.
if (!tv_set && autorepeat_rate_ > 0)
{
tv.tv_sec = trunc(autorepeat_interval);
tv.tv_usec = (autorepeat_interval - tv.tv_sec) * 1e6;
tv_set = true;
}
if (!tv_set)
{
tv.tv_sec = 1;
tv.tv_usec = 0;
}
diagnostic_.update();
} // End of joystick open loop.
close(ff_fd_);
close(joy_fd);
ros::spinOnce();
if (nh_.ok())
{
ROS_ERROR("Connection to joystick device lost unexpectedly. Will reopen.");
}
}
cleanup:
ROS_INFO("joy_node shut down.");
return 0;
}
};
int main(int argc, char **argv)
{
ros::init(argc, argv, "joy_node");
Joystick j;
return j.main(argc, argv);
}
| 21,009 | C++ | 29.582242 | 117 | 0.550336 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/scripts/joy_remap.py | #!/usr/bin/env python
# Author: furushchev <[email protected]>
import ast
import operator as op
import traceback
import rospy
from sensor_msgs.msg import Joy
class RestrictedEvaluator:
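    # Safely evaluates mapping expressions from the config (e.g. "axes[0] * 2.0")
    # by walking the parsed AST and only allowing whitelisted operators, the
    # abs/max/min helpers, and the provided 'axes'/'buttons' variables.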
def __init__(self):
self.operators = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.BitXor: op.xor,
ast.USub: op.neg,
}
self.functions = {
"abs": lambda x: abs(x),
"max": lambda *x: max(*x),
"min": lambda *x: min(*x),
}
def _reval_impl(self, node, variables):
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.BinOp):
op = self.operators[type(node.op)]
return op(
self._reval_impl(node.left, variables),
self._reval_impl(node.right, variables),
)
elif isinstance(node, ast.UnaryOp):
op = self.operators[type(node.op)]
return op(self._reval_impl(node.operand, variables))
elif isinstance(node, ast.Call) and node.func.id in self.functions:
func = self.functions[node.func.id]
args = [self._reval_impl(n, variables) for n in node.args]
return func(*args)
elif isinstance(node, ast.Name) and node.id in variables:
return variables[node.id]
elif isinstance(node, ast.Subscript) and node.value.id in variables:
var = variables[node.value.id]
idx = node.slice.value.n
try:
return var[idx]
except IndexError:
raise IndexError("Variable '%s' out of range: %d >= %d" % (node.value.id, idx, len(var)))
else:
raise TypeError("Unsupported operation: %s" % node)
def reval(self, expr, variables):
expr = str(expr)
if len(expr) > 1000:
raise ValueError("The length of an expression must not be more than 1000" " characters")
try:
return self._reval_impl(ast.parse(expr, mode="eval").body, variables)
except Exception as e:
rospy.logerr(traceback.format_exc())
raise e
class JoyRemap:
def __init__(self):
self.evaluator = RestrictedEvaluator()
self.mappings = self.load_mappings("~mappings")
self.warn_remap("joy_out")
self.pub = rospy.Publisher("joy_out", Joy, queue_size=1)
self.warn_remap("joy_in")
self.sub = rospy.Subscriber(
"joy_in",
Joy,
self.callback,
queue_size=rospy.get_param("~queue_size", None),
)
def load_mappings(self, ns):
btn_remap = rospy.get_param(ns + "/buttons", [])
axes_remap = rospy.get_param(ns + "/axes", [])
rospy.loginfo("loaded remapping: %d buttons, %d axes" % (len(btn_remap), len(axes_remap)))
return {"buttons": btn_remap, "axes": axes_remap}
def warn_remap(self, name):
if name == rospy.remap_name(name):
rospy.logwarn("topic '%s' is not remapped" % name)
def callback(self, in_msg):
out_msg = Joy(header=in_msg.header)
map_axes = self.mappings["axes"]
map_btns = self.mappings["buttons"]
out_msg.axes = [0.0] * len(map_axes)
out_msg.buttons = [0] * len(map_btns)
in_dic = {"axes": in_msg.axes, "buttons": in_msg.buttons}
for i, exp in enumerate(map_axes):
try:
out_msg.axes[i] = self.evaluator.reval(exp, in_dic)
except NameError as e:
rospy.logerr("You are using vars other than 'buttons' or 'axes': %s" % e)
except UnboundLocalError as e:
rospy.logerr("Wrong form: %s" % e)
except Exception as e:
raise e
for i, exp in enumerate(map_btns):
try:
if self.evaluator.reval(exp, in_dic) > 0:
out_msg.buttons[i] = 1
except NameError as e:
rospy.logerr("You are using vars other than 'buttons' or 'axes': %s" % e)
except UnboundLocalError as e:
rospy.logerr("Wrong form: %s" % e)
except Exception as e:
raise e
self.pub.publish(out_msg)
if __name__ == "__main__":
rospy.init_node("joy_remap")
n = JoyRemap()
rospy.spin()
| 4,439 | Python | 34.238095 | 105 | 0.54269 |
leggedrobotics/viplanner/ros/joystick_drivers/joy/config/ps4joy.yaml | mappings:
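# Each entry below is an expression that joy_remap.py evaluates against the
# incoming Joy message ('axes' and 'buttons'); the result becomes the
# corresponding output axis or button.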
axes:
- axes[0]
- axes[1]
- axes[2]
- axes[5]
- axes[6] * 2.0
- axes[7] * 2.0
- axes[8] * -2.0
- 0.0
- max(axes[10], 0.0) * -1.0
- min(axes[9], 0.0)
- min(axes[10], 0.0)
- max(axes[9], 0.0) * -1.0
- (axes[3] - 1.0) * 0.5
- (axes[4] - 1.0) * 0.5
- buttons[4] * -1.0
- buttons[5] * -1.0
- buttons[3] * -1.0
- buttons[2] * -1.0
- buttons[1] * -1.0
- buttons[0] * -1.0
- 0.0
- 0.0
- 0.0
- 0.0
- 0.0
- 0.0
- 0.0
buttons:
- buttons[8]
- buttons[10]
- buttons[11]
- buttons[9]
- max(axes[10], 0.0)
- min(axes[9], 0.0) * -1.0
- min(axes[10], 0.0) * -1.0
- max(axes[9], 0.0)
- buttons[6]
- buttons[7]
- buttons[4]
- buttons[5]
- buttons[3]
- buttons[2]
- buttons[1]
- buttons[0]
- buttons[12]
| 874 | YAML | 17.229166 | 31 | 0.40389 |
leggedrobotics/viplanner/ros/pathFollower/package.xml | <package>
<name>path_follower</name>
<version>0.0.1</version>
<description>Simple Path Follower for executing the path on the robot</description>
<maintainer email="[email protected]">Pascal Roth</maintainer>
<author email="[email protected]">Ji Zhang</author>
<license>BSD</license>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roscpp</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>message_filters</build_depend>
<build_depend>pcl_ros</build_depend>
<run_depend>roscpp</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>sensor_msgs</run_depend>
<run_depend>message_filters</run_depend>
<run_depend>pcl_ros</run_depend>
</package>
| 746 | XML | 30.124999 | 85 | 0.727882 |
leggedrobotics/viplanner/ros/pathFollower/README.md | # Path Follower Controller
Code used from https://github.com/MichaelFYang/iplanner_path_follow/tree/master
| 108 | Markdown | 26.249993 | 79 | 0.814815 |
leggedrobotics/viplanner/ros/pathFollower/src/pathFollower.cpp | #include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <std_msgs/Int8.h>
#include <std_msgs/Float32.h>
#include <nav_msgs/Path.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/TwistStamped.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Joy.h>
#include <tf/transform_datatypes.h>
#include <tf/transform_broadcaster.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/kdtree/kdtree_flann.h>
using namespace std;
const double PI = 3.1415926;
const double EPS = 1e-5;
double sensorOffsetX = 0;
double sensorOffsetY = 0;
int pubSkipNum = 1;
int pubSkipCount = 0;
bool twoWayDrive = true;
double lookAheadDis = 0.5;
double yawRateGain = 7.5;
double stopYawRateGain = 7.5;
double maxYawRate = 45.0;
double maxSpeed = 1.0;
double maxAccel = 1.0;
double switchTimeThre = 1.0;
double dirDiffThre = 0.1;
double stopDisThre = 0.2;
double slowDwnDisThre = 1.0;
bool useInclRateToSlow = false;
double inclRateThre = 120.0;
double slowRate1 = 0.25;
double slowRate2 = 0.5;
double slowTime1 = 2.0;
double slowTime2 = 2.0;
bool useInclToStop = false;
double inclThre = 45.0;
double stopTime = 5.0;
bool noRotAtStop = false;
bool noRotAtGoal = true;
bool autonomyMode = false;
double autonomySpeed = 1.0;
double joyToSpeedDelay = 2.0;
float joySpeed = 0;
float joySpeedRaw = 0;
float joyYaw = 0;
int safetyStop = 0;
float vehicleX = 0;
float vehicleY = 0;
float vehicleZ = 0;
float vehicleRoll = 0;
float vehiclePitch = 0;
float vehicleYaw = 0;
float vehicleXRec = 0;
float vehicleYRec = 0;
float vehicleZRec = 0;
float vehicleRollRec = 0;
float vehiclePitchRec = 0;
float vehicleYawRec = 0;
float vehicleYawRate = 0;
float vehicleSpeed = 0;
double dirMomentum = 0.25;
double lastDiffDir = 0;
double odomTime = 0;
double joyTime = 0;
double slowInitTime = 0;
double stopInitTime = 0;
int pathPointID = 0;
bool pathInit = false;
bool navFwd = true;
double switchTime = 0;
bool baseInverted = false;
std::string odomTopic = "/state_estimation";
std::string commandTopic = "/cmd_vel";
std::string baseFrame = "base";
nav_msgs::Path path;
void odomHandler(const geometry_msgs::PoseWithCovarianceStampedConstPtr& odomIn)
{
odomTime = odomIn->header.stamp.toSec();
double roll, pitch, yaw;
geometry_msgs::Quaternion geoQuat = odomIn->pose.pose.orientation;
tf::Matrix3x3(tf::Quaternion(geoQuat.x, geoQuat.y, geoQuat.z, geoQuat.w)).getRPY(roll, pitch, yaw);
vehicleRoll = roll;
vehiclePitch = pitch;
vehicleYaw = yaw;
vehicleX = odomIn->pose.pose.position.x;
vehicleY = odomIn->pose.pose.position.y;
vehicleZ = odomIn->pose.pose.position.z;
if ((fabs(roll) > inclThre * PI / 180.0 || fabs(pitch) > inclThre * PI / 180.0) && useInclToStop) {
stopInitTime = odomIn->header.stamp.toSec();
}
}
void odomHandler(const nav_msgs::OdometryConstPtr& odomIn)
{
odomTime = odomIn->header.stamp.toSec();
double roll, pitch, yaw;
geometry_msgs::Quaternion geoQuat = odomIn->pose.pose.orientation;
tf::Matrix3x3(tf::Quaternion(geoQuat.x, geoQuat.y, geoQuat.z, geoQuat.w)).getRPY(roll, pitch, yaw);
vehicleRoll = roll;
vehiclePitch = pitch;
vehicleYaw = yaw;
vehicleX = odomIn->pose.pose.position.x;
vehicleY = odomIn->pose.pose.position.y;
vehicleZ = odomIn->pose.pose.position.z;
if ((fabs(roll) > inclThre * PI / 180.0 || fabs(pitch) > inclThre * PI / 180.0) && useInclToStop) {
stopInitTime = odomIn->header.stamp.toSec();
}
}
void pathHandler(const nav_msgs::Path::ConstPtr& pathIn)
{
int pathSize = pathIn->poses.size();
path.poses.resize(pathSize);
for (int i = 0; i < pathSize; i++) {
path.poses[i].pose.position.x = pathIn->poses[i].pose.position.x;
path.poses[i].pose.position.y = pathIn->poses[i].pose.position.y;
path.poses[i].pose.position.z = pathIn->poses[i].pose.position.z;
}
vehicleXRec = vehicleX;
vehicleYRec = vehicleY;
vehicleZRec = vehicleZ;
vehicleRollRec = vehicleRoll;
vehiclePitchRec = vehiclePitch;
vehicleYawRec = vehicleYaw;
pathPointID = 0;
pathInit = true;
}
void joystickHandler(const sensor_msgs::Joy::ConstPtr& joy)
{
joyTime = ros::Time::now().toSec();
joySpeedRaw = sqrt(joy->axes[3] * joy->axes[3] + joy->axes[4] * joy->axes[4]);
joySpeed = joySpeedRaw;
if (joySpeed > 1.0) joySpeed = 1.0;
if (joy->axes[4] == 0) joySpeed = 0;
joyYaw = joy->axes[3];
if (joySpeed == 0 && noRotAtStop) joyYaw = 0;
if (joy->axes[4] < 0 && !twoWayDrive) {
joySpeed = 0;
joyYaw = 0;
}
if (joy->axes[2] > -0.1) {
autonomyMode = false;
} else {
autonomyMode = true;
}
}
void speedHandler(const std_msgs::Float32::ConstPtr& speed)
{
double speedTime = ros::Time::now().toSec();
if (autonomyMode && speedTime - joyTime > joyToSpeedDelay && joySpeedRaw == 0) {
joySpeed = speed->data / maxSpeed;
if (joySpeed < 0) joySpeed = 0;
else if (joySpeed > 1.0) joySpeed = 1.0;
}
}
void stopHandler(const std_msgs::Int8::ConstPtr& stop)
{
safetyStop = stop->data;
}
int main(int argc, char** argv)
{
ros::init(argc, argv, "pathFollower");
ros::NodeHandle nh;
ros::NodeHandle nhPrivate = ros::NodeHandle("~");
nhPrivate.getParam("sensorOffsetX", sensorOffsetX);
nhPrivate.getParam("sensorOffsetY", sensorOffsetY);
nhPrivate.getParam("pubSkipNum", pubSkipNum);
nhPrivate.getParam("twoWayDrive", twoWayDrive);
nhPrivate.getParam("lookAheadDis", lookAheadDis);
nhPrivate.getParam("yawRateGain", yawRateGain);
nhPrivate.getParam("stopYawRateGain", stopYawRateGain);
nhPrivate.getParam("maxYawRate", maxYawRate);
nhPrivate.getParam("maxSpeed", maxSpeed);
nhPrivate.getParam("maxAccel", maxAccel);
nhPrivate.getParam("switchTimeThre", switchTimeThre);
nhPrivate.getParam("dirDiffThre", dirDiffThre);
nhPrivate.getParam("stopDisThre", stopDisThre);
nhPrivate.getParam("slowDwnDisThre", slowDwnDisThre);
nhPrivate.getParam("useInclRateToSlow", useInclRateToSlow);
nhPrivate.getParam("inclRateThre", inclRateThre);
nhPrivate.getParam("slowRate1", slowRate1);
nhPrivate.getParam("slowRate2", slowRate2);
nhPrivate.getParam("slowTime1", slowTime1);
nhPrivate.getParam("slowTime2", slowTime2);
nhPrivate.getParam("useInclToStop", useInclToStop);
nhPrivate.getParam("inclThre", inclThre);
nhPrivate.getParam("stopTime", stopTime);
nhPrivate.getParam("noRotAtStop", noRotAtStop);
nhPrivate.getParam("noRotAtGoal", noRotAtGoal);
nhPrivate.getParam("autonomyMode", autonomyMode);
nhPrivate.getParam("autonomySpeed", autonomySpeed);
nhPrivate.getParam("joyToSpeedDelay", joyToSpeedDelay);
nhPrivate.getParam("odomTopic", odomTopic);
nhPrivate.getParam("commandTopic", commandTopic);
nhPrivate.getParam("baseFrame", baseFrame);
nhPrivate.getParam("baseInverted", baseInverted);
lookAheadDis += std::hypot(sensorOffsetX, sensorOffsetY);
ros::Subscriber subOdom = nh.subscribe<geometry_msgs::PoseWithCovarianceStamped> (odomTopic, 5, odomHandler);
// ros::Subscriber subOdom = nh.subscribe<nav_msgs::Odometry> (odomTopic, 5, odomHandler);
ros::Subscriber subPath = nh.subscribe<nav_msgs::Path> ("/viplanner/path", 5, pathHandler);
ros::Subscriber subJoystick = nh.subscribe<sensor_msgs::Joy> ("/joy", 5, joystickHandler);
ros::Subscriber subSpeed = nh.subscribe<std_msgs::Float32> ("/speed", 5, speedHandler);
ros::Subscriber subStop = nh.subscribe<std_msgs::Int8> ("/stop", 5, stopHandler);
ros::Publisher pubSpeed = nh.advertise<geometry_msgs::TwistStamped> (commandTopic, 5);
geometry_msgs::TwistStamped cmd_vel;
cmd_vel.header.frame_id = baseFrame;
if (autonomyMode) {
joySpeed = autonomySpeed / maxSpeed;
if (joySpeed < 0) joySpeed = 0;
else if (joySpeed > 1.0) joySpeed = 1.0;
}
ros::Rate rate(100);
bool status = ros::ok();
while (status) {
ros::spinOnce();
if (pathInit) {
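      // express the current vehicle position in the frame in which the incoming path was recorded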
float vehicleXRel = cos(vehicleYawRec) * (vehicleX - vehicleXRec)
+ sin(vehicleYawRec) * (vehicleY - vehicleYRec);
float vehicleYRel = -sin(vehicleYawRec) * (vehicleX - vehicleXRec)
+ cos(vehicleYawRec) * (vehicleY - vehicleYRec);
int pathSize = path.poses.size();
float endDisX = path.poses[pathSize - 1].pose.position.x - vehicleXRel;
float endDisY = path.poses[pathSize - 1].pose.position.y - vehicleYRel;
float endDis = sqrt(endDisX * endDisX + endDisY * endDisY);
float disX, disY, dis;
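      // advance along the path to the first point at least lookAheadDis away (pure-pursuit style look-ahead point)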
while (pathPointID < pathSize - 1) {
disX = path.poses[pathPointID].pose.position.x - vehicleXRel;
disY = path.poses[pathPointID].pose.position.y - vehicleYRel;
dis = sqrt(disX * disX + disY * disY);
if (dis < lookAheadDis) {
pathPointID++;
} else {
break;
}
}
disX = path.poses[pathPointID].pose.position.x - vehicleXRel;
disY = path.poses[pathPointID].pose.position.y - vehicleYRel;
dis = sqrt(disX * disX + disY * disY);
float pathDir = atan2(disY, disX);
double dirDiff = vehicleYaw - vehicleYawRec - pathDir;
if (dirDiff > PI) dirDiff -= 2 * PI;
else if (dirDiff < -PI) dirDiff += 2 * PI;
if (dirDiff > PI) dirDiff -= 2 * PI;
else if (dirDiff < -PI) dirDiff += 2 * PI;
if (twoWayDrive) {
double time = ros::Time::now().toSec();
if (fabs(dirDiff) > PI / 2 && navFwd && time - switchTime > switchTimeThre) {
navFwd = false;
switchTime = time;
} else if (fabs(dirDiff) < PI / 2 && !navFwd && time - switchTime > switchTimeThre) {
navFwd = true;
switchTime = time;
}
}
float joySpeed2 = maxSpeed * joySpeed;
if (!navFwd) {
dirDiff += PI;
if (dirDiff > PI) dirDiff -= 2 * PI;
joySpeed2 *= -1;
}
// Add momentum to dirDiff
if (fabs(dirDiff) > dirDiffThre - EPS && dis > lookAheadDis + EPS) {
if (lastDiffDir - dirDiff > PI) dirDiff += 2 * PI;
else if (lastDiffDir - dirDiff < -PI) dirDiff -= 2 * PI;
dirDiff = (1.0 - dirMomentum) * dirDiff + dirMomentum * lastDiffDir;
dirDiff = std::max(std::min(dirDiff, PI-EPS), -PI+EPS);
lastDiffDir = dirDiff;
} else {
lastDiffDir = 0.0;
}
if (fabs(vehicleSpeed) < 2.0 * maxAccel / 100.0) vehicleYawRate = -stopYawRateGain * dirDiff;
else vehicleYawRate = -yawRateGain * dirDiff;
if (vehicleYawRate > maxYawRate * PI / 180.0) vehicleYawRate = maxYawRate * PI / 180.0;
else if (vehicleYawRate < -maxYawRate * PI / 180.0) vehicleYawRate = -maxYawRate * PI / 180.0;
if (joySpeed2 == 0 && !autonomyMode) {
vehicleYawRate = maxYawRate * joyYaw * PI / 180.0;
} else if (pathSize <= 1 || (dis < stopDisThre && noRotAtGoal)) {
vehicleYawRate = 0;
}
if (pathSize <= 1) {
joySpeed2 = 0;
} else if (endDis / slowDwnDisThre < joySpeed) {
joySpeed2 *= endDis / slowDwnDisThre;
}
float joySpeed3 = joySpeed2;
if (odomTime < slowInitTime + slowTime1 && slowInitTime > 0) joySpeed3 *= slowRate1;
else if (odomTime < slowInitTime + slowTime1 + slowTime2 && slowInitTime > 0) joySpeed3 *= slowRate2;
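      // ramp the speed toward the commanded value; decelerate to zero when the heading error exceeds dirDiffThre
      // or the look-ahead point is closer than stopDisThre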
if (fabs(dirDiff) < dirDiffThre && dis > stopDisThre) {
if (vehicleSpeed < joySpeed3) vehicleSpeed += maxAccel / 100.0;
else if (vehicleSpeed > joySpeed3) vehicleSpeed -= maxAccel / 100.0;
} else {
if (vehicleSpeed > 0) vehicleSpeed -= maxAccel / 100.0;
else if (vehicleSpeed < 0) vehicleSpeed += maxAccel / 100.0;
}
if (odomTime < stopInitTime + stopTime && stopInitTime > 0) {
vehicleSpeed = 0;
vehicleYawRate = 0;
}
if (safetyStop >= 1) vehicleSpeed = 0;
if (safetyStop >= 2) vehicleYawRate = 0;
pubSkipCount--;
if (pubSkipCount < 0) {
cmd_vel.header.stamp = ros::Time().fromSec(odomTime);
if (fabs(vehicleSpeed) <= maxAccel / 100.0) cmd_vel.twist.linear.x = 0;
else if (baseInverted) cmd_vel.twist.linear.x = -vehicleSpeed;
else cmd_vel.twist.linear.x = vehicleSpeed;
cmd_vel.twist.angular.z = vehicleYawRate;
pubSpeed.publish(cmd_vel);
pubSkipCount = pubSkipNum;
}
}
status = ros::ok();
rate.sleep();
}
return 0;
}
| 12,612 | C++ | 30.770781 | 111 | 0.666429 |
leggedrobotics/viplanner/ros/waypoint_rviz_plugin/plugin_description.xml | <library path="lib/libwaypoint_rviz_plugin">
<class name="rviz/WaypointTool"
type="rviz::WaypointTool"
base_class_type="rviz::Tool">
<description>
A tool for sending waypoints
</description>
</class>
</library>
| 269 | XML | 25.999997 | 44 | 0.591078 |
leggedrobotics/viplanner/ros/waypoint_rviz_plugin/README.md | CODE MODIFIED FROM: https://github.com/HongbiaoZ/autonomous_exploration_development_environment
| 96 | Markdown | 47.499976 | 95 | 0.854167 |
leggedrobotics/viplanner/ros/waypoint_rviz_plugin/src/waypoint_tool.cpp |
// CODE MODIFIED FROM: https://github.com/HongbiaoZ/autonomous_exploration_development_environment
#include "waypoint_tool.h"
namespace rviz
{
WaypointTool::WaypointTool()
{
shortcut_key_ = 'w';
  topic_property_ = new StringProperty("Topic", "waypoint", "The topic on which to publish navigation waypoints.",
getPropertyContainer(), SLOT(updateTopic()), this);
}
void WaypointTool::onInitialize()
{
PoseTool::onInitialize();
setName("Waypoint");
updateTopic();
vehicle_z = 0;
}
void WaypointTool::updateTopic()
{
const std::string odom_topic = "/state_estimator/pose_in_odom";
sub_ = nh_.subscribe<geometry_msgs::PoseWithCovarianceStamped> (odom_topic, 5, &WaypointTool::odomHandler, this);
pub_ = nh_.advertise<geometry_msgs::PointStamped>("/mp_waypoint", 5);
pub_joy_ = nh_.advertise<sensor_msgs::Joy>("/joy", 5);
}
void WaypointTool::odomHandler(const geometry_msgs::PoseWithCovarianceStampedConstPtr& odom)
{
vehicle_z = odom->pose.pose.position.z;
}
void WaypointTool::onPoseSet(double x, double y, double theta)
{
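  // build a synthetic joystick message that puts the controllers into autonomy mode
  // (axes[2] = -1.0, cf. joystickHandler in the pathFollower node)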
sensor_msgs::Joy joy;
joy.axes.push_back(0);
joy.axes.push_back(0);
joy.axes.push_back(-1.0);
joy.axes.push_back(0);
joy.axes.push_back(1.0);
joy.axes.push_back(1.0);
joy.axes.push_back(0);
joy.axes.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(1);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.buttons.push_back(0);
joy.header.stamp = ros::Time::now();
joy.header.frame_id = "waypoint_tool";
pub_joy_.publish(joy);
geometry_msgs::PointStamped waypoint_odom;
waypoint_odom.header.frame_id = "odom";
waypoint_odom.header.stamp = joy.header.stamp;
waypoint_odom.point.x = x;
waypoint_odom.point.y = y;
waypoint_odom.point.z = vehicle_z;
// Create a TransformListener object to receive transforms
tf2_ros::Buffer tf_buffer;
tf2_ros::TransformListener tf_listener(tf_buffer);
// Wait for the transform to become available
try {
geometry_msgs::TransformStamped transform = tf_buffer.lookupTransform("map", "base", ros::Time(0));
geometry_msgs::PointStamped waypoint_map;
tf2::doTransform(waypoint_odom, waypoint_map, transform);
waypoint_map.header.frame_id = "map";
waypoint_map.header.stamp = ros::Time::now();
// Print out the transformed point coordinates
ROS_INFO("Point in map frame: (%.2f, %.2f, %.2f)",
waypoint_map.point.x, waypoint_map.point.y, waypoint_map.point.z);
pub_.publish(waypoint_map);
// usleep(10000);
// pub_.publish(waypoint);
} catch (tf2::TransformException &ex) {
ROS_WARN("%s", ex.what());
pub_.publish(waypoint_odom);
}
}
}
#include <pluginlib/class_list_macros.hpp>
PLUGINLIB_EXPORT_CLASS(rviz::WaypointTool, rviz::Tool)
| 2,985 | C++ | 28.564356 | 115 | 0.679062 |
leggedrobotics/viplanner/ros/waypoint_rviz_plugin/include/waypoint_tool.h | #ifndef WAYPOINT_RVIZ_PLUGIN_WAYPOINT_TOOL_H
#define WAYPOINT_RVIZ_PLUGIN_WAYPOINT_TOOL_H
#include <sstream>
#include <ros/ros.h>
#include <QObject>
#include <sensor_msgs/Joy.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PointStamped.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <tf2/transform_datatypes.h>
#include <tf2_ros/transform_listener.h>
#include "rviz/display_context.h"
#include "rviz/properties/string_property.h"
#include "rviz/default_plugin/tools/pose_tool.h"
namespace rviz
{
class StringProperty;
class WaypointTool : public PoseTool
{
Q_OBJECT
public:
WaypointTool();
virtual ~WaypointTool()
{
}
virtual void onInitialize();
protected:
virtual void odomHandler(const geometry_msgs::PoseWithCovarianceStampedConstPtr& odom);
virtual void onPoseSet(double x, double y, double theta);
private Q_SLOTS:
void updateTopic();
private:
float vehicle_z;
ros::NodeHandle nh_;
ros::Subscriber sub_;
ros::Publisher pub_;
ros::Publisher pub_joy_;
StringProperty* topic_property_;
};
}
#endif // WAYPOINT_RVIZ_PLUGIN_WAYPOINT_TOOL_H
| 1,112 | C | 19.611111 | 89 | 0.748201 |
leggedrobotics/viplanner/ros/planner/src/m2f_inference.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import numpy as np
# ROS
import rospy
from mmdet.apis import inference_detector, init_detector
from mmdet.evaluation import INSTANCE_OFFSET
# viplanner-ros
from viplanner.config.coco_sem_meta import get_class_for_id_mmdet
from viplanner.config.viplanner_sem_meta import VIPlannerSemMetaHandler
class Mask2FormerInference:
"""Run Inference on Mask2Former model to estimate semantic segmentation"""
debug: bool = False
def __init__(
self,
config_file="configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml",
checkpoint_file="model_final.pth",
) -> None:
# Build the model from a config file and a checkpoint file
self.model = init_detector(config_file, checkpoint_file, device="cuda:0")
# mapping from coco class id to viplanner class id and color
viplanner_meta = VIPlannerSemMetaHandler()
coco_viplanner_cls_mapping = get_class_for_id_mmdet(self.model.dataset_meta["classes"])
self.viplanner_sem_class_color_map = viplanner_meta.class_color
self.coco_viplanner_color_mapping = {}
for coco_id, viplanner_cls_name in coco_viplanner_cls_mapping.items():
self.coco_viplanner_color_mapping[coco_id] = viplanner_meta.class_color[viplanner_cls_name]
return
def predict(self, image: np.ndarray) -> np.ndarray:
"""Predict semantic segmentation from image
Args:
image (np.ndarray): image to be processed in BGR format
"""
result = inference_detector(self.model, image)
result = result.pred_panoptic_seg.sem_seg.detach().cpu().numpy()[0]
# create output
panoptic_mask = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)
for curr_sem_class in np.unique(result):
curr_label = curr_sem_class % INSTANCE_OFFSET
try:
panoptic_mask[result == curr_sem_class] = self.coco_viplanner_color_mapping[curr_label]
except KeyError:
if curr_sem_class != len(self.model.dataset_meta["classes"]):
rospy.logwarn(f"Category {curr_label} not found in" " coco_viplanner_cls_mapping.")
panoptic_mask[result == curr_sem_class] = self.viplanner_sem_class_color_map["static"]
if self.debug:
import matplotlib.pyplot as plt
plt.imshow(panoptic_mask)
plt.show()
return panoptic_mask
# EoF
| 2,610 | Python | 34.767123 | 103 | 0.65977 |
leggedrobotics/viplanner/ros/planner/src/viplanner_node.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
import sys
import time
import warnings
from typing import Optional, Tuple
import cv2
import cv_bridge
import numpy as np
import PIL
# ROS
import ros_numpy
import rospkg
import rospy
import scipy.spatial.transform as stf
import tf2_geometry_msgs
import tf2_ros
import torch
from geometry_msgs.msg import Point, PointStamped, PoseStamped
from nav_msgs.msg import Path
from sensor_msgs.msg import CameraInfo, CompressedImage, Image, Joy
from std_msgs.msg import Float32, Header, Int16
from visualization_msgs.msg import Marker
# init ros node
rospack = rospkg.RosPack()
pack_path = rospack.get_path("viplanner_node")
sys.path.append(pack_path)
# VIPlanner
from src.m2f_inference import Mask2FormerInference
from src.vip_inference import VIPlannerInference
from utils.rosutil import ROSArgparse
warnings.filterwarnings("ignore")
# conversion matrix from ROS camera convention (z-forward, y-down, x-right)
# to robotics convention (x-forward, y-left, z-up)
ROS_TO_ROBOTICS_MAT = stf.Rotation.from_euler("XYZ", [-90, 0, -90], degrees=True).as_matrix()
CAMERA_FLIP_MAT = stf.Rotation.from_euler("XYZ", [180, 0, 0], degrees=True).as_matrix()
class VIPlannerNode:
"""VIPlanner ROS Node Class"""
debug: bool = False
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
# init planner algo class
self.vip_algo = VIPlannerInference(self.cfg)
if self.vip_algo.train_cfg.sem:
# init semantic network
self.m2f_inference = Mask2FormerInference(
config_file=args.m2f_config_path,
checkpoint_file=args.m2f_model_path,
)
self.m2f_timer_data = Float32()
self.m2f_timer_pub = rospy.Publisher(self.cfg.m2f_timer_topic, Float32, queue_size=10)
# init transforms
self.tf_buffer = tf2_ros.Buffer(rospy.Duration(100.0)) # tf buffer length
self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
# init bridge
self.bridge = cv_bridge.CvBridge()
# init flags
self.is_goal_init = False
self.ready_for_planning_depth = False
self.ready_for_planning_rgb_sem = False
self.is_goal_processed = False
self.is_smartjoy = False
self.goal_cam_frame_set = False
self.init_goal_trans = True
# planner status
self.planner_status = Int16()
self.planner_status.data = 0
# fear reaction
self.fear_buffter = 0
self.is_fear_reaction = False
# process time
self.vip_timer_data = Float32()
self.vip_timer_pub = rospy.Publisher("/viplanner/timer", Float32, queue_size=10)
# depth and rgb image message
self.depth_header: Header = Header()
self.rgb_header: Header = Header()
self.depth_img: np.ndarray = None
self.depth_pose: np.ndarray = None
self.sem_rgb_img: np.ndarray = None
self.sem_rgb_odom: np.ndarray = None
self.pix_depth_cam_frame: np.ndarray = None
rospy.Subscriber(
self.cfg.depth_topic,
Image,
callback=self.depthCallback,
queue_size=1,
buff_size=2**24,
)
if self.vip_algo.train_cfg.sem or self.vip_algo.train_cfg.rgb:
if self.cfg.compressed:
rospy.Subscriber(
self.cfg.rgb_topic,
CompressedImage,
callback=self.imageCallbackCompressed,
queue_size=1,
buff_size=2**24,
)
else:
rospy.Subscriber(
self.cfg.rgb_topic,
Image,
callback=self.imageCallback,
queue_size=1,
buff_size=2**24,
)
else:
self.ready_for_planning_rgb_sem = True
# subscribe to further topics
self.goal_world_frame: PointStamped = None
self.goal_robot_frame: PointStamped = None
rospy.Subscriber(self.cfg.goal_topic, PointStamped, self.goalCallback)
rospy.Subscriber("/joy", Joy, self.joyCallback, queue_size=10)
# camera info subscribers
self.K_depth: np.ndarray = np.zeros((3, 3))
self.K_rgb: np.ndarray = np.zeros((3, 3))
self.depth_intrinsics_init: bool = False
self.rgb_intrinsics_init: bool = False
rospy.Subscriber(
self.cfg.depth_info_topic,
CameraInfo,
callback=self.depthCamInfoCallback,
)
rospy.Subscriber(
self.cfg.rgb_info_topic,
CameraInfo,
callback=self.rgbCamInfoCallback,
)
# publish effective goal pose and marker with max distance (circle)
# and direct line (line)
self.crop_goal_pub = rospy.Publisher("viplanner/visualization/crop_goal", PointStamped, queue_size=1)
self.marker_circ_pub = rospy.Publisher("viplanner/visualization/odom_circle", Marker, queue_size=1)
self.marker_line_pub = rospy.Publisher("viplanner/visualization/goal_line", Marker, queue_size=1)
self.marker_circ: Marker = None
self.marker_line: Marker = None
self.max_goal_distance: float = 10.0
self._init_markers()
# planning status topics
self.status_pub = rospy.Publisher("/viplanner/status", Int16, queue_size=10)
# path topics
self.path_pub = rospy.Publisher(self.cfg.path_topic, Path, queue_size=10)
self.path_viz_pub = rospy.Publisher(self.cfg.path_topic + "_viz", Path, queue_size=10)
self.fear_path_pub = rospy.Publisher(self.cfg.path_topic + "_fear", Path, queue_size=10)
# viz semantic image
self.m2f_pub = rospy.Publisher("/viplanner/sem_image/compressed", CompressedImage, queue_size=3)
rospy.loginfo("VIPlanner Ready.")
return
def spin(self):
r = rospy.Rate(self.cfg.main_freq)
while not rospy.is_shutdown():
if all(
(
self.ready_for_planning_rgb_sem,
self.ready_for_planning_depth,
self.is_goal_init,
self.goal_cam_frame_set,
)
):
# copy current data
cur_depth_image = self.depth_img.copy()
cur_depth_pose = self.depth_pose.copy()
if self.vip_algo.train_cfg.sem or self.vip_algo.train_cfg.rgb:
cur_rgb_pose = self.sem_rgb_odom.copy()
cur_rgb_image = self.sem_rgb_img.copy()
# warp rgb image
if False:
start = time.time()
if self.pix_depth_cam_frame is None:
self.initPixArray(cur_depth_image.shape)
(
cur_rgb_image,
overlap_ratio,
depth_zero_ratio,
) = self.imageWarp(
cur_rgb_image,
cur_depth_image,
cur_rgb_pose,
cur_depth_pose,
)
time_warp = time.time() - start
if overlap_ratio < self.cfg.overlap_ratio_thres:
rospy.logwarn_throttle(
2.0,
(
"Waiting for new semantic image since"
f" overlap ratio is {overlap_ratio} <"
f" {self.cfg.overlap_ratio_thres}, with"
f" depth zero ratio {depth_zero_ratio}"
),
)
self.pubPath(np.zeros((51, 3)), self.is_goal_init)
continue
if depth_zero_ratio > self.cfg.depth_zero_ratio_thres:
rospy.logwarn_throttle(
2.0,
(
"Waiting for new depth image since depth"
f" zero ratio is {depth_zero_ratio} >"
f" {self.cfg.depth_zero_ratio_thres}, with"
f" overlap ratio {overlap_ratio}"
),
)
self.pubPath(np.zeros((51, 3)), self.is_goal_init)
continue
else:
time_warp = 0.0
else:
time_warp = 0.0
self.time_sem = 0.0
# project goal
goal_cam_frame = self.goalProjection(cur_depth_pose=cur_depth_pose)
# Network Planning
start = time.time()
if self.vip_algo.train_cfg.sem or self.vip_algo.train_cfg.rgb:
waypoints, fear = self.vip_algo.plan(cur_depth_image, cur_rgb_image, goal_cam_frame)
else:
waypoints, fear = self.vip_algo.plan_depth(cur_depth_image, goal_cam_frame)
time_planner = time.time() - start
start = time.time()
# transform waypoint to robot frame (prev in depth cam frame
# with robotics convention)
waypoints = (self.cam_rot @ waypoints.T).T + self.cam_offset
# publish time
self.vip_timer_data.data = time_planner * 1000
self.vip_timer_pub.publish(self.vip_timer_data)
                # check whether the goal is within the convergence range
if (
(np.sqrt(goal_cam_frame[0][0] ** 2 + goal_cam_frame[0][1] ** 2) < self.cfg.conv_dist)
and self.is_goal_processed
and (not self.is_smartjoy)
):
self.ready_for_planning = False
self.is_goal_init = False
# planner status -> Success
if self.planner_status.data == 0:
self.planner_status.data = 1
self.status_pub.publish(self.planner_status)
rospy.loginfo("Goal Arrived")
                # check for a high-risk (fear) path prediction
if fear > 0.7:
self.is_fear_reaction = True
is_track_ahead = self.isForwardTraking(waypoints)
self.fearPathDetection(fear, is_track_ahead)
if self.is_fear_reaction:
rospy.logwarn_throttle(2.0, "current path prediction is invalid.")
# planner status -> Fails
if self.planner_status.data == 0:
self.planner_status.data = -1
self.status_pub.publish(self.planner_status)
# publish path
self.pubPath(waypoints, self.is_goal_init)
time_other = time.time() - start
if self.vip_algo.train_cfg.pre_train_sem:
total_time = round(time_warp + self.time_sem + time_planner + time_other, 4)
print(
"Path predicted in"
f" {total_time}s"
f" \t warp: {round(time_warp, 4)}s \t sem:"
f" {round(self.time_sem, 4)}s \t planner:"
f" {round(time_planner, 4)}s \t other:"
f" {round(time_other, 4)}s"
)
self.time_sem = 0
else:
print(
"Path predicted in"
f" {round(time_warp + time_planner + time_other, 4)}s"
f" \t warp: {round(time_warp, 4)}s \t planner:"
f" {round(time_planner, 4)}s \t other:"
f" {round(time_other, 4)}s"
)
r.sleep()
rospy.spin()
"""GOAL PROJECTION"""
def goalProjection(self, cur_depth_pose: np.ndarray):
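        """Crop goals farther than max_goal_distance (in xy) to half that distance,
        publish the cropped goal and visualization markers, and return the goal
        expressed in the depth camera frame (robotics convention) as a (1, 3) tensor."""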
cur_goal_robot_frame = np.array(
[
self.goal_robot_frame.point.x,
self.goal_robot_frame.point.y,
self.goal_robot_frame.point.z,
]
)
cur_goal_world_frame = np.array(
[
self.goal_world_frame.point.x,
self.goal_world_frame.point.y,
self.goal_world_frame.point.z,
]
)
if np.linalg.norm(cur_goal_robot_frame[:2]) > self.max_goal_distance:
# crop goal position
cur_goal_robot_frame[:2] = (
cur_goal_robot_frame[:2] / np.linalg.norm(cur_goal_robot_frame[:2]) * (self.max_goal_distance / 2)
)
crop_goal = PointStamped()
crop_goal.header.stamp = self.depth_header.stamp
crop_goal.header.frame_id = self.cfg.robot_id
crop_goal.point.x = cur_goal_robot_frame[0]
crop_goal.point.y = cur_goal_robot_frame[1]
crop_goal.point.z = cur_goal_robot_frame[2]
self.crop_goal_pub.publish(crop_goal)
# update markers
self.marker_circ.color.a = 0.1
self.marker_circ.pose.position = Point(cur_depth_pose[0], cur_depth_pose[1], cur_depth_pose[2])
self.marker_circ_pub.publish(self.marker_circ)
self.marker_line.points = []
self.marker_line.points.append(
Point(cur_depth_pose[0], cur_depth_pose[1], cur_depth_pose[2])
) # world frame
self.marker_line.points.append(
Point(
cur_goal_world_frame[0],
cur_goal_world_frame[1],
cur_goal_world_frame[2],
)
) # world frame
self.marker_line_pub.publish(self.marker_line)
else:
self.marker_circ.color.a = 0
self.marker_circ_pub.publish(self.marker_circ)
self.marker_line.points = []
self.marker_line_pub.publish(self.marker_line)
goal_cam_frame = self.cam_rot.T @ (cur_goal_robot_frame - self.cam_offset).T
return torch.tensor(goal_cam_frame, dtype=torch.float32)[None, ...]
def _init_markers(self):
if isinstance(self.vip_algo.train_cfg.data_cfg, list):
self.max_goal_distance = self.vip_algo.train_cfg.data_cfg[0].max_goal_distance
else:
self.max_goal_distance = self.vip_algo.train_cfg.data_cfg.max_goal_distance
# setup circle marker
self.marker_circ = Marker()
self.marker_circ.header.frame_id = self.cfg.world_id
self.marker_circ.type = Marker.SPHERE
self.marker_circ.action = Marker.ADD
self.marker_circ.scale.x = self.max_goal_distance # only half of the distance
self.marker_circ.scale.y = self.max_goal_distance # only half of the distance
self.marker_circ.scale.z = 0.01
self.marker_circ.color.a = 0.1
self.marker_circ.color.r = 0.0
self.marker_circ.color.g = 0.0
self.marker_circ.color.b = 1.0
self.marker_circ.pose.orientation.w = 1.0
        # setup line marker
self.marker_line = Marker()
self.marker_line.header.frame_id = self.cfg.world_id
self.marker_line.type = Marker.LINE_STRIP
self.marker_line.action = Marker.ADD
self.marker_line.scale.x = 0.1
self.marker_line.color.a = 1.0
self.marker_line.color.r = 0.0
self.marker_line.color.g = 0.0
self.marker_line.color.b = 1.0
self.marker_line.pose.orientation.w = 1.0
return
"""RGB/ SEM IMAGE WARP"""
def initPixArray(self, img_shape: tuple):
# get image plane mesh grid
pix_u = np.arange(0, img_shape[1])
pix_v = np.arange(0, img_shape[0])
grid = np.meshgrid(pix_u, pix_v)
pixels = np.vstack(list(map(np.ravel, grid))).T
pixels = np.hstack([pixels, np.ones((len(pixels), 1))]) # add ones for 3D coordinates
# transform to camera frame
k_inv = np.linalg.inv(self.K_depth)
pix_cam_frame = np.matmul(k_inv, pixels.T) # pixels in ROS camera convention (z forward, x right, y down)
# reorder to be in "robotics" axis order (x forward, y left, z up)
self.pix_depth_cam_frame = pix_cam_frame[[2, 0, 1], :].T * np.array([1, -1, -1])
return
def imageWarp(
self,
rgb_img: np.ndarray,
depth_img: np.ndarray,
pose_rgb: np.ndarray,
pose_depth: np.ndarray,
) -> np.ndarray:
# get 3D points of depth image
depth_rot = (
stf.Rotation.from_quat(pose_depth[3:]).as_matrix() @ ROS_TO_ROBOTICS_MAT
) # convert orientation from ROS camera to robotics=world frame
if not self.cfg.image_flip:
# rotation is included in ROS_TO_ROBOTICS_MAT and has to be
# removed when not fliped
depth_rot = depth_rot @ CAMERA_FLIP_MAT
dep_im_reshaped = depth_img.reshape(-1, 1)
depth_zero_ratio = np.sum(np.round(dep_im_reshaped, 5) == 0) / len(dep_im_reshaped)
points = dep_im_reshaped * (depth_rot @ self.pix_depth_cam_frame.T).T + pose_depth[:3]
# transform points to semantic camera frame
points_sem_cam_frame = (
(stf.Rotation.from_quat(pose_rgb[3:]).as_matrix() @ ROS_TO_ROBOTICS_MAT @ CAMERA_FLIP_MAT).T
@ (points - pose_rgb[:3]).T
).T
# normalize points
points_sem_cam_frame_norm = points_sem_cam_frame / points_sem_cam_frame[:, 0][:, np.newaxis]
# reorder points be camera convention (z-forward)
points_sem_cam_frame_norm = points_sem_cam_frame_norm[:, [1, 2, 0]] * np.array([-1, -1, 1])
# transform points to pixel coordinates
pixels = (self.K_rgb @ points_sem_cam_frame_norm.T).T
# filter points outside of image
filter_idx = (
(pixels[:, 0] >= 0)
& (pixels[:, 0] < rgb_img.shape[1])
& (pixels[:, 1] >= 0)
& (pixels[:, 1] < rgb_img.shape[0])
)
# get semantic annotation
rgb_pixels = np.zeros((pixels.shape[0], 3))
rgb_pixels[filter_idx] = rgb_img[
pixels[filter_idx, 1].astype(int) - 1,
pixels[filter_idx, 0].astype(int) - 1,
]
rgb_warped = rgb_pixels.reshape(depth_img.shape[0], depth_img.shape[1], 3)
# overlap ratio
overlap_ratio = np.sum(filter_idx) / pixels.shape[0]
# DEBUG
if self.debug:
print(
"depth_rot",
stf.Rotation.from_matrix(depth_rot).as_euler("xyz", degrees=True),
)
rgb_rot = stf.Rotation.from_quat(pose_rgb[3:]).as_matrix() @ ROS_TO_ROBOTICS_MAT @ CAMERA_FLIP_MAT
print(
"rgb_rot",
stf.Rotation.from_matrix(rgb_rot).as_euler("xyz", degrees=True),
)
import matplotlib.pyplot as plt
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.imshow(depth_img)
ax2.imshow(rgb_img)
ax3.imshow(rgb_warped / 255)
ax4.imshow(depth_img)
ax4.imshow(rgb_warped / 255, alpha=0.5)
plt.savefig(os.path.join(os.getcwd(), "depth_sem_warp.png"))
# plt.show()
plt.close()
# reshape to image
return rgb_warped, overlap_ratio, depth_zero_ratio
"""PATH PUB, GOAL and ODOM SUB and FEAR DETECTION"""
def pubPath(self, waypoints, is_goal_init=True):
# create path
poses = []
if is_goal_init:
for p in waypoints:
                # get individual pose (in the robot base frame)
pose = PoseStamped()
pose.pose.position.x = p[0]
pose.pose.position.y = p[1]
poses.append(pose)
# Wait for the transform from base frame to odom frame
trans = None
while trans is None:
try:
trans = self.tf_buffer.lookup_transform(
self.cfg.world_id,
self.cfg.robot_id,
self.depth_header.stamp,
rospy.Duration(1.0),
)
except (
tf2_ros.LookupException,
tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException,
):
rospy.logerr(f"Failed to lookup transform from {self.cfg.robot_id} to" f" {self.cfg.world_id}")
continue
# Transform each pose from base to odom frame
transformed_poses = []
transformed_poses_np = np.zeros(waypoints.shape)
for idx, pose in enumerate(poses):
transformed_pose = tf2_geometry_msgs.do_transform_pose(pose, trans)
transformed_poses.append(transformed_pose)
transformed_poses_np[idx] = np.array(
[
transformed_pose.pose.position.x,
transformed_pose.pose.position.y,
transformed_pose.pose.position.z,
]
)
success, curr_depth_cam_odom_pose = self.poseCallback(self.depth_header.frame_id, rospy.Time(0))
# remove all waypoints already passed
front_poses = np.linalg.norm(transformed_poses_np - transformed_poses_np[0], axis=1) > np.linalg.norm(
curr_depth_cam_odom_pose[:3] - transformed_poses_np[0]
)
poses = [pose for idx, pose in enumerate(poses) if front_poses[idx]]
transformed_poses = [pose for idx, pose in enumerate(transformed_poses) if front_poses[idx]]
# add header
base_path = Path()
base_fear_path = Path()
odom_path = Path()
# assign header
base_path.header.frame_id = base_fear_path.header.frame_id = self.cfg.robot_id
odom_path.header.frame_id = self.cfg.world_id
base_path.header.stamp = base_fear_path.header.stamp = odom_path.header.stamp = self.depth_header.stamp
# assign poses
if self.is_fear_reaction:
base_fear_path.poses = poses
base_path.poses = poses[:1]
else:
base_path.poses = poses
odom_path.poses = transformed_poses
# publish path
self.fear_path_pub.publish(base_fear_path)
self.path_pub.publish(base_path)
self.path_viz_pub.publish(odom_path)
return
def fearPathDetection(self, fear, is_forward):
if fear > 0.5 and is_forward:
if not self.is_fear_reaction:
self.fear_buffter = self.fear_buffter + 1
elif self.is_fear_reaction:
self.fear_buffter = self.fear_buffter - 1
if self.fear_buffter > self.cfg.buffer_size:
self.is_fear_reaction = True
elif self.fear_buffter <= 0:
self.is_fear_reaction = False
return
def isForwardTraking(self, waypoints):
xhead = np.array([1.0, 0])
phead = None
for p in waypoints:
if np.linalg.norm(p[0:2]) > self.cfg.track_dist:
phead = p[0:2] / np.linalg.norm(p[0:2])
break
        if phead is not None and phead.dot(xhead) > 1.0 - self.cfg.angular_thread:
return True
return False
def joyCallback(self, joy_msg):
if joy_msg.buttons[4] > 0.9:
rospy.loginfo("Switch to Smart Joystick mode ...")
self.is_smartjoy = True
# reset fear reaction
self.fear_buffter = 0
self.is_fear_reaction = False
if self.is_smartjoy:
if np.sqrt(joy_msg.axes[3] ** 2 + joy_msg.axes[4] ** 2) < 1e-3:
# reset fear reaction
self.fear_buffter = 0
self.is_fear_reaction = False
self.ready_for_planning = False
self.is_goal_init = False
else:
joy_goal = PointStamped()
joy_goal.header.frame_id = self.cfg.robot_id
joy_goal.point.x = joy_msg.axes[4] * self.cfg.joyGoal_scale
joy_goal.point.y = joy_msg.axes[3] * self.cfg.joyGoal_scale
joy_goal.point.z = 0.0
joy_goal.header.stamp = rospy.Time.now()
self.goal_pose = joy_goal
self.is_goal_init = True
self.is_goal_processed = False
return
def goalCallback(self, msg):
rospy.loginfo("Received a new goal")
self.goal_pose = msg
self.is_smartjoy = False
self.is_goal_init = True
self.is_goal_processed = False
# reset fear reaction
self.fear_buffter = 0
self.is_fear_reaction = False
        # reset planner status
self.planner_status.data = 0
return
"""RGB IMAGE AND DEPTH CALLBACKS"""
def poseCallback(self, frame_id: str, img_stamp, target_frame_id: Optional[str] = None) -> Tuple[bool, np.ndarray]:
target_frame_id = target_frame_id if target_frame_id else self.cfg.world_id
try:
if self.cfg.mount_cam_frame is None:
# Wait for the transform to become available
transform = self.tf_buffer.lookup_transform(target_frame_id, frame_id, img_stamp, rospy.Duration(4.0))
else:
frame_id = self.cfg.mount_cam_frame
transform = self.tf_buffer.lookup_transform(
target_frame_id,
self.cfg.mount_cam_frame,
img_stamp,
rospy.Duration(4.0),
)
# Extract the translation and rotation from the transform
translation = transform.transform.translation
rotation = transform.transform.rotation
pose = np.array(
[
translation.x,
translation.y,
translation.z,
rotation.x,
rotation.y,
rotation.z,
rotation.w,
]
)
return True, pose
except (
tf2_ros.LookupException,
tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException,
):
rospy.logerr(f"Pose Fail to transfer {frame_id} into" f" {target_frame_id} frame.")
return False, np.zeros(7)
def imageCallback(self, rgb_msg: Image):
rospy.logdebug("Received rgb image %s: %d" % (rgb_msg.header.frame_id, rgb_msg.header.seq))
# image pose
success, pose = self.poseCallback(rgb_msg.header.frame_id, rgb_msg.header.stamp)
if not success:
return
# RGB image
        try:
            image = self.bridge.imgmsg_to_cv2(rgb_msg, "bgr8")
        except cv_bridge.CvBridgeError as e:
            rospy.logerr(f"cv_bridge conversion failed: {e}")
            return
if self.vip_algo.train_cfg.sem:
image = self.semPrediction(image)
        self.sem_rgb_odom = pose
        self.sem_rgb_img = image
        self.ready_for_planning_rgb_sem = True
        return
def imageCallbackCompressed(self, rgb_msg: CompressedImage):
rospy.logdebug(f"Received rgb image {rgb_msg.header.frame_id}:" f" {rgb_msg.header.stamp.to_sec()}")
self.rgb_header = rgb_msg.header
# image pose
success, pose = self.poseCallback(rgb_msg.header.frame_id, rgb_msg.header.stamp)
if not success:
return
# RGB Image
try:
rgb_arr = np.frombuffer(rgb_msg.data, np.uint8)
image = cv2.imdecode(rgb_arr, cv2.IMREAD_COLOR)
        except (cv_bridge.CvBridgeError, cv2.error) as e:
            rospy.logerr(f"Failed to decode compressed RGB image: {e}")
            return
        if image is None:
            rospy.logerr("Decoded RGB image is empty")
            return
if self.vip_algo.train_cfg.sem:
image = self.semPrediction(image)
self.sem_rgb_img = image
self.sem_rgb_odom = pose
self.sem_rgb_new = True
self.ready_for_planning_rgb_sem = True
# publish the image
if self.vip_algo.train_cfg.sem:
image = cv2.resize(image, (480, 360))
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Convert the image to JPEG format
success, compressed_image = cv2.imencode(".jpg", image)
if not success:
rospy.logerr("Failed to compress semantic image")
return
# create compressed image and publish it
sem_msg = CompressedImage()
sem_msg.header = rgb_msg.header
sem_msg.format = "jpeg"
            sem_msg.data = np.array(compressed_image).tobytes()
self.m2f_pub.publish(sem_msg)
return
def semPrediction(self, image):
# semantic estimation with image in BGR format
start = time.time()
image = self.m2f_inference.predict(image)
self.time_sem = time.time() - start
# publish prediction time
self.m2f_timer_data.data = self.time_sem * 1000
self.m2f_timer_pub.publish(self.m2f_timer_data)
return image
def depthCallback(self, depth_msg: Image):
rospy.logdebug(f"Received depth image {depth_msg.header.frame_id}:" f" {depth_msg.header.stamp.to_sec()}")
# image time and pose
self.depth_header = depth_msg.header
success, self.depth_pose = self.poseCallback(depth_msg.header.frame_id, depth_msg.header.stamp)
if not success:
return
# DEPTH Image
image = ros_numpy.numpify(depth_msg)
image[~np.isfinite(image)] = 0
if self.cfg.depth_uint_type:
image = image / 1000.0
image[image > self.cfg.max_depth] = 0.0
if self.cfg.image_flip:
image = PIL.Image.fromarray(image)
self.depth_img = np.array(image.transpose(PIL.Image.Transpose.ROTATE_180))
else:
self.depth_img = image
# transform goal into robot frame and world frame
if self.is_goal_init:
if self.goal_pose.header.frame_id != self.cfg.robot_id:
try:
trans = self.tf_buffer.lookup_transform(
self.cfg.robot_id,
self.goal_pose.header.frame_id,
self.depth_header.stamp,
rospy.Duration(1.0),
)
self.goal_robot_frame = tf2_geometry_msgs.do_transform_point(self.goal_pose, trans)
except (
tf2_ros.LookupException,
tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException,
):
                    rospy.logerr(
                        f"Goal: failed to transform {self.goal_pose.header.frame_id} into {self.cfg.robot_id}"
                    )
return
else:
self.goal_robot_frame = self.goal_pose
if self.goal_pose.header.frame_id != self.cfg.world_id:
try:
trans = self.tf_buffer.lookup_transform(
self.cfg.world_id,
self.goal_pose.header.frame_id,
self.depth_header.stamp,
rospy.Duration(1.0),
)
self.goal_world_frame = tf2_geometry_msgs.do_transform_point(self.goal_pose, trans)
except (
tf2_ros.LookupException,
tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException,
):
                    rospy.logerr(
                        f"Goal: failed to transform {self.goal_pose.header.frame_id} into {self.cfg.world_id}"
                    )
return
else:
self.goal_world_frame = self.goal_pose
# get static transform and cam offset
if self.init_goal_trans:
if self.cfg.mount_cam_frame is None:
# get transform from robot frame to depth camera frame
success, tf_robot_depth = self.poseCallback(
self.depth_header.frame_id,
self.depth_header.stamp,
self.cfg.robot_id,
)
else:
success, tf_robot_depth = self.poseCallback(
self.cfg.mount_cam_frame,
self.depth_header.stamp,
self.cfg.robot_id,
)
if not success:
return
self.cam_offset = tf_robot_depth[0:3]
self.cam_rot = stf.Rotation.from_quat(tf_robot_depth[3:7]).as_matrix() @ ROS_TO_ROBOTICS_MAT
if not self.cfg.image_flip:
# rotation is included in ROS_TO_ROBOTICS_MAT and has to
# be removed when not fliped
self.cam_rot = self.cam_rot @ CAMERA_FLIP_MAT
if self.debug:
print(
"CAM ROT",
stf.Rotation.from_matrix(self.cam_rot).as_euler("xyz", degrees=True),
)
self.init_goal_trans = False
self.goal_cam_frame_set = True
# declare ready for planning
self.ready_for_planning_depth = True
self.is_goal_processed = True
return
""" Camera Info Callbacks"""
def depthCamInfoCallback(self, cam_info_msg: CameraInfo):
if not self.depth_intrinsics_init:
rospy.loginfo("Received depth camera info")
self.K_depth = cam_info_msg.K
self.K_depth = np.array(self.K_depth).reshape(3, 3)
self.depth_intrinsics_init = True
return
def rgbCamInfoCallback(self, cam_info_msg: CameraInfo):
if not self.rgb_intrinsics_init:
rospy.loginfo("Received rgb camera info")
self.K_rgb = cam_info_msg.K
self.K_rgb = np.array(self.K_rgb).reshape(3, 3)
self.rgb_intrinsics_init = True
return
if __name__ == "__main__":
node_name = "viplanner_node"
rospy.init_node(node_name, anonymous=False)
parser = ROSArgparse(relative=node_name)
# planning
parser.add_argument("main_freq", type=int, default=5, help="frequency of path planner")
parser.add_argument("image_flip", type=bool, default=True, help="is the image fliped")
parser.add_argument("conv_dist", type=float, default=0.5, help="converge range to the goal")
parser.add_argument(
"max_depth",
type=float,
default=10.0,
help="max depth distance in image",
)
parser.add_argument(
"overlap_ratio_thres",
type=float,
default=0.7,
help="overlap threshold betweens sem/rgb and depth image",
)
parser.add_argument(
"depth_zero_ratio_thres",
type=float,
default=0.7,
help="ratio of depth image that is non-zero",
)
# networks
parser.add_argument(
"model_save",
type=str,
default="models/vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDepSem_costSem_optimSGD",
help=("model directory (within should be a file called model.pt and" " model.yaml)"),
)
parser.add_argument(
"m2f_cfg_file",
type=str,
default=("models/coco_panoptic/swin/maskformer2_swin_tiny_bs16_50ep.yaml"),
help=("config file for m2f model (or pre-trained backbone for direct RGB" " input)"),
)
parser.add_argument(
"m2f_model_path",
type=str,
default="models/coco_panoptic/swin/model_final_9fd0ae.pkl",
help=("read model for m2f model (or pre-trained backbone for direct RGB" " input)"),
)
# ROS topics
parser.add_argument(
"depth_topic",
type=str,
default="/rgbd_camera/depth/image",
help="depth image ros topic",
)
parser.add_argument(
"depth_info_topic",
type=str,
default="/depth_camera_front_upper/depth/camera_info",
help="depth image info topic (get intrinsic matrix)",
)
parser.add_argument(
"rgb_topic",
type=str,
default="/wide_angle_camera_front/image_raw/compressed",
help="rgb camera topic",
)
parser.add_argument(
"rgb_info_topic",
type=str,
default="/wide_angle_camera_front/camera_info",
help="rgb camera info topic (get intrinsic matrix)",
)
parser.add_argument(
"goal_topic",
type=str,
default="/mp_waypoint",
help="goal waypoint ros topic",
)
parser.add_argument(
"path_topic",
type=str,
default="/viplanner/path",
help="VIP Path topic",
)
parser.add_argument(
"m2f_timer_topic",
type=str,
default="/viplanner/m2f_timer",
help="Time needed for semantic segmentation",
)
parser.add_argument(
"depth_uint_type",
type=bool,
default=False,
help="image in uint type or not",
)
parser.add_argument(
"compressed",
type=bool,
default=True,
help="If compressed rgb topic is used",
)
parser.add_argument(
"mount_cam_frame",
type=str,
default=None,
help="When cam is mounted, which frame to take for pose compute",
)
# frame_ids
parser.add_argument("robot_id", type=str, default="base", help="robot TF frame id")
parser.add_argument("world_id", type=str, default="odom", help="world TF frame id")
# fear reaction
parser.add_argument(
"is_fear_act",
type=bool,
default=True,
help="is open fear action or not",
)
parser.add_argument(
"buffer_size",
type=int,
default=10,
help="buffer size for fear reaction",
)
parser.add_argument(
"angular_thread",
type=float,
default=0.3,
help="angular thread for turning",
)
parser.add_argument(
"track_dist",
type=float,
default=0.5,
help="look ahead distance for path tracking",
)
# smart joystick
parser.add_argument(
"joyGoal_scale",
type=float,
default=0.5,
help="distance for joystick goal",
)
args = parser.parse_args()
# model save path
args.model_save = os.path.join(pack_path, args.model_save)
args.m2f_config_path = os.path.join(pack_path, args.m2f_cfg_file)
args.m2f_model_path = os.path.join(pack_path, args.m2f_model_path)
node = VIPlannerNode(args)
node.spin()
| 39,105 | Python | 36.674374 | 119 | 0.536428 |
leggedrobotics/viplanner/ros/planner/src/vip_inference.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import numpy as np
import torch
import torchvision.transforms as transforms
from viplanner.config.learning_cfg import TrainCfg
from viplanner.plannernet import AutoEncoder, DualAutoEncoder, get_m2f_cfg
from viplanner.traj_cost_opt.traj_opt import TrajOpt
torch.set_default_dtype(torch.float32)
class VIPlannerInference:
def __init__(
self,
cfg,
) -> None:
"""VIPlanner Inference Script
Args:
cfg (Namespace): Config Namespace
"""
# get configs
model_path = os.path.join(cfg.model_save, "model.pt")
config_path = os.path.join(cfg.model_save, "model.yaml")
# get train config
self.train_cfg: TrainCfg = TrainCfg.from_yaml(config_path)
# get model
if self.train_cfg.rgb:
m2f_cfg = get_m2f_cfg(cfg.m2f_config_path)
self.pixel_mean = m2f_cfg.MODEL.PIXEL_MEAN
self.pixel_std = m2f_cfg.MODEL.PIXEL_STD
else:
m2f_cfg = None
self.pixel_mean = [0, 0, 0]
self.pixel_std = [1, 1, 1]
if self.train_cfg.rgb or self.train_cfg.sem:
self.net = DualAutoEncoder(train_cfg=self.train_cfg, m2f_cfg=m2f_cfg)
else:
self.net = AutoEncoder(
encoder_channel=self.train_cfg.in_channel,
k=self.train_cfg.knodes,
)
try:
model_state_dict, _ = torch.load(model_path)
except ValueError:
model_state_dict = torch.load(model_path)
self.net.load_state_dict(model_state_dict)
# inference script = no grad for model
self.net.eval()
# move to GPU if available
if torch.cuda.is_available():
self.net = self.net.cuda()
self._device = "cuda"
else:
self._device = "cpu"
# transforms
self.transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize(tuple(self.train_cfg.img_input_size)),
]
)
# get trajectory generator
self.traj_generate = TrajOpt()
return
def img_converter(self, img: np.ndarray) -> torch.Tensor:
# crop image and convert to tensor
img = self.transforms(img)
return img.unsqueeze(0).to(self._device)
def plan(
self,
depth_image: np.ndarray,
sem_rgb_image: np.ndarray,
goal_robot_frame: torch.Tensor,
) -> tuple:
"""Plan to path towards the goal given depth and semantic image
Args:
depth_image (np.ndarray): Depth image from the robot
goal_robot_frame (torch.Tensor): Goal in robot frame
sem_rgb_image (np.ndarray): Semantic/ RGB Image from the robot.
Returns:
tuple: _description_
"""
with torch.no_grad():
depth_image = self.img_converter(depth_image).float()
if self.train_cfg.rgb:
sem_rgb_image = (sem_rgb_image - self.pixel_mean) / self.pixel_std
sem_rgb_image = self.img_converter(sem_rgb_image.astype(np.uint8)).float()
keypoints, fear = self.net(depth_image, sem_rgb_image, goal_robot_frame.to(self._device))
# generate trajectory
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return traj.cpu().squeeze(0).numpy(), fear.cpu().numpy()
def plan_depth(
self,
depth_image: np.ndarray,
goal_robot_frame: torch.Tensor,
) -> tuple:
with torch.no_grad():
depth_image = self.img_converter(depth_image).float()
keypoints, fear = self.net(depth_image, goal_robot_frame.to(self._device))
# generate trajectory
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return traj.cpu().squeeze(0).numpy(), fear.cpu().numpy()
# EoF
| 4,084 | Python | 29.946969 | 101 | 0.588883 |
leggedrobotics/viplanner/ros/planner/config/anymal_mount.yaml | # planning
main_freq: 10
image_flip: False
conv_dist: 0.5
max_depth: 15
overlap_ratio_thres: 0.80
depth_zero_ratio_thres: 0.6
# network model
model_save: models
m2f_model_path: models/sem_model.pth
m2f_cfg_file: /root/git/mmdetection/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
# ros topics
depth_topic: /depth_cam_mounted_front/depth/image_rect_raw
depth_info_topic: /depth_cam_mounted_front/depth/camera_info
rgb_topic: /depth_cam_mounted_front/color/image_raw/compressed
rgb_info_topic: /depth_cam_mounted_front/color/camera_info
mount_cam_frame: wide_angle_camera_rear_camera_parent # mounted camera is not part of the TF tree, specify its frame here
goal_topic: /mp_waypoint
path_topic: /viplanner/path
m2f_timer_topic: /viplanner/m2f_timer
depth_uint_type: True
compressed: True
# frame ids
robot_id: base_inverted # also adjust in path_follower.launch
world_id: odom
# fear reaction
is_fear_act: False
buffer_size: 3
angular_thread: 0.3
track_dist: 0.5
# smart joystick
joyGoal_scale: 2.5
| 1,279 | YAML | 37.787878 | 129 | 0.619234 |
leggedrobotics/viplanner/ros/planner/config/anymal_d.yaml | # planning
main_freq: 5
image_flip: True
conv_dist: 0.5
max_depth: 15
overlap_ratio_thres: 0.5
depth_zero_ratio_thres: 0.6
# network model
model_save: models/vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDep_costSem_optimSGD_depth
# mmdet
m2f_model_path: models/m2f_models/mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535-54df384a.pth
m2f_cfg_file: /root/git/mmdetection/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
# ros topics
depth_topic: /depth_camera_front_upper/depth/image_rect_raw
depth_info_topic: /depth_camera_front_upper/depth/camera_info
rgb_topic: /wide_angle_camera_front/image_raw/compressed
rgb_info_topic: /wide_angle_camera_front/camera_info
goal_topic: /mp_waypoint
path_topic: /viplanner/path
m2f_timer_topic: /viplanner/m2f_timer
depth_uint_type: True
compressed: True
# frame ids
robot_id: base # also adjust in path_follower.launch
world_id: odom
# fear reaction
is_fear_act: False
buffer_size: 3
angular_thread: 0.3
track_dist: 0.5
# smart joystick
joyGoal_scale: 2.5
| 1,279 | YAML | 36.647058 | 113 | 0.630962 |
leggedrobotics/viplanner/ros/planner/config/anymal_c.yaml | # planning
main_freq: 10
image_flip: False
conv_dist: 0.5
max_depth: 15
overlap_ratio_thres: 0.80
depth_zero_ratio_thres: 0.6
# network model
model_save: models/vip_models/plannernet_env2azQ1b91cZZ_cam_mount_ep100_inputDepSem_costSem_optimSGD_new_cam_mount_combi_lossWidthMod_wgoal4.0_warehouse
# mmdet
# m2f_model_path: models/mask2former_r50_8xb2-lsj-50e_coco-panoptic_20230118_125535-54df384a.pth
m2f_model_path: models/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth
# m2f_cfg_file: ${HOME}/.local/lib/python3.8/site-packages/mmdet/.mim/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
m2f_cfg_file: /home/${USER}/.local/lib/python3.8/site-packages/mmdet/.mim/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
# ros topics
depth_topic: /depth_camera_front/depth/image_rect_raw
depth_info_topic: /depth_camera_front/depth/camera_info
rgb_topic: /depth_camera_front/color/image_raw/compressed
rgb_info_topic: /depth_camera_front/color/camera_info
goal_topic: /mp_waypoint
path_topic: /viplanner/path
m2f_timer_topic: /viplanner/m2f_timer
depth_uint_type: True
compressed: True
# frame ids
robot_id: base # also adjust in path_follower.launch
world_id: odom
# fear reaction
is_fear_act: False
buffer_size: 3
angular_thread: 0.3
track_dist: 0.5
# smart joystick
joyGoal_scale: 2.5
| 1,608 | YAML | 43.694443 | 164 | 0.654851 |
leggedrobotics/viplanner/ros/planner/utils/rosutil.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
import numpy as np
# ROS
import rospy
import torch
class ROSArgparse:
def __init__(self, relative=None):
self.relative = relative
def add_argument(self, name, default, type=None, help=None):
name = os.path.join(self.relative, name)
if rospy.has_param(name):
rospy.loginfo("Get param %s", name)
else:
rospy.logwarn("Couldn't find param: %s, Using default: %s", name, default)
value = rospy.get_param(name, default)
variable = name[name.rfind("/") + 1 :].replace("-", "_")
        setattr(self, variable, value)
def parse_args(self):
return self
def msg_to_torch(data, shape=np.array([-1])):
return torch.from_numpy(data).view(shape.tolist())
def torch_to_msg(tensor):
return [tensor.view(-1).cpu().numpy(), tensor.shape]
# EoF
| 1,108 | Python | 22.595744 | 86 | 0.611011 |
leggedrobotics/viplanner/ros/visualizer/README.md | # VIPlanner Visualization Node
Known error when starting the node:
[Open3D Error] (void open3d::visualization::gui::Application::Initialize(const char*)) ./cpp/open3d/visualization/gui/Application.cpp:256: Resource directory does not have Open3D resources: /usr/local/include/open3d/include/open3d/resources
Fix: copy the `resources` directory shipped with the Open3D Python package into the directory listed in the error (or point the installation directly at that directory). The resources do not seem to be part of the regular C++ installation, probably because it was built in headless mode.
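A minimal sketch to print the directory that has to be copied, assuming the Open3D pip wheel is installed and bundles its GUI resources next to the package (the exact layout may differ between Open3D versions):
```python
# Minimal sketch (assumption: the pip wheel ships an `open3d/resources` folder).
import os
import open3d
resources_dir = os.path.join(os.path.dirname(open3d.__file__), "resources")
print(resources_dir)  # copy this directory to the path reported in the error
```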
Additionally, install the PCL development headers: `sudo apt install libpcl-dev`
| 490 | Markdown | 48.099995 | 241 | 0.804082 |
leggedrobotics/viplanner/ros/visualizer/src/viplannerViz.cpp | /*
* Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
* Author: Pascal Roth
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>
#include <iostream>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <std_msgs/Bool.h>
#include <std_msgs/Float32.h>
#include <nav_msgs/Path.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PointStamped.h>
#include <geometry_msgs/PolygonStamped.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CompressedImage.h>
#include <sensor_msgs/CameraInfo.h>
#include <tf/transform_listener.h>
// #include <pcl_ros/transforms.h>
#include <tf/transform_datatypes.h>
#include <tf/transform_broadcaster.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <Eigen/Dense>
#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <cmath>
#include <cv_bridge/cv_bridge.h>
using namespace std;
using namespace Eigen;
static const double mesh_size = 0.3;
static const int max_waypoints = 50;
Eigen::Matrix3d CAM_TO_ROBOT_FRAME = [] {
Eigen::Matrix3d m;
m << 0, 0, 1,
1, 0, 0,
0, 1, 0;
return m;
}();
Eigen::Matrix3d FLIP_MAT = [] {
Eigen::Matrix3d m;
m << 1, 0, 0,
0, -1, 0,
0, 0, -1;
return m;
}();
Eigen::Matrix3d ROT_MAT = [] {
Eigen::Matrix3d m;
m << 0, 0, 1,
1, 0, 0,
0, 1, 0;
return m;
}();
class VIPlannerViz {
public:
VIPlannerViz() : nh_("~") {
// Load parameters from the ROS parameter server
// parameter_name, variable_name, default_value
nh_.param<std::string> ("vizTopic", vizTopic_, "/viz_path_depth");
nh_.param<std::string> ("imgTopic", img_topic_, "/depth_camera_front_upper/depth/image_rect_raw");
nh_.param<std::string> ("infoTopic", info_topic_, "/depth_camera_front_upper/depth/camera_info");
nh_.param<std::string> ("pathTopic", path_topic_, "/path");
nh_.param<std::string> ("goalTopic", goal_topic_, "/mp_waypoint");
nh_.param<std::string> ("robot_frame", robot_frame_, "base");
nh_.param<std::string> ("odom_frame", odom_frame_, "odom");
nh_.param<std::string> ("domain", domain_, "depth");
nh_.param<bool> ("image_flip", image_flip_, true);
nh_.param<float> ("max_depth", max_depth_, 10.0);
// Subscribe to the image and the intrinsic matrix
if (domain_ == "rgb") {
subImage_ = nh_.subscribe<sensor_msgs::CompressedImage>(img_topic_, 1, &VIPlannerViz::imageRGBCallback, this);
} else if (domain_ == "depth") {
subImage_ = nh_.subscribe<sensor_msgs::Image>(img_topic_, 1, &VIPlannerViz::imageDepthCallback, this);
} else {
ROS_ERROR("Domain not supported!");
}
subCamInfo_ = nh_.subscribe<sensor_msgs::CameraInfo>(info_topic_, 1, &VIPlannerViz::camInfoCallback, this);
// Subscribe to the path
subPath_ = nh_.subscribe<nav_msgs::Path>(path_topic_, 1, &VIPlannerViz::pathCallback, this);
// Subscribe to the goal
subGoal_ = nh_.subscribe<geometry_msgs::PointStamped>(goal_topic_, 1, &VIPlannerViz::goalCallback, this);
// Publish the image with the path
pubImage_ = nh_.advertise<sensor_msgs::Image>(vizTopic_, 1);
}
// CALLBACKS
void imageRGBCallback(const sensor_msgs::CompressedImage::ConstPtr& rgb_msg)
{
ROS_DEBUG_STREAM("Received rgb image " << rgb_msg->header.frame_id << ": " << rgb_msg->header.stamp.toSec());
// image pose
poseCallback(rgb_msg->header.frame_id);
// RGB Image
try {
cv::Mat image = cv::imdecode(cv::Mat(rgb_msg->data), cv::IMREAD_COLOR);
// rotate image 90 degrees counter clockwise
if (!image_flip_) {
cv::rotate(image, image, cv::ROTATE_90_COUNTERCLOCKWISE);
}
current_image_time_ = rgb_msg->header.stamp;
image_ = image.clone();
image_init_ = true;
}
catch (cv::Exception& e) {
ROS_ERROR_STREAM("CvBridge Error: " << e.what());
}
}
void imageDepthCallback(const sensor_msgs::Image::ConstPtr& depth_msg)
{
ROS_DEBUG_STREAM("Received depth image " << depth_msg->header.frame_id << ": " << depth_msg->header.stamp.toSec());
// Image time and pose
poseCallback(depth_msg->header.frame_id); // Assuming that poseCallback is defined somewhere
current_image_time_ = depth_msg->header.stamp;
// Depth image
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(depth_msg, sensor_msgs::image_encodings::TYPE_16UC1);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
// Convert to Eigen matrix and apply operations
cv::Mat img_mat = cv_ptr->image;
cv::Mat depth_image_float;
img_mat.convertTo(depth_image_float, CV_32FC1, 1.0/1000.0);
cv::Mat mask = cv::Mat::zeros(img_mat.size(), img_mat.type());
cv::compare(depth_image_float, std::numeric_limits<double>::infinity(), mask, cv::CMP_EQ);
depth_image_float.setTo(0, mask);
if (image_flip_)
{
cv::flip(depth_image_float, depth_image_float, 0); // 0 indicates vertical flip
}
image_ = depth_image_float.clone();
image_init_ = true;
}
void poseCallback(const std::string& frame_id)
{
tf::StampedTransform transform;
try {
tf_listener.waitForTransform(odom_frame_, frame_id, ros::Time(0), ros::Duration(4.0));
tf_listener.lookupTransform(odom_frame_, frame_id, ros::Time(0), transform);
}
        catch (tf::TransformException& e) {
            ROS_ERROR_STREAM("Failed to transform " << frame_id << " into " << odom_frame_ << " frame: " << e.what());
            return;  // keep the previous pose_ instead of reading an uninitialized transform
        }
tf::poseTFToMsg(transform, pose_);
}
void camInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& cam_info_msg)
{
if (!intrinsics_init_)
{
ROS_INFO("Received camera info");
// Extract the intrinsic matrix from the CameraInfo message
intrinsics_.at<double>(0, 0) = cam_info_msg->K[0];
intrinsics_.at<double>(0, 1) = cam_info_msg->K[1];
intrinsics_.at<double>(0, 2) = cam_info_msg->K[2];
intrinsics_.at<double>(1, 0) = cam_info_msg->K[3];
intrinsics_.at<double>(1, 1) = cam_info_msg->K[4];
intrinsics_.at<double>(1, 2) = cam_info_msg->K[5];
intrinsics_.at<double>(2, 0) = cam_info_msg->K[6];
intrinsics_.at<double>(2, 1) = cam_info_msg->K[7];
intrinsics_.at<double>(2, 2) = cam_info_msg->K[8];
intrinsics_init_ = true;
}
}
void pathCallback(const nav_msgs::Path::ConstPtr& path_msg)
{
        // Create a zero-initialized Eigen matrix with a fixed number of rows
        Eigen::MatrixXf path_mat_new = Eigen::MatrixXf::Zero(max_waypoints, 3);
        // Copy the x, y, and z coordinates from the path message into the matrix,
        // clamped to max_waypoints to avoid out-of-bounds writes
        const int num_points = std::min(static_cast<int>(path_msg->poses.size()), max_waypoints);
        for (int i = 0; i < num_points; i++)
{
path_mat_new(i, 0) = path_msg->poses[i].pose.position.x;
path_mat_new(i, 1) = path_msg->poses[i].pose.position.y;
path_mat_new(i, 2) = path_msg->poses[i].pose.position.z;
}
// Assign the new path to the path_ member variable
path_mat_ = path_mat_new;
path_init_ = true;
}
void goalCallback(const geometry_msgs::PointStamped::ConstPtr& goal_msg)
{
// Extract the goal point from the message
float x = goal_msg->point.x;
float y = goal_msg->point.y;
float z = goal_msg->point.z;
// Assign the goal point to the goal_ member variable
goal_ << x, y, z;
goal_init_ = true;
std::cout << "GOAL Received" << std::endl;
}
// HELPER FUNCTIONS
MatrixXf TransformPoints(Vector3f translation, Quaternionf rotation, MatrixXf points) {
// Convert the quaternion to a rotation matrix
Matrix3f rotation_matrix = rotation.toRotationMatrix();
// Multiply the translated points by the rotation matrix
points = points * rotation_matrix.transpose();
// Translate the points by the relative translation vector
points.rowwise() += translation.transpose();
return points;
}
void getOdom(Eigen::Vector3f& translation, Eigen::Quaternionf& rotation)
{
try
{
// Get the transformation from the reference frame to the target frame
tf::StampedTransform transform;
tf_listener.lookupTransform(odom_frame_, robot_frame_, ros::Time(0), transform);
// Extract the translation and rotation from the transformation
translation << transform.getOrigin().x(), transform.getOrigin().y(), transform.getOrigin().z();
rotation = Eigen::Quaternionf(transform.getRotation().getW(), transform.getRotation().getX(), transform.getRotation().getY(), transform.getRotation().getZ());
}
catch (tf::TransformException& ex)
{
ROS_ERROR("%s", ex.what());
}
}
// RUN NODE
void run() {
Eigen::Vector3f translation;
Eigen::Quaternionf rotation;
// Main loop
while (ros::ok()) {
if (path_init_ && goal_init_ && image_init_ && intrinsics_init_) {
// Get the current robot pose
getOdom(translation, rotation);
// Transform the path into world frame
MatrixXf transformed_path = TransformPoints(translation, rotation, path_mat_);
// orientate camera
cv::Mat cam_translation = (cv::Mat_<double>(3,1) << pose_.position.x, pose_.position.y, pose_.position.z);
Quaterniond rotation_matrix = Quaterniond(pose_.orientation.w, pose_.orientation.x, pose_.orientation.y, pose_.orientation.z);
Eigen::Matrix3d rotation_matrix_robotic_frame = rotation_matrix.toRotationMatrix() * CAM_TO_ROBOT_FRAME;
cv::Mat cam_rotation;
cv::eigen2cv(rotation_matrix_robotic_frame, cam_rotation);
cv::Mat rot_vector;
cv::Rodrigues(cam_rotation, rot_vector);
// Project 3D points onto image plane
std::vector<cv::Point2f> points2d;
cv::Mat path_points;
cv::eigen2cv(transformed_path, path_points);
cv::projectPoints(path_points, rot_vector, cam_translation, intrinsics_, cv::noArray(), points2d);
// Get the position of the path points in camera frame --> needed to get the radius of the sphere in the image
std::vector<cv::Point3f> points3d(path_points.rows);
for (int i = 0; i < path_points.rows; i++) {
cv::Mat p = path_points.row(i); // get i-th row of path_points
cv::Mat p_double;
p.convertTo(p_double, CV_64F); // convert to double
cv::Mat p_cam = cam_rotation * (p_double.t() - cam_translation); // transpose row vector and subtract translation
cv::Point3f p_cam_3d(p_cam.at<double>(0), p_cam.at<double>(1), p_cam.at<double>(2)); // convert to Point3f
points3d[i] = p_cam_3d;
}
// Draw points on image
cv::Mat outputImage;
if (image_.channels() == 1) {
          // normalize the single-channel image to 8-bit before converting it to BGR
          cv::Mat gray_image;
cv::normalize(image_, gray_image, 0, 255, cv::NORM_MINMAX, CV_8UC1);
cv::cvtColor(gray_image, outputImage, cv::COLOR_GRAY2BGR);
}
else {
outputImage = image_.clone();
}
cv::Mat overlay_image = outputImage.clone();
for (int i = 0; i < points2d.size(); i++) {
          cv::Point3f p = points3d[i];
          cv::Point2f p2d = points2d[i];
          // scale the drawn radius inversely with the point's depth, clamped to [0, 5] pixels
          int radius = std::min(std::max(cvRound(5 * intrinsics_.at<double>(0,0) / p.z), 0), 5);
          cv::circle(overlay_image, p2d, radius, cv::Scalar(0, 255, 0), cv::FILLED);
}
        // Blend the path overlay with the original image so the drawn points appear transparent
cv::Mat final_img;
double alpha = 0.4; // Transparency factor.
cv::addWeighted(overlay_image, alpha, outputImage, 1 - alpha, 0, final_img);
// Publish as ROS image
cv_bridge::CvImage cv_image;
cv_image.header.stamp = current_image_time_;
cv_image.encoding = sensor_msgs::image_encodings::BGR8;
cv_image.image = final_img;
pubImage_.publish(cv_image.toImageMsg());
// Show resulting image
// cv::imshow("Overlay", outputImage);
// cv::waitKey(1);
}
ros::spinOnce();
loop_rate_.sleep();
}
}
private:
// ROS
ros::NodeHandle nh_;
ros::Subscriber subImage_;
ros::Subscriber subCamInfo_;
ros::Subscriber subGoal_;
ros::Subscriber subPath_;
ros::Publisher pubImage_;
ros::Rate loop_rate_{10};
ros::Time current_image_time_;
tf::TransformListener tf_listener;
// parameters
std::string vizTopic_;
std::string img_topic_;
std::string info_topic_;
std::string path_topic_;
std::string goal_topic_;
std::string robot_frame_;
std::string odom_frame_;
std::string domain_;
float max_depth_;
bool image_flip_;
// Flags
bool intrinsics_init_ = false;
bool image_init_ = false;
bool path_init_ = false;
bool goal_init_ = false;
bool renderer_init_ = false;
// variables
cv::Mat image_;
Eigen::Vector3f goal_;
Eigen::Matrix<float, max_waypoints, 3> path_mat_;
cv::Mat intrinsics_ = cv::Mat::eye(3, 3, CV_64F);
geometry_msgs::Pose pose_;
};
int main(int argc, char** argv) {
ros::init(argc, argv, "VIPlannerViz");
VIPlannerViz node;
node.run();
return 0;
}
| 14,829 | C++ | 35.617284 | 170 | 0.569695 |
leggedrobotics/viplanner/ros/visualizer/src/viplannerViz_open3d.cpp | /*
* Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
* Author: Pascal Roth
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>
#include <iostream>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <std_msgs/Bool.h>
#include <std_msgs/Float32.h>
#include <nav_msgs/Path.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PointStamped.h>
#include <geometry_msgs/PolygonStamped.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CompressedImage.h>
#include <sensor_msgs/CameraInfo.h>
#include <tf/transform_listener.h>
// #include <pcl_ros/transforms.h>
#include <tf/transform_datatypes.h>
#include <tf/transform_broadcaster.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <Eigen/Dense>
#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>
#include <open3d/Open3D.h>
#include <open3d/geometry/TriangleMesh.h>
#include <open3d/visualization/gui/Application.h>
#include <open3d/visualization/rendering/MaterialRecord.h>
#include <open3d/visualization/rendering/Camera.h>
#include <open3d/visualization/rendering/filament/FilamentEngine.h>
#include <open3d/visualization/rendering/filament/FilamentRenderer.h>
#include <cmath>
#include <cv_bridge/cv_bridge.h>
using namespace open3d;
using namespace std;
using namespace Eigen;
static const double mesh_size = 0.3;
static const int n_waypoints = 51;
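// NOTE: n_waypoints must match the number of poses published on the path topic (checked in pathCallback)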
Eigen::Matrix3f CAM_TO_ROBOT_FRAME = [] {
Eigen::Matrix3f m;
m << 0, 0, 1,
1, 0, 0,
0, 1, 0;
return m;
}();
// Headless rendering requires Open3D to be compiled with OSMesa support.
// Add -DENABLE_HEADLESS_RENDERING=ON when you run CMake.
static const bool kUseHeadless = true;
class VIPlannerViz {
public:
VIPlannerViz(open3d::visualization::gui::Application& app) : app_(app), nh_("~") {
// Load parameters from the ROS parameter server
// parameter_name, variable_name, default_value
nh_.param<std::string> ("vizTopic", vizTopic_, "/viz_path_depth");
nh_.param<std::string> ("imgTopic", img_topic_, "/depth_camera_front_upper/depth/image_rect_raw");
nh_.param<std::string> ("infoTopic", info_topic_, "/depth_camera_front_upper/depth/camera_info");
nh_.param<std::string> ("pathTopic", path_topic_, "/path");
nh_.param<std::string> ("goalTopic", goal_topic_, "/mp_waypoint");
nh_.param<std::string> ("robot_frame", robot_frame_, "base");
nh_.param<std::string> ("odom_frame", odom_frame_, "odom");
nh_.param<std::string> ("domain", domain_, "depth");
nh_.param<bool> ("image_flip", image_flip_, false);
nh_.param<float> ("max_depth", max_depth_, 10.0);
// Subscribe to the image and the intrinsic matrix
if (domain_ == "rgb") {
subImage_ = nh_.subscribe<sensor_msgs::CompressedImage>(img_topic_, 1, &VIPlannerViz::imageRGBCallback, this);
} else if (domain_ == "depth") {
subImage_ = nh_.subscribe<sensor_msgs::Image>(img_topic_, 1, &VIPlannerViz::imageDepthCallback, this);
} else {
ROS_ERROR("Domain not supported!");
}
subCamInfo_ = nh_.subscribe<sensor_msgs::CameraInfo>(info_topic_, 1, &VIPlannerViz::camInfoCallback, this);
// Subscribe to the path
subPath_ = nh_.subscribe<nav_msgs::Path>(path_topic_, 1, &VIPlannerViz::pathCallback, this);
// Subscribe to the goal
subGoal_ = nh_.subscribe<geometry_msgs::PointStamped>(goal_topic_, 1, &VIPlannerViz::goalCallback, this);
// Publish the image with the path
pubImage_ = nh_.advertise<sensor_msgs::Image>(vizTopic_, 1);
// Initialize the open3d objects
if (kUseHeadless) {
open3d::visualization::rendering::EngineInstance::EnableHeadless();
}
mtl.base_color = Eigen::Vector4f(1.f, 1.f, 1.f, 1.f);
mtl.shader = "defaultUnlit";
        // resize the member buffers (declaring new local vectors here would shadow the
        // members and leave them empty when the scene is populated in run())
        small_spheres.resize(n_waypoints);
        small_spheres_fear.resize(n_waypoints);
        mesh_sphere.resize(n_waypoints);
        mesh_sphere_fear.resize(n_waypoints);
mesh_box = open3d::geometry::TriangleMesh::CreateBox(mesh_size/20.0);
mesh_box->PaintUniformColor(Vector3d(0.0, 0.0, 1.0)); // blue
for (int i = 0; i < n_waypoints; ++i) {
small_spheres[i] = open3d::geometry::TriangleMesh::CreateSphere(mesh_size/20.0);
small_spheres[i]->PaintUniformColor(Vector3d(0.4, 1.0, 0.1)); // green
small_spheres_fear[i] = open3d::geometry::TriangleMesh::CreateSphere(mesh_size/20.0);
small_spheres_fear[i]->PaintUniformColor(Vector3d(0.99, 0.2, 0.1)); // red
mesh_sphere[i] = open3d::geometry::TriangleMesh::CreateSphere(mesh_size/5.0);
mesh_sphere[i]->PaintUniformColor(Vector3d(0.4, 1.0, 0.1)); // green
mesh_sphere_fear[i] = open3d::geometry::TriangleMesh::CreateSphere(mesh_size/5.0);
mesh_sphere_fear[i]->PaintUniformColor(Vector3d(0.99, 0.2, 0.1)); // red
}
}
// CALLBACKS
void imageRGBCallback(const sensor_msgs::CompressedImage::ConstPtr& rgb_msg)
{
ROS_DEBUG_STREAM("Received rgb image " << rgb_msg->header.frame_id << ": " << rgb_msg->header.stamp.toSec());
// image pose
geometry_msgs::Pose pose_ = poseCallback(rgb_msg->header.frame_id);
// RGB Image
try {
cv::Mat image = cv::imdecode(cv::Mat(rgb_msg->data), cv::IMREAD_COLOR);
// rotate image 90 degrees counter clockwise
if (!image_flip_) {
cv::rotate(image, image, cv::ROTATE_90_COUNTERCLOCKWISE);
}
current_image_time_ = rgb_msg->header.stamp;
image_ = image.clone();
image_init_ = true;
}
catch (cv::Exception& e) {
ROS_ERROR_STREAM("CvBridge Error: " << e.what());
}
}
void imageDepthCallback(const sensor_msgs::Image::ConstPtr& depth_msg)
{
ROS_DEBUG_STREAM("Received depth image " << depth_msg->header.frame_id << ": " << depth_msg->header.stamp.toSec());
// Image time and pose
geometry_msgs::Pose pose_ = poseCallback(depth_msg->header.frame_id); // Assuming that poseCallback is defined somewhere
current_image_time_ = depth_msg->header.stamp;
// Depth image
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(depth_msg, sensor_msgs::image_encodings::TYPE_32FC1);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
        // Scale the depth image and mask out invalid (infinite) values
cv::Mat img_mat = cv_ptr->image;
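        // the raw depth values are assumed to be in millimeters and are scaled to meters below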
img_mat /= 1000;
cv::Mat mask = cv::Mat::zeros(img_mat.size(), img_mat.type());
cv::compare(img_mat, std::numeric_limits<double>::infinity(), mask, cv::CMP_EQ);
img_mat.setTo(0, mask);
if (image_flip_)
{
cv::flip(img_mat, img_mat, 1); // 1 indicates horizontal flip
}
image_ = img_mat.clone();
image_init_ = true;
}
geometry_msgs::Pose poseCallback(const std::string& frame_id, const std::string& target_frame_id = "")
{
std::string target_frame = target_frame_id.empty() ? odom_frame_ : target_frame_id;
tf::StampedTransform transform;
try {
tf_listener.waitForTransform(target_frame, frame_id, ros::Time(0), ros::Duration(4.0));
tf_listener.lookupTransform(target_frame, frame_id, ros::Time(0), transform);
}
catch (tf::TransformException& e) {
ROS_ERROR_STREAM("Fail to transfer " << frame_id << " into " << target_frame << " frame: " << e.what());
}
geometry_msgs::Pose pose;
tf::poseTFToMsg(transform, pose);
return pose;
}
void camInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& cam_info_msg)
{
if (!intrinsics_init_)
{
ROS_INFO("Received camera info");
// Extract the camera intrinsic matrix from the message
intrinsics_ << cam_info_msg->K[0], cam_info_msg->K[1], cam_info_msg->K[2],
cam_info_msg->K[3], cam_info_msg->K[4], cam_info_msg->K[5],
cam_info_msg->K[6], cam_info_msg->K[7], cam_info_msg->K[8];
intrinsics_init_ = true;
}
}
void pathCallback(const nav_msgs::Path::ConstPtr& path_msg)
{
// Create an Eigen matrix with the same number of rows as the path
Eigen::MatrixXf path_mat_new(path_msg->poses.size(), 3);
// check if path length is same as expected length
if (path_mat_new.rows() != n_waypoints)
{
ROS_ERROR("Path length is not same as expected length");
return;
}
// Copy the x, y, and z coordinates from the path message into the matrix
for (int i = 0; i < path_msg->poses.size(); i++)
{
path_mat_new(i, 0) = path_msg->poses[i].pose.position.x;
path_mat_new(i, 1) = path_msg->poses[i].pose.position.y;
path_mat_new(i, 2) = path_msg->poses[i].pose.position.z;
}
// Assign the new path to the path_ member variable
path_mat_ = path_mat_new;
path_init_ = true;
}
void goalCallback(const geometry_msgs::PointStamped::ConstPtr& goal_msg)
{
// Extract the goal point from the message
float x = goal_msg->point.x;
float y = goal_msg->point.y;
float z = goal_msg->point.z;
// Assign the goal point to the goal_ member variable
goal_ << x, y, z;
goal_init_ = true;
std::cout << "GOAL Received" << std::endl;
}
// HELPER FUNCTIONS
MatrixXf TransformPoints(Vector3f translation, Quaternionf rotation, MatrixXf points) {
// Convert the quaternion to a rotation matrix
Matrix3f rotation_matrix = rotation.toRotationMatrix();
        // Rotate the points by the rotation matrix
points = points * rotation_matrix.transpose();
// Translate the points by the relative translation vector
points.rowwise() += translation.transpose();
// Print the transformed points
std::cout << points << std::endl;
return points;
}
void getOdom(Eigen::Vector3f& translation, Eigen::Quaternionf& rotation)
{
try
{
// Get the transformation from the reference frame to the target frame
tf::StampedTransform transform;
tf_listener.lookupTransform(odom_frame_, robot_frame_, ros::Time(0), transform);
// Extract the translation and rotation from the transformation
translation << transform.getOrigin().x(), transform.getOrigin().y(), transform.getOrigin().z();
rotation = Eigen::Quaternionf(transform.getRotation().getW(), transform.getRotation().getX(), transform.getRotation().getY(), transform.getRotation().getZ());
}
catch (tf::TransformException& ex)
{
ROS_ERROR("%s", ex.what());
}
}
// RUN NODE
void run() {
Eigen::Vector3f translation;
Eigen::Quaternionf rotation;
// Main loop
while (ros::ok()) {
if (path_init_ && goal_init_ && image_init_ && intrinsics_init_) {
std::cout << "All data received" << std::endl;
if (!renderer_init_) {
                    // assign to the member pointer (a local `auto *renderer` here would shadow
                    // the member and leave it uninitialized when the scene is created below)
                    renderer =
                        new open3d::visualization::rendering::FilamentRenderer(
                            open3d::visualization::rendering::EngineInstance::GetInstance(), intrinsics_(0, 2), intrinsics_(1, 2),
                            open3d::visualization::rendering::EngineInstance::GetResourceManager()
                        );
renderer_init_ = true;
std::cout << "Renderer created" << std::endl;
}
// Get the current robot pose
getOdom(translation, rotation);
// Transform the path
MatrixXf transformed_path = TransformPoints(translation, rotation, path_mat_);
// create open3d scene
open3d::visualization::rendering::Open3DScene *scene = new open3d::visualization::rendering::Open3DScene(*renderer);
std::cout << "Scene created" << std::endl;
// Translate the points and add them to the scene
for (int i = 0; i < n_waypoints; ++i) {
small_spheres[i]->Translate(transformed_path.row(i).cast<double>());
scene->AddGeometry("small_sphere" + std::to_string(i), small_spheres[i].get(), mtl);
}
std::cout << "Waypoint added" << std::endl;
// orientate camera
Vector3f cam_translation = Vector3f(pose_.position.x, pose_.position.y, pose_.position.z);
Quaternionf cam_rotation = Quaternionf(pose_.orientation.w, pose_.orientation.x, pose_.orientation.y, pose_.orientation.z);
Matrix3f rotation_matrix = cam_rotation.toRotationMatrix();
Vector3f target_vec = cam_translation + cam_rotation * CAM_TO_ROBOT_FRAME * Vector3f(0, 0, -1);
scene->GetCamera()->SetProjection(60.0f, float(intrinsics_(0, 2)) / float(intrinsics_(1, 2)), 0.1f,
10.0f, open3d::visualization::rendering::Camera::FovType::Vertical);
scene->GetCamera()->LookAt(target_vec, cam_translation,
Vector3f(1, 0, 0));
std::cout << "Camera set" << std::endl;
auto o3dImage = app_.RenderToImage(*renderer, scene->GetView(), scene->GetScene(),
intrinsics_(0, 2), intrinsics_(1, 2));
if (intrinsics_(0, 2) != image_.size[0] || intrinsics_(1, 2) != image_.size[1]) {
throw std::runtime_error("Image sizes do not match");
}
std::cout << "Image rendered" << std::endl;
// Convert Open3D image to OpenCV format
cv::Mat o3dMat((*o3dImage).height_, (*o3dImage).width_, CV_8UC3, (*o3dImage).data_.data());
cv::cvtColor(o3dMat, o3dMat, cv::COLOR_RGB2BGR);
// Create mask where Open3D image is not white
cv::Mat mask;
cv::cvtColor(o3dMat, mask, cv::COLOR_BGR2GRAY);
cv::threshold(mask, mask, 1, 255, cv::THRESH_BINARY);
// Blend images together
cv::Mat blended = image_.clone();
float alpha = 0.0;
cv::addWeighted(blended, 1-alpha, o3dMat, alpha, 0, blended, CV_8UC3);
o3dMat.copyTo(blended, mask);
std::cout << "Image blended" << std::endl;
// Publish as ROS image
cv_bridge::CvImage cv_image;
cv_image.header.stamp = current_image_time_;
cv_image.encoding = sensor_msgs::image_encodings::BGR8;
cv_image.image = blended;
pubImage_.publish(cv_image.toImageMsg());
// Show resulting image
cv::imshow("Overlay", blended);
cv::waitKey(1);
delete scene;
}
ros::spinOnce();
loop_rate_.sleep();
}
delete renderer;
app_.OnTerminate();
}
private:
// input Argument
open3d::visualization::gui::Application &app_;
// ROS
ros::NodeHandle nh_;
ros::Subscriber subImage_;
ros::Subscriber subCamInfo_;
ros::Subscriber subGoal_;
ros::Subscriber subPath_;
ros::Publisher pubImage_;
ros::Rate loop_rate_{10};
ros::Time current_image_time_;
tf::TransformListener tf_listener;
// parameters
std::string vizTopic_;
std::string img_topic_;
std::string info_topic_;
std::string path_topic_;
std::string goal_topic_;
std::string robot_frame_;
std::string odom_frame_;
std::string domain_;
float max_depth_;
bool image_flip_;
// Flags
bool intrinsics_init_ = false;
bool image_init_ = false;
bool path_init_ = false;
bool goal_init_ = false;
bool renderer_init_ = false;
// variables
cv::Mat image_;
Eigen::Vector3f goal_;
Eigen::Matrix<float, n_waypoints, 3> path_mat_;
Eigen::Matrix<float, 3, 3> intrinsics_;
geometry_msgs::Pose pose_;
// INIT OPEN3d objects
open3d::visualization::rendering::MaterialRecord mtl;
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> small_spheres;
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> small_spheres_fear;
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> mesh_sphere;
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> mesh_sphere_fear;
std::shared_ptr<open3d::geometry::TriangleMesh> mesh_box;
    open3d::visualization::rendering::FilamentRenderer *renderer = nullptr;
};
int main(int argc, char** argv) {
ros::init(argc, argv, "VIPlannerViz");
open3d::visualization::gui::Application &app = open3d::visualization::gui::Application::GetInstance();
app.Initialize("/usr/local/include/open3d/include/open3d/resources");
VIPlannerViz node(app);
node.run();
return 0;
}
| 18,015 | C++ | 37.331915 | 170 | 0.589065 |
leggedrobotics/viplanner/ros/visualizer/config/viz_config_depth_anymal_d.yaml | # general cfg
image_flip: True
max_depth: 15
domain: depth
# ros topics
vizTopic: /viplanner/viz_path_depth
imgTopic: /depth_camera_front_upper/depth/image_rect_raw
infoTopic: /depth_camera_front_upper/depth/camera_info
goalTopic: /mp_waypoint
pathTopic: /viplanner/path
# frame ids
robot_frame: base
odom_frame: odom
| 451 | YAML | 27.249998 | 70 | 0.552106 |
leggedrobotics/viplanner/ros/visualizer/config/viz_config_rgb_anymal_c.yaml | # general cfg
image_flip: True
max_depth: 15
domain: rgb
# ros topics
vizTopic: /viplanner/viz_path_rgb
imgTopic: /depth_camera_front/color/image_raw/compressed
infoTopic: /depth_camera_front/color/camera_info
goalTopic: /mp_waypoint
pathTopic: /viplanner/path
# frame ids
robot_frame: base
odom_frame: odom
| 441 | YAML | 26.624998 | 70 | 0.546485 |
leggedrobotics/viplanner/ros/visualizer/config/viz_config_depth_anymal_c.yaml | # general cfg
image_flip: False
max_depth: 15
domain: depth
# ros topics
vizTopic: /viplanner/viz_path_depth
imgTopic: /depth_camera_front/depth/image_rect_raw
infoTopic: /depth_camera_front/depth/camera_info
goalTopic: /mp_waypoint
pathTopic: /viplanner/path
# frame ids
robot_frame: base
odom_frame: odom
| 440 | YAML | 26.562498 | 64 | 0.545455 |
leggedrobotics/viplanner/ros/visualizer/config/viz_config_rgb_anymal_d.yaml | # general cfg
image_flip: True
max_depth: 15
domain: rgb
# ros topics
vizTopic: /viplanner/viz_path_rgb
imgTopic: /wide_angle_camera_front/image_raw/compressed
infoTopic: /wide_angle_camera_front/camera_info
goalTopic: /mp_waypoint
pathTopic: /viplanner/path
# frame ids
robot_frame: base
odom_frame: odom
| 439 | YAML | 26.499998 | 69 | 0.544419 |
leggedrobotics/viplanner/ros/viplanner_pkgs/package.xml | <package>
<name>viplanner_pkgs</name>
<version>1.0.0</version>
<description>Visual Imperative Planner Packages</description>
<license>BSD</license>
<maintainer email="[email protected]">Pascal Roth</maintainer>
<author email="[email protected]">Pascal Roth</author>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>joy</build_depend>
<build_depend>ps3joy</build_depend>
<build_depend>path_follower</build_depend>
<build_depend>viplanner_node</build_depend>
<build_depend>viplanner_viz</build_depend>
<build_depend>waypoint_rviz_plugin</build_depend>
<run_depend>joy</run_depend>
<run_depend>ps3joy</run_depend>
<run_depend>path_follower</run_depend>
<run_depend>viplanner_node</run_depend>
<run_depend>viplanner_viz</run_depend>
<run_depend>waypoint_rviz_plugin</run_depend>
</package>
| 832 | XML | 29.851851 | 63 | 0.735577 |
leggedrobotics/viplanner/omniverse/README.md | # ViPlanner Omniverse Extension
The ViPlanner Omniverse Extension offers a sophisticated testing environment for ViPlanner.
Built on NVIDIA Isaac Sim, a photorealistic simulator, the extension provides an assessment tool for evaluating ViPlanner's performance across diverse environments.
The extension is developed using the [Orbit Framework](https://isaac-orbit.github.io/).
**Remark**
The extension for `Matterport` and `Unreal Engine` meshes with semantic information is currently being updated to the latest Orbit version and will be available soon. An intermediate solution is provided [here](https://github.com/pascal-roth/orbit_envs).
## Installation
To install the ViPlanner extension for Isaac Sim version 2023.1.1, follow these steps:
1. Install Isaac Sim using the [Orbit installation guide](https://isaac-orbit.github.io/orbit/source/setup/installation.html).
2. Clone the Orbit repo, check out commit `477cd6b3f`, and link the ViPlanner extension. This specific commit is necessary because Orbit is under active development and the extension is not yet compatible with the latest version.
```
git clone [email protected]:NVIDIA-Omniverse/orbit.git
cd orbit
git checkout 477cd6b3f
cd source/extensions
ln -s {VIPLANNER_DIR}/omniverse/extension/omni.viplanner .
```
3. TEMPORARY: To use Matterport with semantic information within Isaac Sim, a new extension has been developed as part of this work. It is currently being updated to the latest Orbit version; a temporary solution that is sufficient for the demo script is available [here](https://github.com/pascal-roth/orbit_envs). Please also clone it and link it into Orbit.
```
git clone [email protected]:pascal-roth/orbit_envs.git
cd orbit/source/extensions
ln -s {ORBIT_ENVS}/extensions/omni.isaac.matterport .
```
4. Then run the orbit installer script and additionally install ViPlanner in the Isaac Sim virtual environment.
```
./orbit.sh -i -e
./orbit.sh -p -m pip install -e {VIPLANNER_DIR}
```
**Remark**
Also in Orbit, the editable install has to comply with PEP 660. This requires the following minimum versions (described in detail [here](https://stackoverflow.com/questions/69711606/how-to-install-a-package-using-pip-in-editable-mode-with-pyproject-toml)):
- [pip >= 21.3](https://pip.pypa.io/en/stable/news/#v21-3)
```
./orbit.sh -p -m pip install --upgrade pip
```
- [setuptools >= 64.0.0](https://github.com/pypa/setuptools/blob/main/CHANGES.rst#v6400)
```
./orbit.sh -p -m pip install --upgrade setuptools
```
## Usage
A demo script is provided to run the planner in three different environments: [Matterport](https://niessner.github.io/Matterport/), [Carla](https://carla.org//), and [NVIDIA Warehouse](https://docs.omniverse.nvidia.com/isaacsim/latest/features/environment_setup/assets/usd_assets_environments.html#warehouse).
In each scenario, the goal is represented as a movable cube within the environment.
To run the demo, download the model: [[checkpoint](https://drive.google.com/file/d/1PY7XBkyIGESjdh1cMSiJgwwaIT0WaxIc/view?usp=sharing)] [[config](https://drive.google.com/file/d/1r1yhNQAJnjpn9-xpAQWGaQedwma5zokr/view?usp=sharing)] and the environment files. Then adjust the paths (marked as `${USER_PATH_TO_USD}`) in the corresponding config files.
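As a rough sketch (the exact attribute name depends on the config file and may differ between versions), the adjustment simply replaces the placeholder with the absolute path of the downloaded asset:
```
# hypothetical excerpt from one of the config files
usd_path = "${USER_PATH_TO_USD}/matterport.usd"
# becomes, for example
usd_path = "/home/user/assets/matterport/matterport.usd"
```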
### Matterport
[Config](./extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py)
To download Matterport datasets, please refer to the [Matterport3D](https://niessner.github.io/Matterport/) website. The dataset should be converted to USD format using Isaac Sim by executing the following steps:
1. Import the `.obj` file (located under `matterport_mesh`) into Isaac Sim by going to `File -> Import`.
2. Fix potential import settings such as Rotation and Scale. (`Property Panel -> Transform -> Rotate:unitsResolve = 0.0; Scale:unitsResolve = [1.0, 1.0, 1.0]`)
3. Export the scene as USD (`File -> Save as`).
```
./orbit.sh -p {VIPLANNER_DIR}/omniverse/standalone/viplanner_demo.py --scene matterport --model_dir {MODEL_DIR}
```
### Carla
[Download USD Link](https://drive.google.com/file/d/1wZVKf2W0bSmP1Wm2w1XgftzSBx0UR1RK/view?usp=sharing)
:warning: Due to recent code changes, the semantic information is currently not received correctly in this scene. We are working on a fix.
```
./orbit.sh -p {VIPLANNER_DIR}/omniverse/standalone/viplanner_demo.py --scene carla --model_dir {MODEL_DIR}
```
### NVIDIA Warehouse
[Download USD Link](https://drive.google.com/file/d/1QXxuak-1ZmgKkxhE0EGfDydApVr6LrsF/view?usp=sharing) [Config](./extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py)
```
./orbit.sh -p {VIPLANNER_DIR}/omniverse/standalone/viplanner_demo.py --scene warehouse --model_dir {MODEL_DIR}
```
## Data Collection and Evaluation
Scripts for data collection and evaluation are being updated to the latest Orbit version and will be available soon. If you are interested in the current state, please contact us.
| 4,870 | Markdown | 53.730336 | 368 | 0.768994 |
leggedrobotics/viplanner/omniverse/standalone/viplanner_demo.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates ViPlanner in Isaac Sim: paths are planned toward a movable goal cube in the selected scene.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
# omni-isaac-orbit
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates ViPlanner in Isaac Sim.")
parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.")
parser.add_argument("--conv_distance", default=0.2, type=float, help="Distance for a goal considered to be reached.")
parser.add_argument(
"--scene", default="matterport", choices=["matterport", "carla", "warehouse"], type=str, help="Scene to load."
)
parser.add_argument("--model_dir", default=None, type=str, help="Path to model directory.")
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.core.utils.prims as prim_utils
import torch
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.orbit.envs import RLTaskEnv
from omni.viplanner.config import (
ViPlannerCarlaCfg,
ViPlannerMatterportCfg,
ViPlannerWarehouseCfg,
)
from omni.viplanner.viplanner import VIPlannerAlgo
"""
Main
"""
def main():
"""Imports all legged robots supported in Orbit and applies zero actions."""
# create environment cfg
if args_cli.scene == "matterport":
env_cfg = ViPlannerMatterportCfg()
goal_pos = torch.tensor([7.0, -12.5, 1.0])
elif args_cli.scene == "carla":
env_cfg = ViPlannerCarlaCfg()
goal_pos = torch.tensor([111.0, -137.0, 1.0])
elif args_cli.scene == "warehouse":
env_cfg = ViPlannerWarehouseCfg()
goal_pos = torch.tensor([3, -4.5, 1.0])
else:
raise NotImplementedError(f"Scene {args_cli.scene} not yet supported!")
env = RLTaskEnv(env_cfg)
obs = env.reset()[0]
# set goal cube
VisualCuboid(
prim_path="/World/goal", # The prim path of the cube in the USD stage
name="waypoint", # The unique name used to retrieve the object from the scene later on
position=goal_pos, # Using the current stage units which is in meters by default.
scale=torch.tensor([0.15, 0.15, 0.15]), # most arguments accept mainly numpy arrays.
size=1.0,
color=torch.tensor([1, 0, 0]), # RGB channels, going from 0-1
)
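    # keep a handle to the cube's translate attribute; reading it with .Get() every step
    # lets the user move the goal cube in the viewport while the simulation is running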
goal_pos = prim_utils.get_prim_at_path("/World/goal").GetAttribute("xformOp:translate")
# pause the simulator
env.sim.pause()
# load viplanner
viplanner = VIPlannerAlgo(model_dir=args_cli.model_dir)
goals = torch.tensor(goal_pos.Get(), device=env.device).repeat(env.num_envs, 1)
# initial paths
_, paths, fear = viplanner.plan_dual(
obs["planner_image"]["depth_measurement"], obs["planner_image"]["semantic_measurement"], goals
)
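    # the first return value (the raw keypoints) is not needed here; only the interpolated
    # trajectories and the fear (collision-risk) estimate are used below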
# Simulate physics
while simulation_app.is_running():
# If simulation is paused, then skip.
if not env.sim.is_playing():
            env.sim.step(render=not args_cli.headless)
continue
obs = env.step(action=paths.view(paths.shape[0], -1))[0]
# apply planner
goals = torch.tensor(goal_pos.Get(), device=env.device).repeat(env.num_envs, 1)
if torch.any(
torch.norm(obs["planner_transform"]["cam_position"] - goals)
> viplanner.train_config.data_cfg[0].max_goal_distance
):
print(
f"[WARNING]: Max goal distance is {viplanner.train_config.data_cfg[0].max_goal_distance} but goal is {torch.norm(obs['planner_transform']['cam_position'] - goals)} away from camera position! Please select new goal!"
)
env.sim.pause()
continue
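        # express the goal in the camera frame before querying the network, then transform
        # the predicted path back into the odom/world frame for following and visualization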
goal_cam_frame = viplanner.goal_transformer(
goals, obs["planner_transform"]["cam_position"], obs["planner_transform"]["cam_orientation"]
)
_, paths, fear = viplanner.plan_dual(
obs["planner_image"]["depth_measurement"], obs["planner_image"]["semantic_measurement"], goal_cam_frame
)
paths = viplanner.path_transformer(
paths, obs["planner_transform"]["cam_position"], obs["planner_transform"]["cam_orientation"]
)
# draw path
viplanner.debug_draw(paths, fear, goals)
if __name__ == "__main__":
# Run the main function
main()
# Close the simulator
simulation_app.close()
| 4,809 | Python | 33.855072 | 231 | 0.658765 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/setup.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Installation script for the 'omni.isaac.waypoints' python package."""
from setuptools import setup
# Installation operation
setup(
name="omni-isaac-waypoints",
author="Pascal Roth",
author_email="[email protected]",
version="0.0.1",
description="Extension to extract waypoints in 3D environments.",
keywords=["robotics"],
include_package_data=True,
python_requires="==3.7.*",
packages=["omni.isaac.waypoints"],
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7"],
zip_safe=False,
)
# EOF
| 718 | Python | 24.678571 | 89 | 0.689415 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/config/extension.toml | [package]
version = "0.0.1"
title = "Waypoint extension"
description="Extension to extract waypoints in 3D environments."
authors =["Pascal Roth"]
repository = "https://gitlab-master.nvidia.com/mmittal/omni_isaac_orbit"
category = "robotics"
keywords = ["kit", "robotics"]
readme = "docs/README.md"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
# Main python module this extension provides.
[[python.module]]
name = "omni.isaac.waypoints"
[[python.module]]
name = "omni.isaac.waypoints.scripts"
| 534 | TOML | 23.318181 | 72 | 0.71161 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/omni/isaac/waypoints/scripts/recorder_ui.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import gc
# python
import os
import carb
# omni
import omni
import omni.client
import omni.ext
# isaac-core
import omni.ui as ui
# omni-isaac-ui
from omni.isaac.ui.ui_utils import btn_builder, get_style, setup_ui_headers, str_builder
# isaac-waypoints
from omni.isaac.waypoints.recorder import Recorder
EXTENSION_NAME = "Waypoint Recorder"
class WaypointExtension(omni.ext.IExt):
"""Extension to record Waypoints in Isaac Sim"""
def on_startup(self, ext_id):
self._ext_id = ext_id
self._usd_context = omni.usd.get_context()
self._window = omni.ui.Window(
EXTENSION_NAME, width=400, height=500, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
# init recorder class and get path to extension
self._extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext_id)
self.recorder = Recorder()
# set additional parameters
        self._input_fields: dict = {}  # dictionary to store values of buttons, float fields, etc.
# build ui
self.build_ui()
return
##
# UI Build functions
##
def build_ui(self):
with self._window.frame:
with ui.VStack(spacing=5, height=0):
self._build_info_ui()
self._build_recorder_ui()
self._build_display_ui()
async def dock_window():
await omni.kit.app.get_app().next_update_async()
def dock(space, name, location, pos=0.5):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location, pos)
return window
tgt = ui.Workspace.get_window("Viewport")
dock(tgt, EXTENSION_NAME, omni.ui.DockPosition.LEFT, 0.33)
await omni.kit.app.get_app().next_update_async()
self._task = asyncio.ensure_future(dock_window())
def _build_info_ui(self):
title = EXTENSION_NAME
doc_link = "https://github.com/leggedrobotics/omni_isaac_orbit"
overview = "Extension to record waypoints in any Environment and export them to a .json file."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
return
def _build_recorder_ui(self):
frame = ui.CollapsableFrame(
title="Record Waypoints",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
# get save directory
kwargs = {
"label": "Save Directory",
"type": "stringfield",
"default_val": "",
"tooltip": "Click the Folder Icon to Set Filepath",
"use_folder_picker": True,
}
self._input_fields["save_path"] = str_builder(**kwargs)
self._input_fields["save_path"].add_value_changed_fn(self._check_save_path)
kwargs = {
"label": "Save Filename",
"type": "stringfield",
"default_val": "waypoints",
}
self._input_fields["file_name"] = str_builder(**kwargs)
self._input_fields["file_name"].add_value_changed_fn(self.recorder.set_filename)
self._input_fields["start_point"] = btn_builder(
"Start-Point", text="Record", on_clicked_fn=self._set_start_point
)
self._input_fields["start_point"].enabled = False
self._input_fields["way_point"] = btn_builder(
"Intermediate-Point", text="Record", on_clicked_fn=self._set_way_point
)
self._input_fields["way_point"].enabled = False
self._input_fields["end_point"] = btn_builder(
"End-Point", text="Record", on_clicked_fn=self._set_end_point
)
self._input_fields["end_point"].enabled = False
self._input_fields["reset"] = btn_builder("Reset", text="Reset", on_clicked_fn=self.recorder.reset)
self._input_fields["reset"].enabled = True
return
def _build_display_ui(self):
frame = ui.CollapsableFrame(
title="Waypoint Information",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
# control parameters
pass
return
##
# Shutdown Helpers
##
def on_shutdown(self):
if self._window:
self._window = None
gc.collect()
##
# Recorder Helper
##
def _check_save_path(self, path):
path = path.get_value_as_string()
        # the save path must be an existing directory (not a file)
        if os.path.isdir(path):
            self._input_fields["start_point"].enabled = True
            self.recorder.set_save_path(path=path)
        else:
            self._input_fields["start_point"].enabled = False
            carb.log_warn(f"Save path {path} is not an existing directory!")
return
def _set_start_point(self) -> None:
# set start point
self.recorder.set_start_point()
# enable intermediate waypoints
self._input_fields["start_point"].enabled = False
self._input_fields["way_point"].enabled = True
return
def _set_way_point(self) -> None:
# add intermediate waypoint to list
self.recorder.add_way_point()
# enable end point
self._input_fields["end_point"].enabled = True
return
def _set_end_point(self) -> None:
# set end point
self.recorder.set_end_point()
# enable / disable buttons
self._input_fields["way_point"].enabled = False
self._input_fields["end_point"].enabled = False
self._input_fields["start_point"].enabled = True
return
# EoF
| 6,681 | Python | 30.819047 | 115 | 0.568777 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/omni/isaac/waypoints/scripts/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .recorder_ui import WaypointExtension
__all__ = ["WaypointExtension"]
# EoF
| 232 | Python | 18.416665 | 60 | 0.728448 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/omni/isaac/waypoints/recorder/recorder.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import json
# python
import os
from typing import List
import numpy as np
# omni
import omni
# isaac-debug
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import scipy.spatial.transform as tf
# isaac-core
from omni.isaac.core.objects import VisualCuboid
from pxr import UsdGeom
class Recorder:
"""
Record arbitrary number of waypoints and save them as .json file
"""
cube_scale = 100 # convert from meters to cm
def __init__(self) -> None:
# init buffers
self.start_point: List[float] = [0.0] * 3
self.end_point: List[float] = [0.0] * 3
self.way_points: List[List[float]] = []
# init params
self.save_path: str = None
self.file_name: str = "waypoints"
# Acquire draw interface
self.draw_interface = omni_debug_draw.acquire_debug_draw_interface()
# cube
self.cube = VisualCuboid(
prim_path="/Waypoint", # The prim path of the cube in the USD stage
name="waypoint", # The unique name used to retrieve the object from the scene later on
position=np.array([0, 0, 1.0]), # Using the current stage units which is in meters by default.
scale=np.array([0.25, 0.25, 0.25]) * self.cube_scale, # most arguments accept mainly numpy arrays.
size=1.0,
color=np.array([1, 0.4, 0]), # RGB channels, going from 0-1
)
        # identify up axis of the stage (y or z)
stage = omni.usd.get_context().get_stage()
if UsdGeom.GetStageUpAxis(stage) == UsdGeom.Tokens.y:
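            # rotation that maps the stage's y-up coordinates into the z-up convention used when saving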
self.rot_mat = tf.Rotation.from_euler("XYZ", [90, 90, 0], degrees=True).as_matrix()
elif UsdGeom.GetStageUpAxis(stage) == UsdGeom.Tokens.z:
self.rot_mat = np.eye(3)
else:
raise ValueError("Stage Up Axis not supported")
return
def set_save_path(self, path: str) -> None:
self.save_path = path
return
def set_filename(self, name) -> None:
self.file_name = name.get_value_as_string()
return
def set_start_point(self) -> None:
# get coordinates of the start
start_point = self._get_cube_coords()
# save start point with z-up axis
self.start_point = np.matmul(self.rot_mat, start_point).tolist()
# draw start point
self.draw_interface.draw_points([start_point], [(0, 1, 0, 1)], [10]) # green
return
def add_way_point(self) -> None:
# get coordinates of the cube
way_point = self._get_cube_coords()
# save way point with z-up axis
self.way_points.append(np.matmul(self.rot_mat, way_point).tolist())
# draw start point
self.draw_interface.draw_points([way_point], [(0, 0, 1, 1)], [10]) # blue
return
def set_end_point(self) -> None:
"""
Set the end point of the path and save all waypoints as .json file with the following structure:
{
start: [x, y, z],
end: [x, y, z],
waypoints: [[x, y, z], [x, y, z], ...]
}
All points are saved in the z-up axis convention.
"""
# get coordinates of the end
end_point = self._get_cube_coords()
# save end point with z-up axis
self.end_point = np.matmul(self.rot_mat, end_point).tolist()
# draw start point
self.draw_interface.draw_points([end_point], [(1, 0, 0, 1)], [10]) # red
# save points
if self.file_name.endswith(".json"):
file_path = os.path.join(self.save_path, self.file_name)
else:
file_path = os.path.join(self.save_path, self.file_name + ".json")
data = {"start": self.start_point, "end": self.end_point, "waypoints": self.way_points}
with open(file_path, "w") as file:
json.dump(data, file)
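        # the saved file can be read back with the standard json module, e.g.:
        #   with open(file_path) as f:
        #       data = json.load(f)
        #   start, end, waypoints = data["start"], data["end"], data["waypoints"]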
return
def reset(self) -> None:
self.start_point = [0.0] * 3
self.end_point = [0.0] * 3
self.way_points = []
self.draw_interface.clear_points()
return
""" Helper functions """
def _get_cube_coords(self) -> np.ndarray:
pose = omni.usd.utils.get_world_transform_matrix(self.cube.prim)
pose = np.array(pose).T
return pose[:3, 3]
# EoF
| 4,426 | Python | 31.313868 | 111 | 0.584953 |
leggedrobotics/viplanner/omniverse/extension/omni.waypoints/omni/isaac/waypoints/recorder/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .recorder import Recorder
__all__ = ["Recorder"]
# EoF
| 211 | Python | 16.666665 | 60 | 0.706161 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/setup.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Installation script for the 'omni.viplanner' python package."""
from setuptools import setup
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
# generic
"numpy",
"scipy>=1.7.1",
# RL
"torch>=1.9.0",
]
# Installation operation
setup(
name="omni-isaac-viplanner",
author="Pascal Roth",
author_email="[email protected]",
version="0.0.1",
description="Extension to include ViPlanner: Visual Semantic Imperative Learning for Local Navigation",
keywords=["robotics", "rl"],
include_package_data=True,
python_requires=">=3.7",
install_requires=INSTALL_REQUIRES,
packages=["omni.viplanner"],
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7"],
zip_safe=False,
)
# EOF
| 941 | Python | 23.789473 | 107 | 0.682253 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/visual_imperative_planner.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import Dict, Optional
# python
import numpy as np
import torch
# omni-isaac-anymal
from omni.isaac.anymal.config import ANYmalCfg, ROSPublisherCfg, VIPlannerCfg
from omni.isaac.anymal.policy import Agent
from omni.isaac.anymal.utils import (
AnymalROSPublisher,
AnymalROSSubscriber,
TwistController,
)
from omni.isaac.anymal.utils.ros_utils import check_roscore_running, init_rosnode
from omni.isaac.anymal.utils.twist_controller_new import TwistControllerNew
from omni.isaac.anymal.viplanner import VIPlanner
# omni-isaac-core
from omni.isaac.core.objects import VisualCuboid
# omni-isaac-orbit
from omni.isaac.orbit.robots.legged_robot import LeggedRobot
from omni.isaac.orbit.sensors.camera import Camera
from omni.isaac.orbit.sensors.height_scanner import HeightScanner
from tqdm import tqdm
class VIPlannerANYmal(Agent):
"""
    Visual Imperative Planner to guide ANYmal to a waypoint defined by a cube in the world.
Two versions available:
- Isaac Twist Controller (default), Twist Controller is implemented in Python, no ROS exchange has to be done
- ROS Twist Controller (old), Twist Controller is implemented in C++, path has to be published to ROS and twist command are received
"""
def __init__(
self,
cfg: ANYmalCfg,
camera_sensors: Dict[str, Camera],
robot: LeggedRobot,
height_scanner: HeightScanner,
ros_controller: bool = False,
planner_cfg: Optional[VIPlannerCfg] = None,
) -> None:
# init agent
super().__init__(cfg.rl_policy, robot, height_scanner)
self._anymal_cfg = cfg
self._camera_sensors = camera_sensors
self._ros_controller = ros_controller
# viplanner
self.planner: VIPlanner = None
# waypoint cube
self.cube: VisualCuboid = None
# planner cfg
self._planner_cfg = planner_cfg if planner_cfg else VIPlannerCfg()
if self._ros_controller:
# init planner config
self._planner_cfg.ros_pub = True
# init ROS publisher config
self._ros_publisher_cfg = ROSPublisherCfg(sensor_pub=False)
# setup cube as waypoint and ros connection
self.ros_publisher: AnymalROSPublisher = None
self.ros_subscriber: AnymalROSSubscriber = None
else:
self._planner_cfg.ros_pub = False
self.twist: TwistController = None
self._setup()
# reset once at initialization
self.reset()
# get message
self.title += "with Visual Imperative Planner \n"
self.msg += "\n\n"
self.msg += f"" # TODO: add more info
return
    def compute_command_ros(self, step_size: float) -> torch.Tensor:
"""Compute the command for the robot using the ROS Twist Controller"""
# get command from joystick planner
last_command, command_time = self.twist.get_command()
# check if last command is not too long ago (would happen if goal is reached)
if command_time > (self.sim.current_time - self._planner_cfg.look_back_factor * step_size):
return torch.tensor(last_command, device=self.robot.device)
else:
return torch.zeros(3, device=self.robot.device)
    def compute_command_isaac(self, step_size: float) -> torch.Tensor:
"""Compute the command for the robot using the Python Twist Controller"""
# get command from twist controller
last_command = self.twist.compute(self.planner.traj_waypoints_odom, self.planner.fear)
try:
return torch.tensor(last_command, device=self.robot.device)
except TypeError:
return torch.zeros(3, device=self.robot.device)
def reset(self) -> None:
super().reset()
self.planner.reset()
if not self._ros_controller:
self.twist.reset()
# reset pbar
self.pbar.reset()
return
##
# Helper Functions
##
def _setup(self) -> None:
"""Setup cube and the ros connection to the smart joystick"""
# cube
self._setup_cube()
# viplanner
self.planner = VIPlanner(
anymal_cfg=self._anymal_cfg, vip_cfg=self._planner_cfg, camera_sensors=self._camera_sensors
)
# for ROS based controller
if self._ros_controller:
# init rosnode
check_roscore_running()
init_rosnode("anymal_node")
# init publisher and subscriber
self.ros_publisher = AnymalROSPublisher(
anymal_cfg=self._anymal_cfg,
ros_cfg=self._ros_publisher_cfg,
camera_sensors=self._camera_sensors,
lidar_sensors=self._lidar_sensors,
)
self.twist = AnymalROSSubscriber()
# define function to compute command
self.compute_command = self.compute_command_ros
else:
# self.twist = TwistController(
# cfg=self._planner_cfg.twist_controller_cfg,
# cfg_vip=self._planner_cfg,
# cfg_anymal=self._anymal_cfg,
# camera_sensors=self._camera_sensors,
# )
self.twist = TwistControllerNew(
cfg=self._planner_cfg.twist_controller_cfg,
cfg_vip=self._planner_cfg,
cfg_anymal=self._anymal_cfg,
robot=self.robot,
)
# define function to compute command
self.compute_command = self.compute_command_isaac
# setup pbar
self._setup_pbar()
return
def _setup_cube(self) -> None:
"""cube as the definition of a goalpoint"""
self.cube = VisualCuboid(
prim_path=self._planner_cfg.goal_prim, # The prim path of the cube in the USD stage
name="waypoint", # The unique name used to retrieve the object from the scene later on
position=np.array([5, 0, 1.0]), # Using the current stage units which is in meters by default.
scale=np.array([0.15, 0.15, 0.15]), # most arguments accept mainly numpy arrays.
size=1.0,
            color=np.array([1, 0, 0]),  # RGB channels, going from 0-1
)
return
# progress bar
def _setup_pbar(self):
"""Setup progress bar"""
self.pbar = tqdm(total=100, position=0, leave=False, bar_format="{desc}{percentage:.0f}%|{bar}|")
return
def _update_pbar(self):
"""Update progress bar"""
if self.planner.is_reset:
return
desc = (
f"Time Elapsed: {self.sim.current_time - self.planner.start_time:.2f}s | "
f"Walked Distance: {self.planner.max_goal_distance-self.planner.distance_to_goal:.2f}/{self.planner.max_goal_distance:.2f}m | "
f"Twist: {self.twist.twist}"
)
self.pbar.set_description(desc)
percentage_completed_path = (1 - (self.planner.distance_to_goal / self.planner.max_goal_distance)) * 100
update_percentage = percentage_completed_path - self.pbar.n
if update_percentage > 0:
self.pbar.update(update_percentage)
else:
self.pbar.update(0)
return
# EoF
| 7,418 | Python | 35.367647 | 139 | 0.617147 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/vip_algo.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
from typing import Optional
# omni
import carb
import numpy as np
# python
import torch
from viplanner.config import TrainCfg
# viplanner src
from viplanner.plannernet import (
PRE_TRAIN_POSSIBLE,
AutoEncoder,
DualAutoEncoder,
get_m2f_cfg,
)
from viplanner.traj_cost_opt.traj_opt import TrajOpt
torch.set_default_dtype(torch.float32)
class VIPlannerAlgo:
def __init__(self, model_dir: str, m2f_model_dir: Optional[str] = None, viplanner: bool = True) -> None:
"""Apply VIPlanner Algorithm
Args:
model_dir (str): Directory that include model.pt and model.yaml
"""
super().__init__()
assert os.path.exists(model_dir), "Model directory does not exist"
if viplanner:
assert os.path.isfile(os.path.join(model_dir, "model.pt")), "Model file does not exist"
assert os.path.isfile(os.path.join(model_dir, "model.yaml")), "Model config file does not exist"
else:
assert os.path.isfile(os.path.join(model_dir, "plannernet_scripted.pt")), "Model file does not exist"
# load model
self.train_config: TrainCfg = None
self.pixel_mean = None
self.pixel_std = None
self.load_model(model_dir, m2f_model_dir, viplanner)
self.traj_generate = TrajOpt()
return None
def load_model(self, model_dir: str, m2f_model_dir: Optional[str] = None, viplanner: bool = True) -> None:
if viplanner:
# load train config
self.train_config: TrainCfg = TrainCfg.from_yaml(os.path.join(model_dir, "model.yaml"))
carb.log_info(
f"Model loaded using sem: {self.train_config.sem}, rgb: {self.train_config.rgb}, knodes: {self.train_config.knodes}, in_channel: {self.train_config.in_channel}"
)
if isinstance(self.train_config.data_cfg, list):
self.max_goal_distance = self.train_config.data_cfg[0].max_goal_distance
self.max_depth = self.train_config.data_cfg[0].max_depth
self.depth_scale = self.train_config.data_cfg[0].depth_scale
else:
self.max_goal_distance = self.train_config.data_cfg.max_goal_distance
self.max_depth = self.train_config.data_cfg.max_depth
self.depth_scale = self.train_config.data_cfg.depth_scale
if self.train_config.rgb or self.train_config.sem:
if self.train_config.rgb and self.train_config.pre_train_sem:
assert (
PRE_TRAIN_POSSIBLE
), "Pretrained model not available since either detectron2 or mask2former not correctly setup"
pre_train_cfg = os.path.join(m2f_model_dir, self.train_config.pre_train_cfg)
pre_train_weights = (
os.path.join(m2f_model_dir, self.train_config.pre_train_weights)
if self.train_config.pre_train_weights
else None
)
m2f_cfg = get_m2f_cfg(pre_train_cfg)
self.pixel_mean = m2f_cfg.MODEL.PIXEL_MEAN
self.pixel_std = m2f_cfg.MODEL.PIXEL_STD
else:
m2f_cfg = None
pre_train_weights = None
self.net = DualAutoEncoder(self.train_config, m2f_cfg=m2f_cfg, weight_path=pre_train_weights)
else:
self.net = AutoEncoder(self.train_config.in_channel, self.train_config.knodes)
# get model and load weights
try:
model_state_dict, _ = torch.load(os.path.join(model_dir, "model.pt"))
except ValueError:
model_state_dict = torch.load(os.path.join(model_dir, "model.pt"))
self.net.load_state_dict(model_state_dict)
else:
self.train_config: TrainCfg = TrainCfg(rgb=False, sem=False)
self.max_goal_distance = self.train_config.data_cfg.max_goal_distance
self.max_depth = self.train_config.data_cfg.max_depth
self.depth_scale = self.train_config.data_cfg.depth_scale
self.net = torch.jit.load(os.path.join(model_dir, "plannernet_scripted.pt"))
# inference script = no grad for model
self.net.eval()
# move to GPU if available
if torch.cuda.is_available():
self.net = self.net.cuda()
self.cuda_avail = True
else:
carb.log_warn("CUDA not available, VIPlanner will run on CPU")
self.cuda_avail = False
return
def plan(self, image: torch.Tensor, goal_robot_frame: torch.Tensor) -> tuple:
image = image.expand(-1, 3, -1, -1)
keypoints, fear = self.net(image, goal_robot_frame)
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return keypoints, traj, fear
def plan_dual(self, dep_image: torch.Tensor, sem_image: torch.Tensor, goal_robot_frame: torch.Tensor) -> tuple:
keypoints, fear = self.net(dep_image, sem_image, goal_robot_frame)
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return keypoints, traj, fear
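    # Usage sketch (shapes are assumptions based on how the demo script calls these methods):
    #   planner = VIPlannerAlgo(model_dir="/path/to/model_dir")
    #   keypoints, traj, fear = planner.plan_dual(depth_img, sem_img, goal_cam_frame)
    # where the image tensors and the goal (expressed in the camera frame) live on the same
    # device as the network, and traj contains the interpolated trajectory points.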
# EoF
| 5,404 | Python | 39.037037 | 176 | 0.603997 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .controller_cfg import LocomotionRlControllerCfg
from .eval_cfg import ANYmalEvaluatorConfig
from .ros_cfg import ROSPublisherCfg
from .sensor_cfg import (
ANYMAL_C_CAMERA_SENSORS,
ANYMAL_C_LIDAR_SENSORS,
ANYMAL_D_CAMERA_SENSORS,
ANYMAL_D_LIDAR_SENSORS,
ANYMAL_FOLLOW,
)
from .vip_config import TwistControllerCfg, VIPlannerCfg
from .walking_cfg import ANYmalCfg, SensorCfg, SimCfg, TerrainCfg, ViewerCfg
__all__ = [
# configs
"ANYmalCfg",
"SimCfg",
"ViewerCfg",
"TerrainCfg",
"SensorCfg",
"LocomotionRlControllerCfg",
"ROSPublisherCfg",
"VIPlannerCfg",
"TwistControllerCfg",
"ANYmalEvaluatorConfig",
# Perception Sensor Settings
"ANYMAL_D_CAMERA_SENSORS",
"ANYMAL_D_LIDAR_SENSORS",
"ANYMAL_C_CAMERA_SENSORS",
"ANYMAL_C_LIDAR_SENSORS",
"ANYMAL_FOLLOW",
]
# EoF
| 1,005 | Python | 23.536585 | 76 | 0.704478 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/eval_cfg.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from dataclasses import dataclass, field
from typing import List, Optional
# orbit-assets
from omni.isaac.assets import ASSETS_RESOURCES_DIR
@dataclass
class ANYmalEvaluatorConfig:
# termination conditions
max_time = 60 # seconds
max_remain_time = 5 # seconds
# cost map file
cost_map_dir: Optional[str] = None # "/home/pascal/viplanner/imperative_learning/data/cost_maps"
cost_map_name: Optional[str] = None # "2n8kARJN3HM_cost_map_long_2"
# use previous results or previous generated waypoints
use_prev_results: bool = True
use_existing_explored_waypoints: bool = True
handcrafted_waypoint_file: Optional[str] = None # "2n8kARJN3HM_waypoints_long" # _2
waypoint_dir: Optional[str] = "/home/pascal/viplanner/imperative_learning/data/waypoints"
# NOTE: can either load waypoint generated by the waypoint extension (define file under handcrafted_waypoint_file)
# or load previously explored waypoints (define file under handcrafted_waypoint_file), if neither env will
# be explored
repeat_waypoints: Optional[int] = 50 # number of times to repeat the waypoints, create distribution of results
# waypoint exploration parameters
num_pairs: int = 500 # number of start-goal pairs to explore
min_goal_dist: int = 5 # meters
max_goal_dist: int = 15 # meters
num_connections: int = 3 # number of connections when building the graph of all samples
seed: int = 1 # random seed for collection of start-goal points
# multi model
multi_model: bool = False
models: List[str] = field(default_factory=list)
save_dir: str = os.path.join(ASSETS_RESOURCES_DIR, "vip_models")
# intermediate result saving period
save_period: int = 10 # waypoints
@property
def waypoint_file(self):
assert all([self.waypoint_dir, self.handcrafted_waypoint_file]), "Waypoint file not specified"
return os.path.join(self.waypoint_dir, f"{self.handcrafted_waypoint_file}.json")
# EoF
| 2,180 | Python | 36.603448 | 118 | 0.712385 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/evaluator.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import asyncio
import datetime
import json
import os
import pickle
import random
import shutil
from abc import abstractmethod
from typing import List, Tuple
# omni
import carb
import cv2
import networkx as nx
import numpy as np
# isaac-debug
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import scipy.spatial.transform as tf
import torch
# isaac-anymal
from omni.isaac.anymal.config import (
ANYMAL_FOLLOW,
ANYmalCfg,
ANYmalEvaluatorConfig,
VIPlannerCfg,
)
from omni.isaac.anymal.robot import ANYmal
from omni.isaac.anymal.tasks import VIPlannerANYmal
from omni.isaac.anymal.utils.camera_utils import get_cam_pose
from omni.isaac.anymal.utils.gif_utils import create_gif
# isaac-core
from omni.isaac.core.simulation_context import SimulationContext
# isaac-orbit
from omni.isaac.orbit.utils.math import convert_quat, quat_mul
from pxr import Usd
# viplanner
from viplanner.utils.eval_utils import BaseEvaluator
class ANYmalOrbitEvaluator(BaseEvaluator):
def __init__(
self,
cfg: ANYmalEvaluatorConfig,
cfg_anymal: ANYmalCfg,
cfg_planner: VIPlannerCfg,
) -> None:
# get args
self._cfg = cfg
self._cfg_anymal = cfg_anymal
self._cfg_planner = cfg_planner
# change flag
if self._cfg_anymal.viewer.debug_vis:
            print(
                "WARNING: Debug visualization will be switched off since markers do not have a semantic label and would lead to errors."
            )
self._cfg_anymal.viewer.debug_vis = False
# super init
super().__init__(
distance_tolerance=self._cfg_planner.conv_dist,
obs_loss_threshold=self._cfg_planner.obs_loss_threshold,
cost_map_dir=self._cfg.cost_map_dir,
cost_map_name=self._cfg.cost_map_name,
)
# Acquire draw interface
self.draw_interface = omni_debug_draw.acquire_debug_draw_interface()
# init ANYmal with corresponding agent
self._anymal: ANYmal = None
self._agent: VIPlannerANYmal = None
# get simulation context
self.sim: SimulationContext = None
# flags
        self.use_waypoint_file: bool = bool(self._cfg.waypoint_dir and self._cfg.handcrafted_waypoint_file)
return
@abstractmethod
def load_scene(self) -> None:
"""Load scene."""
raise NotImplementedError
@abstractmethod
def explore_env(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Setup explorer."""
raise NotImplementedError
@abstractmethod
def post_setup(self) -> None:
"""Post step."""
pass
@abstractmethod
def get_env_name(self) -> str:
"""Get environment name."""
raise NotImplementedError
##
# Buffers
##
def create_buffers(self) -> None:
# create standard buffers
super().create_buffers()
# add additional buffers
self.goal_reached: np.ndarray = np.zeros(self._nbr_paths, dtype=bool)
self.goal_within_fov: np.ndarray = np.ones(self._nbr_paths, dtype=bool)
self.base_collision: np.ndarray = np.zeros(self._nbr_paths, dtype=bool)
self.knee_collision: np.ndarray = np.zeros(self._nbr_paths, dtype=bool)
self.walking_time: np.ndarray = np.ones(self._nbr_paths) * self._cfg.max_time
self.skip_waypoint: np.ndarray = np.zeros(self._nbr_paths, dtype=bool)
##
# Run Simulation
##
def run_single(self, show_plot: bool = True, repeat_idx: str = "") -> None:
"""RUN SINGLE MODEL"""
eval_dir = os.path.join(self._cfg_planner.model_dir, f"eval_{self.get_env_name()}", repeat_idx)
os.makedirs(eval_dir, exist_ok=True)
# check if results already exist
if self._cfg.use_prev_results:
print(f"[INFO] Using previous results from {eval_dir}!")
success_use_prev_results, start_idx = self.load_eval_arrays(eval_dir=eval_dir)
else:
success_use_prev_results = False
start_idx = 0
if not success_use_prev_results:
self.run(eval_dir, start_idx)
# create eval stats and plots
self.save_eval_arrays(eval_dir=eval_dir)
self._filter_statistics()
self.eval_statistics()
self.save_eval_results()
self.plt_single_model(eval_dir=eval_dir, show=show_plot)
else:
self.eval_statistics()
return
def run_repeat(self) -> None:
# adjust configs
self._cfg_anymal.rec_path = True
self._cfg_anymal.rec_sensor = False
self._cfg_anymal.follow_camera = True
start_point = self.waypoints["start"]
assert (
self._cfg.repeat_waypoints
), "To repeat waypoints, please specify the repeat_waypoints flag in the config!"
if self._cfg.use_prev_results:
repeat_indexes = []
for file in os.listdir(os.path.join(self._cfg_planner.model_dir, f"eval_{self.get_env_name()}")):
if file.startswith("repeat_") and file[len("repeat_") :].isdigit():
index = int(file[len("repeat_") :])
repeat_indexes.append(index)
start_idx = max(repeat_indexes) + 1
else:
start_idx = 0
for repeat_idx in range(start_idx, self._cfg.repeat_waypoints):
# reset anymal to start position
self._cfg_anymal.translation_x = start_point[0]
self._cfg_anymal.translation_y = start_point[1]
self._cfg_anymal.translation_z = 1.0 # start_point[2]
self.sim.play()
# self._anymal.robot._process_info_cfg()
# reset robot
self._anymal._reset_robot()
# reset planner
# self._agent.reset()
self.sim.pause()
self.run_single(show_plot=False, repeat_idx=f"repeat_{repeat_idx}")
self.reset()
return
def run_multi(self, show_single_plot: bool = False) -> None:
"""RUN MULTI MODEL"""
print(
f"[INFO] Running multi model evaluation with the models defined in ANYmalEvaluatorConfig! \n"
f"The model defined under VIPlannerCfg will not be used! The used models are: \n"
f"{self._cfg.models}"
)
length_path_list = []
length_goal_list = []
goal_distances_list = []
obs_loss_list = []
for model_dir in self._cfg.models:
print(f"[INFO] Running model {model_dir}!")
# switch model and update it in the config
self._agent.planner.switch_model(model_dir)
self._cfg_planner.model_dir = model_dir
# run for new model
self.run_single(show_plot=show_single_plot)
length_path_list.append(self.length_path)
length_goal_list.append(self.length_goal)
goal_distances_list.append(self.goal_distances)
obs_loss_list.append(self.loss_obstacles)
self.reset()
self.plt_comparison(
length_goal_list,
length_path_list,
goal_distances_list,
self._cfg.models,
self._cfg.save_dir,
obs_loss_list,
model_names=["VIPlanner", "iPlanner"],
)
return
def run(self, eval_dir: str, start_idx: int = 0) -> None:
# init camera buffers by rendering a first time (otherwise will stop simulation)
self.sim.play()
self._anymal.sensors_camera[self._agent.planner.cam_path["rgb"]].update(dt=0.0)
self._anymal.sensors_camera[self._agent.planner.cam_path["depth"]].update(dt=0.0)
self.sim.pause()
# iterate over all waypoints
for idx in range(self._nbr_paths - start_idx):
idx_curr = idx + start_idx
if self.use_waypoint_file:
next_goalpoint = self.waypoints["waypoints"][idx_curr]
else:
next_goalpoint = list(self.waypoints[idx_curr].values())[0]
# set new goal
self._agent.cube.set_world_pose(next_goalpoint)
# reset counter and flags
counter = 0
start_time = self.sim.current_time
past_robot_position = self._anymal.robot.data.root_pos_w.numpy()[0, :2].copy()
robot_position_time = self.sim.current_time
self.length_goal[idx_curr] = (
np.linalg.norm(past_robot_position - next_goalpoint[:2]) - self._agent.planner._cfg_vip.conv_dist
)
if self._cfg_anymal.follow_camera:
cam_follow_save_path = os.path.join(
eval_dir,
"eval_video",
self._cfg.handcrafted_waypoint_file + f"_waypoint{idx_curr}_of_{self.nbr_paths}"
if self.use_waypoint_file
else f"random_seed{self._cfg.seed}_pairs{self._cfg.num_pairs}_waypoint{idx_curr}_of_{self._nbr_paths}",
)
os.makedirs(cam_follow_save_path, exist_ok=True)
if self._cfg_anymal.rec_sensor:
sensor_save_paths = []
for sensor in self._anymal.sensors_camera.keys():
sensor_save_path = os.path.join(
eval_dir,
sensor,
self._cfg.handcrafted_waypoint_file + f"_waypoint{idx_curr}_of_{self.nbr_paths}"
if self.use_waypoint_file
else f"random_seed{self._cfg.seed}_pairs{self._cfg.num_pairs}_waypoint{idx_curr}_of_{self._nbr_paths}",
)
os.makedirs(sensor_save_path, exist_ok=True)
sensor_save_paths.append(sensor_save_path)
self.sim.play()
base_net_contact_force = self._anymal.base_contact.get_net_contact_forces(
clone=False, dt=self.sim.get_physics_dt()
)
if (base_net_contact_force > 0.0).any():
print(f"Waypoint {idx_curr}:\t Start Position Base collides, will discard waypoint!")
self.base_collision[idx_curr] = True
self.skip_waypoint[idx_curr] = True
self.sim_reset(idx_curr, next_goalpoint=next_goalpoint, eval_dir=eval_dir)
continue
knee_net_contact_force = self._anymal.knee_contact.get_net_contact_forces(
clone=False, dt=self.sim.get_physics_dt()
)
knee_net_contact_force = knee_net_contact_force.view(-1, 4, 3)
if (knee_net_contact_force > 0.0).any():
print(f"Waypoint {idx_curr}:\t Start Position Knee collides, will discard waypoint!")
self.knee_collision[idx_curr] = True
self.skip_waypoint[idx_curr] = True
self.sim_reset(idx_curr, next_goalpoint=next_goalpoint, eval_dir=eval_dir)
continue
# collect path
path = []
while True:
self.sim.step()
counter += 1
if self._agent.twist.goal_reached:
self.goal_reached[idx_curr] = True
self.walking_time[idx_curr] = self.sim.current_time - start_time
print(
f"Waypoint {idx_curr}:\t Goal reached within {self.walking_time[idx_curr]}s ({counter} steps)."
)
break
# check if robot is stuck and get path length
if (
self._anymal.robot.data.root_pos_w.numpy()[0, :2].round(decimals=1)
== past_robot_position.round(decimals=1)
).all():
if self.sim.current_time - robot_position_time > self._cfg.max_remain_time:
print(f"Waypoint {idx_curr}:\t Robot is stuck!")
break
else:
self.length_path[idx_curr] += np.linalg.norm(
self._anymal.robot.data.root_pos_w.numpy()[0, :2] - past_robot_position
)
past_robot_position = self._anymal.robot.data.root_pos_w.numpy()[0, :2].copy()
path.append(self._anymal.sensors_camera[self._agent.planner.cam_path["rgb"]]._compute_ros_pose()[0])
robot_position_time = self.sim.current_time
# contact forces
base_net_contact_force = self._anymal.base_contact.get_net_contact_forces(
clone=False, dt=self.sim.get_physics_dt()
)
if (base_net_contact_force > 0.0).any() and not self.base_collision[idx_curr]:
self.base_collision[idx_curr] = True
knee_net_contact_force = self._anymal.knee_contact.get_net_contact_forces(
clone=False, dt=self.sim.get_physics_dt()
)
knee_net_contact_force = knee_net_contact_force.view(-1, 4, 3)
if (knee_net_contact_force > 0.0).any() and not self.knee_collision[idx_curr]:
self.knee_collision[idx_curr] = True
# feet_net_contact_force = self._anymal.foot_contact.get_net_contact_forces(clone=False, dt=self.sim.get_physics_dt())
# feet_net_contact_force = feet_net_contact_force.view(-1, 4, 3)
# check for max time
if (self.sim.current_time - start_time) >= self._cfg.max_time:
print(f"Waypoint {idx_curr}:\t Goal NOT reached.")
break
# eval video
if self._cfg_anymal.follow_camera and counter % self._cfg_anymal.rec_frequency == 0:
# set to constant height and orientation
pos = (
tf.Rotation.from_quat(
convert_quat(self._anymal.robot.data.root_quat_w.clone().numpy()[0], "xyzw")
).as_matrix()
@ np.asarray(ANYMAL_FOLLOW.pos)
+ self._anymal.robot.data.root_pos_w.numpy()[0]
)
pos[2] = 1.7 # constant height
target = self._anymal.robot.data.root_pos_w.clone().numpy()[0]
extra_world_frame = tf.Rotation.from_quat(
convert_quat(self._anymal.robot.data.root_quat_w.clone().numpy()[0], "xyzw")
).as_matrix() @ np.array([1, 0, 0])
target += extra_world_frame
target[2] = 0.7 # constant height
self._anymal.follow_cam.set_world_pose_from_view(
pos,
target,
)
self._anymal.follow_cam.update(self._cfg_anymal.sim.dt)
# write image
cv2.imwrite(
os.path.join(cam_follow_save_path, "step" + f"{counter}".zfill(5) + ".png"),
cv2.cvtColor(self._anymal.follow_cam.data.output["rgb"], cv2.COLOR_BGR2RGB),
)
if self._cfg_anymal.rec_sensor and counter % self._cfg_anymal.rec_frequency == 0:
for idx, sensor in enumerate(self._anymal.sensors_camera.values()):
for data_type, data_array in sensor.data.output.items():
if data_array is None:
continue
if data_type == "rgb" or data_type == "semantic_segmentation":
if isinstance(data_array, dict):
# collect image and transfer it to viplanner color space
sem_image = data_array["data"]
sem_idToLabels = data_array["info"]["idToLabels"]
data_array = self._agent.planner.sem_color_transfer(sem_image, sem_idToLabels)
cv2.imwrite(
os.path.join(
sensor_save_paths[idx], data_type + "_step" + f"{counter}".zfill(5) + ".png"
),
cv2.cvtColor(data_array.astype(np.uint8), cv2.COLOR_BGR2RGB),
)
elif data_type == "distance_to_image_plane":
if isinstance(self._agent.planner.planner.train_config.data_cfg, list):
depth_scale = self._agent.planner.planner.train_config.data_cfg[0].depth_scale
else:
depth_scale = self._agent.planner.planner.train_config.data_cfg.depth_scale
cv2.imwrite(
os.path.join(
sensor_save_paths[idx], data_type + "_step" + f"{counter}".zfill(5) + ".png"
),
(data_array * depth_scale).astype(np.uint16),
)
# add current position to draw interface to show robot path
if counter % 100 == 0:
self.draw_interface.draw_points(
self._anymal.robot.data.root_pos_w.tolist(), [(1, 1, 1, 1)], [5] # white
)
# pause and reset anymal, planner, ...
self.sim.pause()
self.draw_interface.clear_points()
self.sim_reset(idx_curr, next_goalpoint, eval_dir, path)
# save intermediate results
if idx_curr % self._cfg.save_period == 0:
os.makedirs(os.path.join(eval_dir, f"pre_{idx_curr}"), exist_ok=True)
self.save_eval_arrays(eval_dir=os.path.join(eval_dir, f"pre_{idx_curr}"), suffix=f"_{idx_curr}")
if os.path.exists(os.path.join(eval_dir, f"pre_{int(idx_curr-self._cfg.save_period)}")):
shutil.rmtree(os.path.join(eval_dir, f"pre_{int(idx_curr-self._cfg.save_period)}"))
            # save gif if cam follower is activated
# if self._cfg_anymal.follow_camera and counter > self._cfg_anymal.rec_frequency:
# try:
# create_gif(
# cam_follow_save_path,
# gif_name=f"waypoint{idx_curr}",
# # speedup by factor of self._cfg_anymal.follow_camera_frequency
# duration=(self.sim.current_time - self._agent.planner.start_time) / counter,
# )
# except:
# carb.log_warn("Could not create gif!")
return
##
# Sim Setup and Reset
##
def setup(self) -> None:
# load scene to init simulation context
self.load_scene()
# get the simulationContext
self.sim: SimulationContext = SimulationContext().instance()
# load waypoints
self.setup_waypoints()
# create buffers
self.create_buffers()
# setup anymal
self.anymal_setup()
# post setup script
self.post_setup()
return
def anymal_setup(self) -> None:
print("Initializing ANYmal and setup callbacks ...")
# init anymal
self._anymal = ANYmal(self._cfg_anymal)
self._anymal.setup_sync()
# init anymal agent
self._agent_setup()
print("ANYmal initialized.")
return
def _agent_setup(self) -> None:
self._agent = VIPlannerANYmal(
cfg=self._cfg_anymal,
camera_sensors=self._anymal.sensors_camera,
robot=self._anymal.robot,
height_scanner=self._anymal.height_scanner,
ros_controller=False,
planner_cfg=self._cfg_planner,
)
self._agent.planner.set_planner_callback()
asyncio.ensure_future(self._agent.set_walk_callback())
# prevent local goal to be visible --> messes up semantic and depth images
self._agent.cube.set_visibility(False)
return
def _get_rot_to_point(self, start: list, end: list) -> tuple:
# set the initial rotation to point to the first waypoint
angle = np.arctan2(end[1] - start[1], end[0] - start[0])
rot_quat = tf.Rotation.from_euler("z", angle, degrees=False).as_quat()
return tuple(convert_quat(rot_quat, "wxyz").tolist())
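    # Worked example for the yaw-only orientation above (illustrative numbers): with
    # start = [0, 0] and end = [1, 1], the yaw is atan2(1, 1) = 45 deg, so the returned
    # (w, x, y, z) quaternion is roughly (0.924, 0.0, 0.0, 0.383), i.e. a pure rotation
    # about z that makes the robot face the next waypoint.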
    def sim_reset(self, idx: int, next_goalpoint: np.ndarray, eval_dir: str, path: List[torch.Tensor] = []) -> None:
# save distance depth camera to goal and if goal was within fov at the starting position
# NOTE: base position cannot be taken since the path is determined from the camera position which has an offset
cam_pos, _ = get_cam_pose(self._anymal.sensors_camera[self._agent.planner.cam_path["depth"]]._sensor_prim)
self.goal_within_fov[idx] = not self._agent.planner.goal_outside_fov
self.goal_distances[idx] = max(
[np.linalg.norm(next_goalpoint[:2] - cam_pos[:2]) - self._agent.planner._cfg_vip.conv_dist, 0.0]
)
if len(path) > 0:
straight_distance = np.linalg.norm(path[-1][:2] - path[0][:2])
self.path_extension[idx] = (self.length_path[idx] - straight_distance) / straight_distance
if self._use_cost_map:
self.loss_obstacles[idx] = self._get_cost_map_loss(np.vstack(path))
if self._cfg_anymal.rec_path:
np.save(os.path.join(eval_dir, f"waypoint{idx}_path.npy"), np.vstack(path))
# reset the robot to new start position if necessary
if not (self.goal_reached[idx] and self.use_waypoint_file) and idx + 1 < self._nbr_paths:
# move anymal to new start position
if self.use_waypoint_file:
next_goalpoint[2] = 1.0
self._anymal.robot.cfg.init_state.pos = tuple(next_goalpoint)
self._anymal.robot.cfg.init_state.rot = self._get_rot_to_point(
next_goalpoint, self.waypoints["waypoints"][idx + 1]
)
else:
self._anymal.robot.cfg.init_state.pos = list(self.waypoints[idx + 1].keys())[0]
self._anymal.robot.cfg.init_state.rot = self._get_rot_to_point(
np.array(list(self.waypoints[idx + 1].keys())[0]), list(self.waypoints[idx + 1].values())[0]
)
self._anymal.robot._process_info_cfg()
# reset robot
self._anymal._reset_robot()
# reset planner
self._agent.reset()
# reset pbar
self._agent.pbar.close()
self._agent._setup_pbar()
return
##
# Eval Stats
##
def _filter_statistics(self) -> None:
# remove skipped waypoints
print(f"Waypoint skipped {sum(self.skip_waypoint)} due to knee or base collision in start position.")
self.goal_reached = self.goal_reached[self.skip_waypoint == False]
self.goal_within_fov = self.goal_within_fov[self.skip_waypoint == False]
self.base_collision = self.base_collision[self.skip_waypoint == False]
self.knee_collision = self.knee_collision[self.skip_waypoint == False]
self.walking_time = self.walking_time[self.skip_waypoint == False]
self.goal_distances = self.goal_distances[self.skip_waypoint == False]
self.length_goal = self.length_goal[self.skip_waypoint == False]
self.length_path = self.length_path[self.skip_waypoint == False]
self.loss_obstacles = self.loss_obstacles[self.skip_waypoint == False]
self.path_extension = self.path_extension[self.skip_waypoint == False]
return
def eval_statistics(self) -> None:
# perform general eval stats
super().eval_statistics()
# further eval stats
within_fov_rate = sum(self.goal_within_fov) / len(self.goal_within_fov)
avg_time = (
sum(self.walking_time[self.goal_reached]) / len(self.walking_time[self.goal_reached])
if len(self.walking_time[self.goal_reached]) > 0
else np.inf
)
base_collision_rate = sum(self.base_collision) / len(self.base_collision)
knee_collision_rate = sum(self.knee_collision) / len(self.knee_collision)
print(
f"Avg time (success): {avg_time} \n"
f"Goal within FOV: {within_fov_rate} \n"
f"Base collision rate: {base_collision_rate} \n"
f"Knee collision rate: {knee_collision_rate}"
)
# extend eval stats
self.eval_stats["within_fov_rate"] = within_fov_rate
self.eval_stats["avg_time"] = avg_time
self.eval_stats["base_collision_rate"] = base_collision_rate
self.eval_stats["knee_collision_rate"] = knee_collision_rate
return
def save_eval_results(self) -> None:
save_name = self._cfg.handcrafted_waypoint_file if self.use_waypoint_file else self.get_env_name()
return super().save_eval_results(self._agent._planner_cfg.model_dir, save_name)
def get_save_prefix(self) -> str:
return (
self._cfg.handcrafted_waypoint_file
if self.use_waypoint_file
else self.get_env_name() + f"_seed{self._cfg.seed}_pairs{self._cfg.num_pairs}"
)
def save_eval_arrays(self, eval_dir: str, suffix: str = "") -> None:
subdirectories = [name for name in os.listdir(eval_dir) if os.path.isdir(os.path.join(eval_dir, name))]
pre_directories = [subdir for subdir in subdirectories if "pre" in subdir] if len(subdirectories) > 0 else []
if len(pre_directories) > 0:
[shutil.rmtree(os.path.join(eval_dir, pre)) for pre in pre_directories]
prefix: str = self.get_save_prefix()
np.save(os.path.join(eval_dir, prefix + f"_goal_reached{suffix}.npy"), self.goal_reached)
np.save(os.path.join(eval_dir, prefix + f"_goal_within_fov{suffix}.npy"), self.goal_within_fov)
np.save(os.path.join(eval_dir, prefix + f"_base_collision{suffix}.npy"), self.base_collision)
np.save(os.path.join(eval_dir, prefix + f"_knee_collision{suffix}.npy"), self.knee_collision)
np.save(os.path.join(eval_dir, prefix + f"_walking_time{suffix}.npy"), self.walking_time)
np.save(os.path.join(eval_dir, prefix + f"_goal_distances{suffix}.npy"), self.goal_distances)
np.save(os.path.join(eval_dir, prefix + f"_length_goal{suffix}.npy"), self.length_goal)
np.save(os.path.join(eval_dir, prefix + f"_length_path{suffix}.npy"), self.length_path)
np.save(os.path.join(eval_dir, prefix + f"_loss_obstacles{suffix}.npy"), self.loss_obstacles)
np.save(os.path.join(eval_dir, prefix + f"_skip_waypoint{suffix}.npy"), self.skip_waypoint)
np.save(os.path.join(eval_dir, prefix + f"_path_extension{suffix}.npy"), self.path_extension)
return
def load_eval_arrays(self, eval_dir: str, suffix: str = "") -> Tuple[bool, int]:
try:
self._load_eval_arrays(eval_dir, suffix)
self._filter_statistics()
return True, 0
except FileNotFoundError:
print(f"[INFO] No previous results found in {eval_dir}, search for preliminary results!")
subdirectories = [name for name in os.listdir(eval_dir) if os.path.isdir(os.path.join(eval_dir, name))]
pre_directories = [subdir for subdir in subdirectories if "pre" in subdir] if len(subdirectories) > 0 else []
if len(pre_directories) > 1:
raise ValueError(f"Multiple pre directories found {pre_directories}, please only keep the most recent one")
elif len(pre_directories) == 1:
try:
eval_dir = os.path.join(eval_dir, pre_directories[0])
idx = pre_directories[0][3:]
self._load_eval_arrays(eval_dir, idx)
print(f"[INFO] Found preliminary results in {eval_dir}, continue from {idx} waypoint!")
return False, int(idx[1:])
except FileNotFoundError:
print(f"[INFO] No preliminary results found in {eval_dir}, start from scratch!")
return False, 0
else:
print(f"[INFO] No preliminary results found in {eval_dir}, start from scratch!")
return False, 0
def _load_eval_arrays(self, eval_dir: str, suffix: str = "") -> None:
prefix: str = self.get_save_prefix()
self.goal_reached = np.load(os.path.join(eval_dir, prefix + f"_goal_reached{suffix}.npy"))
self.goal_within_fov = np.load(os.path.join(eval_dir, prefix + f"_goal_within_fov{suffix}.npy"))
self.base_collision = np.load(os.path.join(eval_dir, prefix + f"_base_collision{suffix}.npy"))
self.knee_collision = np.load(os.path.join(eval_dir, prefix + f"_knee_collision{suffix}.npy"))
self.walking_time = np.load(os.path.join(eval_dir, prefix + f"_walking_time{suffix}.npy"))
self.goal_distances = np.load(os.path.join(eval_dir, prefix + f"_goal_distances{suffix}.npy"))
self.length_goal = np.load(os.path.join(eval_dir, prefix + f"_length_goal{suffix}.npy"))
self.length_path = np.load(os.path.join(eval_dir, prefix + f"_length_path{suffix}.npy"))
self.loss_obstacles = np.load(os.path.join(eval_dir, prefix + f"_loss_obstacles{suffix}.npy"))
self.skip_waypoint = np.load(os.path.join(eval_dir, prefix + f"_skip_waypoint{suffix}.npy"))
self.path_extension = np.load(os.path.join(eval_dir, prefix + f"_path_extension{suffix}.npy"))
return
##
# Waypoint functions
##
def setup_waypoints(self) -> np.ndarray:
if self.use_waypoint_file:
print(f"Loading waypoints from {self._cfg.waypoint_file} ...", end=" ")
# load waypoints
self._load_waypoints()
# define start-points
start_point = self.waypoints["start"]
# set number of waypoint pairs
self.set_nbr_paths(len(self.waypoints["waypoints"]))
print("Waypoints loaded.")
else:
save_waypoint_path = os.path.join(
self._cfg.waypoint_dir,
f"explored_{self.get_env_name()}_seed{self._cfg.seed}_pairs{self._cfg.num_pairs}",
)
if self._cfg.use_existing_explored_waypoints and os.path.isfile(save_waypoint_path + ".pkl"):
print(f"[INFO] Loading explored waypoints from {save_waypoint_path} ...", end=" ")
with open(save_waypoint_path + ".pkl", "rb") as f:
self.waypoints = pickle.load(f)
print("Waypoints loaded.")
else:
print(
"[INFO] No waypoints specified. Using random exploration to select start-goals. Generating now ..."
)
sample_points, nn_idx, collision, distance = self.explore_env()
nbr_points = len(sample_points)
# get edge indices
idx_edge_start = np.repeat(np.arange(nbr_points), repeats=self._cfg.num_connections, axis=0)
idx_edge_end = nn_idx.reshape(-1)
# filter collision edges and distances
idx_edge_end = idx_edge_end[~collision.reshape(-1)]
idx_edge_start = idx_edge_start[~collision.reshape(-1)]
distance = distance[~collision.reshape(-1)]
# init graph
graph = nx.Graph()
# add nodes with position attributes
graph.add_nodes_from(list(range(nbr_points)))
pos_attr = {i: {"pos": sample_points[i]} for i in range(nbr_points)}
nx.set_node_attributes(graph, pos_attr)
# add edges with distance attributes
graph.add_edges_from(list(map(tuple, np.stack((idx_edge_start, idx_edge_end), axis=1))))
distance_attr = {
(i, j): {"distance": distance[idx]} for idx, (i, j) in enumerate(zip(idx_edge_start, idx_edge_end))
}
nx.set_edge_attributes(graph, distance_attr)
# get all shortest paths
odom_goal_distances = dict(
nx.all_pairs_dijkstra_path_length(graph, cutoff=self._cfg.max_goal_dist * 5, weight="distance")
)
# map distance to idx pairs
random.seed(self._cfg.seed)
distance_map = {}
for curr_distance in range(self._cfg.min_goal_dist, int(self._cfg.max_goal_dist)):
# get all nodes with a distance to the goal of curr_distance
pairs = []
for key, value in odom_goal_distances.items():
norm_distance = np.linalg.norm(sample_points[key] - sample_points[list(value.keys())], axis=1)
decisions = np.where(
np.logical_and(norm_distance >= curr_distance, norm_distance <= curr_distance + 1)
)[0]
if len(decisions) > 0:
entries = np.array(list(value.keys()))[decisions]
[pairs.append({key: entry}) for entry in entries]
# randomly select certain pairs
distance_map[curr_distance + 1] = random.sample(
pairs,
min(len(pairs), int(self._cfg.num_pairs / (self._cfg.max_goal_dist - self._cfg.min_goal_dist))),
)
waypoints_idx = []
for values in distance_map.values():
waypoints_idx.extend(values)
self.waypoints = []
for idxs in waypoints_idx:
self.waypoints.append(
{tuple(graph.nodes[list(idxs.keys())[0]]["pos"]): graph.nodes[list(idxs.values())[0]]["pos"]}
)
# save waypoints
os.makedirs(self._cfg.waypoint_dir, exist_ok=True)
if os.path.isfile(save_waypoint_path + ".pkl"):
print(f"[INFO] File already exists {save_waypoint_path}, will save new one with time!")
now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
with open(save_waypoint_path + now + ".pkl", "wb") as fp:
pickle.dump(self.waypoints, fp)
else:
with open(save_waypoint_path + ".pkl", "wb") as fp:
pickle.dump(self.waypoints, fp)
# define start points
start_point = list(self.waypoints[0].keys())[0]
# define number of waypoints / paths
self.set_nbr_paths(len(self.waypoints))
print("Done.")
# set start position and spawn position for anymal
self._cfg_anymal.translation_x = start_point[0]
self._cfg_anymal.translation_y = start_point[1]
self._cfg_anymal.translation_z = 1.0 # start_point[2]
return start_point
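    # The exploration branch above in a nutshell -- a standalone sketch that only assumes
    # numpy and networkx and uses toy points instead of the collision-filtered mesh samples:
    #
    #   import networkx as nx
    #   import numpy as np
    #
    #   points = np.random.rand(100, 3) * 20.0  # toy sample points
    #   graph = nx.Graph()
    #   graph.add_nodes_from(range(len(points)))
    #   for i in range(len(points)):  # connect each point to its 3 nearest neighbors
    #       for j in np.argsort(np.linalg.norm(points - points[i], axis=1))[1:4]:
    #           graph.add_edge(i, int(j), distance=float(np.linalg.norm(points[i] - points[j])))
    #   dist = dict(nx.all_pairs_dijkstra_path_length(graph, cutoff=75, weight="distance"))
    #
    #   # start-goal pairs are then drawn per euclidean-distance band (min_goal_dist ...
    #   # max_goal_dist), exactly as done above before saving them to the waypoint file.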
def _load_waypoints(self) -> None:
"""
Expected that the waypoints have been recorded with the omni.isaac.waypoint extension and saved in .json format.
Structure of the json file:
{
start: [x, y, z],
end: [x, y, z],
waypoints: [[x, y, z], [x, y, z], ...]
}
"""
if self._cfg.waypoint_file.endswith(".json"):
self.waypoints = json.load(open(self._cfg.waypoint_file))
else:
self.waypoints = json.load(open(self._cfg.waypoint_file + ".json"))
        # copy waypoint coordinates (no additional scaling applied here)
self.waypoints["start"] = [x for x in self.waypoints["start"]]
self.waypoints["end"] = [x for x in self.waypoints["end"]]
self.waypoints["waypoints"] = [[x for x in waypoint] for waypoint in self.waypoints["waypoints"]]
# draw waypoints
self.draw_interface.draw_points([self.waypoints["start"]], [(1.0, 0.4, 0.0, 1.0)], [(10)]) # orange
self.draw_interface.draw_points([self.waypoints["end"]], [(0.0, 1.0, 0.0, 1.0)], [(10)]) # green
self.draw_interface.draw_points(
self.waypoints["waypoints"],
[(0.0, 0.0, 1.0, 1.0)] * len(self.waypoints["waypoints"]), # blue
[(10)] * len(self.waypoints["waypoints"]),
)
# attach end as further goal-point
self.waypoints["waypoints"].append(self.waypoints["end"])
return
# EoF
| 36,927 | Python | 44.144254 | 134 | 0.55542 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/walking_cfg.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Simulation configuration for the robot.
Note:
These are originally taken from the locomotion/velocity.py environment in Orbit.
"""
# python
import os
from dataclasses import dataclass, field
from typing import List
# orbit-assets
from omni.isaac.assets import ASSETS_DATA_DIR, ASSETS_RESOURCES_DIR
# orbit
from omni.isaac.orbit.robots.config.anymal import ANYMAL_B_CFG, ANYMAL_C_CFG
from omni.isaac.orbit.robots.legged_robot import LeggedRobotCfg
from omni.isaac.orbit.sensors.height_scanner import HeightScannerCfg
from omni.isaac.orbit.sensors.height_scanner.utils import create_points_from_grid
from omni.isaac.orbit.utils.configclass import configclass
# omni-isaac-anymal
from .controller_cfg import LocomotionRlControllerCfg
@dataclass
class ANYmalCfg:
"""Configuration for the walking extension."""
# simulator
sim: SimCfg = SimCfg()
viewer: ViewerCfg = ViewerCfg()
# scene
terrain: TerrainCfg = TerrainCfg()
# controller
rl_policy: LocomotionRlControllerCfg = LocomotionRlControllerCfg(
checkpoint_path=os.path.join(ASSETS_RESOURCES_DIR, "policy", "policy_obs_to_action_exp.pt"),
)
# robot
robot: List[LeggedRobotCfg] = field(default_factory=lambda: [ANYMAL_C_CFG, ANYMAL_C_CFG]) # ANYmal D not available
sensor: SensorCfg = SensorCfg()
height_scanner: HeightScannerCfg = HeightScannerCfg(
sensor_tick=0.0,
points=create_points_from_grid(size=(1.6, 1.0), resolution=0.1),
offset=(0.0, 0.0, 0.0),
direction=(0.0, 0.0, -1.0),
max_distance=1.0,
)
# translation and rotation
translation_x: float = 0.0
translation_y: float = 0.0
translation_z: float = 0.7
quat: tuple = (1.0, 0.0, 0.0, 0.0) # w,x,y,z
# prim path
prim_path: str = "/World/Anymal_c/Robot"
# ANYmal type
anymal_type: int = 0 # 0: ANYmal C, 1: ANYmal D
# record data for evaluation
follow_camera: bool = True
rec_frequency: int = 1 # nbr of sim.steps between two camera updates
rec_path: bool = True
rec_sensor: bool = True
# set functions
def _set_translation_x(self, value: list):
self.translation_x = value
def _set_translation_y(self, value: list):
self.translation_y = value
def _set_translation_z(self, value: list):
self.translation_z = value
def _set_prim_path(self, value: str):
self.prim_path = value
def _set_anymal_type(self, value: int):
self.anymal_type = value
# get functions
def get_translation(self):
return (self.translation_x, self.translation_y, self.translation_z)
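    # Usage sketch for the accessors above (illustrative values):
    #   cfg = ANYmalCfg()
    #   cfg._set_translation_x(1.0)
    #   cfg._set_translation_y(2.0)
    #   cfg.get_translation()  # -> (1.0, 2.0, 0.7), translation_z keeps its default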
# EoF
| 2,780 | Python | 27.96875 | 119 | 0.684173 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/vip_anymal.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import time
from typing import Dict, Optional
# omni
import carb
# python
import numpy as np
# omni-isaac-core
import omni.isaac.core.utils.prims as prim_utils
import open3d as o3d
# ROS
import rospy
import scipy.spatial.transform as tf
import torch
import torchvision.transforms as transforms
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Path
# omni-isaac-anymal
from omni.isaac.anymal.config import ANYmalCfg, VIPlannerCfg
from omni.isaac.anymal.utils import get_cam_pose
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.debug_draw import _debug_draw
from omni.isaac.orbit.robots.legged_robot import LeggedRobot
# omni-isaac-orbit
from omni.isaac.orbit.sensors.camera import Camera
from PIL import Image
from std_msgs.msg import Int16
# viplanner
from viplanner.config import TrainCfg, VIPlannerSemMetaHandler
from .vip_algo import VIPlannerAlgo
class VIPlanner:
"""
Visual Imperative Planner for Anymal
"""
debug: bool = False
def __init__(
self,
anymal_cfg: ANYmalCfg,
vip_cfg: VIPlannerCfg,
camera_sensors: Dict[str, Camera],
) -> None:
self._cfg_anymal: ANYmalCfg = anymal_cfg
self._cfg_vip: VIPlannerCfg = vip_cfg
self._camera_sensors = camera_sensors
# Simulation context
self.sim: SimulationContext = SimulationContext.instance()
# ANYmal model and camera paths
if self._cfg_vip.use_mount_cam:
self.cam_path: dict = self._cfg_vip.cam_path["mount"]
elif self._cfg_anymal.anymal_type == 0: # ANYmal C
self.cam_path: dict = self._cfg_vip.cam_path["ANYmal_C"]
elif self._cfg_anymal.anymal_type == 1: # ANYmal D
self.cam_path: dict = self._cfg_vip.cam_path["ANYmal_D"]
else:
raise ValueError(
f"ANYmal type {self._cfg_anymal.anymal_type} not supported!\n"
"Either select '0' for ANYmal_C and '1' for ANYmal_D"
)
if "rgb" in self.cam_path and "depth" in self.cam_path:
self.same_cam: bool = False if self.cam_path["rgb"] != self.cam_path["depth"] else True
# planner status
if self._cfg_vip.ros_pub:
self.planner_status = Int16()
self.planner_status.data = 0
# additional variables
self.fear: float = 0.0
self.traj_waypoints_np: np.ndarray = np.zeros(0)
self.traj_waypoints_odom: np.ndarray = np.zeros(0)
self._step = 0 # number of times the waypoint have been generated, used together with the frequency
self.distance_to_goal: float = 0.0
self.max_goal_distance: float = 0.0 + 1.0e-9 # to avoid division by zero
self.start_time: float = 0.0
##
# SETUP
##
# check for cuda
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# setup planner
self.planner = VIPlannerAlgo(self._cfg_vip.model_dir, self._cfg_vip.m2f_model_dir, self._cfg_vip.viplanner)
# check camera sensor
self._check_camera()
# setup goal transforms
self.goal_pos = prim_utils.get_prim_at_path(self._cfg_vip.goal_prim).GetAttribute("xformOp:translate")
self.goal_pos_prev = np.zeros(3) # previous goal position to check if goal has changed
# get field of view
self.alpha_fov: float = 0.0
self.get_fov()
# setup pixel array for warping of the semantic image (only if semantics activated)
self.pix_depth_cam_frame: np.ndarray = np.zeros(
(
self._camera_sensors[self.cam_path["depth"]].data.image_shape[0]
* self._camera_sensors[self.cam_path["depth"]].data.image_shape[1],
3,
)
)
if self.planner.train_config.sem or self.planner.train_config.rgb:
self._compute_pixel_tensor()
# get transforms for images
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize(self.planner.train_config.img_input_size),
]
)
# setup waypoint display in Isaac
self.draw = _debug_draw.acquire_debug_draw_interface()
self.point_list = [(1, 0, 0.5)] * self._cfg_vip.num_points_network_return
self.color = [(0.4, 1.0, 0.1, 1.0)] # green
self.color_fear = [(1.0, 0.4, 0.1, 1.0)] # red
self.color_path = [(1.0, 0.5, 0.0, 1.0)] # orange
self.size = [5.0]
        # setup semantic meta data if semantics are generated in isaac
if self._cfg_vip.sem_origin == "isaac":
self.viplanner_sem_meta = VIPlannerSemMetaHandler()
# setup ROS
if self._cfg_vip.ros_pub:
self.path_pub = rospy.Publisher(self._cfg_vip.path_topic, Path, queue_size=10)
self.fear_path_pub = rospy.Publisher(self._cfg_vip.path_topic + "_fear", Path, queue_size=10)
self.status_pub = rospy.Publisher(self._cfg_vip.status_topic, Int16, queue_size=10)
# save image
if self._cfg_vip.save_images:
# Create annotator output directory
file_path = os.path.join(os.getcwd(), "_out_annot", "")
self.dir = os.path.dirname(file_path)
os.makedirs(self.dir, exist_ok=True)
# time
self.time_measurement: bool = False
self.time_collect: float = 0.0
self.time_save: float = 0.0
# flags
self.goal_outside_fov: bool = False
return
##
# Public Functions
##
def set_planner_callback(self, val: bool = True) -> None:
##
# Setup callbacks
##
if val:
self.sim.add_physics_callback("vip_callback", callback_fn=self._planner_callback)
else:
self.sim.remove_physics_callback("vip_callback")
return
def switch_model(self, model_dir: str, m2f_model_dir: Optional[str] = None) -> None:
if m2f_model_dir is None and self._cfg_vip.m2f_model_dir is not None:
m2f_model_dir = self._cfg_vip.m2f_model_dir
# delete previous model from GPU
if self.planner.cuda_avail:
del self.planner.net
# load new model
self.planner.load_model(model_dir, m2f_model_dir)
return
##
# Internal Functions
##
def _check_camera(self) -> None:
assert self._camera_sensors[self.cam_path["depth"]]._is_spawned, "Front Depth Camera not spawned!"
assert (
"distance_to_image_plane" in self._camera_sensors[self.cam_path["depth"]].cfg.data_types
), "Missing data_type 'distance_to_image_plane' for front depth camera"
if self.planner.train_config.sem or self.planner.train_config.rgb:
assert self._camera_sensors[self.cam_path["rgb"]]._is_spawned, "Front RGB Camera not spawned!"
assert (
"semantic_segmentation" in self._camera_sensors[self.cam_path["rgb"]].cfg.data_types
), "Missing data_type 'semantic_segmentation' for front camera"
if self._cfg_vip.rgb_debug:
assert (
"rgb" in self._camera_sensors[self.cam_path["rgb"]].cfg.data_types
), "Missing data_type 'rgb' for front RGB camera"
return
def _planner_callback(self, dt) -> None:
# only plan with given frequency
if self._step % self._cfg_vip.planner_freq == 0:
# reset step counter
self._step = 0
# compute
self._camera_sensors[self.cam_path["depth"]].update(dt)
if not self.same_cam and self.planner.train_config.sem:
if self._cfg_vip.sem_origin == "isaac":
                    # run complete update if semantics are generated in isaac
self._camera_sensors[self.cam_path["rgb"]].update(dt)
else:
# for matterport data will be written in camera by matterport callback, only update pose
(
self._camera_sensors[self.cam_path["rgb"]].data.position,
self._camera_sensors[self.cam_path["rgb"]].data.orientation,
) = self._camera_sensors[self.cam_path["rgb"]]._compute_ros_pose()
elif not self.same_cam and self.planner.train_config.rgb:
self._camera_sensors[self.cam_path["rgb"]].update(dt)
self._compute()
# increment step counter
self._step += 1
return
def _compute(self) -> None:
# get goal pos
goal = np.asarray(self.goal_pos.Get())
cam_pos, cam_rot_quat = get_cam_pose(self._camera_sensors[self.cam_path["depth"]]._sensor_prim)
cam_rot = tf.Rotation.from_quat(cam_rot_quat).as_matrix()
# check if goal already reached --> exit here
self.distance_to_goal = np.sqrt((goal[0] - cam_pos[0]) ** 2 + (goal[1] - cam_pos[1]) ** 2)
if self.distance_to_goal < self._cfg_vip.conv_dist:
carb.log_info("GOAL REACHED!")
# planner status -> Success
if self._cfg_vip.ros_pub and self.planner_status.data == 0:
self.planner_status.data = 1
self.status_pub.publish(self.planner_status)
return
elif self._cfg_vip.ros_pub:
self.planner_status.data = 0
self.status_pub.publish(self.planner_status)
carb.log_verbose(f"DISTANCE TO GOAL: {self.distance_to_goal}")
# if goal is too far away --> project on max_goal_distance circle around robot
if self.distance_to_goal > self.planner.max_goal_distance:
goal[:2] = cam_pos[:2] + (goal[:2] - cam_pos[:2]) / self.distance_to_goal * self.planner.max_goal_distance
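            # e.g. with the camera at (0, 0), a goal at (20, 0) and max_goal_distance = 10,
            # the goal passed to the network becomes (10, 0) on the circle around the robot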
# apply rotation to goal --> transform goal into camera frame
goal_cam_frame = goal - cam_pos
goal_cam_frame[2] = 0 # trained with z difference of 0
goal_cam_frame = goal_cam_frame @ cam_rot
goal_cam_frame = torch.tensor(goal_cam_frame, dtype=torch.float32, device=self.device).unsqueeze(0)
# check if goal pos has changed
if not np.all(goal == self.goal_pos_prev):
self.goal_pos_prev = goal
self.max_goal_distance = self.distance_to_goal
self.start_time = self.sim.current_time
self.is_reset = False
# check if goal is in fov
if abs(torch.atan2(goal_cam_frame[0, 1], goal_cam_frame[0, 0])) > self.alpha_fov / 2:
self.goal_outside_fov = True
else:
self.goal_outside_fov = False
carb.log_info(
f"New goal position: {goal} received in distance {self.distance_to_goal} (out FOV: {self.goal_outside_fov})"
)
print(
f"[VIPlanner INFO] New goal position: {goal} received in distance {self.distance_to_goal} (out FOV: {self.goal_outside_fov})"
)
start = time.time()
# Collect Groundtruth
depth_image = self._camera_sensors[self.cam_path["depth"]].data.output["distance_to_image_plane"]
depth_image[~np.isfinite(depth_image)] = 0 # set all inf or nan values to 0
depth_image[depth_image > self.planner.max_depth] = 0.0
depth_image_torch = self.transform(depth_image) # declare as new variable since reused in semantic warp
depth_image_torch = depth_image_torch.unsqueeze(0).to(self.device)
# time for collecting data
self.time_collect = time.time() - start
if self.planner.train_config.sem:
# check if semantics available
if self._cfg_vip.sem_origin not in ["isaac", "callback"]:
carb.log_error(
f"Unknown data source '{self._cfg_vip.sem_origin}'! Select either 'isaac' or 'callback'!"
)
return
if self._camera_sensors[self.cam_path["rgb"]].data.output["semantic_segmentation"] is None:
carb.log_warn("No semantic segmentation data available! No waypoint generated in this step!")
return
elif isinstance(self._camera_sensors[self.cam_path["rgb"]].data.output["semantic_segmentation"], dict) and [
label_class_dict["class"]
for label_class_dict in self._camera_sensors[self.cam_path["rgb"]]
.data.output["semantic_segmentation"]["info"]["idToLabels"]
.values()
] == ["BACKGROUND", "UNLABELLED"]:
carb.log_warn(
"Semantic data only of type BACKGROUND and UNLABELLED! No waypoint generated in this step!"
)
return
            # handling for isaac-rendered semantics, using the orbit camera class to generate the data
sem_image: np.ndarray = np.zeros(
(
self._camera_sensors[self.cam_path["rgb"]].data.image_shape[1],
self._camera_sensors[self.cam_path["rgb"]].data.image_shape[0],
)
)
if self._cfg_vip.sem_origin == "isaac":
# collect image
sem_image = self._camera_sensors[self.cam_path["rgb"]].data.output["semantic_segmentation"]["data"]
sem_idToLabels = self._camera_sensors[self.cam_path["rgb"]].data.output["semantic_segmentation"][
"info"
]["idToLabels"]
sem_image = self.sem_color_transfer(sem_image, sem_idToLabels)
else:
sem_image = self._camera_sensors[self.cam_path["rgb"]].data.output["semantic_segmentation"]
# overlay semantic image on depth image
sem_image = self._get_overlay_semantics(sem_image, depth_image, depth_rot=cam_rot)
# move to tensor
sem_image = self.transform(sem_image.astype(np.uint8))
sem_image = sem_image.unsqueeze(0).to(self.device)
# update time
self.time_collect = time.time() - start
# run network
_, traj_waypoints, self.fear = self.planner.plan_dual(depth_image_torch, sem_image, goal_cam_frame)
elif self.planner.train_config.rgb:
if self._camera_sensors[self.cam_path["rgb"]].data.output["rgb"] is None:
carb.log_warn("No rgb data available! No waypoint generated in this step!")
return
rgb_image = self._camera_sensors[self.cam_path["rgb"]].data.output["rgb"]
# overlay semantic image on depth image
rgb_image = self._get_overlay_semantics(rgb_image, depth_image, depth_rot=cam_rot)
# apply mean and std normalization
rgb_image = (rgb_image - self.planner.pixel_mean) / self.planner.pixel_std
# move to tensor
rgb_image = self.transform(rgb_image)
rgb_image = rgb_image.unsqueeze(0).to(self.device)
# update time
self.time_collect = time.time() - start
# run network
_, traj_waypoints, self.fear = self.planner.plan_dual(depth_image_torch, rgb_image, goal_cam_frame)
else:
# run network
_, traj_waypoints, self.fear = self.planner.plan(depth_image_torch, goal_cam_frame)
self.traj_waypoints_np = traj_waypoints.cpu().detach().numpy().squeeze(0)
self.traj_waypoints_np = self.traj_waypoints_np[1:, :] # first twist command is zero --> remove it
# plot trajectory
self.traj_waypoints_odom = self.traj_waypoints_np @ cam_rot.T + cam_pos # get waypoints in world frame
self.draw.clear_lines()
if self.fear > self._cfg_vip.fear_threshold:
self.draw.draw_lines(
self.traj_waypoints_odom.tolist()[:-1],
self.traj_waypoints_odom.tolist()[1:],
self.color_fear * len(self.traj_waypoints_odom.tolist()[1:]),
self.size * len(self.traj_waypoints_odom.tolist()[1:]),
)
self.draw.draw_lines(
[cam_pos.tolist()],
[goal.tolist()],
self.color_fear,
[2.5],
)
else:
self.draw.draw_lines(
self.traj_waypoints_odom.tolist()[:-1],
self.traj_waypoints_odom.tolist()[1:],
self.color * len(self.traj_waypoints_odom.tolist()[1:]),
self.size * len(self.traj_waypoints_odom.tolist()[1:]),
)
self.draw.draw_lines(
[cam_pos.tolist()],
[goal.tolist()],
self.color_path,
[2.5],
)
if self._cfg_vip.ros_pub:
self._pub_path(waypoints=self.traj_waypoints_np)
else:
carb.log_info(f"New waypoints generated! \n {self.traj_waypoints_np}")
if self._cfg_vip.save_images:
start = time.time()
self._save_depth(depth_image, self.dir + "/depth_front_step_" + str(self._step))
self._save_rgb() if self._cfg_vip.rgb_debug else None
self.time_save = time.time() - start
if self.time_measurement:
print(f"Time collect: {self.time_collect} \t Time save: {self.time_save}")
return
def reset(self) -> None:
"""Reset the planner variables."""
self.fear: float = 0.0
self.traj_waypoints_np: np.ndarray = np.zeros(0)
self.traj_waypoints_odom: np.ndarray = np.zeros(0)
self._step = 0
self.goal_outside_fov: bool = False
self.goal_pos_prev: np.ndarray = np.zeros(3)
self.distance_to_goal: float = 0.0
self.max_goal_distance: float = 0.0 + 1.0e-9
self.start_time: float = 0.0
self.is_reset: bool = True
return
    def _pub_path(self, waypoints: np.ndarray) -> None:
path = Path()
fear_path = Path()
curr_time = rospy.Time.from_sec(self.sim.current_time)
for p in waypoints:
pose = PoseStamped()
pose.header.stamp = curr_time
pose.header.frame_id = "odom"
pose.pose.position.x = p[0]
pose.pose.position.y = p[1]
pose.pose.position.z = p[2]
path.poses.append(pose)
# add header
path.header.frame_id = fear_path.header.frame_id = "odom"
path.header.stamp = fear_path.header.stamp = curr_time
# publish fear path
# if self.is_fear_reaction:
# fear_path.poses = copy.deepcopy(path.poses)
# path.poses = path.poses[:1]
# publish path
# self.fear_path_pub.publish(fear_path)
self.path_pub.publish(path)
return
def get_fov(self) -> None:
# load intrinsics --> used to calculate fov
intrinsics = self._camera_sensors[self.cam_path["depth"]].data.intrinsic_matrix
self.alpha_fov = 2 * np.arctan(intrinsics[0, 0] / intrinsics[0, 2])
return
""" Helper to warp semantic image to depth image """
def _get_overlay_semantics(self, sem_img: np.ndarray, depth_img: np.ndarray, depth_rot: np.ndarray) -> np.ndarray:
# get semantic rotation matrix
sem_pos, sem_rot_quat = get_cam_pose(self._camera_sensors[self.cam_path["rgb"]]._sensor_prim)
sem_rot = tf.Rotation.from_quat(sem_rot_quat).as_matrix()
sem_rot = sem_rot.astype(np.float64)
depth_rot = depth_rot.astype(np.float64)
# project depth pixels into 3d space
        dep_im_reshaped = depth_img.reshape(-1, 1)
points = (
dep_im_reshaped * (depth_rot @ self.pix_depth_cam_frame.T).T
+ self._camera_sensors[self.cam_path["depth"]].data.position
)
# transform points to semantic camera frame
points_sem_cam_frame = (sem_rot.T @ (points - sem_pos).T).T
# normalize points
points_sem_cam_frame_norm = points_sem_cam_frame / points_sem_cam_frame[:, 0][:, np.newaxis]
# reorder points be camera convention (z-forward)
points_sem_cam_frame_norm = points_sem_cam_frame_norm[:, [1, 2, 0]] * np.array([-1, -1, 1])
# transform points to pixel coordinates
pixels = (self._camera_sensors[self.cam_path["rgb"]].data.intrinsic_matrix @ points_sem_cam_frame_norm.T).T
# filter points outside of image
filter_idx = (
(pixels[:, 0] >= 0)
& (pixels[:, 0] < sem_img.shape[1])
& (pixels[:, 1] >= 0)
& (pixels[:, 1] < sem_img.shape[0])
)
# get semantic annotation
sem_annotation = np.zeros((pixels.shape[0], 3), dtype=np.uint16)
sem_annotation[filter_idx] = sem_img[pixels[filter_idx, 1].astype(int), pixels[filter_idx, 0].astype(int)]
# reshape to image
sem_img_warped = sem_annotation.reshape(depth_img.shape[0], depth_img.shape[1], 3)
# DEBUG
if self.debug:
import matplotlib.pyplot as plt
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.imshow(depth_img)
ax2.imshow(sem_img_warped / 255)
ax3.imshow(depth_img)
ax3.imshow(sem_img_warped / 255, alpha=0.5)
plt.show()
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
o3d.visualization.draw_geometries([pcd])
return sem_img_warped
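    # Sanity check for the projection above (illustrative numbers): a 3D point 2 m straight
    # ahead on the semantic camera's optical axis yields points_sem_cam_frame = (2, 0, 0);
    # after normalization and reordering it becomes (0, 0, 1), which the intrinsic matrix
    # maps to pixel (cx, cy), i.e. the principal point -- as expected for a pinhole model.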
"""Semantic Image Color Transfer"""
    def sem_color_transfer(self, sem_image: np.ndarray, sem_idToLabels: dict) -> Optional[np.ndarray]:
"""Convert semantic segmentation image to viplanner color space
Args:
sem_image (np.ndarray): sem_image as received by the simulation
sem_idToLabels (dict): information about which class is which index in sem_image
Returns:
np.ndarray: sem_image in viplanner color space
"""
if not sem_idToLabels:
carb.log_warn("No semantic segmentation data available! No waypoint generated in this step!")
return
for k, v in sem_idToLabels.items():
if not dict(v):
sem_idToLabels[k] = {"class": "static"}
elif "BACKGROUND" == v["class"]:
sem_idToLabels[k] = {"class": "static"}
elif "UNLABELLED" == v["class"]:
sem_idToLabels[k] = {"class": "static"}
# color mapping
sem_idToColor = np.array(
[
[
int(k),
self.viplanner_sem_meta.class_color[v["class"]][0],
self.viplanner_sem_meta.class_color[v["class"]][1],
self.viplanner_sem_meta.class_color[v["class"]][2],
]
for k, v in sem_idToLabels.items()
]
)
        # order colors by their id; necessary to account for missing indices (ids are not guaranteed to be consecutive)
sem_idToColorMap = np.zeros((max(sem_idToColor[:, 0]) + 1, 3), dtype=np.uint8)
for cls_color in sem_idToColor:
sem_idToColorMap[cls_color[0]] = cls_color[1:]
# colorize semantic image
try:
sem_image = sem_idToColorMap[sem_image.reshape(-1)].reshape(sem_image.shape + (3,))
except IndexError:
print("IndexError: Semantic image contains unknown labels")
return
return sem_image
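    # The colorization above is a plain lookup-table operation; a minimal numpy-only
    # equivalent (illustrative ids and colors, not the actual VIPlanner color space):
    #   lut = np.zeros((2, 3), dtype=np.uint8)
    #   lut[0] = (0, 0, 0)        # id 0 -> "static"
    #   lut[1] = (170, 120, 80)   # id 1 -> some other class
    #   labels = np.array([[0, 1], [1, 0]])
    #   colored = lut[labels.reshape(-1)].reshape(labels.shape + (3,))  # (2, 2, 3) RGB image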
# EoF
| 23,546 | Python | 39.951304 | 141 | 0.578739 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/extension_scripts/vip_config.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from dataclasses import MISSING
import numpy as np
# orbit-assets
from omni.isaac.assets import ASSETS_RESOURCES_DIR
# omni-isaac-orbit
from omni.isaac.orbit.utils.configclass import configclass
@configclass
class TwistControllerCfg:
lookAheadDistance: float = 0.5
minPointsWithinLookAhead: int = 3
two_way_drive: bool = False
switch_time_threshold: float = 1.0
maxSpeed: float = 0.5
maxAccel: float = 2.5 / 100.0 # 2.5 / 100
joyYaw: float = 1.0
yawRateGain: float = 7.0 # 3.5
stopYawRateGain: float = 7.0 # 3.5
maxYawRate: float = 90.0 * np.pi / 360
dirDiffThre: float = 0.7
stopDisThre: float = 0.4
slowDwnDisThre: float = 0.3
slowRate1: float = 0.25
slowRate2: float = 0.5
noRotAtGoal: bool = True
autonomyMode: bool = False
# extra functionality
stuck_time_threshold: float = 2.0
stuck_avoidance_duration: int = 30 # number of steps stuck avoidance twist will be executed
@configclass
class VIPlannerCfg:
"""Configuration for the ROS publishing for Waypoint Follower and VIPlanner (ROS)."""
viplanner: bool = True
"""Use VIPlanner or iPlanner"""
model_dir: str = os.path.join(
ASSETS_RESOURCES_DIR,
"vip_models/plannernet_env2azQ1b91cZZ_new_colorspace_ep100_inputDepSem_costSem_optimSGD_new_colorspace_sharpend_indoor",
)
"""Path to the model directory (expects a model.pt and model.yaml file in the directory)."""
sem_origin: str = (
"isaac" # "isaac" or "callback (in case the semantics cannot be generated in isaac e.g. matterport)"
)
"""Data source of the environment --> important for color mapping of the semantic segmentation"""
m2f_model_dir: str = os.path.join(ASSETS_RESOURCES_DIR, "vip_models", "m2f_models")
"""Path to mask2former model for direct RGB input (directly including config file and model weights)"""
planner_freq: int = 20
"""Frequency of the planner in Hz."""
goal_prim: str = "/World/waypoint"
"""The prim path of the cube in the USD stage"""
cam_path: dict = {
"ANYmal_C": {"rgb": "front_depth", "depth": "front_depth"},
"ANYmal_D": {"rgb": "front_rgb", "depth": "front_depth"},
"mount": {"rgb": "viplanner_rgb", "depth": "viplanner_depth"},
}
use_mount_cam: bool = False
"""Camera Path names as defined in config.sensor_cfg that should be used to render the network inputs"""
rgb_debug: bool = False
"""Save RGB images together with depth (mainly for debug reasons)."""
num_points_network_return: int = 51
"""Number of points the network returns."""
conv_dist: float = 0.5
"""Distance to the goal to save that it has been reached successfully"""
obs_loss_threshold: float = 0.3
"""Obstacle threshold to consider a path as successful"""
path_topic: str = "/path"
"""Topic to publish the path."""
status_topic: str = "/status"
"""Topic to publish the planner status."""
save_images: bool = False
"""Save depth images to disk."""
ros_pub: bool = False
"""Publish the path and status to ROS (only needed for VIPlanner ROS)."""
look_back_factor: int = 15
"""Look back factor for the path."""
fear_threshold: float = 0.5
# twist controller config
twist_controller_cfg: TwistControllerCfg = TwistControllerCfg()
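# Example override of the defaults above (sketch only; the model path below is hypothetical):
#   planner_cfg = VIPlannerCfg(
#       model_dir="/path/to/another/viplanner_model",
#       use_mount_cam=True,  # render the network inputs from the "mount" cameras instead
#       conv_dist=0.3,
#   )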
# EoF
| 3,516 | Python | 34.887755 | 128 | 0.667235 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/matterport_exploration/sampler_config.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Config for Exploration/ Data Sampling in Matterport3D Dataset
"""
# python
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
@dataclass
class SamplerCfg:
points_per_m2: int = 20
"""Number of random points per m2 of the mesh surface area."""
device = "cuda" if torch.cuda.is_available() else "cpu"
"""Device to use for computations."""
height: float = 0.5
"""Height to use for the random points."""
min_height: float = 0.2
"""Maximum height to be considered an accessible point for the robot"""
ground_height: float = -0.1
"""Height of the ground plane"""
min_wall_distance: float = 0.5
"""Minimum distance to a wall to be considered an accessible point for the robot"""
x_angle_range: Tuple[float, float] = (-2.5, 2.5)
y_angle_range: Tuple[float, float] = (-2, 5) # negative angle means in isaac convention: look down
# ANYmal D: (-2, 5) <-> ANYmal C: (25, 35)
# NOTE: the angles should follow the isaac convention, i.e. x-forward, y-left, z-up
"""Range of the x and y angle of the camera (in degrees), will be randomly selected according to a uniform distribution"""
min_hit_rate: float = 0.8
"""Don't use a point if the hit rate is below this value"""
min_avg_hit_distance: float = 0.5
"""Don't use a point if the max hit distance is below this value"""
min_std_hit_distance: float = 0.5
"""Don't use a point if the std hit distance is below this value"""
conv_rate: float = 0.9
"""Rate of faces that are covered by three different images, used to terminate the exploration"""
# DEPTH CAMERA
cam_depth_prim: str = "/cam_depth"
cam_depth_resolution: Tuple[int, int] = (848, 480) # (width, height)
cam_depth_focal_length: float = 1.93 # in mm
# ANYmal D wide_angle_camera: 1.0 <-> ANYmal C realsense: 1.93 <-> RealSense D455: 1.93
cam_depth_clipping_range: Tuple[float, float] = (0.01, 1000.0)
cam_depth_aperture: float = 3.8 # in mm
cam_depth_intrinsics: Optional[Tuple[float]] = (430.31607, 0.0, 428.28408, 0.0, 430.31607, 244.00695, 0.0, 0.0, 1.0)
# ANYmal C/D: (423.54608, 0.0, 427.69815, 0.0, 423.54608, 240.17773, 0.0, 0.0, 1.0)
# RealSense D455: (430.31607, 0.0, 428.28408, 0.0, 430.31607, 244.00695, 0.0, 0.0, 1.0)
# NOTE: either provide the aperture or the camera matrix (if both, the camera matrix will be used)
"""Depth camera configuration"""
tf_pos: tuple = (0.0, 0.0, 0.0) # (translation in depth frame)
# ANYmal D: (-0.002, 0.025, 0.042) <-> ANYmal C and RealSense D455: (0.0, 0.0, 0.0)
tf_quat: tuple = (0.0, 0.0, 0.0, 1.0) # xyzw quaternion format (rotation in depth frame)
# ANYmal D: (0.001, 0.137, -0.000, 0.991) <-> ANYmal C and RealSense D455: (0.0, 0.0, 0.0, 1.0)
tf_quat_convention: str = "roll-pitch-yaw" # or "isaac"
# NOTE: if the quat follows the roll-pitch-yaw convention, i.e. x-forward, y-right, z-down, will be converted to the isaac convention
"""transformation from depth (src) to semantic camera (target)"""
# SEMANTIC CAMERA
cam_sem_prim: str = "/cam_sem"
cam_sem_resolution: Tuple[int, int] = (1280, 720)
# ANYmal D wide_angle_camera: (1440, 1080) <-> ANYmal C realsense (848, 480) <-> RealSense D455 (1280, 720)
cam_sem_focal_length: float = 1.93 # in mm (for ANYmal C100 - https://anymal-research.docs.anymal.com/user_manual/anymal_c100/release-23.02/documents/anymal_c_hardware_guide/main.html?highlight=wide+angle#achg-sssec-wide-angle-cameras)
# ANYmal D wide_angle_camera: 1.93 <-> ANYmal C realsense: 1.0 <-> RealSense D455: 1.93
cam_sem_clipping_range: Tuple[float, float] = (0.01, 1000.0)
cam_sem_aperture: float = 3.8 # in mm
cam_sem_intrinsics: Optional[Tuple[float]] = (644.15496, 0.0, 639.53125, 0.0, 643.49212, 366.30880, 0.0, 0.0, 1.0)
# ANYmal D wide_angle_camera: (575.60504, 0.0, 745.73121, 0.0, 578.56484, 519.52070, 0.0, 0.0, 1.0)
# ANYmal C realsense: (423.54608, 0.0, 427.69815, 0.0, 423.54608, 240.17773, 0.0, 0.0, 1.0)
# RealSense D455: (644.15496, 0.0, 639.53125, 0.0, 643.49212, 366.30880, 0.0, 0.0, 1.0)
# NOTE: either provide the aperture or the camera matrix (if both, the camera matrix will be used)
"""Semantic camera configuration"""
cam_sem_rgb: bool = True
"""Whether to record rgb images or not"""
# SAVING
max_images: int = 2000
"""Maximum number of images recorded"""
save_path: str = "/home/pascal/viplanner/imperative_learning/data"
suffix: Optional[str] = "cam_mount"
"""Path to save the data to (directly with env name will be created)"""
# EoF
| 4,821 | Python | 51.413043 | 240 | 0.651525 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/matterport_exploration/random_exploration.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import random
import time
from typing import Tuple
import carb
import numpy as np
# omni
import omni
import scipy.spatial.transform as tf
import torch
from omni.isaac.matterport.config import SamplerCfg
# omni-isaac-matterport
from omni.isaac.matterport.semantics import MatterportWarp
# omni-isaac-orbit
from omni.isaac.orbit.sensors.camera import Camera, PinholeCameraCfg
from scipy.spatial import KDTree
# python
from scipy.stats import qmc
class RandomExplorer:
debug = False
time_measurement: bool = False
def __init__(self, domains: MatterportWarp, cfg: SamplerCfg = None):
# config
self._cfg_explorer = SamplerCfg() if cfg is None else cfg
# domains
self.domains: MatterportWarp = domains
# setup cameras and writer
ply_file = os.path.split(self.domains._cfg.import_file_ply)[1]
if self._cfg_explorer.suffix is not None and isinstance(self._cfg_explorer.suffix, str):
suffix = "_" + self._cfg_explorer.suffix
else:
suffix = ""
self.save_path = os.path.join(self._cfg_explorer.save_path, ply_file[:-4] + suffix)
# get camera centers
self.nbr_points: int = 0
self.camera_centers: torch.Tensor = torch.empty((0, 3), dtype=torch.float32)
# get_variations
self.x_angles: np.ndarray = np.zeros(self.nbr_points)
self.y_angles: np.ndarray = np.zeros(self.nbr_points)
self.z_angles: np.ndarray = np.zeros(self.nbr_points)
# setup conv
self.nbr_faces: int = 0
self.face_idx: torch.Tensor = torch.zeros((0,), dtype=torch.int64)
self.conv_crit = True
self.pt_idx = 0
self.save_idx = 0
# time measurements
self.time_raycast: float = 0.0
self.time_render: float = 0.0
self.time_save: float = 0.0
self.time_total: float = 0.0
return
##
# Public Function
##
def setup(self) -> None:
self._setup_cameras()
self.domains.init_save(save_path=self.save_path)
self._get_sample_points()
self._get_view_variations()
self._setup_conv()
return
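# Typical usage (sketch, assuming a fully initialized MatterportWarp instance `domains`):
#   explorer = RandomExplorer(domains, SamplerCfg())
#   explorer.setup()    # registers cameras, samples viewpoints and prepares saving
#   explorer.explore()  # raycasts, renders and saves images until the convergence criterion is met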
def explore(self) -> None:
# get cam_data
cam_data_depth = self.domains.cameras[0]
cam_data_sem = self.domains.cameras[1]
# start sim if RGB images should be rendered
if self._cfg_explorer.cam_sem_rgb:
self.domains.sim.play()
while self.conv_crit:
total_start = time.time()
# get current variation in camera position and rotation of the semantic camera
# rotations follow the Isaac convention: x-forward, y-left, z-up
cam_data_sem.pos = self.camera_centers[self.pt_idx]
cam_rot_sem_from_odom = np.array(
[self.x_angles[self.pt_idx], self.y_angles[self.pt_idx], int(self.z_angles[self.pt_idx])]
)
cam_rot_sem_from_odom_mat = tf.Rotation.from_euler("xyz", cam_rot_sem_from_odom, degrees=True).as_matrix()
cam_data_sem.rot = torch.tensor(cam_rot_sem_from_odom_mat, dtype=torch.float32)
carb.log_verbose(f"Point: {self.pt_idx} \tsem camera pose: {cam_data_sem.pos} {cam_rot_sem_from_odom}")
# get depth camera rotation relative to the semantic camera rotation and convert it to Isaac convention
# Isaac Convention: x-forward, y-left, z-up
if self._cfg_explorer.tf_quat_convention == "isaac":
cam_rot_sem_from_depth = tf.Rotation.from_quat(self._cfg_explorer.tf_quat).as_matrix()
elif self._cfg_explorer.tf_quat_convention == "roll-pitch-yaw":
cam_rot_sem_from_depth = tf.Rotation.from_quat(self._cfg_explorer.tf_quat).as_euler("XYZ", degrees=True)
cam_rot_sem_from_depth[[1, 2]] *= -1
cam_rot_sem_from_depth = tf.Rotation.from_euler("XYZ", cam_rot_sem_from_depth, degrees=True).as_matrix()
else:
raise ValueError(f"tf_quat_convention {self._cfg_explorer.tf_quat_convention} not supported")
# get depth camera pose and rotation from the semantic camera pose and rotation
cam_rot_depth_from_odom = np.matmul(cam_rot_sem_from_odom_mat, cam_rot_sem_from_depth.T)
cam_data_depth.rot = torch.tensor(cam_rot_depth_from_odom, dtype=torch.float32)
vec_depth_to_sem_odom_frame = np.matmul(cam_rot_depth_from_odom, self._cfg_explorer.tf_pos)
cam_data_depth.pos = cam_data_sem.pos - torch.tensor(vec_depth_to_sem_odom_frame, dtype=torch.float32)
# do raycasting
start = time.time()
hit_rate = 1.0
for cam_data in self.domains.cameras:
# get new ray directions in world frame
cam_data.ray_directions = self.domains._get_ray_directions(
cam_data.pos, cam_data.rot, cam_data.pixel_coords
)
# raycast
cam_data.ray_hit_coords, cam_data.ray_face_indices, cam_data.ray_distances = self.domains._raycast(
cam_data.pos.repeat(len(cam_data.ray_directions)),
cam_data.ray_directions,
cam_rot=cam_data.rot,
pix_offset=cam_data.pixel_offset,
)
# filter inf values
hit_rate_single_cam = torch.isfinite(cam_data.ray_distances).sum() / len(cam_data.ray_distances)
hit_rate = min(hit_rate, hit_rate_single_cam)
carb.log_verbose(f"Point: {self.pt_idx} \tRate of rays hitting the mesh: {hit_rate_single_cam}")
cam_data.ray_hit_coords[~torch.isfinite(cam_data.ray_hit_coords)] = 0
cam_data.ray_distances[~torch.isfinite(cam_data.ray_distances)] = 0
self.time_raycast = time.time() - start
# filter points with insufficient hit rate and too small min distance (use the semantic camera)
if hit_rate < self._cfg_explorer.min_hit_rate:
print(f"Point: {self.pt_idx} \trejected due to insufficient hit rate")
self.pt_idx += 1
continue
elif torch.mean(cam_data_sem.ray_distances) < self._cfg_explorer.min_avg_hit_distance:
print(f"Point: {self.pt_idx} \trejected due to too small average hit distance")
self.pt_idx += 1
continue
elif torch.std(cam_data_sem.ray_distances) < self._cfg_explorer.min_std_hit_distance:
print(f"Point: {self.pt_idx} \trejected due to too small standard deviation of hit distance")
self.pt_idx += 1
continue
# DEBUG
if self.debug:
# set camera to the random selected pose
self.domains.draw.clear_points()
self.domains.draw.draw_points(
random.choices(cam_data_sem.ray_hit_coords.cpu().tolist(), k=5000),
self.domains.colors_2,
self.domains.sizes,
)
self.domains.draw.draw_points(
random.choices(cam_data_sem.pixel_coords.cpu().tolist(), k=5000),
self.domains.colors_3,
self.domains.sizes,
)
# render and save data
for idx, cam_data in enumerate(self.domains.cameras):
start = time.time()
self.domains._render(cam_data)
self.time_render = time.time() - start
if cam_data.visualize:
start = time.time()
self.domains._update_visualization(cam_data)
self.time_visualize = time.time() - start
start = time.time()
self.domains._save_data(cam_data, self.save_idx, cam_idx=idx)
self.time_save = time.time() - start
# DEBUG
if self.debug:
import matplotlib.pyplot as plt
_, axs = plt.subplots(1, 2, figsize=(15, 5))
axs[0].imshow(cam_data_depth.render_depth)
axs[1].imshow(cam_data_sem.render_sem)
plt.show()
# check convergence according to semantic camera
ray_face_filtered = cam_data_sem.ray_face_indices[cam_data_sem.ray_face_indices != -1]
self.face_idx[ray_face_filtered.long().cpu()] += 1
conv_face = torch.sum(self.face_idx > 2)
conv_rate = conv_face / self.nbr_faces
if conv_rate > self._cfg_explorer.conv_rate or self.save_idx > self._cfg_explorer.max_images:
self.conv_crit = False
self.time_total = time.time() - total_start
# Update messages
face1_count = torch.sum(self.face_idx >= 1).item()
print(
f"Point: {self.pt_idx} \t Save Idx: {self.save_idx} \t Faces 1: {face1_count} <=> {(round(float(face1_count / self.nbr_faces * 100), 6))} (%)"
f"\t Faces 3: {conv_face} <=> {(round(float(conv_rate*100), 6))} (%) \t in {self.time_total}s"
)
if self.time_measurement:
print(
f"Raycast: {self.time_raycast} \t Render: {self.time_render} \t Visualize: {self.time_visualize}"
f"\t Save: {self.time_save} \n Overall: {self.time_total}"
)
# update index
self.pt_idx += 1
self.save_idx += 1
if self.pt_idx >= self.nbr_points - 1:
self.conv_crit = False
print(
f"All points have been sampled, currently {self.save_idx} points saved. If more points are "
f"needed, increase the number of points per m2"
)
self.domains._end_save()
return
##
# Helper Sample Points
##
def _setup_cameras(self) -> None:
"""Setup the cameras for the exploration."""
stage = omni.usd.get_context().get_stage()
# depth camera
if self._cfg_explorer.cam_depth_intrinsics is not None:
intrinsics = np.array(self._cfg_explorer.cam_depth_intrinsics).reshape(3, 3)
horizontalAperture = (
self._cfg_explorer.cam_depth_resolution[0]
* self._cfg_explorer.cam_depth_focal_length
/ intrinsics[0, 0]
)
else:
horizontalAperture = self._cfg_explorer.cam_depth_aperture
depth_cam_prim = stage.DefinePrim(self._cfg_explorer.cam_depth_prim, "Camera")
depth_cam_prim.GetAttribute("focalLength").Set(self._cfg_explorer.cam_depth_focal_length) # set focal length
depth_cam_prim.GetAttribute("clippingRange").Set(
self._cfg_explorer.cam_depth_clipping_range
) # set clipping range
depth_cam_prim.GetAttribute("horizontalAperture").Set(horizontalAperture) # set aperture
self.domains.register_camera(
depth_cam_prim,
self._cfg_explorer.cam_depth_resolution[0],
self._cfg_explorer.cam_depth_resolution[1],
depth=True,
visualization=self.debug,
)
# semantic and rgb camera
if self._cfg_explorer.cam_sem_intrinsics is not None:
intrinsics = np.array(self._cfg_explorer.cam_sem_intrinsics).reshape(3, 3)
horizontalAperture = (
self._cfg_explorer.cam_sem_resolution[0] * self._cfg_explorer.cam_sem_focal_length / intrinsics[0, 0]
)
else:
horizontalAperture = self._cfg_explorer.cam_sem_aperture
sem_cam_prim = stage.DefinePrim(self._cfg_explorer.cam_sem_prim, "Camera")
sem_cam_prim.GetAttribute("focalLength").Set(self._cfg_explorer.cam_sem_focal_length) # set focal length
sem_cam_prim.GetAttribute("horizontalAperture").Set(horizontalAperture) # set aperture
sem_cam_prim.GetAttribute("clippingRange").Set(self._cfg_explorer.cam_sem_clipping_range) # set clipping range
if self._cfg_explorer.cam_sem_rgb:
orbit_cam_cfg = PinholeCameraCfg(
width=self._cfg_explorer.cam_sem_resolution[0],
height=self._cfg_explorer.cam_sem_resolution[1],
)
orbit_cam_cfg.usd_params.clipping_range = self._cfg_explorer.cam_sem_clipping_range
orbit_cam_cfg.usd_params.focal_length = self._cfg_explorer.cam_sem_focal_length
orbit_cam_cfg.usd_params.horizontal_aperture = horizontalAperture
orbit_cam = Camera(orbit_cam_cfg)
orbit_cam.spawn(self._cfg_explorer.cam_sem_prim + "_rgb")
orbit_cam.initialize()
else:
orbit_cam = None
self.domains.register_camera(
sem_cam_prim,
self._cfg_explorer.cam_sem_resolution[0],
self._cfg_explorer.cam_sem_resolution[1],
semantics=True,
rgb=self._cfg_explorer.cam_sem_rgb,
visualization=self.debug,
omni_cam=orbit_cam,
)
return
def _get_sample_points(self) -> None:
# get min, max of the mesh in the xy plane
x_min = self.domains.mesh.bounds[0][0]
x_max = self.domains.mesh.bounds[1][0]
y_min = self.domains.mesh.bounds[0][1]
y_max = self.domains.mesh.bounds[1][1]
max_area = (x_max - x_min) * (y_max - y_min)
# init sampler as qmc
sampler = qmc.Halton(d=2, scramble=False)
# determine number of samples to draw
nbr_points = int(max_area * self._cfg_explorer.points_per_m2)
# get raw sample origins
points = sampler.random(nbr_points)
points = qmc.scale(points, [x_min, y_min], [x_max, y_max])
heights = np.ones((nbr_points, 1)) * self._cfg_explorer.height
ray_origins = torch.from_numpy(np.hstack((points, heights)))
ray_origins = ray_origins.type(torch.float32)
# get ray directions in negative z direction
ray_directions = torch.zeros((nbr_points, 3), dtype=torch.float32)
ray_directions[:, 2] = -1.0
# raycast
ray_hits_world_down, _, _ = self.domains._raycast(
ray_origins * torch.tensor([1, 1, 2]), # include objects above the robot
ray_directions,
cam_rot=torch.eye(3),
pix_offset=torch.zeros_like(ray_origins),
)
z_depth = torch.abs(ray_hits_world_down[:, 2] - ray_origins[:, 2] * 2)
# filter points outside the mesh and within walls
filter_inside_mesh = torch.isfinite(z_depth) # outside mesh
filter_inside_mesh[
ray_hits_world_down[:, 2] < self._cfg_explorer.ground_height
] = False # above holes in the ground
print(f"filtered {nbr_points - filter_inside_mesh.sum()} points outside of mesh")
filter_outside_wall = z_depth > (self._cfg_explorer.min_height + ray_origins[:, 2])
print(f"filtered {nbr_points - filter_outside_wall.sum()} points inside wall")
filter_combined = torch.all(torch.stack((filter_inside_mesh, filter_outside_wall), dim=1), dim=1)
print(f"filtered total of {round(float((1 - filter_combined.sum() / nbr_points) * 100), 4)} % of points")
if self.debug:
import copy
import open3d as o3d
o3d_mesh = self.domains.mesh.as_open3d
o3d_mesh.compute_vertex_normals()
odom_vis_list = [o3d_mesh]
small_sphere = o3d.geometry.TriangleMesh.create_sphere(0.05) # successful trajectory points
camera_centers = ray_origins.cpu().numpy()
for idx, camera_center in enumerate(camera_centers):
if filter_combined[idx]:
small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) # green
else:
small_sphere.paint_uniform_color([1.0, 0.1, 0.1]) # red
odom_vis_list.append(
copy.deepcopy(small_sphere).translate((camera_center[0], camera_center[1], camera_center[2]))
)
o3d.visualization.draw_geometries(odom_vis_list)
self.camera_centers = ray_origins[filter_combined].type(torch.float32)
# free gpu memory
ray_origins = filter_combined = filter_inside_mesh = filter_outside_wall = z_depth = ray_hits_world_down = None
# enforce a minimum distance to the walls
angles = np.linspace(-np.pi, np.pi, 20)
ray_directions = tf.Rotation.from_euler("z", angles, degrees=False).as_matrix() @ np.array([1, 0, 0])
ray_hit = []
for ray_direction in ray_directions:
ray_direction_torch = (
torch.from_numpy(ray_direction).repeat(self.camera_centers.shape[0], 1).type(torch.float32)
)
ray_hits_world, _, _ = self.domains._raycast(
self.camera_centers,
ray_direction_torch,
cam_rot=torch.eye(3),
pix_offset=torch.zeros_like(ray_direction_torch),
)
ray_hit.append(
torch.norm(ray_hits_world - self.camera_centers, dim=1) > self._cfg_explorer.min_wall_distance
)
# check if every point has the minimum distance in every direction
without_wall = torch.all(torch.vstack(ray_hit), dim=0)
if self.debug:
import copy
import open3d as o3d
o3d_mesh = self.domains.mesh.as_open3d
o3d_mesh.compute_vertex_normals()
odom_vis_list = [o3d_mesh]
small_sphere = o3d.geometry.TriangleMesh.create_sphere(0.05) # successful trajectory points
camera_centers = self.camera_centers.cpu().numpy()
for idx, camera_center in enumerate(camera_centers):
if without_wall[idx]:
small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) # green
else:
small_sphere.paint_uniform_color([1.0, 0.1, 0.1]) # red
odom_vis_list.append(
copy.deepcopy(small_sphere).translate((camera_center[0], camera_center[1], camera_center[2]))
)
o3d.visualization.draw_geometries(odom_vis_list)
print(f"filtered {self.camera_centers.shape[0] - without_wall.sum()} points too close to walls")
self.camera_centers = self.camera_centers[without_wall].type(torch.float32)
self.nbr_points = self.camera_centers.shape[0]
return
def _construct_kdtree(self, num_neighbors: int = 50) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# construct kdtree to find nearest neighbors of points
kdtree = KDTree(self.camera_centers.cpu().numpy())
_, nearest_neighbors_idx = kdtree.query(self.camera_centers.cpu().numpy(), k=num_neighbors + 1, workers=-1)
# remove first neighbor as it is the point itself
nearest_neighbors_idx = torch.tensor(nearest_neighbors_idx[:, 1:], dtype=torch.int64)
# define origin and neighbor points
origin_point = torch.repeat_interleave(self.camera_centers, repeats=num_neighbors, axis=0)
neighbor_points = self.camera_centers[nearest_neighbors_idx, :].reshape(-1, 3)
distance = torch.norm(origin_point - neighbor_points, dim=1)
# check for collision with raycasting
_, _, hit_depth = self.domains._raycast(
origin_point,
origin_point - neighbor_points,
cam_rot=torch.eye(3),
pix_offset=torch.zeros_like(origin_point),
)
hit_depth[torch.isnan(hit_depth)] = self.domains._cfg.max_depth
# filter connections that collide with the environment
collision = (hit_depth < distance).reshape(-1, num_neighbors)
return nearest_neighbors_idx, collision, distance
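# Shape sketch of the returned buffers (N = number of camera centers, K = num_neighbors):
#   nearest_neighbors_idx: (N, K) indices into self.camera_centers
#   collision:             (N, K) True where the straight line to that neighbor hits the mesh
#   distance:              (N * K,) distance to each neighbor (row-major flattened)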
def _get_view_variations(self):
# the variation around the up axis (z-axis) has to be picked such that the camera does not face a wall
# done by constructing a graph of all sample points and picking the z angle so that the camera points at one of the node's neighbors
# get nearest neighbors and check for collision
nearest_neighbors_idx, collision, _ = self._construct_kdtree()
# remove nodes
all_collision_idx = torch.all(collision, dim=1)
# select neighbor with the largest distance that is not in collision
direction_neighbor_idx = torch.hstack(
[
(collision_single_node == False).nonzero().reshape(-1)[-1]
for collision_single_node in collision[~all_collision_idx, :]
]
)
direction_neighbor_idx = torch.vstack(
(torch.arange(nearest_neighbors_idx.shape[0])[~all_collision_idx], direction_neighbor_idx)
).T
selected_neighbor_idx = nearest_neighbors_idx[direction_neighbor_idx[:, 0], direction_neighbor_idx[:, 1]]
if self.debug:
import copy
import open3d as o3d
o3d_mesh = self.domains.mesh.as_open3d
o3d_mesh.compute_vertex_normals()
odom_vis_list = [o3d_mesh]
small_sphere = o3d.geometry.TriangleMesh.create_sphere(0.05) # successful trajectory points
camera_centers = self.camera_centers[nearest_neighbors_idx[0]].cpu().numpy()
for idx, camera_center in enumerate(camera_centers):
if collision[0][idx]: # in collision or nan
small_sphere.paint_uniform_color([1.0, 0.4, 0.0]) # orange
elif idx == direction_neighbor_idx[0][1]: # selected neighbor
small_sphere.paint_uniform_color([0.0, 0.0, 1.0]) # blue
else:
small_sphere.paint_uniform_color([0.1, 1.0, 0.1]) # green
odom_vis_list.append(
copy.deepcopy(small_sphere).translate((camera_center[0], camera_center[1], camera_center[2]))
)
small_sphere.paint_uniform_color([1.0, 0.1, 0.1]) # red
odom_vis_list.append(
copy.deepcopy(small_sphere).translate(
(
self.camera_centers[0][0].cpu().numpy(),
self.camera_centers[0][1].cpu().numpy(),
self.camera_centers[0][2].cpu().numpy(),
)
)
)
# check if selected neighbor idx is correct by plotting the neighbor again
small_sphere.paint_uniform_color([0.0, 0.0, 1.0]) # blue
neighbor = self.camera_centers[selected_neighbor_idx[0]].cpu().numpy()
odom_vis_list.append(copy.deepcopy(small_sphere).translate((neighbor[0], neighbor[1], neighbor[2])))
# draw line
line_set = o3d.geometry.LineSet(
o3d.utility.Vector3dVector(self.camera_centers.cpu().numpy()),
o3d.utility.Vector2iVector(np.array([[0, selected_neighbor_idx[0].cpu().numpy()]])),
)
line_set.colors = o3d.utility.Vector3dVector([[0.99, 0.99, 0.1]])
odom_vis_list.append(line_set)
o3d.visualization.draw_geometries(odom_vis_list)
# get the z angle from the direction between the origin point and the selected neighbor
neighbor_direction = self.camera_centers[~all_collision_idx, :] - self.camera_centers[selected_neighbor_idx, :]
self.z_angles = np.rad2deg(torch.atan2(neighbor_direction[:, 1], neighbor_direction[:, 0]).cpu().numpy())
if self.debug:
import copy
import open3d as o3d
o3d_mesh = self.domains.mesh.as_open3d
o3d_mesh.compute_vertex_normals()
odom_vis_list = [o3d_mesh]
small_sphere = o3d.geometry.TriangleMesh.create_sphere(0.05) # successful trajectory points
small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) # green
camera_centers = self.camera_centers.cpu().numpy()
for camera_center in camera_centers:
odom_vis_list.append(
copy.deepcopy(small_sphere).translate((camera_center[0], camera_center[1], camera_center[2]))
)
colors = [[0.99, 0.99, 0.1] for i in range(len(camera_centers))]
neighbor_map = []
selected_neighbor_idx_counter = 0
for curr_center in range(self.camera_centers.shape[0]):
if not all_collision_idx[curr_center]:
neighbor_map.append(
[curr_center, selected_neighbor_idx[selected_neighbor_idx_counter].cpu().numpy()]
)
selected_neighbor_idx_counter += 1
line_set = o3d.geometry.LineSet(
o3d.utility.Vector3dVector(camera_centers), o3d.utility.Vector2iVector(np.array(neighbor_map))
)
line_set.colors = o3d.utility.Vector3dVector(colors)
odom_vis_list.append(line_set)
o3d.visualization.draw_geometries(odom_vis_list)
# filter points that have no neighbors that are not in collision and update number of points
self.camera_centers = self.camera_centers[~all_collision_idx, :]
self.nbr_points = self.camera_centers.shape[0]
# vary the rotation of the forward and horizontal axis (in camera frame) as a uniform distribution within the limits
self.x_angles = np.random.uniform(
self._cfg_explorer.x_angle_range[0], self._cfg_explorer.x_angle_range[1], self.nbr_points
)
self.y_angles = np.random.uniform(
self._cfg_explorer.y_angle_range[0], self._cfg_explorer.y_angle_range[1], self.nbr_points
)
return
def _setup_conv(self):
# index array
self.nbr_faces = len(self.domains.mesh.metadata["_ply_raw"]["face"]["data"])
self.face_idx = torch.zeros(self.nbr_faces, dtype=torch.int64)
return
# # save data helpers ###
# def _init_save(self, save_path: Optional[str] = None) -> None:
# if save_path is not None:
# self._cfg.save_path = save_path
# # create directories
# os.makedirs(self._cfg.save_path, exist_ok=True)
# os.makedirs(os.path.join(self._cfg.save_path, "semantics"), exist_ok=True)
# os.makedirs(os.path.join(self._cfg.save_path, "depth"), exist_ok=True)
# # save camera configurations
# intrinsics = np.zeros((len(self.cameras), 9))
# for idx, curr_cam in enumerate(self.cameras):
# intrinsics[idx] = curr_cam.data.intrinsic_matrices[0].cpu().numpy().flatten()
# np.savetxt(os.path.join(self._cfg.save_path, "intrinsics.txt"), intrinsics, delimiter=",")
# def _save_data(self) -> None:
# # TODO: use replicator writer, currently too slow
# for camera in self.cameras:
# suffix = f"_{camera.cfg.prim_path}"
# cam_suffix = f"_cam{cam_idx}" if len(self.cameras) > 1 else ""
# # SEMANTICS
# if cam_data.semantic:
# cv2.imwrite(
# os.path.join(self._cfg.save_path, "semantics", f"{idx}".zfill(4) + cam_suffix + ".png"),
# cv2.cvtColor(cam_data.render_sem.astype(np.uint8), cv2.COLOR_RGB2BGR),
# )
# # DEPTH
# if cam_data.depth:
# cv2.imwrite(
# os.path.join(self._cfg.save_path, "depth", f"{idx}".zfill(4) + cam_suffix + ".png"),
# cam_data.render_depth,
# )
# # camera pose in robotics frame (x forward, y left, z up)
# rot_quat = tf.Rotation.from_matrix(cam_data.rot.cpu().numpy()).as_quat() # get quat as (x, y, z, w) format
# pose = np.hstack((cam_data.pos.cpu().numpy(), rot_quat))
# cam_data.poses = np.append(cam_data.poses, pose.reshape(1, -1), axis=0)
# return
# def _end_save(self) -> None:
# # save camera poses
# for idx, cam_data in enumerate(self.cameras):
# np.savetxt(
# os.path.join(self._cfg.save_path, f"camera_extrinsic_cam{idx}.txt"),
# cam_data.poses[1:],
# delimiter=",",
# )
# return
| 28,423 | Python | 42.729231 | 158 | 0.582697 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/matterport_exploration/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .random_exploration import RandomExplorer
__all__ = ["RandomExplorer"]
# EoF
| 233 | Python | 18.499998 | 60 | 0.729614 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/run_scripts/anymal_run_warehouse.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Launch Omniverse Toolkit first.
"""
# python
import argparse
import json
# omni-isaac-orbit
from omni.isaac.kit import SimulationApp
# add argparse arguments
parser = argparse.ArgumentParser("Welcome to Orbit: Omniverse Robotics Environments!")
parser.add_argument("--headless", action="store_false", default=True, help="Force display off at all times.")
args_cli = parser.parse_args()
# launch omniverse app
config = {"headless": args_cli.headless}
launcher = SimulationApp(config)
"""
Rest everything follows.
"""
import os
# python
from typing import Tuple
import numpy as np
# isaac-core
from omni.isaac.core.objects.ground_plane import GroundPlane
from omni.isaac.core.utils import extensions
# enable ROS bridge extension --> otherwise rospy cannot be imported
extensions.enable_extension("omni.isaac.ros_bridge")
extensions.enable_extension("omni.kit.manipulator.viewport")
# isaac-anymal
from omni.isaac.anymal.config import (
ANYmalCfg,
ANYmalEvaluatorConfig,
SensorCfg,
TwistControllerCfg,
VIPlannerCfg,
)
from omni.isaac.anymal.viplanner.evaluator import ANYmalOrbitEvaluator
# orbit-assets
from omni.isaac.assets import ASSETS_RESOURCES_DIR
# isaac-carla
from omni.isaac.carla.configs import DATA_DIR, CarlaExplorerConfig, CarlaLoaderConfig
from omni.isaac.carla.scripts import CarlaExplorer, CarlaLoader
class ANYmalRunCarla(ANYmalOrbitEvaluator):
def __init__(
self,
cfg: ANYmalEvaluatorConfig,
cfg_carla: CarlaLoaderConfig = CarlaLoaderConfig(),
cfg_explore: CarlaExplorerConfig = CarlaExplorerConfig(),
cfg_anymal: ANYmalCfg = ANYmalCfg(),
cfg_planner: VIPlannerCfg = VIPlannerCfg(),
) -> None:
# configs
self._cfg_carla = cfg_carla
self._cfg_explore = cfg_explore
# run init
super().__init__(cfg, cfg_anymal, cfg_planner)
return
def load_scene(self) -> None:
print("Loading scene...")
if self._cfg_carla.groundplane:
self._cfg_carla.groundplane = False
self._groundplane = True
else:
self._groundplane = False
self._loader = CarlaLoader(self._cfg_carla)
self._loader.load()
print("DONE")
return
def explore_env(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
explorer: CarlaExplorer = CarlaExplorer(self._cfg_explore, self._cfg_carla)
explorer._get_cam_position()
nearest_neighbor_idx, collision, distance = explorer._construct_kdtree(num_neighbors=self._cfg.num_connections)
if self._groundplane:
# add groundplane back
_ = GroundPlane(
"/World/GroundPlane", z_position=0.25, physics_material=self._loader.material, visible=False
)
return explorer.camera_positions, nearest_neighbor_idx, collision, distance
def get_env_name(self) -> str:
return os.path.splitext(self._cfg_carla.usd_name)[0]
def _load_waypoints(self) -> None:
"""
Expected that the waypoints have been recorded with the omni.isaac.waypoint extension and saved in .json format.
Structure of the json file:
{
start: [x, y, z],
end: [x, y, z],
waypoints: [[x, y, z], [x, y, z], ...]
}
"""
if self._cfg.waypoint_file.endswith(".json"):
self.waypoints = json.load(open(self._cfg.waypoint_file))
else:
self.waypoints = json.load(open(self._cfg.waypoint_file + ".json"))
# apply scale
self.waypoints["start"] = [x * self._cfg_carla.scale for x in self.waypoints["start"]]
self.waypoints["end"] = [x * self._cfg_carla.scale for x in self.waypoints["end"]]
self.waypoints["waypoints"] = [
[x * self._cfg_carla.scale for x in waypoint] for waypoint in self.waypoints["waypoints"]
]
# draw waypoints
self.draw_interface.draw_points([self.waypoints["start"]], [(1.0, 0.4, 0.0, 1.0)], [(10)]) # orange
self.draw_interface.draw_points([self.waypoints["end"]], [(0.0, 1.0, 0.0, 1.0)], [(10)]) # green
self.draw_interface.draw_points(
self.waypoints["waypoints"],
[(0.0, 0.0, 1.0, 1.0)] * len(self.waypoints["waypoints"]), # blue
[(10)] * len(self.waypoints["waypoints"]),
)
# attach end as further goal-point
self.waypoints["waypoints"].append(self.waypoints["end"])
return
if __name__ == "__main__":
# configs
cfg = ANYmalEvaluatorConfig(
handcrafted_waypoint_file=None, # "warehouse_paper", #
cost_map_dir="/home/pascal/viplanner/imperative_learning/data/warehouse_multiple_shelves_without_ppl_ext_sem_space", #
cost_map_name="cost_map_sem",
models=[
os.path.join(
ASSETS_RESOURCES_DIR,
"vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDepSem_costSem_optimSGD_combi_more_data_neg05",
),
os.path.join(
ASSETS_RESOURCES_DIR, "vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDep_costSem_optimSGD_depth_carla"
),
],
multi_model=False,
num_pairs=500,
use_prev_results=True,
repeat_waypoints=None,
)
cfg_carla = CarlaLoaderConfig(
root_path="/home/pascal/viplanner/env/warehouse",
usd_name="warehouse_multiple_shelves_without_ppl_ext_sem_space.usd", #
suffix="",
prim_path="/World/Warehouse",
scale=1.0,
axis_up="Z",
cw_config_file=None,
sem_mesh_to_class_map=os.path.join(DATA_DIR, "warehouse", "keyword_mapping.yml"),
groundplane=False,
people_config_file=os.path.join(DATA_DIR, "warehouse", "people_cfg.yml"),
vehicle_config_file=None,
)
cfg_carla_explore = CarlaExplorerConfig(
nb_more_people=None,
max_cam_recordings=100,
points_per_m2=0.3,
space_limiter="SM_WallA",
carla_filter=None,
indoor_filter=False,
)
cfg_planner = VIPlannerCfg(
model_dir="/home/pascal/viplanner/imperative_learning/code/iPlanner/iplanner/models",
# model_dir=os.path.join(
# ASSETS_RESOURCES_DIR,
# "vip_models/plannernet_env2azQ1b91cZZ_cam_mount_ep100_inputDepSem_costSem_optimSGD_new_cam_mount_combi_lossWidthMod_wgoal4.0_warehouse",
# ),
sem_origin="isaac",
twist_controller_cfg=TwistControllerCfg(
lookAheadDistance=1.2,
stopDisThre=0.2,
),
use_mount_cam=True,
conv_dist=0.8,
viplanner=False,
)
cfg_anymal = ANYmalCfg(
anymal_type=1, # 0: ANYmal C, 1: ANYmal D
sensor=SensorCfg(
cam_front_rgb=False,
cam_front_depth=False,
cam_viplanner_rgb=True,
cam_viplanner_depth=True,
),
rec_path=True,
rec_sensor=False,
follow_camera=True,
)
# init class
run = ANYmalRunCarla(
cfg=cfg,
cfg_carla=cfg_carla,
cfg_explore=cfg_carla_explore,
cfg_anymal=cfg_anymal,
cfg_planner=cfg_planner,
)
run.setup()
if not cfg.multi_model and cfg.repeat_waypoints is None:
run.run_single()
elif not cfg.multi_model:
run.run_repeat()
else:
run.run_multi()
# Close the simulator
launcher.close()
# EoF
| 7,604 | Python | 31.224576 | 150 | 0.621778 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/run_scripts/anymal_run_carla.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Launch Omniverse Toolkit first.
"""
# python
import argparse
import json
# omni-isaac-orbit
from omni.isaac.kit import SimulationApp
# add argparse arguments
parser = argparse.ArgumentParser("Welcome to Orbit: Omniverse Robotics Environments!")
parser.add_argument("--headless", action="store_false", default=True, help="Force display off at all times.")
args_cli = parser.parse_args()
# launch omniverse app
config = {"headless": args_cli.headless}
launcher = SimulationApp(config)
"""
Rest everything follows.
"""
import os
# python
from typing import Tuple
import numpy as np
# isaac-core
from omni.isaac.core.objects.ground_plane import GroundPlane
from omni.isaac.core.utils import extensions
# enable ROS bridge extension --> otherwise rospy cannot be imported
extensions.enable_extension("omni.isaac.ros_bridge")
extensions.enable_extension("omni.kit.manipulator.viewport")
# isaac-anymal
from omni.isaac.anymal.config import (
ANYmalCfg,
ANYmalEvaluatorConfig,
SensorCfg,
TwistControllerCfg,
VIPlannerCfg,
)
from omni.isaac.anymal.viplanner.evaluator import ANYmalOrbitEvaluator
# orbit-assets
from omni.isaac.assets import ASSETS_RESOURCES_DIR
# isaac-carla
from omni.isaac.carla.configs import CarlaExplorerConfig, CarlaLoaderConfig
from omni.isaac.carla.scripts import CarlaExplorer, CarlaLoader
class ANYmalRunCarla(ANYmalOrbitEvaluator):
def __init__(
self,
cfg: ANYmalEvaluatorConfig,
cfg_carla: CarlaLoaderConfig = CarlaLoaderConfig(),
cfg_explore: CarlaExplorerConfig = CarlaExplorerConfig(),
cfg_anymal: ANYmalCfg = ANYmalCfg(),
cfg_planner: VIPlannerCfg = VIPlannerCfg(),
) -> None:
# configs
self._cfg_carla = cfg_carla
self._cfg_explore = cfg_explore
# run init
super().__init__(cfg, cfg_anymal, cfg_planner)
return
def load_scene(self) -> None:
print("Loading scene...")
if self._cfg_carla.groundplane:
self._cfg_carla.groundplane = False
self._groundplane = True
else:
self._groundplane = False
self._loader = CarlaLoader(self._cfg_carla)
self._loader.load()
print("DONE")
return
def explore_env(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
explorer: CarlaExplorer = CarlaExplorer(self._cfg_explore, self._cfg_carla)
explorer._get_cam_position()
nearest_neighbor_idx, collision, distance = explorer._construct_kdtree(num_neighbors=self._cfg.num_connections)
if self._groundplane:
# add groundplane back
_ = GroundPlane(
"/World/GroundPlane", z_position=0.25, physics_material=self._loader.material, visible=False
)
return explorer.camera_positions, nearest_neighbor_idx, collision, distance
def get_env_name(self) -> str:
return os.path.splitext(self._cfg_carla.usd_name)[0]
def _load_waypoints(self, scale: bool = False) -> None:
"""
Expected that the waypoints have been recorded with the omni.isaac.waypoint extension and saved in .json format.
Structure of the json file:
{
start: [x, y, z],
end: [x, y, z],
waypoints: [[x, y, z], [x, y, z], ...]
}
"""
if self._cfg.waypoint_file.endswith(".json"):
self.waypoints = json.load(open(self._cfg.waypoint_file))
else:
self.waypoints = json.load(open(self._cfg.waypoint_file + ".json"))
# apply scale
if scale:
self.waypoints["start"] = [x * self._cfg_carla.scale for x in self.waypoints["start"]]
self.waypoints["end"] = [x * self._cfg_carla.scale for x in self.waypoints["end"]]
self.waypoints["waypoints"] = [
[x * self._cfg_carla.scale for x in waypoint] for waypoint in self.waypoints["waypoints"]
]
# draw waypoints
self.draw_interface.draw_points([self.waypoints["start"]], [(1.0, 0.4, 0.0, 1.0)], [(10)]) # orange
self.draw_interface.draw_points([self.waypoints["end"]], [(0.0, 1.0, 0.0, 1.0)], [(10)]) # green
self.draw_interface.draw_points(
self.waypoints["waypoints"],
[(0.0, 0.0, 1.0, 1.0)] * len(self.waypoints["waypoints"]), # blue
[(10)] * len(self.waypoints["waypoints"]),
)
# attach end as further goal-point
self.waypoints["waypoints"].append(self.waypoints["end"])
return
if __name__ == "__main__":
# configs
cfg = ANYmalEvaluatorConfig(
handcrafted_waypoint_file="crosswalk_paper_changed", # "waypoints_carla_eval", # "crosswalk_paper_extended_3" "crosswalk_paper_extended_5"
cost_map_dir="/home/pascal/viplanner/imperative_learning/data/town01_cam_mount_train", # use the map without people added !
cost_map_name="cost_map_sem_sharpend",
models=[
os.path.join(
ASSETS_RESOURCES_DIR,
"vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDepSem_costSem_optimSGD_combi_more_data_neg05",
),
os.path.join(
ASSETS_RESOURCES_DIR, "vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDep_costSem_optimSGD_depth_carla"
),
],
multi_model=False,
num_pairs=500,
use_prev_results=False,
repeat_waypoints=5,
)
cfg_carla = CarlaLoaderConfig(
groundplane=True,
)
cfg_carla_explore = CarlaExplorerConfig(
nb_more_people=0,
max_cam_recordings=15000,
points_per_m2=0.3,
)
cfg_planner = VIPlannerCfg(
# model_dir=os.path.join(ASSETS_RESOURCES_DIR,"vip_models/plannernet_env2azQ1b91cZZ_ep100_inputDep_costSem_optimSGD_depth_carla"),
model_dir=os.path.join(
ASSETS_RESOURCES_DIR,
"vip_models/plannernet_env2azQ1b91cZZ_cam_mount_ep100_inputDepSem_costSem_optimSGD_new_cam_mount_combi_lossWidthMod_wgoal4.0_warehouse",
# "/home/pascal/viplanner/imperative_learning/code/iPlanner/iplanner/models",
),
sem_origin="isaac",
twist_controller_cfg=TwistControllerCfg(
lookAheadDistance=1.2,
),
use_mount_cam=True,
conv_dist=0.8,
viplanner=True,
# fear_threshold=1.0,
)
cfg_anymal = ANYmalCfg(
anymal_type=1, # 0: ANYmal C, 1: ANYmal D
sensor=SensorCfg(
cam_front_rgb=False,
cam_front_depth=False,
cam_viplanner_rgb=True,
cam_viplanner_depth=True,
),
)
# init class
run = ANYmalRunCarla(
cfg=cfg,
cfg_carla=cfg_carla,
cfg_explore=cfg_carla_explore,
cfg_anymal=cfg_anymal,
cfg_planner=cfg_planner,
)
run.setup()
if not cfg.multi_model and cfg.repeat_waypoints is None:
run.run_single()
elif not cfg.multi_model:
run.run_repeat()
else:
run.run_multi()
# Close the simulator
launcher.close()
# EoF
| 7,236 | Python | 31.599099 | 148 | 0.626727 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/carla_exploration/config.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from dataclasses import dataclass
from typing import Optional, Tuple
from omni.isaac.carla.config import DATA_DIR
# isaac-orbit
from omni.isaac.orbit.sensors.camera import PinholeCameraCfg
@dataclass
class CarlaExplorerConfig:
"""Configuration for the CarlaMap class."""
# coverage parameters
points_per_m2: float = 0.5
obs_loss_threshold: float = 0.8
max_cam_recordings: Optional[int] = 10000 # if None, no limitation is applied
# indoor filter (for outdoor maps, filter out points inside buildings that would otherwise count as traversable; for indoor maps set to False)
indoor_filter: bool = True
carla_filter: Optional[str] = os.path.join(DATA_DIR, "town01", "area_filter_cfg.yml")
# nomoko model
nomoko_model: bool = False
# area limiter --> only select the area within meshes whose prim names contain this string (e.g. "Road_Sidewalk")
space_limiter: Optional[str] = "Road_Sidewalk" # carla: "Road_Sidewalk" nomoko None park: MergedRoad05
# robot height
robot_height = 0.7 # m
# depth camera
camera_cfg_depth: PinholeCameraCfg = PinholeCameraCfg(
sensor_tick=0,
height=480,
width=848,
data_types=["distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(
focal_length=1.93, clipping_range=(0.01, 1.0e5), horizontal_aperture=3.8
),
)
camera_intrinsics_depth: Optional[Tuple[float]] = None
# ANYmal D/C realsense: (423.54608, 0.0, 427.69815, 0.0, 423.54608, 240.17773, 0.0, 0.0, 1.0)
# RealSense D455: (430.31607, 0.0, 428.28408, 0.0, 430.31607, 244.00695, 0.0, 0.0, 1.0)
# ANYmal D wide_angle_camera: 1.0 <-> ANYmal C realsense: 1.93 <-> RealSense D455: 1.93
camera_prim_depth: str = "/World/CameraSensor_depth"
# semantic camera
camera_cfg_sem: PinholeCameraCfg = PinholeCameraCfg(
sensor_tick=0,
height=720, # 480, # 1080
width=1280, # 848, # 1440
data_types=["rgb", "semantic_segmentation"],
usd_params=PinholeCameraCfg.UsdCameraCfg(
focal_length=1.93, clipping_range=(0.01, 1.0e5), horizontal_aperture=3.8
),
)
# ANYmal D wide_angle_camera: (1440, 1080) <-> ANYmal C realsense (848, 480) <-> RealSense D455 (1280, 720)
# ANYmal D wide_angle_camera: 1.93 <-> ANYmal C realsense: 1.93 <-> RealSense D455: 1.93
camera_intrinsics_sem: Optional[Tuple[float]] = None
# ANYmal D wide_angle_camera: (575.60504, 0.0, 745.73121, 0.0, 578.56484, 519.52070, 0.0, 0.0, 1.0)
# ANYmal C realsense: (423.54608, 0.0, 427.69815, 0.0, 423.54608, 240.17773, 0.0, 0.0, 1.0)
# RealSense D455: (644.15496, 0.0, 639.53125, 0.0, 643.49212, 366.30880, 0.0, 0.0, 1.0)
camera_prim_sem: str = "/World/CameraSensor_sem"
x_angle_range: Tuple[float, float] = (-5, 5) # downtilt angle of the camera in degrees
y_angle_range: Tuple[float, float] = (
-2,
5,
) # downtilt angle of the camera in degrees --> isaac convention, positive is downwards
# image suffix
depth_suffix = "_cam0"
sem_suffix = "_cam1"
# transformation from depth (src) to semantic camera (target)
tf_pos: tuple = (0.0, 0.0, 0.0) # (translation in depth frame)
# ANYmal D: (-0.002, 0.025, 0.042) <-> ANYmal C and RealSense D455: (0.0, 0.0, 0.0)
tf_quat: tuple = (0.0, 0.0, 0.0, 1.0) # xyzw quaternion format (rotation in depth frame)
# ANYmal D: (0.001, 0.137, -0.000, 0.991) <-> ANYmal C and RealSense D455: (0.0, 0.0, 0.0, 1.0)
tf_quat_convention: str = "roll-pitch-yaw" # or "isaac"
# NOTE: if the quat follows the roll-pitch-yaw convention, i.e. x-forward, y-right, z-down, will be converted to the isaac convention
# high resolution depth for reconstruction (in city environments the low-res depth can otherwise lead to artifacts)
# if enabled, the rgb/semantic camera also renders depth and its depth images are used for reconstruction
high_res_depth: bool = False
# output_dir
output_root: Optional[str] = None # if None, output dir is stored under root_dir
output_dir_name: str = "town01"
ros_p_mat: bool = True # save intrinsic matrix in ros P-matrix format
depth_scale: float = 1000.0 # scale depth values before saving s.t. mm resolution can be achieved
# add more people to the scene
nb_more_people: Optional[int] = 1200 # if None, no people are added
random_seed: Optional[int] = 42 # if None, no seed is set
@property
def output_dir(self) -> str:
if self.output_root is not None:
return os.path.join(self.output_root, self.output_dir_name)
else:
return os.path.join(self.root_path, self.output_dir_name)
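# Example (sketch, hypothetical paths): with output_root="/data/exploration" the property resolves to
# "/data/exploration/town01"; with output_root=None it falls back to self.root_path, which is expected
# to be available on the config (e.g. mirroring the loader's root path).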
| 4,838 | Python | 46.910891 | 137 | 0.648615 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/intern/carla_exploration/explorer.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import random
import time
from typing import Tuple
# omniverse
import carb
import cv2
# python
import numpy as np
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import scipy.spatial.transform as tf
import yaml
# isaac-carla
from omni.isaac.carla.configs import CarlaExplorerConfig, CarlaLoaderConfig
from omni.isaac.core.objects import VisualCuboid
# isaac-core
from omni.isaac.core.simulation_context import SimulationContext
# isaac-orbit
from omni.isaac.orbit.sensors.camera import Camera
from omni.isaac.orbit.utils.math import convert_quat
from omni.physx import get_physx_scene_query_interface
from pxr import Gf, Usd, UsdGeom
from scipy.spatial import KDTree
from scipy.stats import qmc
# isaac-anymal
from viplanner.config.viplanner_sem_meta import VIPlannerSemMetaHandler
from .loader import CarlaLoader
class CarlaExplorer:
debug: bool = False
def __init__(self, cfg: CarlaExplorerConfig, cfg_load: CarlaLoaderConfig) -> None:
self._cfg = cfg
self._cfg_load = cfg_load
# check simulation context
if SimulationContext.instance():
self.sim: SimulationContext = SimulationContext.instance()
else:
carb.log_error("CarlaExplorer can only be loaded in a running simulationcontext!\nRun CarlaLoader!")
# Acquire draw interface
self.draw_interface = omni_debug_draw.acquire_debug_draw_interface()
# VIPlanner Semantic Meta Handler and mesh to sem class mapping
if self._cfg_load.sem_mesh_to_class_map is not None:
self.vip_sem_meta: VIPlannerSemMetaHandler = VIPlannerSemMetaHandler()
with open(self._cfg_load.sem_mesh_to_class_map) as f:
self.class_keywords = yaml.safe_load(f)
# init buffers
self.camera_positions: np.ndarray = np.array([])
self.cam_angles: np.ndarray = np.array([])
self.nbr_points: int = 0
# get camera
self.camera_depth: Camera = None
self.camera_semantic: Camera = None
return
def explore(self) -> None:
# init camera
self._camera_init()
# define camera positions and targets
self._get_cam_position()
self._get_cam_target()
# record rgb, depth and semantic segmentation at the camera positions
self._domain_recorder()
return
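# Typical usage (sketch, assuming the scene was loaded beforehand, e.g. via the run scripts):
#   loader = CarlaLoader(cfg_load); loader.load()
#   explorer = CarlaExplorer(cfg, cfg_load)
#   explorer.explore()  # samples camera poses and records rgb / depth / semantic images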
""" Exploration Helper Functions """
def _raycast_check(self, ray_origins: np.ndarray, ray_directions: np.ndarray, max_distance: float):
"""
Check which object is hit by the raycast and give back the position, loss and class name of the hit object
"""
start = time.time()
hits = [
get_physx_scene_query_interface().raycast_closest(
carb.Float3(ray_single), carb.Float3(ray_dir), max_distance
)
for ray_single, ray_dir in zip(ray_origins, ray_directions)
]
end = time.time()
print("took ", end - start, "s for raycast the possible camera points")
# if point achieved a hit, get the hit point and the hit object
hit_pt_obj = [
(np.array(single_hit["position"]), single_hit["collision"].lower())
for single_hit in hits
if single_hit["hit"]
]
hit_idx = [idx for idx, single_hit in enumerate(hits) if single_hit["hit"]]
# get offset
offset = np.array([0.0, 0.0, self._cfg.robot_height])
# get semantic class for each points and the corresponding cost
hit_class_name = np.zeros(len(hit_pt_obj), dtype=str)
hit_loss = np.zeros(len(hit_pt_obj))
hit_position = np.zeros((len(hit_pt_obj), 3))
if self._cfg_load.sem_mesh_to_class_map is not None:
for idx, single_hit in enumerate(hit_pt_obj):
success = False
for class_name, keywords in self.class_keywords.items():
if any([keyword.lower() in single_hit[1] for keyword in keywords]):
hit_class_name[idx] = class_name
hit_loss[idx] = self.vip_sem_meta.class_loss[class_name]
hit_position[idx] = single_hit[0] + offset # add offset to get the center of the point
success = True
break
assert success, f"No class found for hit object: {single_hit}"
else:
hit_position = np.array([single_hit[0] + offset for single_hit in hit_pt_obj])
return hit_position, hit_loss, hit_class_name, hit_idx
def _get_cam_position(self) -> None:
"""
Get suitable robot positions for exploration of the map. The positions form a dense cover of the map.
The sampling density (points_per_m2) and the loss threshold below which a hit point counts as a
suitable robot position (obs_loss_threshold) are taken from the explorer configuration
(obs_loss_threshold is chosen s.t. no points end up on non-traversable terrain; TODO: change at some point).
"""
# get x-y-z coordinate limits where the exploration of the mesh should take place
# for Carla, Town01_Opt is the explored map, i.e. the city surrounded by the road
# --> get min and max over the maximum extent of the Road_Sidewalk meshes
# IMPORTANT: y-up!!!
mesh_prims, mesh_prims_name = CarlaLoader.get_mesh_prims(self._cfg_load.prim_path + self._cfg_load.suffix)
if self._cfg.space_limiter:
# if space limiter is given, only consider the meshes with the space limiter in the name
mesh_idx = [
idx
for idx, prim_name in enumerate(mesh_prims_name)
if self._cfg.space_limiter.lower() in prim_name.lower()
]
else:
# remove ground plane since it has infinite extent
mesh_idx = [idx for idx, prim_name in enumerate(mesh_prims_name) if "groundplane" not in prim_name.lower()]
mesh_prims = [mesh_prims[idx] for idx in mesh_idx]
bbox_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default", "render"])
bbox = [self.compute_bbox_with_cache(bbox_cache, curr_prim) for curr_prim in mesh_prims]
prim_max = np.vstack([list(prim_range.GetMax()) for prim_range in bbox])
prim_min = np.vstack([list(prim_range.GetMin()) for prim_range in bbox])
x_min, y_min, z_min = np.min(prim_min, axis=0)
x_max, y_max, z_max = np.max(prim_max, axis=0)
max_area = (x_max - x_min) * (y_max - y_min)
max_distance = (z_max - z_min) + 10 # 10m extra
print("Exploration area: ", round(max_area / (1000) ** 2, 3), "km^2 or ", max_area, "m^2")
# init sampler as qmc
sampler = qmc.Halton(d=2, scramble=False)
# determine number of samples to draw
nbr_points = int(max_area * self._cfg.points_per_m2)
# get raw sample origins
points = sampler.random(nbr_points)
if self._cfg.nomoko_model:
points = qmc.scale(points, [y_min, x_min], [y_max, x_max])
else:
points = qmc.scale(points, [x_min, y_min], [x_max, y_max])
if self._cfg.indoor_filter:
heights = np.ones((nbr_points, 1)) * (z_max + 2 * self._cfg.robot_height) # above the map highest point
else:
heights = np.ones((nbr_points, 1)) * (z_min + 2 * self._cfg.robot_height) # above the map lowest point
ray_origins = np.hstack((points, heights))
# get ray directions in negative z direction
ray_directions = np.zeros((nbr_points, 3))
ray_directions[:, 2] = -1.0
# perform raycast check
hit_position, hit_loss, _, _ = self._raycast_check(ray_origins, ray_directions, max_distance)
# filter all indexes which are not in traversable terrain
camera_positions = hit_position[hit_loss < self._cfg.obs_loss_threshold]
# indoor filter
if self._cfg.indoor_filter:
# the check on all 4 sides can only be performed with semantics
if self._cfg_load.sem_mesh_to_class_map is not None:
# filter all points within buildings by checking if there is a hit above the point and, if so, hits on all 4 sides of it
# rays are always sent from both sides since the mesh is only one-sided
# check if hit above the point
camera_positions_elevated = camera_positions + np.array([0.0, 0.0, 100])
_, hit_loss_low, _, hit_idx_low = self._raycast_check(
camera_positions_elevated, ray_directions, max_distance=200
)
ray_directions[:, 2] = 1.0
_, hit_loss_high, _, hit_idx_high = self._raycast_check(
camera_positions, ray_directions, max_distance=200
)
hit_idx_low = np.array(hit_idx_low)[hit_loss_low >= self._cfg.obs_loss_threshold]
hit_idx_high = np.array(hit_idx_high)[hit_loss_high >= self._cfg.obs_loss_threshold]
hit_idx = np.unique(np.hstack([hit_idx_low, hit_idx_high]))
if len(hit_idx) > 0:
# check hits on the sides of the point
ray_directions[:, 2] = 0.0 # reset ray direction
ray_directions[:, 0] = 1.0
_, hit_loss, _, hit_idx_front = self._raycast_check(
camera_positions[hit_idx], ray_directions[hit_idx], max_distance=10
)
traversable_front_pos = np.ones((len(hit_idx), 1), dtype=bool)
traversable_front_pos[hit_idx_front, 0] = hit_loss < self._cfg.obs_loss_threshold
ray_directions[:, 0] = -1.0
_, hit_loss, _, hit_idx_front = self._raycast_check(
camera_positions[hit_idx] + np.array([10, 0.0, 0.0]), ray_directions[hit_idx], max_distance=10
)
traversable_front_neg = np.ones((len(hit_idx), 1), dtype=bool)
traversable_front_neg[hit_idx_front, 0] = hit_loss < self._cfg.obs_loss_threshold
traversable_front = np.all(np.hstack([traversable_front_pos, traversable_front_neg]), axis=1)
ray_directions[:, 0] = -1.0
_, hit_loss, _, hit_idx_back = self._raycast_check(
camera_positions[hit_idx], ray_directions[hit_idx], max_distance=10
)
traversable_back_neg = np.ones((len(hit_idx), 1), dtype=bool)
traversable_back_neg[hit_idx_back, 0] = hit_loss < self._cfg.obs_loss_threshold
ray_directions[:, 0] = 1.0
_, hit_loss, _, hit_idx_back = self._raycast_check(
camera_positions[hit_idx] - np.array([10, 0.0, 0.0]), ray_directions[hit_idx], max_distance=10
)
traversable_back_pos = np.ones((len(hit_idx), 1), dtype=bool)
traversable_back_pos[hit_idx_back, 0] = hit_loss < self._cfg.obs_loss_threshold
traversable_back = np.all(np.hstack([traversable_back_pos, traversable_back_neg]), axis=1)
ray_directions[:, 0] = 0.0 # reset ray direction
ray_directions[:, 1] = 1.0
_, hit_loss, _, hit_idx_right = self._raycast_check(
camera_positions[hit_idx], ray_directions[hit_idx], max_distance=10
)
traversable_right_pos = np.ones((len(hit_idx), 1), dtype=bool)
traversable_right_pos[hit_idx_right, 0] = hit_loss < self._cfg.obs_loss_threshold
ray_directions[:, 1] = -1.0
_, hit_loss, _, hit_idx_right = self._raycast_check(
camera_positions[hit_idx] + np.array([0.0, 10, 0.0]), ray_directions[hit_idx], max_distance=10
)
traversable_right_neg = np.ones((len(hit_idx), 1), dtype=bool)
traversable_right_neg[hit_idx_right, 0] = hit_loss < self._cfg.obs_loss_threshold
traversable_right = np.all(np.hstack([traversable_right_pos, traversable_right_neg]), axis=1)
ray_directions[:, 1] = -1.0
_, hit_loss, _, hit_idx_left = self._raycast_check(
camera_positions[hit_idx], ray_directions[hit_idx], max_distance=10
)
traversable_left_neg = np.ones((len(hit_idx), 1), dtype=bool)
traversable_left_neg[hit_idx_left, 0] = hit_loss < self._cfg.obs_loss_threshold
ray_directions[:, 1] = -1.0
_, hit_loss, _, hit_idx_left = self._raycast_check(
camera_positions[hit_idx] - np.array([0.0, 10, 0.0]), ray_directions[hit_idx], max_distance=10
)
traversable_left_pos = np.ones((len(hit_idx), 1), dtype=bool)
traversable_left_pos[hit_idx_left, 0] = hit_loss < self._cfg.obs_loss_threshold
traversable_left = np.all(np.hstack([traversable_left_neg, traversable_left_pos]), axis=1)
# filter points
traversable_all = np.vstack(
[traversable_front, traversable_back, traversable_right, traversable_left]
).all(axis=0)
hit_idx_non_traverable = np.array(hit_idx)[~traversable_all]
else:
hit_idx_non_traverable = []
else:
# semantics not available -> check compared to mean height
hit_idx_non_traverable = np.where(camera_positions[:, 2] > np.mean(camera_positions[:, 2]))[0]
else:
hit_idx_non_traverable = []
# update camera positions and nbr of points
if len(hit_idx_non_traverable) > 0:
self.camera_positions = np.delete(camera_positions, hit_idx_non_traverable, axis=0)
else:
self.camera_positions = camera_positions
# add more people in the scene
if self._cfg.nb_more_people is not None:
random.seed(self._cfg.random_seed)
pts_idx = random.sample(range(len(self.camera_positions)), self._cfg.nb_more_people)
if self._cfg_load.scale == 1.0:
scale_people = 100
else:
scale_people = 1
# add people and remove the previously added offset
offset = np.array([0.0, 0.0, self._cfg.robot_height])
for idx in pts_idx:
CarlaLoader.insert_single_person(f"random_{idx}", self.camera_positions[idx] - offset, scale_people)
self.camera_positions = np.delete(self.camera_positions, pts_idx, axis=0)
if self._cfg.carla_filter:
# for CARLA filter large open spaces
# Extract the x and y coordinates from the odom poses
x_coords = self.camera_positions[:, 0]
y_coords = self.camera_positions[:, 1]
# load file
# Filter the point cloud based on the square coordinates
mask_area_1 = (y_coords >= 100.5) & (y_coords <= 325.5) & (x_coords >= 208.9) & (x_coords <= 317.8)
mask_area_2 = (y_coords >= 12.7) & (y_coords <= 80.6) & (x_coords >= 190.3) & (x_coords <= 315.8)
mask_area_3 = (y_coords >= 10.0) & (y_coords <= 80.0) & (x_coords >= 123.56) & (x_coords <= 139.37)
combined_mask = mask_area_1 | mask_area_2 | mask_area_3
points_free_space = ~combined_mask
self.camera_positions = self.camera_positions[points_free_space]
self.nbr_points = len(self.camera_positions)
# plot dense cover of the mesh
if self.debug:
self.sim.play()
self.draw_interface.draw_points(
self.camera_positions, [(1, 1, 1, 1)] * len(self.camera_positions), [5] * len(self.camera_positions)
)
self.draw_interface.draw_points(
camera_positions[hit_idx_non_traverable],
[(1.0, 0.5, 0, 1)] * len(camera_positions[hit_idx_non_traverable]),
[5] * len(camera_positions[hit_idx_non_traverable]),
)
for count in range(100000):
self.sim.step()
self.sim.pause()
return
def _construct_kdtree(self, num_neighbors: int = 50) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# construct kdtree to find nearest neighbors of points
camera_position_unified_height = np.copy(self.camera_positions)
camera_position_unified_height[:, 2] = np.max(self.camera_positions[:, 2])
kdtree = KDTree(camera_position_unified_height)
_, nearest_neighbors_idx = kdtree.query(camera_position_unified_height, k=num_neighbors + 1, workers=-1)
# remove first neighbor as it is the point itself
nearest_neighbors_idx = nearest_neighbors_idx[:, 1:]
# define origin and neighbor points
origin_point = np.repeat(camera_position_unified_height, repeats=num_neighbors, axis=0)
neighbor_points = camera_position_unified_height[nearest_neighbors_idx, :].reshape(-1, 3)
distance = np.linalg.norm(origin_point - neighbor_points, axis=1)
# check for collision with raycasting
hit_position, _, _, hit_idx = self._raycast_check(
origin_point, neighbor_points - origin_point, np.max(distance)
)
# filter connections that collide with the environment
collision = np.zeros(len(origin_point), dtype=bool)
collision[hit_idx] = np.linalg.norm(hit_position - origin_point[hit_idx], axis=1) < distance[hit_idx]
collision = collision.reshape(-1, num_neighbors)
return nearest_neighbors_idx, collision, distance
def _get_cam_target(self) -> None:
"""Camera orientation variation (similar to matterport variation)"""
# the variation around the up axis (z-axis) has to be picked such that the camera does not face a wall
# done by constructing a graph of all sample points and picking the z angle so that the camera points at one of the node's neighbors
# get nearest neighbors and check for collision
nearest_neighbors_idx, collision, _ = self._construct_kdtree()
# get nodes where all neighbors are in collision
all_collision_idx = np.all(collision, axis=1)
# select the neighbor with the largest distance that is not in collision
direction_neighbor_idx = np.hstack(
[
(collision_single_node == False).nonzero()[0][-1]
for collision_single_node in collision[~all_collision_idx, :]
]
)
direction_neighbor_idx = np.vstack(
(np.arange(nearest_neighbors_idx.shape[0])[~all_collision_idx], direction_neighbor_idx)
).T
selected_neighbor_idx = nearest_neighbors_idx[direction_neighbor_idx[:, 0], direction_neighbor_idx[:, 1]]
# get the z angle from the direction between the camera position and the selected neighbor
neighbor_direction = (
self.camera_positions[~all_collision_idx, :] - self.camera_positions[selected_neighbor_idx, :]
)
z_angles = np.rad2deg(np.arctan2(neighbor_direction[:, 1], neighbor_direction[:, 0]))
# filter points that have no neighbors that are not in collision and update number of points
self.camera_positions = self.camera_positions[~all_collision_idx, :]
self.nbr_points = self.camera_positions.shape[0]
# vary the rotation of the forward and horizontal axis (in camera frame) as a uniform distribution within the limits
x_angles = np.random.uniform(self._cfg.x_angle_range[0], self._cfg.x_angle_range[1], self.nbr_points)
y_angles = np.random.uniform(self._cfg.y_angle_range[0], self._cfg.y_angle_range[1], self.nbr_points)
self.cam_angles = np.hstack((x_angles.reshape(-1, 1), y_angles.reshape(-1, 1), z_angles.reshape(-1, 1)))
return
""" Camera and Image Creator """
def _camera_init(self) -> None:
# Setup camera sensor
self.camera_depth = Camera(cfg=self._cfg.camera_cfg_depth, device="cpu")
self.camera_depth.spawn(self._cfg.camera_prim_depth)
if self._cfg.camera_intrinsics_depth:
intrinsic_matrix = np.array(self._cfg.camera_intrinsics_depth).reshape(3, 3)
self.camera_depth.set_intrinsic_matrix(intrinsic_matrix)
self.camera_depth.initialize()
if self._cfg.high_res_depth:
self._cfg.camera_cfg_sem.data_types += ["distance_to_image_plane"]
self.camera_semantic = Camera(cfg=self._cfg.camera_cfg_sem, device="cpu")
self.camera_semantic.spawn(self._cfg.camera_prim_sem)
if self._cfg.camera_intrinsics_sem:
intrinsic_matrix = np.array(self._cfg.camera_intrinsics_sem).reshape(3, 3)
self.camera_semantic.set_intrinsic_matrix(intrinsic_matrix)
self.camera_semantic.initialize()
return
def _domain_recorder(self) -> None:
"""
Will iterate over all camera positions and orientations while recording the resulting images in the different
domains (rgb, depth, semantic). The resulting images will be saved in the following folder structure:
        NOTE: depth images are saved as png for visualization only; the corresponding depth arrays are additionally saved
        as npy files because the large depths in CARLA exceed the uint16 range of the png format and would otherwise produce wrong depth values
- self._cfg.output_dir
- camera_extrinsic{depth_suffix}.txt (format: x y z qx qy qz qw for depth camera)
- camera_extrinsic{sem_suffix}.txt (format: x y z qx qy qz qw for semantic camera)
- intrinsics.txt (expects ROS CameraInfo format --> P-Matrix, both cameras)
- rgb
- xxxx{sem_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- depth
- xxxx{depth_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- xxxx{depth_suffix}.npy (arrays should be named with 4 digits, e.g. 0000.npy, 0001.npy, etc.)
- semantics
- xxxx{sem_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
The depth and semantic suffix are for example "_depth" and "_sem" and can be set in the config file. They are
        necessary to differentiate between the two cameras and their extrinsics. The suffix in the image naming is kept
        for compatibility with the matterport3d explorer.
If high resolution depth images are enabled, the following additional folder is added:
- depth_high_res
- xxxx{depth_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- xxxx{depth_suffix}.npy (arrays should be named with 4 digits, e.g. 0000.npy, 0001.npy, etc.)
"""
# create save dirs for domains
os.makedirs(os.path.join(self._cfg.output_dir, "rgb"), exist_ok=True)
os.makedirs(os.path.join(self._cfg.output_dir, "depth"), exist_ok=True)
os.makedirs(os.path.join(self._cfg.output_dir, "semantics"), exist_ok=True)
if self._cfg.high_res_depth:
os.makedirs(os.path.join(self._cfg.output_dir, "depth_high_res"), exist_ok=True)
# save intrinsics
intrinsics = []
depth_intrinsics = (
np.array(self._cfg.camera_intrinsics_depth).reshape(3, 3)
if self._cfg.camera_intrinsics_depth
else self.camera_depth.data.intrinsic_matrix
)
sem_intrinsics = (
np.array(self._cfg.camera_intrinsics_sem).reshape(3, 3)
if self._cfg.camera_intrinsics_sem
else self.camera_semantic.data.intrinsic_matrix
)
for intrinsics_single in [depth_intrinsics, sem_intrinsics]:
if self._cfg.ros_p_mat:
p_mat = np.zeros((3, 4))
p_mat[:3, :3] = intrinsics_single
intrinsics.append(p_mat.flatten())
else:
intrinsics.append(intrinsics_single.flatten())
np.savetxt(os.path.join(self._cfg.output_dir, "intrinsics.txt"), np.vstack(intrinsics), delimiter=",")
# init pose buffers
sem_poses = np.zeros((self._cfg.max_cam_recordings, 7))
depth_poses = np.zeros((self._cfg.max_cam_recordings, 7))
# Play simulator
self.sim.play()
# Simulate for a few steps
# FIXME: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(14):
self.sim.render()
# matrix to transform opengl to isaac coordinate system
isaac_to_opengl_mat = tf.Rotation.from_euler("XYZ", [90, -90, 0], degrees=True).as_matrix()
for idx, sem_pos in enumerate(self.camera_positions):
start = time.time()
# Set semantic camera pose
sem_rot = self.cam_angles[idx].copy()
sem_rot = sem_rot.astype(np.float64) # convert to double precision
sem_rot_mat = tf.Rotation.from_euler("xyz", sem_rot, degrees=True).as_matrix()
rot = sem_rot_mat @ isaac_to_opengl_mat
rot_quad = tf.Rotation.from_matrix(rot).as_quat()
self.camera_semantic._sensor_xform.set_world_pose(sem_pos, convert_quat(rot_quad, "wxyz"))
# get correct rotation from depth camera to semantic camera
if self._cfg.tf_quat_convention == "isaac":
cam_rot_sem_from_depth = tf.Rotation.from_quat(self._cfg.tf_quat).as_matrix()
elif self._cfg.tf_quat_convention == "roll-pitch-yaw":
cam_rot_sem_from_depth = tf.Rotation.from_quat(self._cfg.tf_quat).as_euler("XYZ", degrees=True)
cam_rot_sem_from_depth[[1, 2]] *= -1
cam_rot_sem_from_depth = tf.Rotation.from_euler("XYZ", cam_rot_sem_from_depth, degrees=True).as_matrix()
else:
raise ValueError(f"tf_quat_convention {self._cfg.tf_quat_convention} not supported")
# set depth camera pose
cam_rot_sem_from_depth = cam_rot_sem_from_depth.astype(np.float64) # convert to double precision
depth_rot_mat = sem_rot_mat @ cam_rot_sem_from_depth.T # get depth rotation in odom frame
depth_pos = sem_pos - depth_rot_mat @ self._cfg.tf_pos
rot = depth_rot_mat @ isaac_to_opengl_mat
rot_quad = tf.Rotation.from_matrix(rot).as_quat()
# set depth camera pose
self.camera_depth._sensor_xform.set_world_pose(depth_pos, convert_quat(rot_quad, "wxyz"))
# FIXME: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.render()
# Update camera data
self.camera_depth.update(dt=0.0)
self.camera_semantic.update(dt=0.0)
# save poses in Isaac convention and extrinsic format (xyz)
sem_poses[idx, :3] = sem_pos
sem_poses[idx, 3:] = tf.Rotation.from_matrix(sem_rot_mat).as_quat()
depth_poses[idx, :3] = depth_pos
depth_poses[idx, 3:] = tf.Rotation.from_matrix(depth_rot_mat).as_quat()
# Save images
# RGB
if "rgb" in self.camera_semantic.data.output:
cv2.imwrite(
os.path.join(self._cfg.output_dir, "rgb", f"{idx}".zfill(4) + self._cfg.sem_suffix + ".png"),
cv2.cvtColor(self.camera_semantic.data.output["rgb"], cv2.COLOR_RGB2BGR),
)
# DEPTH
np.save(
os.path.join(self._cfg.output_dir, "depth", f"{idx}".zfill(4) + self._cfg.depth_suffix + ".npy"),
self.camera_depth.data.output["distance_to_image_plane"] * self._cfg.depth_scale,
)
cv2.imwrite(
os.path.join(self._cfg.output_dir, "depth", f"{idx}".zfill(4) + self._cfg.depth_suffix + ".png"),
(self.camera_depth.data.output["distance_to_image_plane"] * self._cfg.depth_scale).astype(
np.uint16
                ),  # scale and cast to uint16 for png export
)
# High Resolution Depth
if self._cfg.high_res_depth:
np.save(
os.path.join(
self._cfg.output_dir, "depth_high_res", f"{idx}".zfill(4) + self._cfg.depth_suffix + ".npy"
),
self.camera_semantic.data.output["distance_to_image_plane"] * self._cfg.depth_scale,
)
cv2.imwrite(
os.path.join(
self._cfg.output_dir, "depth_high_res", f"{idx}".zfill(4) + self._cfg.depth_suffix + ".png"
),
(self.camera_semantic.data.output["distance_to_image_plane"] * self._cfg.depth_scale).astype(
np.uint16
                    ),  # scale and cast to uint16 for png export
)
# SEMANTICS
if self._cfg_load.sem_mesh_to_class_map:
class_color_with_unlabelled = self.vip_sem_meta.class_color
class_color_with_unlabelled["unlabelled"] = [0, 0, 0]
idToColor = np.array(
[
[
int(k),
self.vip_sem_meta.class_color[v["class"].lower()][0],
self.vip_sem_meta.class_color[v["class"].lower()][1],
self.vip_sem_meta.class_color[v["class"].lower()][2],
]
for k, v in self.camera_semantic.data.output["semantic_segmentation"]["info"][
"idToLabels"
].items()
]
)
idToColorArray = np.zeros((idToColor.max(axis=0)[0] + 1, 3))
idToColorArray[idToColor[:, 0]] = idToColor[:, 1:]
sem_img = idToColorArray[
self.camera_semantic.data.output["semantic_segmentation"]["data"].reshape(-1)
].reshape(self.camera_semantic.data.output["semantic_segmentation"]["data"].shape + (3,))
cv2.imwrite(
os.path.join(self._cfg.output_dir, "semantics", f"{idx}".zfill(4) + self._cfg.sem_suffix + ".png"),
cv2.cvtColor(sem_img.astype(np.uint8), cv2.COLOR_RGB2BGR),
)
# Print Info
duration = time.time() - start
print(f"Recording {idx + 1}/{self.nbr_points} ({(idx + 1) / self.nbr_points * 100:.2f}%) in {duration:.4f}")
# stop condition
if self._cfg.max_cam_recordings is not None and idx >= self._cfg.max_cam_recordings - 1:
break
if self.debug:
VisualCuboid(
prim_path="/cube_example", # The prim path of the cube in the USD stage
name="waypoint", # The unique name used to retrieve the object from the scene later on
position=sem_pos
+ (
tf.Rotation.from_euler("xyz", sem_rot, degrees=True).as_matrix()
@ np.array([100, 0, 0]).reshape(-1, 1)
).reshape(
-1
), # Using the current stage units which is in meters by default.
scale=np.array([15, 15, 15]), # most arguments accept mainly numpy arrays.
size=1.0,
color=np.array([255, 0, 0]), # RGB channels, going from 0-1
)
import matplotlib.pyplot as plt
_, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].imshow(self.camera_semantic.data.output["rgb"])
axs[1].imshow(self.camera_depth.data.output["distance_to_image_plane"])
axs[2].imshow(self.camera_semantic.data.output["semantic_segmentation"]["data"])
plt.show()
np.savetxt(
os.path.join(self._cfg.output_dir, f"camera_extrinsic{self._cfg.sem_suffix}.txt"),
            sem_poses[: idx + 1],
delimiter=",",
)
np.savetxt(
os.path.join(self._cfg.output_dir, f"camera_extrinsic{self._cfg.depth_suffix}.txt"),
            depth_poses[: idx + 1],
delimiter=",",
)
return
@staticmethod
def compute_bbox_with_cache(cache: UsdGeom.BBoxCache, prim: Usd.Prim) -> Gf.Range3d:
"""
Compute Bounding Box using ComputeWorldBound at UsdGeom.BBoxCache. More efficient if used multiple times.
See https://graphics.pixar.com/usd/dev/api/class_usd_geom_b_box_cache.html
Args:
cache: A cached, i.e. `UsdGeom.BBoxCache(Usd.TimeCode.Default(), ['default', 'render'])`
prim: A prim to compute the bounding box.
Returns:
A range (i.e. bounding box), see more at: https://graphics.pixar.com/usd/release/api/class_gf_range3d.html
"""
bound = cache.ComputeWorldBound(prim)
bound_range = bound.ComputeAlignedBox()
return bound_range
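# Illustrative sketch: how the bounding-box helper above is typically used. The stage
# handle and the prim path "/World/Carla" are placeholders/assumptions; Usd, UsdGeom
# and Gf are already imported at the top of this module.
def _example_compute_prim_bbox(stage: Usd.Stage, prim_path: str = "/World/Carla") -> Gf.Range3d:
    # one cache can be reused for many prims, which is the point of ComputeWorldBound
    cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), ["default", "render"])
    prim = stage.GetPrimAtPath(prim_path)
    bound = cache.ComputeWorldBound(prim)  # world-space, possibly oriented box
    return bound.ComputeAlignedBox()  # axis-aligned Gf.Range3d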
# EoF
| 33,772 | Python | 47.734488 | 194 | 0.577875 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/config/extension.toml | [package]
version = "0.0.1"
title = "ViPlanner extension"
description="Extension containing the novel ViPlanner: Visual Semantic Imperative Learning for Local Navigation"
authors =["Pascal Roth", "Julian Nubert", "Fan Yang", "Mayank Mittal", "Marco Hutter"]
repository = "https://github.com/leggedrobotics/viplanner"
category = "robotics"
keywords = ["kit", "robotics"]
readme = "docs/README.md"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
"omni.isaac.orbit" = {}
# Main python module this extension provides.
[[python.module]]
name = "omni.viplanner"
| 592 | TOML | 28.649999 | 112 | 0.716216 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# from .vip_anymal import VIPlanner
import os
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../data"))
from .viplanner_algo import VIPlannerAlgo
__all__ = ["DATA_DIR", "VIPlannerAlgo"]
| 365 | Python | 23.399998 | 84 | 0.69863 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/viplanner_algo.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to use the rigid objects class.
"""
"""Rest everything follows."""
import os
import carb
import omni.isaac.orbit.utils.math as math_utils
import torch
import torchvision.transforms as transforms
from omni.isaac.debug_draw import _debug_draw
from viplanner.config import TrainCfg
# viplanner
from viplanner.plannernet import AutoEncoder, DualAutoEncoder
from viplanner.traj_cost_opt.traj_opt import TrajOpt
"""
VIPlanner Helpers
"""
class VIPlannerAlgo:
def __init__(self, model_dir: str, fear_threshold: float = 0.5):
"""Apply VIPlanner Algorithm
Args:
model_dir (str): Directory that include model.pt and model.yaml
"""
super().__init__()
assert os.path.exists(model_dir), "Model directory does not exist"
assert os.path.isfile(os.path.join(model_dir, "model.pt")), "Model file does not exist"
assert os.path.isfile(os.path.join(model_dir, "model.yaml")), "Model config file does not exist"
# params
self.fear_threshold = fear_threshold
# load model
self.train_config: TrainCfg = None
self.load_model(model_dir)
# get transforms for images
self.transform = transforms.Resize(self.train_config.img_input_size, antialias=None)
# init trajectory optimizer
self.traj_generate = TrajOpt()
# setup waypoint display in Isaac
self.draw = _debug_draw.acquire_debug_draw_interface()
self.color_fear = [(1.0, 0.4, 0.1, 1.0)] # red
self.color_path = [(0.4, 1.0, 0.1, 1.0)] # green
self.size = [5.0]
def load_model(self, model_dir: str):
# load train config
self.train_config: TrainCfg = TrainCfg.from_yaml(os.path.join(model_dir, "model.yaml"))
carb.log_info(
f"Model loaded using sem: {self.train_config.sem}, rgb: {self.train_config.rgb}, knodes: {self.train_config.knodes}, in_channel: {self.train_config.in_channel}"
)
if isinstance(self.train_config.data_cfg, list):
self.max_goal_distance = self.train_config.data_cfg[0].max_goal_distance
self.max_depth = self.train_config.data_cfg[0].max_depth
else:
self.max_goal_distance = self.train_config.data_cfg.max_goal_distance
self.max_depth = self.train_config.data_cfg.max_depth
if self.train_config.sem:
self.net = DualAutoEncoder(self.train_config)
else:
self.net = AutoEncoder(self.train_config.in_channel, self.train_config.knodes)
# get model and load weights
try:
model_state_dict, _ = torch.load(os.path.join(model_dir, "model.pt"))
except ValueError:
model_state_dict = torch.load(os.path.join(model_dir, "model.pt"))
self.net.load_state_dict(model_state_dict)
# inference script = no grad for model
self.net.eval()
# move to GPU if available
if torch.cuda.is_available():
self.net = self.net.cuda()
self.cuda_avail = True
else:
carb.log_warn("CUDA not available, VIPlanner will run on CPU")
self.cuda_avail = False
return
###
# Transformations
###
def goal_transformer(self, goal: torch.Tensor, cam_pos: torch.Tensor, cam_quat: torch.Tensor) -> torch.Tensor:
"""transform goal into camera frame"""
goal_cam_frame = goal - cam_pos
goal_cam_frame[:, 2] = 0 # trained with z difference of 0
goal_cam_frame = math_utils.quat_apply(math_utils.quat_inv(cam_quat), goal_cam_frame)
return goal_cam_frame
def path_transformer(
self, path_cam_frame: torch.Tensor, cam_pos: torch.Tensor, cam_quat: torch.Tensor
) -> torch.Tensor:
"""transform path from camera frame to world frame"""
return math_utils.quat_apply(
cam_quat.unsqueeze(1).repeat(1, path_cam_frame.shape[1], 1), path_cam_frame
) + cam_pos.unsqueeze(1)
def input_transformer(self, image: torch.Tensor) -> torch.Tensor:
# transform images
image = self.transform(image)
image[image > self.max_depth] = 0.0
image[~torch.isfinite(image)] = 0 # set all inf or nan values to 0
return image
###
# Planning
###
def plan(self, image: torch.Tensor, goal_robot_frame: torch.Tensor) -> tuple:
with torch.no_grad():
keypoints, fear = self.net(self.input_transformer(image), goal_robot_frame)
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return keypoints, traj, fear
def plan_dual(self, dep_image: torch.Tensor, sem_image: torch.Tensor, goal_robot_frame: torch.Tensor) -> tuple:
# transform input
sem_image = self.transform(sem_image) / 255
with torch.no_grad():
keypoints, fear = self.net(self.input_transformer(dep_image), sem_image, goal_robot_frame)
traj = self.traj_generate.TrajGeneratorFromPFreeRot(keypoints, step=0.1)
return keypoints, traj, fear
###
# Debug Draw
###
def debug_draw(self, paths: torch.Tensor, fear: torch.Tensor, goal: torch.Tensor):
self.draw.clear_lines()
self.draw.clear_points()
def draw_single_traj(traj, color, size):
traj[:, 2] = torch.mean(traj[:, 2])
self.draw.draw_lines(traj[:-1].tolist(), traj[1:].tolist(), color * len(traj[1:]), size * len(traj[1:]))
for idx, curr_path in enumerate(paths):
if fear[idx] > self.fear_threshold:
draw_single_traj(curr_path, self.color_fear, self.size)
self.draw.draw_points(goal.tolist(), self.color_fear * len(goal), self.size * len(goal))
else:
draw_single_traj(curr_path, self.color_path, self.size)
self.draw.draw_points(goal.tolist(), self.color_path * len(goal), self.size * len(goal))
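# Illustrative sketch: minimal way to run the planner defined above. The model directory,
# image resolution and goal are placeholders; plan_dual() expects a depth image of shape
# (B, 1, H, W), a semantic RGB image of shape (B, 3, H, W) and a goal in the camera frame
# of shape (B, 3). Requires a trained model and a running Isaac Sim instance (for the
# debug-draw interface).
def _example_run_viplanner(model_dir: str = "/path/to/model_dir") -> None:
    algo = VIPlannerAlgo(model_dir=model_dir)
    depth = torch.zeros((1, 1, 360, 640))  # assumed resolution, resized internally
    semantics = torch.zeros((1, 3, 360, 640))
    goal = torch.tensor([[5.0, 0.0, 0.0]])  # 5 m straight ahead of the camera
    if algo.cuda_avail:
        depth, semantics, goal = depth.cuda(), semantics.cuda(), goal.cuda()
    keypoints, paths, fear = algo.plan_dual(depth, semantics, goal)
    algo.debug_draw(paths, fear, goal)  # draw the predicted path in the viewport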
| 6,266 | Python | 35.436046 | 172 | 0.628631 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the viplanner environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .actions import * # noqa: F401, F403
from .commands import * # noqa: F401, F403
from .observations import * # noqa: F401, F403
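# Illustrative sketch: wiring one of the observation terms into an orbit observation
# manager. Class names come from omni.isaac.orbit.managers; the sensor name "front_cam"
# is a placeholder and depends on the scene configuration.
def _example_camera_obs_term():
    from omni.isaac.orbit.managers import ObservationTermCfg, SceneEntityCfg
    return ObservationTermCfg(
        func=isaac_camera_data,
        params={"sensor_cfg": SceneEntityCfg("front_cam"), "data_type": "distance_to_image_plane"},
    )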
| 440 | Python | 30.499998 | 93 | 0.725 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/observations.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains observation terms specific for viplanner.
The functions can be passed to the :class:`omni.isaac.orbit.managers.ObservationTermCfg` object to enable
the observation introduced by the function.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors.camera import CameraData
from viplanner.config import VIPlannerSemMetaHandler
from .actions import NavigationAction
if TYPE_CHECKING:
from omni.isaac.orbit.envs.base_env import BaseEnv
# initialize viplanner config
VIPLANNER_SEM_META = VIPlannerSemMetaHandler()
def matterport_raycast_camera_data(env: BaseEnv, sensor_cfg: SceneEntityCfg, data_type: str) -> torch.Tensor:
"""Images generated by the raycast camera."""
# extract the used quantities (to enable type-hinting)
sensor: CameraData = env.scene.sensors[sensor_cfg.name].data
# return the data
if data_type == "distance_to_image_plane":
output = sensor.output[data_type].clone().unsqueeze(1)
output[torch.isnan(output)] = 0.0
output[torch.isinf(output)] = 0.0
return output
else:
return sensor.output[data_type].clone().permute(0, 3, 1, 2)
def isaac_camera_data(env: BaseEnv, sensor_cfg: SceneEntityCfg, data_type: str) -> torch.Tensor:
"""Images generated by the usd camera."""
# extract the used quantities (to enable type-hinting)
sensor: CameraData = env.scene.sensors[sensor_cfg.name].data
# return the data
if data_type == "distance_to_image_plane":
output = sensor.output[data_type].clone().unsqueeze(1)
output[torch.isnan(output)] = 0.0
output[torch.isinf(output)] = 0.0
return output
elif data_type == "semantic_segmentation":
# retrieve data
info = [sensor.info[env_id][data_type]["idToLabels"] for env_id in range(env.num_envs)]
data = sensor.output[data_type].clone()
# assign each key a color from the VIPlanner color space
info = [
{
int(k): VIPLANNER_SEM_META.class_color["static"]
if v["class"] in ("BACKGROUND", "UNLABELLED")
else VIPLANNER_SEM_META.class_color[v["class"]]
for k, v in d.items()
}
for d in info
]
# create recolored images
output = torch.zeros((*data.shape, 3), device=env.device, dtype=torch.uint8)
for env_id in range(env.num_envs):
mapping = torch.zeros((max(info[env_id].keys()) + 1, 3), dtype=torch.uint8, device=env.device)
mapping[list(info[env_id].keys())] = torch.tensor(
list(info[env_id].values()), dtype=torch.uint8, device=env.device
)
output[env_id] = mapping[data[env_id].long().reshape(-1)].reshape(data.shape[1:] + (3,))
return output.permute(0, 3, 1, 2)
else:
return sensor.output[data_type].clone()
def cam_position(env: BaseEnv, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Position of the camera."""
# extract the used quantities (to enable type-hinting)
sensor: CameraData = env.scene.sensors[sensor_cfg.name].data
return sensor.pos_w.clone()
def cam_orientation(env: BaseEnv, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Orientation of the camera."""
# extract the used quantities (to enable type-hinting)
sensor: CameraData = env.scene.sensors[sensor_cfg.name].data
return sensor.quat_w_world.clone()
def low_level_actions(env: BaseEnv) -> torch.Tensor:
"""Low-level actions."""
# extract the used quantities (to enable type-hinting)
action_term: NavigationAction = env.action_manager._terms[0]
return action_term.low_level_actions.clone()
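# Illustrative sketch: the recoloring in `isaac_camera_data` is a plain integer lookup
# table, i.e. every segmentation id indexes a (max_id + 1, 3) tensor of RGB values.
# A standalone version with a made-up id-to-color mapping:
def _example_recolor_segmentation() -> torch.Tensor:
    id_to_color = {0: [0, 0, 0], 3: [0, 255, 0], 7: [255, 0, 0]}  # hypothetical ids
    labels = torch.randint(0, 8, (4, 4))  # dummy (H, W) segmentation image
    mapping = torch.zeros((max(id_to_color.keys()) + 1, 3), dtype=torch.uint8)
    mapping[list(id_to_color.keys())] = torch.tensor(list(id_to_color.values()), dtype=torch.uint8)
    # index with the flattened labels, then restore the spatial shape -> (H, W, 3)
    return mapping[labels.reshape(-1)].reshape(labels.shape + (3,))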
| 3,955 | Python | 34.321428 | 109 | 0.665487 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/actions/navigation_actions.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import MISSING
import torch
from omni.isaac.orbit.envs import RLTaskEnv
from omni.isaac.orbit.managers.action_manager import ActionTerm, ActionTermCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import check_file_path, read_file
# -- Navigation Action
class NavigationAction(ActionTerm):
"""Actions to navigate a robot by following some path."""
cfg: NavigationActionCfg
_env: RLTaskEnv
def __init__(self, cfg: NavigationActionCfg, env: RLTaskEnv):
super().__init__(cfg, env)
# check if policy file exists
if not check_file_path(self.cfg.low_level_policy_file):
raise FileNotFoundError(f"Policy file '{self.cfg.low_level_policy_file}' does not exist.")
file_bytes = read_file(self.cfg.low_level_policy_file)
# load policies
self.low_level_policy = torch.jit.load(file_bytes, map_location=self.device)
self.low_level_policy = torch.jit.freeze(self.low_level_policy.eval())
# prepare joint position actions
self.low_level_action_term: ActionTerm = self.cfg.low_level_action.class_type(cfg.low_level_action, env)
# prepare buffers
        self._action_dim = (
            self.cfg.path_length * 3
        )  # path_length waypoints with 3 coordinates each (the commanded path)
self._raw_navigation_velocity_actions = torch.zeros(self.num_envs, self._action_dim, device=self.device)
self._processed_navigation_velocity_actions = torch.zeros(
(self.num_envs, self.cfg.path_length, 3), device=self.device
)
self._low_level_actions = torch.zeros(self.num_envs, self.low_level_action_term.action_dim, device=self.device)
self._low_level_step_dt = self.cfg.low_level_decimation * self._env.physics_dt
self._counter = 0
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._action_dim
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_navigation_velocity_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_navigation_velocity_actions
@property
def low_level_actions(self) -> torch.Tensor:
return self._low_level_actions
"""
Operations.
"""
def process_actions(self, actions):
"""Process low-level navigation actions. This function is called with a frequency of 10Hz"""
# Store low level navigation actions
self._raw_navigation_velocity_actions[:] = actions
# reshape into 3D path
self._processed_navigation_velocity_actions[:] = actions.clone().view(self.num_envs, self.cfg.path_length, 3)
def apply_actions(self):
"""Apply low-level actions for the simulator to the physics engine. This functions is called with the
simulation frequency of 200Hz. Since low-level locomotion runs at 50Hz, we need to decimate the actions."""
if self._counter % self.cfg.low_level_decimation == 0:
self._counter = 0
# -- update command
self._env.command_manager.compute(dt=self._low_level_step_dt)
# Get low level actions from low level policy
self._low_level_actions[:] = self.low_level_policy(
self._env.observation_manager.compute_group(group_name="policy")
)
# Process low level actions
self.low_level_action_term.process_actions(self._low_level_actions)
# Apply low level actions
self.low_level_action_term.apply_actions()
self._counter += 1
@configclass
class NavigationActionCfg(ActionTermCfg):
class_type: type[ActionTerm] = NavigationAction
""" Class of the action term."""
low_level_decimation: int = 4
"""Decimation factor for the low level action term."""
low_level_action: ActionTermCfg = MISSING
"""Configuration of the low level action term."""
low_level_policy_file: str = MISSING
"""Path to the low level policy file."""
path_length: int = 51
"""Length of the path to be followed."""
| 4,313 | Python | 35.559322 | 119 | 0.658938 |