file_path
stringlengths 20
207
| content
stringlengths 5
3.85M
| size
int64 5
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
rosklyar/omniverse_extensions/tools/packman/config.packman.xml
|
<config remotes="cloudfront">
  <!-- Single download-only remote: packages are fetched over HTTPS from this
       CloudFront CDN host, with the package name and version substituted
       into the URL template below. -->
  <remote2 name="cloudfront">
    <transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
  </remote2>
</config>
| 211 |
XML
| 34.333328 | 123 | 0.691943 |
rosklyar/omniverse_extensions/tools/packman/bootstrap/install_package.py
|
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
    """Context manager yielding a freshly created scratch directory.

    The directory is created on entry and recursively deleted on exit,
    regardless of whether the managed block raised.
    """

    def __init__(self):
        # Directory is created lazily in __enter__, not here.
        self.path = None

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Remove temporary data created
        shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
    """Extract the zip archive at *package_src_path* into *package_dst_path*.

    The archive is first unpacked into a scratch directory and then copied
    recursively into the destination, so that missing intermediate folders
    (package name / version) are created along the way.  If the destination
    directory already exists, the installation is aborted with a warning
    instead of overwriting it.

    Args:
        package_src_path: Path to the package zip file.
        package_dst_path: Directory the package should be installed into.
    """
    log = logging.getLogger("install_package")
    with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file:
        # stdlib TemporaryDirectory cleans itself up on exit, even on error.
        with tempfile.TemporaryDirectory() as temp_dir:
            zip_file.extractall(temp_dir)
            try:
                # Recursive copy is needed because both package name and
                # version folder could be missing in the target directory:
                shutil.copytree(temp_dir, package_dst_path)
            except OSError:
                # copytree refuses to write over an existing destination.
                # Lazy %-args: the message is only formatted if emitted.
                log.warning(
                    "Directory %s already present, packaged installation aborted",
                    package_dst_path,
                )
            else:
                log.info("Package successfully installed to %s", package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,888 |
Python
| 31.568965 | 103 | 0.68697 |
rosklyar/omniverse_extensions/exts/playtika.eyedarts.export/playtika/eyedarts/export/extension.py
|
import omni.ext
import carb.events
import omni.kit.app
import omni.ui as ui
import omni.kit.window.filepicker as fp
from pxr import Usd, UsdGeom, Gf
import omni.usd
import os
from os.path import exists
import asyncio
import json
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    """Exports "eye dart" animation to JSON: while an Audio2Face track plays,
    the world rotations of the left/right eye prims are sampled each frame and
    written out as one JSON file per WAV track, driven from a small omni.ui
    window (folder picker, FPS combo box, Export button)."""

    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        async def export_eyedarts():
            # Stage-event-triggered variant of export() below, with a
            # hard-coded output folder and fixed 60 fps sampling.
            output_folder = "C:/Users/rskliar/ue-projects/test_folder"
            progress_window = ui.Window("Export eye darts to json...", width=750, height=100)
            with progress_window.frame:
                with ui.VStack():
                    file_label = ui.StringField()
                    pb = ui.ProgressBar()
            pb.model.set_value(0)
            stage = omni.usd.get_context().get_stage()
            manager = omni.audio2face.player.get_ext().player_manager()
            instance = manager.get_instance("/World/LazyGraph/Player")
            l_eye = stage.GetPrimAtPath("/World/male_fullface/char_male_model_hi/l_eye_grp_hi")
            r_eye = stage.GetPrimAtPath("/World/male_fullface/char_male_model_hi/r_eye_grp_hi")
            wav_files_folder = instance.get_abs_track_root_path()
            files_to_process = getWavFiles(wav_files_folder)
            for f in files_to_process:
                instance.set_track_name(f)
                pb.model.set_value(0)
                print("Processing file:" + f)
                file_label.model.set_value(f)
                fileLengthInSeconds = instance.get_range_end()
                time = 0.0
                result = []
                while(time < fileLengthInSeconds):
                    # Wait one app frame, then sample the eye poses at 1/60 s steps.
                    e = await omni.kit.app.get_app().next_update_async()
                    time += 1.0 / 60
                    instance.set_time(time)
                    pose_l = omni.usd.utils.get_world_transform_matrix(l_eye)
                    pose_r = omni.usd.utils.get_world_transform_matrix(r_eye)
                    l_rx, l_ry, l_rz = pose_l.ExtractRotation().Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
                    r_rx, r_ry, r_rz = pose_r.ExtractRotation().Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
                    frame = [l_rx, l_ry, l_rz, r_rx, r_ry, r_rz]
                    result.append(frame)
                    pb.model.set_value(time / fileLengthInSeconds)
                result_json = {
                    "numPoses": 6,
                    "numFrames": len(result),
                    "facsNames" : ["l_rx", "l_ry", "l_rz", "r_rx", "r_ry", "r_rz"],
                    "weightMat": result
                }
                # f ends in ".wav": f[:-3] keeps the dot, so appending "json"
                # produces "<name>.json".
                with open(output_folder + "/" + f[:-3] + "json", 'w') as outfile:
                    json.dump(result_json, outfile)
            progress_window.destroy()
            progress_window.visible = False
        def on_change(event):
            # NOTE(review): 8 is presumably a specific StageEventType value --
            # confirm which one before relying on it.
            if(event.type == 8):
                asyncio.ensure_future(export_eyedarts())
            pass
        print("[playtika.eyedarts.export] ExportEyeDarts startup")
        self.output_folder = ""
        self.fps = 60
        self._window = ui.Window("Export eye darts", width=750, height=300)
        self.filePicker = None
        self.folder_label = None
        print("Stream=" + str(omni.kit.app.get_app().get_message_bus_event_stream()))
        print("Update Stream=" + str(omni.kit.app.get_app().get_update_event_stream()))
        # Keep a reference to the subscription so it is not garbage collected.
        self._subscription = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(on_change)
        print("[playtika.eyedarts.export] ExportEyeDarts subscription created")
        with self._window.frame:
            with ui.VStack():
                def getWavFiles(json_files_folder):
                    # Local duplicate of the module-level helper: list *.wav
                    # file names directly inside the given folder.
                    files = []
                    if json_files_folder and not exists(json_files_folder):
                        raise Exception("Please, select existed folder with JSON files!")
                    for file in os.listdir(json_files_folder):
                        if file.endswith('.wav'):
                            files.append(file)
                    return files
                def on_combobox_changed(model, item):
                    # Resolve the selected combo-box entry ("60" or "24") to an int fps.
                    self.fps = model.get_item_value_model(model.get_item_children()[model.get_item_value_model().as_int]).as_int
                def on_click():
                    asyncio.ensure_future(export())
                async def export():
                    # UI-triggered export: like export_eyedarts() but honors the
                    # user-selected output folder and fps.
                    progress_window = ui.Window("Export eye darts to json...", width=750, height=100)
                    with progress_window.frame:
                        with ui.VStack():
                            file_label = ui.StringField()
                            pb = ui.ProgressBar()
                    pb.model.set_value(0)
                    if(self.output_folder):
                        stage = omni.usd.get_context().get_stage()
                        manager = omni.audio2face.player.get_ext().player_manager()
                        instance = manager.get_instance("/World/LazyGraph/Player")
                        l_eye = stage.GetPrimAtPath("/World/male_fullface/char_male_model_hi/l_eye_grp_hi")
                        r_eye = stage.GetPrimAtPath("/World/male_fullface/char_male_model_hi/r_eye_grp_hi")
                        wav_files_folder = instance.get_abs_track_root_path()
                        files_to_process = getWavFiles(wav_files_folder)
                        for f in files_to_process:
                            instance.set_track_name(f)
                            pb.model.set_value(0)
                            print("Processing file:" + f)
                            file_label.model.set_value(f)
                            fileLengthInSeconds = instance.get_range_end()
                            time = 0.0
                            result = []
                            while(time < fileLengthInSeconds):
                                e = await omni.kit.app.get_app().next_update_async()
                                # Step by the user-selected frame rate.
                                time += 1.0 / self.fps
                                instance.set_time(time)
                                pose_l = omni.usd.utils.get_world_transform_matrix(l_eye)
                                pose_r = omni.usd.utils.get_world_transform_matrix(r_eye)
                                l_rx, l_ry, l_rz = pose_l.ExtractRotation().Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
                                r_rx, r_ry, r_rz = pose_r.ExtractRotation().Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
                                frame = [l_rx, l_ry, l_rz, r_rx, r_ry, r_rz]
                                result.append(frame)
                                pb.model.set_value(time / fileLengthInSeconds)
                            result_json = {
                                "numPoses": 6,
                                "numFrames": len(result),
                                "facsNames" : ["l_rx", "l_ry", "l_rz", "r_rx", "r_ry", "r_rz"],
                                "weightMat": result
                            }
                            with open(self.output_folder + "/" + f[:-3] + "json", 'w') as outfile:
                                json.dump(result_json, outfile)
                    progress_window.destroy()
                    progress_window.visible = False
                def on_click_open(file_name, dir_name):
                    # File-picker "apply" callback: only the directory is used.
                    print("File name: " + dir_name)
                    self.output_folder = dir_name
                    self.folder_label.text = "Output folder: " + self.output_folder
                    self.filePicker.hide()
                def show_file_picker():
                    print("show file picker")
                    self.filePicker = fp.FilePickerDialog("Select output folder", apply_button_label="Select", click_apply_handler=on_click_open)
                    self.filePicker.show()
                with ui.HStack():
                    self.folder_label = ui.Label("Output folder: " + self.output_folder, height=20)
                    ui.Button("Select", clicked_fn=lambda: show_file_picker(), width = 20, height=20)
                with ui.HStack():
                    ui.Label("FPS: ", height=20)
                    fpsCombobox = ui.ComboBox(0, "60", "24")
                    fpsCombobox.model.add_item_changed_fn(lambda model, item: on_combobox_changed(model, item))
                ui.Button("Export", clicked_fn=lambda: on_click())
    def on_shutdown(self):
        print("[playtika.eyedarts.export] ExportEyeDarts shutdown")
def getWavFiles(json_files_folder):
    """Return the names of all ``.wav`` files directly inside a folder.

    Args:
        json_files_folder: Path of the folder to scan.  (Despite the
            parameter name, the folder is expected to contain WAV files;
            the name is kept for backward compatibility with callers.)

    Returns:
        List of file names (not full paths) ending in ``.wav``.

    Raises:
        Exception: If a non-empty path is given that does not exist.
            Note: an empty/None path skips this check and will instead fail
            inside ``os.listdir``.
    """
    if json_files_folder and not exists(json_files_folder):
        # Fixed message: this function scans for WAV files, not JSON files.
        raise Exception("Please, select existed folder with WAV files!")
    return [file for file in os.listdir(json_files_folder) if file.endswith('.wav')]
| 9,417 |
Python
| 50.747252 | 145 | 0.511946 |
rosklyar/omniverse_extensions/exts/playtika.eyedarts.export/playtika/eyedarts/export/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
rosklyar/omniverse_extensions/exts/playtika.eyedarts.export/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Simple UI Extension Template"
description="The simplest python extension example. Use it as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import playtika.eyedarts.export".
[[python.module]]
name = "playtika.eyedarts.export"
| 815 |
TOML
| 27.13793 | 113 | 0.746012 |
MatthewDZane/XArmFollowTarget/README.md
|
<a name="readme-top"></a>
<!-- PROJECT SHIELDS -->
<!--
*** I'm using markdown "reference style" links for readability.
*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).
*** See the bottom of this document for the declaration of the reference variables
*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.
*** https://www.markdownguide.org/basic-syntax/#reference-style-links
-->
[![Contributors][contributors-shield]][contributors-url]
[![Forks][forks-shield]][forks-url]
[![Stargazers][stars-shield]][stars-url]
[![Issues][issues-shield]][issues-url]
[![LinkedIn][linkedin-shield]][linkedin-url]
<!-- PROJECT LOGO -->
<br />
<div align="center">
<a href="https://github.com/MatthewDZane/XArmFollowTarget/images/IsaacSim.png">
<img src="images/IsaacSim.png" alt="IsaacSim" width="700" height="400">
</a>
<h3 align="center">XArm Follow Target</h3>
<p align="center">
An Autonomous Camera Controller Robot
<br />
<a href="https://github.com/MatthewDZane/XArmFollowTarget"><strong>Explore the docs »</strong></a>
<br />
</a>
</p>
</div>
<!-- TABLE OF CONTENTS -->
<details>
<summary>Table of Contents</summary>
<ol>
<li>
<a href="#about-the-project">About The Project</a>
<ul>
<li><a href="#built-with">Built With</a></li>
</ul>
</li>
<li>
<a href="#using-xarm-follow-target-as-a-third-party-extension">Using Xarm Follow Target as a Third Party Extension</a>
</li>
</ol>
</details>
<!-- ABOUT THE PROJECT -->
## About The Project
This project autonomously controls a RealSense Camera mounted to an [XArm Robot](https://www.ufactory.cc/xarm-collaborative-robot). The repo is comprised of several scripts, which are designed to run synchronously across several different machines (XArm Robot, Local RealSense Camera Machine, and Remote Nautilus Cluster Machine), which utilize NVidia's [Isaac Sim](https://developer.nvidia.com/isaac-sim) and Intel's [RealSense Camera](https://www.intelrealsense.com/).
First the RealSense Camera script sends positional data to the remote machine, using OpenCV and the Camera's depth sensors. Then, the custom Isaac Sim Extension runs a Follow Target task, solving the Kinematic equations, to calculate the correct orientation of the XArm. Finally, these new orientations are sent back to the XArm itself to then update the robot in real life.
This project was headed by Professor Robert Twomey at the University of Nebraska Lincoln and is intended for educational and experimental use.
<p align="right">(<a href="#readme-top">back to top</a>)</p>
### Built With
This project was built using the following.
* [XArm](https://www.ufactory.cc/xarm-collaborative-robot)
* [Isaac Sim](https://developer.nvidia.com/isaac-sim)
* [RealSense Camera](https://www.intelrealsense.com/)
<div align="center">
<a href="https://gitlab.nrp-nautilus.io/MatthewZane/XArmFollowTarget/images/RealSenseCamera.png">
<img src="images/RealSenseCamera.png" alt="RealSenseCamera" width="500" height="410">
</a>
</div>
# Using XArm Follow Target as a Third Party Extension
1. Clone the repo into the directory where you would like to store Isaac Sim Extensions
2. Open Isaac Sim and go to Windows->Extensions
3. Click the Settings Icon (Gear) and add the path to the parent directory of the repo (not XArm or XArmFollowTarget). Now the XArm Follow Target Extension should show up under the Third Party Extensions.
4. Enable the XArm Follow Target Extension and check the Autoload setting. The XArm Follow Target Extension will now appear on the top menu bar of the Isaac Sim Application.
5. Click the XArm Follow Target to use the Extension
Port Forward local ports to the Container for the realsense camera client.
- once you have a XGL container running you will need to use the kubernetes CLI to get the specific pod name. This can be done with
```
kubectl get pods -n <your namespace>
```
Once you have your pod name, we can now port forward the local ports to the container for communication.
Run
```
kubectl port-forward <running XGL pod> 12345:12345 12346:12346 -n <your namespace>
```
We use these ports by default
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[contributors-shield]: https://img.shields.io/github/contributors/MatthewDZane/XArmFollowTarget.svg?style=for-the-badge
[contributors-url]: https://gitlab.nrp-nautilus.io/MatthewZane/XArmFollowTarget/graphs/contributors
[forks-shield]: https://img.shields.io/github/forks/MatthewDZane/XArmFollowTarget.svg?style=for-the-badge
[forks-url]: https://gitlab.nrp-nautilus.io/MatthewZane/XArmFollowTarget/network/members
[stars-shield]: https://img.shields.io/github/stars/MatthewDZane/XArmFollowTarget.svg?style=for-the-badge
[stars-url]: https://gitlab.nrp-nautilus.io/MatthewZane/XArmFollowTarget/stargazers
[issues-shield]: https://img.shields.io/github/issues/MatthewDZane/XArmFollowTarget.svg?style=for-the-badge
[issues-url]: https://gitlab.nrp-nautilus.io/MatthewZane/XArmFollowTarget/issues
[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://linkedin.com/in/matthewdzane
| 5,280 |
Markdown
| 44.525862 | 471 | 0.743371 |
MatthewDZane/XArmFollowTarget/scripts/global_variables.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Human-readable title shown for this extension (window title / toolbar menu).
EXTENSION_TITLE = "XArm Follow Target"
# Optional longer description; intentionally left empty for now.
EXTENSION_DESCRIPTION = ""
| 570 |
Python
| 39.785711 | 76 | 0.801754 |
MatthewDZane/XArmFollowTarget/scripts/scenario.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
class ScenarioTemplate:
    """Minimal scenario interface; every hook is a no-op to be overridden.

    Note: subclasses (e.g. ExampleScenario below) may extend these hook
    signatures with additional parameters.
    """
    def __init__(self):
        pass
    def setup_scenario(self):
        # Called once before the scenario starts running.
        pass
    def teardown_scenario(self):
        # Called once when the scenario is stopped or reset.
        pass
    def update_scenario(self):
        # Called repeatedly (per update/physics step) while running.
        pass
import numpy as np
from omni.isaac.core.utils.types import ArticulationAction
"""
This scenario takes in a robot Articulation and makes it move through its joint DOFs.
Additionally, it adds a cuboid prim to the stage that moves in a circle around the robot.
The particular framework under which this scenario operates should not be taken as a direct
recomendation to the user about how to structure their code. In the simple example put together
in this template, this particular structure served to improve code readability and separate
the logic that runs the example from the UI design.
"""
class ExampleScenario(ScenarioTemplate):
    """Moves a prim in a horizontal circle around the robot while driving the
    robot's joints, one at a time, along sinusoidal position/velocity paths
    between their lower and upper limits."""

    def __init__(self):
        self._object = None          # prim that orbits the robot
        self._articulation = None    # robot Articulation being driven
        self._running_scenario = False
        self._time = 0.0  # s
        self._object_radius = 0.5  # m
        self._object_height = 0.5  # m
        self._object_frequency = 0.25  # Hz
        self._joint_index = 0
        self._max_joint_speed = 4  # rad/sec
        self._lower_joint_limits = None
        self._upper_joint_limits = None
        self._joint_time = 0
        self._path_duration = 0
        # Placeholder path functions; replaced by _derive_sinusoid_params().
        self._calculate_position = lambda t, x: 0
        self._calculate_velocity = lambda t, x: 0
    def setup_scenario(self, articulation, object_prim):
        """Capture the robot and orbiting prim, derive the orbit from the prim's
        current pose, and start the first joint's sinusoidal path."""
        self._articulation = articulation
        self._object = object_prim
        # Orbit phase and radius are taken from the prim's initial XY position.
        self._initial_object_position = self._object.get_world_pose()[0]
        self._initial_object_phase = np.arctan2(self._initial_object_position[1], self._initial_object_position[0])
        self._object_radius = np.linalg.norm(self._initial_object_position[:2])
        self._running_scenario = True
        self._joint_index = 0
        self._lower_joint_limits = articulation.dof_properties["lower"]
        self._upper_joint_limits = articulation.dof_properties["upper"]
        # teleport robot to lower joint range
        epsilon = 0.001
        articulation.set_joint_positions(self._lower_joint_limits + epsilon)
        self._derive_sinusoid_params(0)
    def teardown_scenario(self):
        # Reset all state back to the constructor defaults.
        self._time = 0.0
        self._object = None
        self._articulation = None
        self._running_scenario = False
        self._joint_index = 0
        self._lower_joint_limits = None
        self._upper_joint_limits = None
        self._joint_time = 0
        self._path_duration = 0
        self._calculate_position = lambda t, x: 0
        self._calculate_velocity = lambda t, x: 0
    def update_scenario(self, step: float):
        """Advance the scenario by *step* seconds: move the orbiting prim and
        update the active joint's target."""
        if not self._running_scenario:
            return
        self._time += step
        # Circular orbit in the XY plane at the initial height.
        x = self._object_radius * np.cos(self._initial_object_phase + self._time * self._object_frequency * 2 * np.pi)
        y = self._object_radius * np.sin(self._initial_object_phase + self._time * self._object_frequency * 2 * np.pi)
        z = self._initial_object_position[2]
        self._object.set_world_pose(np.array([x, y, z]))
        self._update_sinusoidal_joint_path(step)
    def _derive_sinusoid_params(self, joint_index: int):
        # Derive the parameters of the joint target sinusoids for joint {joint_index}
        start_position = self._lower_joint_limits[joint_index]
        P_max = self._upper_joint_limits[joint_index] - start_position
        V_max = self._max_joint_speed
        T = P_max * np.pi / V_max
        # T is the expected time of the joint path
        self._path_duration = T
        # Raised-cosine position profile from lower to upper limit, with the
        # matching sine velocity profile peaking at V_max mid-path.
        self._calculate_position = (
            lambda time, path_duration: start_position
            + -P_max / 2 * np.cos(time * 2 * np.pi / path_duration)
            + P_max / 2
        )
        self._calculate_velocity = lambda time, path_duration: V_max * np.sin(2 * np.pi * time / path_duration)
    def _update_sinusoidal_joint_path(self, step):
        # Update the target for the robot joints
        self._joint_time += step
        # When the current joint's path completes, move on to the next DOF.
        if self._joint_time > self._path_duration:
            self._joint_time = 0
            self._joint_index = (self._joint_index + 1) % self._articulation.num_dof
            self._derive_sinusoid_params(self._joint_index)
        joint_position_target = self._calculate_position(self._joint_time, self._path_duration)
        joint_velocity_target = self._calculate_velocity(self._joint_time, self._path_duration)
        action = ArticulationAction(
            np.array([joint_position_target]),
            np.array([joint_velocity_target]),
            joint_indices=np.array([self._joint_index]),
        )
        self._articulation.apply_action(action)
| 5,251 |
Python
| 34.486486 | 118 | 0.645972 |
MatthewDZane/XArmFollowTarget/scripts/extension.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import weakref
import asyncio
import gc
import omni
import omni.ui as ui
import omni.usd
import omni.timeline
import omni.kit.commands
from omni.kit.menu.utils import add_menu_items, remove_menu_items
from omni.isaac.ui.menu import make_menu_item_description
from omni.usd import StageEventType
import omni.physx as _physx
from .global_variables import EXTENSION_TITLE, EXTENSION_DESCRIPTION
from .ui_builder import UIBuilder
"""
This file serves as a basic template for the standard boilerplate operations
that make a UI-based extension appear on the toolbar.
This implementation is meant to cover most use-cases without modification.
Various callbacks are hooked up to a seperate class UIBuilder in .ui_builder.py
Most users will be able to make their desired UI extension by interacting solely with
UIBuilder.
This class sets up standard useful callback functions in UIBuilder:
on_menu_callback: Called when extension is opened
on_timeline_event: Called when timeline is stopped, paused, or played
on_physics_step: Called on every physics step
on_stage_event: Called when stage is opened or closed
cleanup: Called when resources such as physics subscriptions should be cleaned up
build_ui: User function that creates the UI they want.
"""
class Extension(omni.ext.IExt):
    """Toolbar-menu extension boilerplate: owns the window, forwards stage /
    timeline / physics events to a UIBuilder instance (see module docstring)."""

    def on_startup(self, ext_id: str):
        """Initialize extension and UI elements"""
        # Events
        self._usd_context = omni.usd.get_context()
        # Build Window
        self._window = ui.Window(
            title=EXTENSION_TITLE, width=600, height=500, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
        )
        self._window.set_visibility_changed_fn(self._on_window)
        # UI
        self._models = {}
        self._ext_id = ext_id
        # weakref.proxy keeps the menu lambda from holding the extension alive.
        self._menu_items = [
            make_menu_item_description(ext_id, EXTENSION_TITLE, lambda a=weakref.proxy(self): a._menu_callback())
        ]
        add_menu_items(self._menu_items, EXTENSION_TITLE)
        # Filled in with User Functions
        self.ui_builder = UIBuilder()
        # Events
        self._usd_context = omni.usd.get_context()
        self._physxIFace = _physx.acquire_physx_interface()
        self._physx_subscription = None
        self._stage_event_sub = None
        self._timeline = omni.timeline.get_timeline_interface()
    def on_shutdown(self):
        # Drop UI state, menu entries, and let the UIBuilder release callbacks.
        self._models = {}
        remove_menu_items(self._menu_items, EXTENSION_TITLE)
        if self._window:
            self._window = None
        self.ui_builder.cleanup()
        gc.collect()
    def _on_window(self, visible):
        # Subscriptions only live while the window is visible; dropping the
        # subscription objects is what unsubscribes them.
        if self._window.visible:
            # Subscribe to Stage and Timeline Events
            self._usd_context = omni.usd.get_context()
            events = self._usd_context.get_stage_event_stream()
            self._stage_event_sub = events.create_subscription_to_pop(self._on_stage_event)
            stream = self._timeline.get_timeline_event_stream()
            self._timeline_event_sub = stream.create_subscription_to_pop(self._on_timeline_event)
            self._build_ui()
        else:
            self._usd_context = None
            self._stage_event_sub = None
            self._timeline_event_sub = None
            self.ui_builder.cleanup()
    def _build_ui(self):
        # Populate the window, then asynchronously dock it next to the Viewport.
        with self._window.frame:
            with ui.VStack(spacing=5, height=0):
                self._build_extension_ui()
        async def dock_window():
            await omni.kit.app.get_app().next_update_async()
            def dock(space, name, location, pos=0.5):
                window = omni.ui.Workspace.get_window(name)
                if window and space:
                    window.dock_in(space, location, pos)
                return window
            tgt = ui.Workspace.get_window("Viewport")
            dock(tgt, EXTENSION_TITLE, omni.ui.DockPosition.LEFT, 0.33)
            await omni.kit.app.get_app().next_update_async()
        self._task = asyncio.ensure_future(dock_window())
    #################################################################
    # Functions below this point call user functions
    #################################################################
    def _menu_callback(self):
        # Toolbar menu click toggles window visibility.
        self._window.visible = not self._window.visible
        self.ui_builder.on_menu_callback()
    def _on_timeline_event(self, event):
        # Physics-step subscription only exists while the timeline is playing.
        if event.type == int(omni.timeline.TimelineEventType.PLAY):
            if not self._physx_subscription:
                self._physx_subscription = self._physxIFace.subscribe_physics_step_events(self._on_physics_step)
        elif event.type == int(omni.timeline.TimelineEventType.STOP):
            self._physx_subscription = None
        self.ui_builder.on_timeline_event(event)
    def _on_physics_step(self, step):
        self.ui_builder.on_physics_step(step)
    def _on_stage_event(self, event):
        if event.type == int(StageEventType.OPENED) or event.type == int(StageEventType.CLOSED):
            # stage was opened or closed, cleanup
            self._physx_subscription = None
            self.ui_builder.cleanup()
        self.ui_builder.on_stage_event(event)
    def _build_extension_ui(self):
        # Call user function for building UI
        self.ui_builder.build_ui()
| 5,777 |
Python
| 36.764706 | 117 | 0.647395 |
MatthewDZane/XArmFollowTarget/scripts/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
MatthewDZane/XArmFollowTarget/scripts/ui_builder.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import omni.ui as ui
import omni.timeline
from omni.isaac.core.world import World
from omni.isaac.ui.ui_utils import get_style, btn_builder, state_btn_builder, cb_builder, float_builder
from omni.usd import StageEventType
from .XArm.xarm_sample import XArmSample
import asyncio
class UIBuilder:
def __init__(self):
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper instance
self.wrapped_ui_elements = []
self._buttons = None
self._task_ui_elements = None
# Get access to the timeline to control stop/pause/play programmatically
self._timeline = omni.timeline.get_timeline_interface()
self._sample = XArmSample()
###################################################################################
# The Functions Below Are Called Automatically By extension.py
###################################################################################
def on_menu_callback(self):
"""Callback for when the UI is opened from the toolbar.
This is called directly after build_ui().
"""
pass
def on_timeline_event(self, event):
"""Callback for Timeline events (Play, Pause, Stop)
Args:
event (omni.timeline.TimelineEventType): Event Type
"""
pass
def on_physics_step(self, step: float):
"""Callback for Physics Step.
Physics steps only occur when the timeline is playing
Args:
step (float): Size of physics step
"""
pass
def cleanup(self):
"""
Called when the stage is closed or the extension is hot reloaded.
Perform any necessary cleanup such as removing active callback functions
Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called
"""
if self._sample._world is not None:
self._sample._world_cleanup()
if self._buttons is not None:
self._enable_all_buttons(False)
self._buttons["Load XArm5"].enabled = True
self._buttons["Load XArm7"].enabled = True
self.shutdown_cleanup()
return
def build_ui(self):
"""
Build a custom UI tool to run your extension.
This function will be called any time the UI window is closed and reopened.
"""
self._buttons = {}
self._task_ui_elements = {}
world_controls_frame = ui.CollapsableFrame(title="World Controls", collapsed=False)
with world_controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
dict = {
"label": "Load XArm5",
"type": "button",
"text": "Load",
"tooltip": "Load XArm5 and Task",
"on_clicked_fn": self._on_load_xarm5,
}
self._buttons["Load XArm5"] = btn_builder(**dict)
self._buttons["Load XArm5"].enabled = True
dict = {
"label": "Load XArm7",
"type": "button",
"text": "Load",
"tooltip": "Load XArm7 and Task",
"on_clicked_fn": self._on_load_xarm7,
}
self._buttons["Load XArm7"] = btn_builder(**dict)
self._buttons["Load XArm7"].enabled = True
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._buttons["Reset"] = btn_builder(**dict)
self._buttons["Reset"].enabled = False
task_controls_frame = ui.CollapsableFrame(title="Task Controls", collapsed=False)
with task_controls_frame:
with ui.VStack(spacing=5):
task_controls_frame.visible = True
dict = {
"label": "Follow Target",
"type": "button",
"a_text": "START",
"b_text": "STOP",
"tooltip": "Follow Target",
"on_clicked_fn": self._on_follow_target_button_event,
}
self._task_ui_elements["Follow Target"] = state_btn_builder(**dict)
self._task_ui_elements["Follow Target"].enabled = False
dict = {
"label": "Add Obstacle",
"type": "button",
"text": "ADD",
"tooltip": "Add Obstacle",
"on_clicked_fn": self._on_add_obstacle_button_event,
}
self._task_ui_elements["Add Obstacle"] = btn_builder(**dict)
self._task_ui_elements["Add Obstacle"].enabled = False
dict = {
"label": "Remove Obstacle",
"type": "button",
"text": "REMOVE",
"tooltip": "Remove Obstacle",
"on_clicked_fn": self._on_remove_obstacle_button_event,
}
self._task_ui_elements["Remove Obstacle"] = btn_builder(**dict)
self._task_ui_elements["Remove Obstacle"].enabled = False
dict = {
"label": "Random Target Enabled",
"type": "checkbox",
"default_val": True,
"tooltip": "Random Target Enabled",
"on_clicked_fn": self._on_random_target_enabled_event
}
self._task_ui_elements["Random Target Checkbox"] = cb_builder(**dict)
args = {
"label": "Min Face Range",
"default_val": .3,
"tooltip": "Min Range in Meters",
}
self._task_ui_elements["Min Face Range"] = float_builder(**args)
self._task_ui_elements["Min Face Range"].add_value_changed_fn(self._on_min_face_range_changed_event)
args = {
"label": "Max Face Range",
"default_val": 10,
"tooltip": "Max Range in Meters",
}
self._task_ui_elements["Max Face Range"] = float_builder(**args)
self._task_ui_elements["Max Face Range"].add_value_changed_fn(self._on_max_face_range_changed_event)
def _on_load_xarm5(self):
self._sample.set_xarm_version(5)
self._on_random_target_enabled_event(False)
self._on_min_face_range_changed_event(self._task_ui_elements["Min Face Range"].get_value_as_float())
self._on_max_face_range_changed_event(self._task_ui_elements["Max Face Range"].get_value_as_float())
async def _on_load_world_async():
await self._sample.load_world_async()
await omni.kit.app.get_app().next_update_async()
self._sample._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(True)
self._buttons["Load XArm5"].enabled = False
self._buttons["Load XArm7"].enabled = False
self.post_load_button_event()
self._sample._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
asyncio.ensure_future(_on_load_world_async())
return
def _on_load_xarm7(self):
    """Configure the sample for XArm7, sync UI-driven settings, and load the world asynchronously."""
    self._sample.set_xarm_version(7)
    # Push current widget values into the sample before the world is built
    # (these callbacks ignore their argument and read the widgets directly).
    self._on_random_target_enabled_event(False)
    self._on_min_face_range_changed_event(self._task_ui_elements["Min Face Range"].get_value_as_float())
    self._on_max_face_range_changed_event(self._task_ui_elements["Max Face Range"].get_value_as_float())

    async def _on_load_world_async():
        await self._sample.load_world_async()
        await omni.kit.app.get_app().next_update_async()
        self._sample._world.add_stage_callback("stage_event_1", self.on_stage_event)
        self._enable_all_buttons(True)
        # Once a robot is loaded, both load buttons are disabled.
        self._buttons["Load XArm7"].enabled = False
        self._buttons["Load XArm5"].enabled = False
        self.post_load_button_event()
        self._sample._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)

    asyncio.ensure_future(_on_load_world_async())
    return
def _on_reset(self):
    """Asynchronously reset the sample world, then restore the task buttons to their post-reset state."""
    async def _reset_then_refresh_ui():
        await self._sample.reset_async()
        await omni.kit.app.get_app().next_update_async()
        self.post_reset_button_event()

    asyncio.ensure_future(_reset_then_refresh_ui())
def _on_follow_target_button_event(self, val):
    """Toggle the follow-target behavior on the sample (val is the button/checkbox state).

    NOTE(review): this method is defined twice in this class with an identical body;
    the later definition overrides this one, so this copy is dead code.
    """
    asyncio.ensure_future(self._sample._on_follow_target_event_async(val))
    return
def _on_add_obstacle_button_event(self):
self._sample._on_add_obstacle_event()
self._task_ui_elements["Remove Obstacle"].enabled = True
return
def _on_remove_obstacle_button_event(self):
self._sample._on_remove_obstacle_event()
world = self._sample.get_world()
current_task = list(world.get_current_tasks().values())[0]
if not current_task.obstacles_exist():
self._task_ui_elements["Remove Obstacle"].enabled = False
return
def _on_random_target_enabled_event(self, val):
    # `val` (the widget callback payload) is ignored; the authoritative state is
    # read back from the checkbox widget itself.
    self._sample.rand_target_enabled = self._task_ui_elements["Random Target Checkbox"].get_value_as_bool()
def _on_min_face_range_changed_event(self, val):
    # `val` is ignored; the value is read back from the float field widget.
    self._sample.min_detection_range = self._task_ui_elements["Min Face Range"].get_value_as_float()
def _on_max_face_range_changed_event(self, val):
    # `val` is ignored; the value is read back from the float field widget.
    self._sample.max_detection_range = self._task_ui_elements["Max Face Range"].get_value_as_float()
def _enable_all_buttons(self, flag):
    """Set the enabled state of every omni.ui Button registered in self._buttons to `flag`."""
    button_widgets = (
        widget for widget in self._buttons.values()
        if isinstance(widget, omni.ui._ui.Button)
    )
    for button in button_widgets:
        button.enabled = flag
def _on_follow_target_button_event(self, val):
    """Toggle the follow-target behavior on the sample (val is the button/checkbox state).

    NOTE(review): duplicate of the identically-named method defined earlier in this
    class; this later definition is the one that takes effect.
    """
    asyncio.ensure_future(self._sample._on_follow_target_event_async(val))
    return
def _on_save_data_button_event(self):
    # Save logged data to the directory currently entered in the "Output Directory" field.
    # NOTE(review): no "Output Directory" element is created in the visible builder code —
    # confirm it is built elsewhere before wiring this button up.
    self._sample._on_save_data_event(self._task_ui_elements["Output Directory"].get_value_as_string())
    return
def post_reset_button_event(self):
    """Restore task buttons after a world reset: follow/add re-enabled, removal disabled."""
    ui = self._task_ui_elements
    follow_button = ui["Follow Target"]
    follow_button.enabled = True
    ui["Remove Obstacle"].enabled = False
    ui["Add Obstacle"].enabled = True
    if follow_button.text == "STOP":
        follow_button.text = "START"
def post_load_button_event(self):
    """Enable the task buttons that become usable once the world has loaded."""
    for element_name in ("Follow Target", "Add Obstacle"):
        self._task_ui_elements[element_name].enabled = True
def post_clear_button_event(self):
    """Disable all task buttons and reset the follow-target label after the scene is cleared."""
    ui = self._task_ui_elements
    for element_name in ("Follow Target", "Remove Obstacle", "Add Obstacle"):
        ui[element_name].enabled = False
    follow_button = ui["Follow Target"]
    if follow_button.text == "STOP":
        follow_button.text = "START"
def shutdown_cleanup(self):
    # No extension-specific resources need releasing at shutdown.
    return
def on_stage_event(self, event):
    """React to USD stage open/close events by resetting the extension UI state.

    Args:
        event: stage event whose ``type`` is compared against StageEventType values.
    """
    if event.type == int(StageEventType.OPENED):
        # If the user opens a new stage, the extension should completely reset
        self._task_ui_elements["Follow Target"].enabled = False
        self._task_ui_elements["Remove Obstacle"].enabled = False
        self._task_ui_elements["Add Obstacle"].enabled = False
        if self._task_ui_elements["Follow Target"].text == "STOP":
            self._task_ui_elements["Follow Target"].text = "START"
    elif event.type == int(omni.usd.StageEventType.CLOSED):
        # Stage closed: tear down the sample's world and return to the pre-load UI state.
        if World.instance() is not None:
            self._sample._world_cleanup()
            self._sample._world.clear_instance()
            if hasattr(self, "_buttons"):
                if self._buttons is not None:
                    self._enable_all_buttons(False)
                    # Only the two load buttons remain usable until a world is loaded again.
                    self._buttons["Load XArm5"].enabled = True
                    self._buttons["Load XArm7"].enabled = True
    return
def _reset_on_stop_event(self, e):
    """Timeline callback: when playback stops, re-enable Reset and drop buttons to the cleared state."""
    if e.type == int(omni.timeline.TimelineEventType.STOP):
        #self._buttons["Load XArm5"].enabled = False
        #self._buttons["Load XArm7"].enabled = False
        self._buttons["Reset"].enabled = True
        self.post_clear_button_event()
    return
| 13,492 |
Python
| 40.516923 | 116 | 0.561073 |
MatthewDZane/XArmFollowTarget/scripts/README.md
|
# Loading Extension
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
The user will see the extension appear on the toolbar on startup with the title they specified in the Extension Generator
# Extension Usage
This template extension creates a basic tool for interacting with a robot Articulation
through the UI by changing its joint position targets. This format is generally useful
for building UI tools to help with robot configuration or inspection.
To use the template as written, the user must load a robot Articulation onto the stage,
and press the PLAY button on the left-hand toolbar. Then, in the extension UI, they can select their
Articulation from a drop-down menu and start controlling the robot joint position targets.
The extension only functions while the timeline is running because robot Articulation objects only
function while the timeline is playing (physics does not run while the timeline is stopped).
# Template Code Overview
The template is well documented and is meant to be self-explanatory to the user should they
start reading the provided python files. A short overview is also provided here:
global_variables.py:
A script that stores in global variables that the user specified when creating this extension such as the Title and Description.
extension.py:
A class containing the standard boilerplate necessary to have the user extension show up on the Toolbar. This
class is meant to fulfill most use-cases without modification.
In extension.py, useful standard callback functions are created that the user may complete in ui_builder.py.
ui_builder.py:
This file is the user's main entrypoint into the template. Here, the user can see useful callback functions that have been
set up for them, and they may also create UI buttons that are hooked up to more user-defined callback functions. This file is
the most thoroughly documented, and the user should read through it before making serious modification.
| 2,057 |
Markdown
| 61.363635 | 132 | 0.79825 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm_socket.py
|
import ast
import socket
import threading
import carb
class XArmSocket():
    """Socket pair used to exchange data with an XArm client.

    A TX socket (port 12345) streams joint positions out to the connected client,
    and an RX socket (port 12346) receives face-tracking messages. Each listener
    runs in its own background daemon thread.
    """

    def __init__(self) -> None:
        # sending position data to arm
        self.txsocket = None
        self.txconn = None

        # tracking info
        self.rxsocket = None
        self.rxconn = None
        self.cam_to_nose = None     # camera-to-nose translation parsed from the client
        self.face_direction = None  # face direction vector parsed from the client
        self.dx = None              # weighted horizontal face offset
        self.dy = None              # weighted vertical face offset

        # threads
        self.txsocket_thread = None
        self.rxsocket_thread = None

    def start_txsocket(self):
        """Start the TX listener thread (daemon so a blocking accept() cannot prevent exit)."""
        self.txsocket_thread = threading.Thread(target=self.setup_txsocket, daemon=True)
        self.txsocket_thread.start()

    def start_rxsocket(self):
        """Start the RX listener thread (daemon so a blocking accept() cannot prevent exit)."""
        self.rxsocket_thread = threading.Thread(target=self.setup_rxsocket, daemon=True)
        self.rxsocket_thread.start()

    def setup_txsocket(self):
        """Listen on the TX port and (re)accept client connections forever."""
        if self.txsocket is None:
            self.txsocket = socket.socket()
        # allow socket to reuse address
        self.txsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        txport = 12345
        self.txsocket.bind(('', txport))
        # https://docs.python.org/3/library/socket.html#socket.socket.listen
        self.txsocket.listen(5)  # number of unaccepted connections allow (backlog)
        while True:
            self.txconn, self.txaddr = self.txsocket.accept()
            print("accepted tx connection from:", str(self.txaddr[0]), ":", str(self.txaddr[1]))

    def setup_rxsocket(self):
        """Listen on the RX port; accept a client and parse tracking messages until it disconnects.

        Each message is a Python literal tuple of eight numbers:
        (cam_to_nose x, y, z, face_direction x, y, z, dx, dy).
        """
        if self.rxsocket is None:
            self.rxsocket = socket.socket()
        # allow socket to reuse address
        self.rxsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        rxport = 12346
        self.rxsocket.bind(('', rxport))
        # https://docs.python.org/3/library/socket.html#socket.socket.listen
        self.rxsocket.listen(5)  # number of unaccepted connections allow (backlog)
        while True:
            self.rxconn, self.rxaddr = self.rxsocket.accept()
            print("accepted rx connection from:", str(self.rxaddr[0]), ":", str(self.rxaddr[1]))
            while True:
                data = self.rxconn.recv(1024)
                if not data:
                    # recv() returning b"" means the peer closed the connection.
                    # Clear stale tracking state and go back to accept() instead of
                    # busy-spinning on a dead socket (the original looped here forever).
                    self.cam_to_nose = None
                    self.face_direction = None
                    self.dx = None
                    self.dy = None
                    break
                message = data.decode()
                # carb.log_error("received:" + str(type(message)) + message)
                cam_to_nose = [0, 0, 0]
                face_direction = [0, 0, 0]
                try:
                    cam_to_nose[0], cam_to_nose[1], cam_to_nose[2], face_direction[0], face_direction[1], face_direction[2], dx, dy = ast.literal_eval(message)
                except ValueError:
                    # Malformed or partial message: invalidate the current tracking state.
                    self.cam_to_nose = None
                    self.face_direction = None
                    self.dx = None
                    self.dy = None
                else:
                    # print("received:", x, y, z, dx, dy, dist)
                    # Dampen the raw pixel offsets before the consumer applies them.
                    weight = 0.1
                    self.cam_to_nose = cam_to_nose
                    self.face_direction = face_direction
                    self.dx = weight * dx
                    self.dy = weight * dy

    def shut_down_socket(self):
        """Best-effort shutdown of both listener sockets (errors are intentionally ignored)."""
        if self.txconn:
            try:
                # self._conn.send("Done".encode())
                self.txsocket.shutdown(socket.SHUT_RDWR)
                self.txsocket.close()
                self.txsocket = None
            except socket.error:
                pass
        if self.rxconn:
            try:
                # self._conn.send("Done".encode())
                self.rxsocket.shutdown(socket.SHUT_RDWR)
                self.rxsocket.close()
                self.rxsocket = None
            except socket.error:
                pass
| 3,939 |
Python
| 35.82243 | 163 | 0.513074 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm_rmpflow_controller.py
|
import omni.isaac.motion_generation as mg
from omni.isaac.core.articulations import Articulation
import pathlib
class XArmRMPFlowController(mg.MotionPolicyController):
    """RMPflow motion-policy controller for the XArm5/XArm7.

    Args:
        name (str): controller name.
        robot_articulation (Articulation): articulation to drive.
        physics_dt (float, optional): physics step for the articulation policy. Defaults to 1.0/60.0.
        xarm_version (int, optional): 5 or 7; selects descriptor/URDF/RMPflow config files. Defaults to 7.

    Raises:
        ValueError: if ``xarm_version`` is not 5 or 7.
    """

    def __init__(
        self,
        name: str,
        robot_articulation: Articulation,
        physics_dt: float = 1.0 / 60.0,
        xarm_version: int = 7
    ) -> None:
        # Config files live next to this module; normalize Windows backslashes.
        current_directory = str(pathlib.Path(__file__).parent.resolve()).replace("\\", "/")
        if xarm_version == 5:
            relative_robot_description_path = "/XArm/XArm5/xarm5_descriptor.yaml"
            relative_urdf_path = "/XArm/XArm5/xarm5.urdf"
            relative_rmpflow_config_path = "/XArm/XArm5/xarm5_rmpflow_common.yaml"
            end_effector_frame_name = "link5"
        elif xarm_version == 7:
            relative_robot_description_path = "/XArm/XArm7/xarm7_descriptor.yaml"
            relative_urdf_path = "/XArm/XArm7/xarm7.urdf"
            relative_rmpflow_config_path = "/XArm/XArm7/xarm7_rmpflow_common.yaml"
            end_effector_frame_name = "link7"
        else:
            # Previously an unsupported version fell through to a NameError below; fail fast instead.
            raise ValueError(f"Unsupported xarm_version: {xarm_version} (expected 5 or 7)")

        self.rmp_flow = mg.lula.motion_policies.RmpFlow(
            robot_description_path=current_directory + relative_robot_description_path,
            urdf_path=current_directory + relative_urdf_path,
            rmpflow_config_path=current_directory + relative_rmpflow_config_path,
            end_effector_frame_name=end_effector_frame_name,
            maximum_substep_size=0.0334,
            ignore_robot_state_updates=False,
        )
        self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmp_flow, physics_dt)
        mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp)

        # Remember the robot's base pose so reset() can re-anchor the policy to it.
        self._default_position, self._default_orientation = (
            self._articulation_motion_policy._robot_articulation.get_world_pose()
        )
        self._motion_policy.set_robot_base_pose(
            robot_position=self._default_position, robot_orientation=self._default_orientation
        )
        return

    def reset(self):
        """Reset the motion policy and re-apply the remembered robot base pose."""
        mg.MotionPolicyController.reset(self)
        self._motion_policy.set_robot_base_pose(
            robot_position=self._default_position, robot_orientation=self._default_orientation
        )
| 2,559 |
Python
| 40.967212 | 109 | 0.638531 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm5_follow_target_with_standalone.py
|
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.examples")
from omni.isaac.core import World
from XArm.xarm_follow_target import XArmFollowTarget
from XArm.xarm_rmpflow_controller import XArmRMPFlowController
from XArm.xarm_socket import XArmSocket
import numpy as np
import time
def get_new_target_orientation(position):
    """Return a w-first quaternion rotating the -Z axis to point from `position` toward the origin."""
    to_origin = np.array([0, 0, 0]) - position
    to_origin = to_origin / np.linalg.norm(to_origin)

    reference = np.array([0, 0, -1])
    axis = np.cross(reference, to_origin)
    axis = axis / np.linalg.norm(axis)
    angle = np.arccos(np.dot(reference, to_origin))

    # Axis-angle -> quaternion (w, x, y, z).
    half = 0.5 * angle
    w = np.cos(half)
    xyz = np.sin(half) * axis
    return np.array([w, xyz[0], xyz[1], xyz[2]])
def main():
    """Standalone XArm5 follow-target demo.

    Builds a world with the follow-target task, drives the arm with RMPflow,
    streams joint positions to a TX socket client, and moves the target cube
    either from face-tracking offsets received over the RX socket or to random
    reachable poses when no face has been seen recently.
    """
    xarm_version = 5
    world = World(stage_units_in_meters=1.0)
    xarm_task = XArmFollowTarget(xarm_version=xarm_version)
    world.add_task(xarm_task)
    world.reset()
    task_params = world.get_task("xarm_follow_target_task").get_params()
    xarm_name = task_params["robot_name"]["value"]
    target_name = task_params["target_name"]["value"]
    xarm = world.scene.get_object(xarm_name)
    cube = world.scene.get_object(target_name)
    xarm_controller = XArmRMPFlowController(
        name="target_follower_controller",
        robot_articulation=xarm,
        xarm_version=xarm_version
    )
    articulation_controller = xarm.get_articulation_controller()

    xarm_socket = XArmSocket()
    xarm_socket.start_txsocket()
    xarm_socket.start_rxsocket()

    # Reachable-workspace clamp for the target cube (meters).
    max_range = 0.6
    min_range = 0.4
    min_height = 0.2

    # Timers controlling when to fall back to a random target.
    last_face_seen_timeout = 1
    last_face_seen_time = 0
    last_rand_target_timeout = 5
    last_rand_target_time = 0

    # Let the sim settle for `wait_time` seconds before entering the control loop.
    start_time = time.time()
    wait_time = 1
    while simulation_app.is_running() and time.time() < start_time + wait_time:
        world.step(render=True)
        if world.is_playing():
            if world.current_time_step_index == 0:
                world.reset()
                xarm_controller.reset()

    while simulation_app.is_running():
        world.step(render=True)
        if world.is_playing():
            observations = world.get_observations()
            actions = xarm_controller.forward(
                target_end_effector_position=observations[task_params["target_name"]["value"]]["position"],
                target_end_effector_orientation=observations[task_params["target_name"]["value"]]["orientation"],
            )
            # One gain per joint: np.ones(5) matches the XArm5's joint count.
            gains = 1e15*np.ones(5), 1e14*np.ones(5)
            articulation_controller.set_gains(gains[0], gains[1]) # Solution from Nvidia Live Session 1:23:00
            articulation_controller.apply_action(actions)

            if xarm_socket.txconn:
                try:
                    sendData = str(xarm.get_joint_positions().tolist())
                    res = xarm_socket.txconn.send(sendData.encode())
                    if res == 0:
                        print("channel is closed...")
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt — consider `except Exception`
                    # if sending failed, recreate the socket and reconnect
                    print("sending failed... closing connection")
                    xarm_socket.txconn.close()
                    xarm_socket.txconn = None

            current_time = time.time()
            if xarm_socket.dx and xarm_socket.dy:
                # update position of target from camera feed
                cube = world.scene.get_object("target")
                pos, _ = cube.get_world_pose()
                newpose = [pos[0], pos[1] + xarm_socket.dx, pos[2] + xarm_socket.dy]
                # Clamp the new pose into the reachable shell [min_range, max_range].
                range = np.linalg.norm(newpose)  # NOTE(review): shadows the builtin `range`
                if range < min_range:
                    newpose = newpose / np.linalg.norm(newpose) * min_range
                elif range > max_range:
                    newpose = newpose / np.linalg.norm(newpose) * max_range
                newpose = [newpose[0], newpose[1], max(newpose[2], min_height)]
                updated_quaternion = get_new_target_orientation(newpose)
                print("pose", pos, "->", newpose, end="")
                cube.set_world_pose(np.array(newpose), updated_quaternion)
                print("set.")
                # Consume the offsets so they are not re-applied next iteration.
                xarm_socket.dx = None
                xarm_socket.dy = None
                last_face_seen_time = current_time
            elif ( \
                xarm_task.task_achieved or \
                current_time > last_rand_target_time + last_rand_target_timeout \
            ) and current_time > last_face_seen_time + last_face_seen_timeout:
                # set random location
                cube = world.scene.get_object("target")
                randpos = [
                    np.random.uniform(-1, 1),
                    np.random.uniform(-1, 1),
                    np.random.uniform(0, 1)
                ]
                range = np.linalg.norm(randpos)
                if range < min_range:
                    randpos = randpos / np.linalg.norm(randpos) * min_range
                elif range > max_range:
                    randpos = randpos / np.linalg.norm(randpos) * max_range
                randpos = [randpos[0], randpos[1], max(randpos[2], min_height)]
                updated_quaternion = get_new_target_orientation(randpos)
                print("Setting new target pos:"+str(randpos))
                cube.set_world_pose(np.array(randpos), updated_quaternion)
                last_rand_target_time = time.time()

    xarm_socket.shut_down_socket()
    simulation_app.close()
if __name__ == '__main__':
main()
| 6,120 |
Python
| 35.218935 | 113 | 0.577124 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm_sample.py
|
from omni.isaac.examples.base_sample import BaseSample
from .xarm_follow_target import XArmFollowTarget
from .xarm_rmpflow_controller import XArmRMPFlowController
from .xarm_socket import XArmSocket
import numpy as np
import time
import carb
import omni.kit.pipapi
omni.kit.pipapi.install("pyquaternion")
from pyquaternion import Quaternion
class XArmSample(BaseSample):
    """Interactive follow-target sample.

    Drives an XArm with RMPflow toward a target cube that is steered either by
    face-tracking data received over sockets or by random reachable poses.
    """

    def __init__(self) -> None:
        super().__init__()
        self._controller = None
        self._articulation_controller = None
        self._xarm_version = None

        # sending position data to arm
        self.xarm_socket = XArmSocket()

        # Reachable-workspace clamp for the target cube; populated by set_xarm_version().
        self._max_range = None
        self._min_range = None
        self._min_height = None

        # Random targets resume only after no face has been seen for this many seconds.
        self._last_face_seen_timeout = 1
        self._last_face_seen_time = 0

        self.rand_target_enabled = True
        self._last_rand_target_timeout = 5
        self._last_rand_target_time = 0

        # Trusted face-distance window (meters); set from the extension UI.
        self.min_detection_range = None
        self.max_detection_range = None

    def set_xarm_version(self, xarm_version):
        """Select the arm model (5 or 7) and set its reachable-workspace limits."""
        self._xarm_version = xarm_version
        # Both versions currently use the same workspace envelope.
        if self._xarm_version == 5:
            self._max_range = 0.7
            self._min_range = 0.3
            self._min_height = 0.1
        elif self._xarm_version == 7:
            self._max_range = 0.7
            self._min_range = 0.3
            self._min_height = 0.1

    def setup_scene(self):
        """Register the follow-target task for the configured arm version."""
        world = self.get_world()
        world.add_task(XArmFollowTarget(self._xarm_version))
        return

    async def setup_pre_reset(self):
        """Detach the per-step callback and reset the RMPflow controller before a world reset."""
        world = self.get_world()
        if world.physics_callback_exists("sim_step"):
            world.remove_physics_callback("sim_step")
        self._controller.reset()
        return

    def world_cleanup(self):
        """Drop the controller and close the tracking sockets when the world is torn down."""
        self._controller = None
        self.xarm_socket.shut_down_socket()
        return

    async def setup_post_load(self):
        """Cache task/robot handles, build the RMPflow controller, and start the sockets."""
        self._xarm_task = list(self._world.get_current_tasks().values())[0]
        self._task_params = self._xarm_task.get_params()
        self._xarm = self._world.scene.get_object(self._task_params["robot_name"]["value"])
        self._controller = XArmRMPFlowController(
            name="target_follower_controller",
            robot_articulation=self._xarm,
            xarm_version=self._xarm_version
        )
        self._articulation_controller = self._xarm.get_articulation_controller()

        self.xarm_socket.start_txsocket()
        self.xarm_socket.start_rxsocket()
        return

    async def _on_follow_target_event_async(self, val):
        """Attach (val truthy) or detach the per-step follow-target physics callback."""
        world = self.get_world()
        if val:
            await world.play_async()
            world.add_physics_callback("sim_step", self._on_follow_target_simulation_step)
        else:
            world.remove_physics_callback("sim_step")
        return

    def _on_follow_target_simulation_step(self, step_size):
        """Per-physics-step control loop.

        Drives the arm toward the target, streams joint positions to the TX client,
        and moves the target cube from face tracking (when a face is in range) or to
        a random reachable pose (when enabled and no face was seen recently).

        Args:
            step_size: physics step duration (unused here).
        """
        observations = self._world.get_observations()
        actions = self._controller.forward(
            target_end_effector_position=observations[self._task_params["target_name"]["value"]]["position"],
            target_end_effector_orientation=observations[self._task_params["target_name"]["value"]]["orientation"],
        )
        # One gain per joint, so size the gain arrays by arm version (5 or 7 joints).
        self._articulation_controller.set_gains(
            1e15*np.ones(self._xarm_version),
            1e14*np.ones(self._xarm_version)
        ) # Solution from Nvidia Live Session 1:23:00
        self._articulation_controller.apply_action(actions)

        if self.xarm_socket.txconn:
            try:
                sendData = str(self._xarm.get_joint_positions().tolist())
                #print("joints:", sendData)
                res = self.xarm_socket.txconn.send(sendData.encode())
                if res == 0:
                    print("channel is closed...")
            except Exception as e:
                print(e)
                # if sending failed, recreate the socket and reconnect
                print("sending failed... closing connection")
                self.xarm_socket.txconn.close()
                self.xarm_socket.txconn = None

        face_in_range = False
        if self.xarm_socket.cam_to_nose and self.xarm_socket.face_direction:
            cam_position, cam_orientation = self._xarm.end_effector.get_world_pose()
            nose_distance_from_camera = np.linalg.norm(self.xarm_socket.cam_to_nose)
            # NOTE(review): debug output emitted at error severity — consider carb.log_info.
            carb.log_error(str(self.min_detection_range) + " " + str(self.max_detection_range) + " " + str(nose_distance_from_camera))
            face_in_range = nose_distance_from_camera >= self.min_detection_range and nose_distance_from_camera <= self.max_detection_range

        current_time = time.time()
        if face_in_range:
            # NOTE(review): debug output emitted at error severity — consider carb.log_info.
            carb.log_error("here")
            # Transform the camera-frame nose position/direction into the world frame.
            cam_orientation = Quaternion(cam_orientation)
            nose_position = cam_orientation.rotate(self.xarm_socket.cam_to_nose) + cam_position
            nose_direction = cam_orientation.rotate(self.xarm_socket.face_direction)
            nose_direction /= np.linalg.norm(nose_direction)

            # update position of target from camera feed
            cube = self._world.scene.get_object("target")

            # NOTE(review): face_in_range already guarantees min <= distance <= max,
            # so only the else branch below can execute — confirm this is intended.
            if nose_distance_from_camera < self.min_detection_range:
                newpose = nose_position + nose_direction * self.min_detection_range
            elif nose_distance_from_camera > self.max_detection_range:
                newpose = nose_position + nose_direction * self.max_detection_range
            else:
                newpose = nose_position + nose_direction * nose_distance_from_camera

            # Clamp into the reachable shell [_min_range, _max_range] and above _min_height.
            range = np.linalg.norm(newpose)  # NOTE(review): shadows the builtin `range`
            if range < self._min_range:
                newpose = newpose / np.linalg.norm(newpose) * self._min_range
            elif range > self._max_range:
                newpose = newpose / np.linalg.norm(newpose) * self._max_range
            newpose = [newpose[0], newpose[1], max(newpose[2], self._min_height)]

            updated_quaternion = self._get_new_target_orientation(newpose)

            cube.set_world_pose(np.array(newpose), updated_quaternion)
            self.xarm_socket.dx = None
            self.xarm_socket.dy = None
            self._last_face_seen_time = current_time
        elif self.rand_target_enabled and ( \
            self._xarm_task.task_achieved or \
            current_time > self._last_rand_target_time + self._last_rand_target_timeout \
        ) and current_time > self._last_face_seen_time + self._last_face_seen_timeout:
            # set random location
            cube = self._world.scene.get_object("target")
            randpos = [
                np.random.uniform(-1, 1),
                np.random.uniform(-1, 1),
                np.random.uniform(0, 1)
            ]
            range = np.linalg.norm(randpos)
            if range < self._min_range:
                randpos = randpos / np.linalg.norm(randpos) * self._min_range
            elif range > self._max_range:
                randpos = randpos / np.linalg.norm(randpos) * self._max_range
            randpos = [randpos[0], randpos[1], max(randpos[2], self._min_height)]
            updated_quaternion = self._get_new_target_orientation(randpos)
            print("Setting new target pos:"+str(randpos))
            cube.set_world_pose(np.array(randpos), updated_quaternion)
            self._last_rand_target_time = time.time()

        # Consume the tracking sample so it is not reused on the next step.
        self.xarm_socket.cam_to_nose=None
        self.xarm_socket.face_direction=None
        return

    def _on_add_obstacle_event(self):
        """Add an obstacle cube to the current task and register it with the controller."""
        world = self.get_world()
        current_task = list(world.get_current_tasks().values())[0]
        cube = current_task.add_obstacle()
        self._controller.add_obstacle(cube)
        return

    def _on_remove_obstacle_event(self):
        """Remove the next obstacle from both the controller and the task."""
        world = self.get_world()
        current_task = list(world.get_current_tasks().values())[0]
        obstacle_to_delete = current_task.get_obstacle_to_delete()
        self._controller.remove_obstacle(obstacle_to_delete)
        current_task.remove_obstacle()
        return

    def _on_logging_event(self, val):
        """Start (val truthy) or pause data logging of joint positions and target pose."""
        world = self.get_world()
        data_logger = world.get_data_logger()
        if not world.get_data_logger().is_started():
            robot_name = self._task_params["robot_name"]["value"]
            target_name = self._task_params["target_name"]["value"]

            def frame_logging_func(tasks, scene):
                # Per-frame record: actual joints, commanded joints, and target position.
                return {
                    "joint_positions": scene.get_object(robot_name).get_joint_positions().tolist(),
                    "applied_joint_positions": scene.get_object(robot_name)
                    .get_applied_action()
                    .joint_positions.tolist(),
                    "target_position": scene.get_object(target_name).get_world_pose()[0].tolist(),
                }

            data_logger.add_data_frame_logging_func(frame_logging_func)
        if val:
            data_logger.start()
        else:
            data_logger.pause()
        return

    def _on_save_data_event(self, log_path):
        """Flush logged data to ``log_path`` and reset the logger."""
        world = self.get_world()
        data_logger = world.get_data_logger()
        data_logger.save(log_path=log_path)
        data_logger.reset()
        return

    def _get_new_target_orientation(self, position):
        """Return a w-first quaternion rotating the -Z axis to point from `position` toward (0, 0, 0.3)."""
        direction_vector = np.array([0, 0, 0.3]) - position
        normalized_direction = direction_vector / np.linalg.norm(direction_vector)
        rotation_axis = np.cross(np.array([0, 0, -1]), normalized_direction)
        rotation_axis /= np.linalg.norm(rotation_axis)
        rotation_angle = np.arccos(np.dot(np.array([0, 0, -1]), normalized_direction))
        # Axis-angle -> quaternion (w, x, y, z).
        half_angle = 0.5 * rotation_angle
        sin_half_angle = np.sin(half_angle)
        cos_half_angle = np.cos(half_angle)
        quaternion = np.array([
            cos_half_angle,
            sin_half_angle * rotation_axis[0],
            sin_half_angle * rotation_axis[1],
            sin_half_angle * rotation_axis[2]
        ])
        return quaternion
| 10,092 |
Python
| 38.89328 | 139 | 0.586504 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm7_follow_target_with_standalone.py
|
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.examples")
from omni.isaac.core import World
from XArm.xarm_follow_target import XArmFollowTarget
from XArm.xarm_rmpflow_controller import XArmRMPFlowController
from XArm.xarm_socket import XArmSocket
import numpy as np
import time
def get_new_target_orientation(position):
    """Return a w-first quaternion rotating the -Z axis to point from `position` toward the origin."""
    aim = np.array([0, 0, 0]) - position
    aim = aim / np.linalg.norm(aim)

    minus_z = np.array([0, 0, -1])
    axis = np.cross(minus_z, aim)
    axis = axis / np.linalg.norm(axis)
    angle = np.arccos(np.dot(minus_z, aim))

    # Axis-angle -> quaternion (w, x, y, z).
    half = 0.5 * angle
    return np.array([
        np.cos(half),
        np.sin(half) * axis[0],
        np.sin(half) * axis[1],
        np.sin(half) * axis[2],
    ])
def main():
    """Standalone XArm7 follow-target demo.

    Builds a world with the follow-target task, drives the arm with RMPflow,
    streams joint positions to a TX socket client, and moves the target cube
    either from face-tracking offsets received over the RX socket or to random
    reachable poses when no face has been seen recently.
    """
    xarm_version = 7
    world = World(stage_units_in_meters=1.0)
    xarm_task = XArmFollowTarget(xarm_version=xarm_version)
    world.add_task(xarm_task)
    world.reset()
    task_params = world.get_task("xarm_follow_target_task").get_params()
    xarm_name = task_params["robot_name"]["value"]
    target_name = task_params["target_name"]["value"]
    xarm = world.scene.get_object(xarm_name)
    cube = world.scene.get_object(target_name)
    xarm_controller = XArmRMPFlowController(
        name="target_follower_controller",
        robot_articulation=xarm,
        xarm_version=xarm_version
    )
    articulation_controller = xarm.get_articulation_controller()

    xarm_socket = XArmSocket()
    xarm_socket.start_txsocket()
    xarm_socket.start_rxsocket()

    # Reachable-workspace clamp for the target cube (meters).
    max_range = 0.7
    min_range = 0.3
    min_height = 0.1

    # Timers controlling when to fall back to a random target.
    last_face_seen_timeout = 1
    last_face_seen_time = 0
    last_rand_target_timeout = 5
    last_rand_target_time = 0

    # Let the sim settle for `wait_time` seconds before entering the control loop.
    start_time = time.time()
    wait_time = 1
    while simulation_app.is_running() and time.time() < start_time + wait_time:
        world.step(render=True)
        if world.is_playing():
            if world.current_time_step_index == 0:
                world.reset()
                xarm_controller.reset()

    while simulation_app.is_running():
        world.step(render=True)
        if world.is_playing():
            observations = world.get_observations()
            actions = xarm_controller.forward(
                target_end_effector_position=observations[task_params["target_name"]["value"]]["position"],
                target_end_effector_orientation=observations[task_params["target_name"]["value"]]["orientation"],
            )
            # One gain per joint; sized by xarm_version (7 joints here).
            gains = 1e15*np.ones(xarm_version), 1e14*np.ones(xarm_version)
            articulation_controller.set_gains(gains[0], gains[1]) # Solution from Nvidia Live Session 1:23:00
            articulation_controller.apply_action(actions)

            if xarm_socket.txconn:
                try:
                    sendData = str(xarm.get_joint_positions().tolist())
                    res = xarm_socket.txconn.send(sendData.encode())
                    if res == 0:
                        print("channel is closed...")
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt — consider `except Exception`
                    # if sending failed, recreate the socket and reconnect
                    print("sending failed... closing connection")
                    xarm_socket.txconn.close()
                    xarm_socket.txconn = None

            current_time = time.time()
            if xarm_socket.dx and xarm_socket.dy:
                # update position of target from camera feed
                cube = world.scene.get_object("target")
                pos, _ = cube.get_world_pose()
                newpose = [pos[0], pos[1] + xarm_socket.dx, pos[2] + xarm_socket.dy]
                # Clamp the new pose into the reachable shell [min_range, max_range].
                range = np.linalg.norm(newpose)  # NOTE(review): shadows the builtin `range`
                if range < min_range:
                    newpose = newpose / np.linalg.norm(newpose) * min_range
                elif range > max_range:
                    newpose = newpose / np.linalg.norm(newpose) * max_range
                newpose = [newpose[0], newpose[1], max(newpose[2], min_height)]
                updated_quaternion = get_new_target_orientation(newpose)
                print("pose", pos, "->", newpose, end="")
                cube.set_world_pose(np.array(newpose), updated_quaternion)
                print("set.")
                # Consume the offsets so they are not re-applied next iteration.
                xarm_socket.dx = None
                xarm_socket.dy = None
                last_face_seen_time = current_time
            elif ( \
                xarm_task.task_achieved or \
                current_time > last_rand_target_time + last_rand_target_timeout \
            ) and current_time > last_face_seen_time + last_face_seen_timeout:
                # set random location
                cube = world.scene.get_object("target")
                randpos = [
                    np.random.uniform(-1, 1),
                    np.random.uniform(-1, 1),
                    np.random.uniform(0, 1)
                ]
                range = np.linalg.norm(randpos)
                if range < min_range:
                    randpos = randpos / np.linalg.norm(randpos) * min_range
                elif range > max_range:
                    randpos = randpos / np.linalg.norm(randpos) * max_range
                randpos = [randpos[0], randpos[1], max(randpos[2], min_height)]
                updated_quaternion = get_new_target_orientation(randpos)
                print("Setting new target pos:"+str(randpos))
                cube.set_world_pose(np.array(randpos), updated_quaternion)
                last_rand_target_time = time.time()

    xarm_socket.shut_down_socket()
    simulation_app.close()
if __name__ == '__main__':
main()
| 6,142 |
Python
| 35.349112 | 113 | 0.578313 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm_follow_target.py
|
from .xarm import XArm
import numpy as np
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.stage import get_stage_units
import carb
class XArmFollowTarget(tasks.FollowTarget):
    """Follow-target task for the XArm5/XArm7.

    Args:
        xarm_version (int, optional): 5 or 7; selects which robot to spawn. Defaults to 7.
    """

    def __init__(self, xarm_version: int = 7):
        super().__init__(name="xarm_follow_target_task", target_position=np.array([0.3, 0.0, 0.5]) / get_stage_units(), offset=None)
        self._goal_position = np.array([0, 0, 0])
        self.task_achieved = False
        self.xarm_version = xarm_version
        return

    def set_up_scene(self, scene):
        """Add the ground plane and cache the target cube prim."""
        super().set_up_scene(scene)
        scene.add_default_ground_plane()
        self._cube = scene.get_object("target")
        # cpose, crot = self._cube.get_world_pose()
        # self._cube.set_world_pose(cpose, np.Array([180.0, 90, -180]))
        return

    def pre_step(self, control_index, simulation_time):
        """Per-step: track goal distance and recolor the cube green/red on reach/leave transitions."""
        self._goal_position, orient = self._cube.get_world_pose()
        end_effector_position, _ = self._xarm.end_effector.get_world_pose()
        # print("orientation"+str(orient))
        dist = np.mean(np.abs(self._goal_position - end_effector_position))
        achieved = bool(dist < 0.02)  # mean |delta| under 2 cm counts as reached
        if self.task_achieved != achieved:  # only touch the material on a state change
            self.task_achieved = achieved
            if self.task_achieved:
                print("Target Reached")
                self._cube.get_applied_visual_material().set_color(color=np.array([0, 1.0, 0]))
            else:
                self._cube.get_applied_visual_material().set_color(color=np.array([1.0, 0, 0]))
        return

    def set_robot(self) -> XArm:
        """Create and return the XArm robot for the configured version.

        Raises:
            ValueError: if ``self.xarm_version`` is not 5 or 7.
        """
        if self.xarm_version == 5:
            prim_path = "/World/XArm5"
            name = "xarm5"
            positions = np.array([-np.pi / 2, -np.pi / 2, -np.pi / 2, np.pi / 2, 0])
        elif self.xarm_version == 7:
            prim_path = "/World/XArm7"
            name = "xarm7"
            positions = np.array([-np.pi / 2, -np.pi / 2, -np.pi / 2, -np.pi / 2, -np.pi / 2, np.pi / 2, 0])
        else:
            # Previously an unsupported version fell through to a NameError below; fail fast instead.
            raise ValueError(f"Unsupported xarm_version: {self.xarm_version} (expected 5 or 7)")
        self._xarm = XArm(prim_path=prim_path, name=name, version=self.xarm_version)
        self._xarm.set_joints_default_state(
            positions=positions
        )
        return self._xarm
| 2,758 |
Python
| 38.414285 | 132 | 0.577592 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm.py
|
from typing import Optional
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.stage import add_reference_to_stage
import pathlib
class XArm(Robot):
    """XArm robot wrapper that references the matching USD asset onto the stage.

    Args:
        prim_path (str): stage path at which to create the robot prim.
        name (str, optional): scene-registry name. Defaults to "xarm_robot".
        version (Optional[int], optional): 5 or 7; selects the USD asset and
            end-effector link. Defaults to 7.
        position (Optional[np.ndarray], optional): initial world position. Defaults to None.
        orientation (Optional[np.ndarray], optional): initial world orientation. Defaults to None.

    Raises:
        ValueError: if ``version`` is not 5 or 7.
    """

    def __init__(
        self,
        prim_path: str,
        name: str = "xarm_robot",
        version: Optional[int] = 7,
        position: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None
    ) -> None:
        self._end_effector = None
        # USD assets live next to this module; normalize Windows backslashes.
        current_directory = str(pathlib.Path(__file__).parent.resolve()).replace("\\", "/")
        if version == 5:
            relative_usd_path = "/XArm/XArm5/xarm5.usd"
            end_link = "/link5"
        elif version == 7:
            relative_usd_path = "/XArm/XArm7/xarm7.usd"
            end_link = "/link7"
        else:
            # Previously an unsupported version fell through to a NameError below; fail fast instead.
            raise ValueError(f"Unsupported XArm version: {version} (expected 5 or 7)")
        add_reference_to_stage(usd_path=current_directory + relative_usd_path, prim_path=prim_path)
        self._end_effector_prim_path = prim_path + end_link
        super().__init__(
            prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
        )
        return

    @property
    def end_effector(self) -> RigidPrim:
        """RigidPrim wrapping the arm's final link (populated by initialize())."""
        return self._end_effector

    def initialize(self, physics_sim_view=None) -> None:
        """Initialize the articulation, wrap the end-effector prim, and disable gravity."""
        super().initialize(physics_sim_view)
        self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector")
        self.disable_gravity()
        self._end_effector.initialize(physics_sim_view)
        return

    def post_reset(self) -> None:
        """Reset the robot and its end-effector prim to their default states."""
        Robot.post_reset(self)
        self._end_effector.post_reset()
        return
| 2,275 |
Python
| 30.178082 | 116 | 0.593846 |
MatthewDZane/XArmFollowTarget/scripts/XArm/README.md
|
# Using XArm Follow Target as an Isaac Example
1. cd into the Isaac Sim User examples directory.
```
/home/user/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.examples/omni/isaac/examples/user_examples/
```
2. Clone the repo as XArmFollowTarget.
```
git clone https://gitlab.nrp-nautilus.io/isaac/xarm.git XArmFollowTarget
```
3. Add the following lines to the .../user_examples/__init__.py file (not in the repo directory):
```
from omni.isaac.examples.user_examples.XArmFollowTarget.scripts.XArm.xarm_sample import XArmSample
from omni.isaac.examples.user_examples.XArmFollowTarget.scripts.XArm.xarm_extension import XArmExtension
```
4. After this runs, you should see the XArm example in the Examples drop-down menu of the Isaac Sim GUI.
5. To receive the position output, run the **client.py** script found in the folder; **server.py** runs by default once the example is loaded.
| 896 |
Markdown
| 41.714284 | 146 | 0.767857 |
MatthewDZane/XArmFollowTarget/scripts/XArm/xarm_extension.py
|
import asyncio
import os
import omni
import omni.ui as ui
from omni.usd import StageEventType
from omni.isaac.core import World
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.ui.ui_utils import btn_builder, get_style, state_btn_builder, setup_ui_headers, cb_builder, float_builder
from .xarm_sample import XArmSample
class XArmExtension(BaseSampleExtension):
    """Isaac Sim example extension exposing the XArm follow-target sample.

    Builds the extension window (world controls + task controls) and forwards
    UI events to the underlying XArmSample.
    """

    def on_startup(self, ext_id: str):
        """Register this example with the base extension framework."""
        self._buttons = {}
        self._task_ui_elements = {}
        super().on_startup(ext_id)
        super().start_extension(
            menu_name="",
            submenu_name="",
            name="XArm",
            title="XArm",
            doc_link="",
            overview="",
            sample=XArmSample(),
            file_path=os.path.abspath(__file__),
            number_of_extra_frames=0,
        )
        return

    def shutdown_cleanup(self):
        """Tear down the sample world and restore the Load buttons on shutdown."""
        if self._sample._world is not None:
            self._sample._world_cleanup()
        if self._buttons is not None:
            self._enable_all_buttons(False)
            self._buttons["Load XArm5"].enabled = True
            self._buttons["Load XArm7"].enabled = True
        return

    def _build_ui(
        self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width, keep_window_open
    ):
        """
        Build a custom UI tool to run your extension.
        This function will be called any time the UI window is closed and reopened.
        """
        self._window = omni.ui.Window(
            name, width=window_width, height=0, visible=keep_window_open, dockPreference=ui.DockPreference.LEFT_BOTTOM
        )
        with self._window.frame:
            with ui.VStack(spacing=5, height=0):
                setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
                self._controls_frame = ui.CollapsableFrame(
                    title="World Controls",
                    width=ui.Fraction(1),
                    height=0,
                    collapsed=False,
                    style=get_style(),
                    horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
                    vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
                )
                with self._controls_frame:
                    with ui.VStack(style=get_style(), spacing=5, height=0):
                        # IDIOM: the original used `dict` as the variable name,
                        # shadowing the builtin; renamed to widget_kwargs.
                        widget_kwargs = {
                            "label": "Load XArm5",
                            "type": "button",
                            "text": "Load",
                            "tooltip": "Load XArm5 and Task",
                            "on_clicked_fn": self._on_load_xarm5,
                        }
                        self._buttons["Load XArm5"] = btn_builder(**widget_kwargs)
                        self._buttons["Load XArm5"].enabled = True
                        widget_kwargs = {
                            "label": "Load XArm7",
                            "type": "button",
                            "text": "Load",
                            "tooltip": "Load XArm7 and Task",
                            "on_clicked_fn": self._on_load_xarm7,
                        }
                        self._buttons["Load XArm7"] = btn_builder(**widget_kwargs)
                        self._buttons["Load XArm7"].enabled = True
                        widget_kwargs = {
                            "label": "Reset",
                            "type": "button",
                            "text": "Reset",
                            "tooltip": "Reset robot and environment",
                            "on_clicked_fn": self._on_reset,
                        }
                        self._buttons["Reset"] = btn_builder(**widget_kwargs)
                        self._buttons["Reset"].enabled = False
                task_controls_frame = ui.CollapsableFrame(
                    title="Task Controls",
                    collapsed=False
                )
                with task_controls_frame:
                    with ui.VStack(spacing=5):
                        task_controls_frame.visible = True
                        widget_kwargs = {
                            "label": "Follow Target",
                            "type": "button",
                            "a_text": "START",
                            "b_text": "STOP",
                            "tooltip": "Follow Target",
                            "on_clicked_fn": self._on_follow_target_button_event,
                        }
                        self._task_ui_elements["Follow Target"] = state_btn_builder(**widget_kwargs)
                        self._task_ui_elements["Follow Target"].enabled = False
                        widget_kwargs = {
                            "label": "Add Obstacle",
                            "type": "button",
                            "text": "ADD",
                            "tooltip": "Add Obstacle",
                            "on_clicked_fn": self._on_add_obstacle_button_event,
                        }
                        self._task_ui_elements["Add Obstacle"] = btn_builder(**widget_kwargs)
                        self._task_ui_elements["Add Obstacle"].enabled = False
                        widget_kwargs = {
                            "label": "Remove Obstacle",
                            "type": "button",
                            "text": "REMOVE",
                            "tooltip": "Remove Obstacle",
                            "on_clicked_fn": self._on_remove_obstacle_button_event,
                        }
                        self._task_ui_elements["Remove Obstacle"] = btn_builder(**widget_kwargs)
                        self._task_ui_elements["Remove Obstacle"].enabled = False
                        widget_kwargs = {
                            "label": "Random Target Enabled",
                            "type": "checkbox",
                            "default_val": False,
                            "tooltip": "Random Target Enabled",
                            "on_clicked_fn": self._on_random_target_enabled_event
                        }
                        self._task_ui_elements["Random Target Checkbox"] = cb_builder(**widget_kwargs)
                        widget_kwargs = {
                            "label": "Min Face Range",
                            "default_val": .3,
                            "tooltip": "Min Range in Meters",
                        }
                        self._task_ui_elements["Min Face Range"] = float_builder(**widget_kwargs)
                        self._task_ui_elements["Min Face Range"].add_value_changed_fn(self._on_min_face_range_changed_event)
                        widget_kwargs = {
                            "label": "Max Face Range",
                            "default_val": 10,
                            "tooltip": "Max Range in Meters",
                        }
                        self._task_ui_elements["Max Face Range"] = float_builder(**widget_kwargs)
                        self._task_ui_elements["Max Face Range"].add_value_changed_fn(self._on_max_face_range_changed_event)

    def _load_xarm(self, version):
        """Shared loader for the two Load buttons (previously duplicated code).

        Pushes the current UI settings into the sample, then asynchronously
        builds the world, wires callbacks, and enables the task controls.
        """
        self._sample.set_xarm_version(version)
        self._on_random_target_enabled_event(False)
        self._on_min_face_range_changed_event(self._task_ui_elements["Min Face Range"].get_value_as_float())
        self._on_max_face_range_changed_event(self._task_ui_elements["Max Face Range"].get_value_as_float())

        async def _on_load_world_async():
            await self._sample.load_world_async()
            await omni.kit.app.get_app().next_update_async()
            self._sample._world.add_stage_callback("stage_event_1", self.on_stage_event)
            self._enable_all_buttons(True)
            self._buttons["Load XArm5"].enabled = False
            self._buttons["Load XArm7"].enabled = False
            self.post_load_button_event()
            self._sample._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)

        asyncio.ensure_future(_on_load_world_async())
        return

    def _on_load_xarm5(self):
        """Load the XArm5 sample world."""
        self._load_xarm(5)
        return

    def _on_load_xarm7(self):
        """Load the XArm7 sample world."""
        self._load_xarm(7)
        return

    def _on_reset(self):
        """Asynchronously reset the sample world and refresh button state."""
        async def _on_reset_async():
            await self._sample.reset_async()
            await omni.kit.app.get_app().next_update_async()
            self.post_reset_button_event()
        asyncio.ensure_future(_on_reset_async())
        return

    def _on_follow_target_button_event(self, val):
        """START/STOP toggle for the follow-target task.

        NOTE: this method was defined twice (identically) in the original
        file; the duplicate has been removed.
        """
        asyncio.ensure_future(self._sample._on_follow_target_event_async(val))
        return

    def _on_add_obstacle_button_event(self):
        """Add an obstacle and enable the Remove button."""
        self._sample._on_add_obstacle_event()
        self._task_ui_elements["Remove Obstacle"].enabled = True
        return

    def _on_remove_obstacle_button_event(self):
        """Remove an obstacle; disable the button once none remain."""
        self._sample._on_remove_obstacle_event()
        world = self._sample.get_world()
        current_task = list(world.get_current_tasks().values())[0]
        if not current_task.obstacles_exist():
            self._task_ui_elements["Remove Obstacle"].enabled = False
        return

    def _on_random_target_enabled_event(self, val):
        # The checkbox widget is the source of truth; `val` is intentionally
        # ignored so this can also be called directly (e.g. from _load_xarm).
        self._sample.rand_target_enabled = self._task_ui_elements["Random Target Checkbox"].get_value_as_bool()

    def _on_min_face_range_changed_event(self, val):
        self._sample.min_detection_range = self._task_ui_elements["Min Face Range"].get_value_as_float()

    def _on_max_face_range_changed_event(self, val):
        # BUGFIX: this handler previously wrote min_detection_range (copy-paste
        # from the min handler), so moving the Max slider silently clobbered
        # the minimum range. Assumes XArmSample reads max_detection_range --
        # verify against the sample implementation.
        self._sample.max_detection_range = self._task_ui_elements["Max Face Range"].get_value_as_float()

    def _enable_all_buttons(self, flag):
        """Enable or disable every Button widget in the World Controls frame."""
        for btn_name, btn in self._buttons.items():
            if isinstance(btn, omni.ui._ui.Button):
                btn.enabled = flag
        return

    def _on_save_data_button_event(self):
        # NOTE(review): no "Output Directory" widget is ever built in _build_ui,
        # so invoking this would raise a KeyError -- it appears to be vestigial.
        self._sample._on_save_data_event(self._task_ui_elements["Output Directory"].get_value_as_string())
        return

    def post_reset_button_event(self):
        """UI state after a world reset: task enabled, obstacles cleared."""
        self._task_ui_elements["Follow Target"].enabled = True
        self._task_ui_elements["Remove Obstacle"].enabled = False
        self._task_ui_elements["Add Obstacle"].enabled = True
        if self._task_ui_elements["Follow Target"].text == "STOP":
            self._task_ui_elements["Follow Target"].text = "START"
        return

    def post_load_button_event(self):
        """UI state after the world has loaded."""
        self._task_ui_elements["Follow Target"].enabled = True
        self._task_ui_elements["Add Obstacle"].enabled = True
        return

    def post_clear_button_event(self):
        """UI state after the world is cleared/stopped."""
        self._task_ui_elements["Follow Target"].enabled = False
        self._task_ui_elements["Remove Obstacle"].enabled = False
        self._task_ui_elements["Add Obstacle"].enabled = False
        if self._task_ui_elements["Follow Target"].text == "STOP":
            self._task_ui_elements["Follow Target"].text = "START"
        return

    def on_stage_event(self, event):
        """React to stage open/close: disable the task UI or tear the world down."""
        if event.type == int(StageEventType.OPENED):
            # If the user opens a new stage, the extension should completely reset
            self._task_ui_elements["Follow Target"].enabled = False
            self._task_ui_elements["Remove Obstacle"].enabled = False
            self._task_ui_elements["Add Obstacle"].enabled = False
            if self._task_ui_elements["Follow Target"].text == "STOP":
                self._task_ui_elements["Follow Target"].text = "START"
        elif event.type == int(omni.usd.StageEventType.CLOSED):
            if World.instance() is not None:
                self._sample._world_cleanup()
                self._sample._world.clear_instance()
                if hasattr(self, "_buttons"):
                    if self._buttons is not None:
                        self._enable_all_buttons(False)
                        self._buttons["Load XArm5"].enabled = True
                        self._buttons["Load XArm7"].enabled = True
        return

    def _reset_on_stop_event(self, e):
        """On timeline STOP, re-enable Reset and clear the task buttons."""
        if e.type == int(omni.timeline.TimelineEventType.STOP):
            self._buttons["Reset"].enabled = True
            self.post_clear_button_event()
        return
| 13,233 |
Python
| 44.013605 | 124 | 0.516058 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/Test_ContainerServer.py
|
# server.py
# Minimal one-connection-at-a-time TCP echo server for local testing.
import socket

HOST = "localhost"
PORT = 12345

server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind((HOST, PORT))
server_sock.listen(1)

while True:
    conn, addr = server_sock.accept()
    with conn:
        payload = conn.recv(1024)
        if not payload:
            # Peer connected and immediately closed: exit the accept loop
            # and shut the whole server down.
            break
        print("Received:", payload)
        conn.sendall(payload)
| 332 |
Python
| 17.499999 | 53 | 0.596386 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/Cont_clientTest.py
|
# server.py
# NOTE(review): this file holds a server snippet followed by a client snippet.
# The client section only executes after the server's while-loop exits, which
# happens the first time an accepted connection delivers zero bytes. The ports
# also do not match (server listens on 8211, client connects to 50000) --
# presumably two separate test snippets pasted together; verify before use.
import socket
HOST = "localhost"
PORT = 8211
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
while True:
    conn, addr = s.accept()
    with conn:
        data = conn.recv(1024)
        if not data:
            # Empty recv() means the peer closed: this break leaves the
            # outer while-loop and falls through to the client code below.
            break
        print("Received:", data)
        conn.sendall(data)
# client.py
import socket
HOST = "localhost"
PORT = 50000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall("Hello, world!".encode())
data = s.recv(1024)
print("Received:", data.decode())
s.close()
| 573 |
Python
| 15.882352 | 53 | 0.631763 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/local-xarm-client.py
|
import socket
import math
import time
# Connect to the Isaac Sim machine that streams joint angles.
mysocket = socket.socket()
# mysocket.connect(('127.0.0.1',12345))
mysocket.connect(('192.168.4.206',12345))
"""
# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK
# git clone [email protected]:xArm-Developer/xArm-Python-SDK.git
# cd xArm-Python-SDK
# python setup.py install
"""
try:
    from xarm.tools import utils
except:
    pass
from xarm import version
from xarm.wrapper import XArmAPI


def pprint(*args, **kwargs):
    # BUGFIX: the callbacks below call pprint(), but this script (unlike the
    # other generated SDK samples) never defined it, so any callback firing
    # raised a NameError. Plain print() is a sufficient stand-in here.
    print(*args, **kwargs)


arm = XArmAPI('192.168.4.15')
arm.motion_enable(enable=True)
arm.set_mode(0)
arm.set_state(state=0)
# arm.reset(wait=True)
arm.set_mode(1)  # joint-servo mode, required for set_servo_angle_j streaming
arm.set_state(0)
time.sleep(0.1)

variables = {}
params = {'speed': 100, 'acc': 2000, 'angle_speed': 20, 'angle_acc': 500, 'events': {}, 'variables': variables, 'callback_in_thread': True, 'quit': False}


# Register error/warn changed callback
def error_warn_change_callback(data):
    if data and data['error_code'] != 0:
        params['quit'] = True
        pprint('err={}, quit'.format(data['error_code']))
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.register_error_warn_changed_callback(error_warn_change_callback)


# Register state changed callback
def state_changed_callback(data):
    if data and data['state'] == 4:
        if arm.version_number[0] >= 1 and arm.version_number[1] >= 1 and arm.version_number[2] > 0:
            params['quit'] = True
            pprint('state=4, quit')
arm.release_state_changed_callback(state_changed_callback)
arm.register_state_changed_callback(state_changed_callback)


# Register counter value changed callback
if hasattr(arm, 'register_count_changed_callback'):
    def count_changed_callback(data):
        if not params['quit']:
            pprint('counter val: {}'.format(data['count']))
    arm.register_count_changed_callback(count_changed_callback)


# Register connect changed callback
def connect_changed_callback(data):
    if data and not data['connected']:
        params['quit'] = True
        pprint('disconnect, connected={}, reported={}, quit'.format(data['connected'], data['reported']))
# BUGFIX: previously released error_warn_change_callback against the connect
# slot (copy-paste); release the matching callback instead.
arm.release_connect_changed_callback(connect_changed_callback)
arm.register_connect_changed_callback(connect_changed_callback)
def close_socket(thissocket):
    """Best-effort teardown of a socket connection.

    Performs an orderly shutdown followed by close(); any socket.error
    (e.g. the peer already disconnected or the socket is already closed)
    is silently swallowed. Note that when shutdown() raises, close() is
    skipped as well -- this mirrors the original best-effort behaviour.
    Always prints a confirmation.
    """
    try:
        thissocket.shutdown(socket.SHUT_RDWR)
        thissocket.close()
        thissocket = None  # rebinds only the local name; caller's reference unaffected
    except socket.error:
        pass
    print("socket is closed")
# Receive joint-angle tuples from Isaac Sim and stream them to the arm until
# the sender says "Done" or the user presses Ctrl-C.
try:
    while True:
        data = mysocket.recv(1024)
        message = data.decode()
        if message == "Done":
            break
        # SECURITY NOTE: eval() on data received over the network executes
        # arbitrary Python -- only acceptable on a trusted LAN. If only joint
        # lists are expected, ast.literal_eval would be the safe replacement.
        try:
            joints = eval(message)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt raised mid-parse and made Ctrl-C unreliable.
            continue
        if arm.connected and arm.state != 4:
            # Stream the pose in servo mode (state 4 means the arm errored out).
            arm.set_servo_angle_j(joints, is_radian=True)
except KeyboardInterrupt:
    print("closing socket...")
    close_socket(mysocket)
    print("Isaac Sim Connection Stopped")
# Unregister all callbacks on shutdown.
if hasattr(arm, 'release_count_changed_callback'):
    arm.release_count_changed_callback(count_changed_callback)
# BUGFIX: the error/warn release was handed state_changed_callback and the
# connect release was handed error_warn_change_callback, so those two
# callbacks were never actually unregistered.
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.release_state_changed_callback(state_changed_callback)
arm.release_connect_changed_callback(connect_changed_callback)
| 3,384 |
Python
| 28.692982 | 154 | 0.668735 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/go-home.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2022, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
"""
# Notice
# 1. Changes to this file on Studio will not be preserved
# 2. The next conversion will overwrite the file with the same name
"""
import sys
import math
import time
import datetime
import random
import traceback
import threading
"""
# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK
# git clone [email protected]:xArm-Developer/xArm-Python-SDK.git
# cd xArm-Python-SDK
# python setup.py install
"""
try:
from xarm.tools import utils
except:
pass
from xarm import version
from xarm.wrapper import XArmAPI
def pprint(*args, **kwargs):
    """Print *args* prefixed with a timestamp and the calling line number.

    Falls back to the builtin print() if the call stack cannot be inspected.
    """
    try:
        caller = traceback.extract_stack(limit=2)[0]
        stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print('[{}][{}] {}'.format(stamp, caller[1], ' '.join(map(str, args))))
    except:
        print(*args, **kwargs)
pprint('xArm-Python-SDK Version:{}'.format(version.__version__))

# Connect, clear any latched errors, and put the arm in position-control mode.
arm = XArmAPI('192.168.4.15')
arm.clean_warn()
arm.clean_error()
arm.motion_enable(True)
arm.set_mode(0)
arm.set_state(0)
time.sleep(1)

variables = {}
params = {'speed': 100, 'acc': 2000, 'angle_speed': 20, 'angle_acc': 500, 'events': {}, 'variables': variables, 'callback_in_thread': True, 'quit': False}

# Register error/warn changed callback
def error_warn_change_callback(data):
    if data and data['error_code'] != 0:
        params['quit'] = True
        pprint('err={}, quit'.format(data['error_code']))
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.register_error_warn_changed_callback(error_warn_change_callback)

# Register state changed callback
def state_changed_callback(data):
    if data and data['state'] == 4:
        if arm.version_number[0] >= 1 and arm.version_number[1] >= 1 and arm.version_number[2] > 0:
            params['quit'] = True
            pprint('state=4, quit')
arm.release_state_changed_callback(state_changed_callback)
arm.register_state_changed_callback(state_changed_callback)

# Register counter value changed callback
if hasattr(arm, 'register_count_changed_callback'):
    def count_changed_callback(data):
        if not params['quit']:
            pprint('counter val: {}'.format(data['count']))
    arm.register_count_changed_callback(count_changed_callback)

# Register connect changed callback
def connect_changed_callback(data):
    if data and not data['connected']:
        params['quit'] = True
        pprint('disconnect, connected={}, reported={}, quit'.format(data['connected'], data['reported']))
# BUGFIX: previously released error_warn_change_callback against the connect
# slot (copy-paste); release the matching callback instead.
arm.release_connect_changed_callback(connect_changed_callback)
arm.register_connect_changed_callback(connect_changed_callback)

# Rotation: drive all joints to zero ("home") with the overridden speed/acc.
if not params['quit']:
    params['angle_acc'] = 1145
if not params['quit']:
    params['angle_speed'] = 80
# if params['quit']:
if arm.error_code == 0 and not params['quit']:
    code = arm.set_servo_angle(angle=[0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0], speed=params['angle_speed'], mvacc=params['angle_acc'], wait=True, radius=-1.0)
    if code != 0:
        params['quit'] = True
        pprint('set_servo_angle, code={}'.format(code))

# release all event
if hasattr(arm, 'release_count_changed_callback'):
    arm.release_count_changed_callback(count_changed_callback)
# BUGFIX: cleanup previously passed state_changed_callback to the error/warn
# release and error_warn_change_callback to the connect release, leaving the
# error/warn and connect callbacks permanently registered.
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.release_state_changed_callback(state_changed_callback)
arm.release_connect_changed_callback(connect_changed_callback)
| 3,630 |
Python
| 32.009091 | 155 | 0.680716 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/realsense_facePoseClient.py
|
# ====== Sample Code for Smart Design Technology Blog ======
# Intel Realsense D435 cam has RGB camera with 1920×1080 resolution
# Depth camera is 1280x720
# FOV is limited to 69deg x 42deg (H x V) - the RGB camera FOV
# If you run this on a non-Intel CPU, explore other options for rs.align
# On the NVIDIA Jetson AGX we build the pyrealsense lib with CUDA
import pyrealsense2 as rs
import mediapipe as mp
import cv2
import numpy as np
import datetime as dt
import socket
import sys
from itertools import combinations
# OpenCV text-overlay defaults used for the FPS readout in the main loop.
font = cv2.FONT_HERSHEY_SIMPLEX
org = (20, 100)
fontScale = .5
color = (0,50,255)
thickness = 1
# ====== Realsense ======
# Enumerate attached RealSense devices by serial number.
realsense_ctx = rs.context()
connected_devices = [] # List of serial numbers for present cameras
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    print(f"{detected_camera}")
    connected_devices.append(detected_camera)
device = connected_devices[0] # In this example we are only using one camera
pipeline = rs.pipeline()
config = rs.config()
background_removed_color = 153 # Grey
# ====== Mediapipe ======
# Face-mesh model used to find the nose and face-outline landmarks.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
# ====== Socket ======
# Pose data is streamed to the Omniverse machine unless --no-socket is passed.
bSocket = True
if (len(sys.argv) > 1):
    print(sys.argv)
    if sys.argv[1] == "--no-socket":
        bSocket = False
if bSocket:
    # open socket to omniverse machine
    mysocket = socket.socket()
    # mysocket.connect(('192.168.1.62',12346))
    mysocket.connect(('192.168.4.206',12346))
    # mysocket.connect(('127.0.0.1',12346))
def close_socket(thissocket):
    """Shut down and close *thissocket*, ignoring any socket errors.

    A socket.error during shutdown() (e.g. the peer already disconnected)
    skips close() too, mirroring the original best-effort behaviour; a
    confirmation message is printed either way.
    """
    try:
        thissocket.shutdown(socket.SHUT_RDWR)
        thissocket.close()
        thissocket = None  # local rebinding only; the caller's reference is unaffected
    except socket.error:
        pass
    print("socket is closed")
def get_pixel_depth(depth_image_flipped, pixel_coodinate, img_w, img_h):
    """Depth in meters at a pixel of the (already flipped) depth image.

    Coordinates on or past the right/bottom edge are clamped to the last
    valid column/row. Relies on the module-level `depth_scale` read from
    the RealSense depth sensor to convert raw units to meters.
    """
    x_pixel, y_pixel = pixel_coodinate[0], pixel_coodinate[1]
    x_pixel = min(x_pixel, img_w - 1)
    y_pixel = min(y_pixel, img_h - 1)
    return depth_image_flipped[y_pixel, x_pixel] * depth_scale
def get_camera_to_pixel_vector(pixel_coodinate, inv_cam_matrix, distance_from_camera):
    """Back-project a pixel into a camera-space vector of a given length.

    Args:
        pixel_coodinate: (x, y) pixel position.
        inv_cam_matrix: 3x3 inverse camera intrinsic matrix.
        distance_from_camera: desired length of the returned vector (meters).

    Returns:
        np.ndarray: (3, 1) column vector pointing from the camera through the
        pixel, scaled to ``distance_from_camera``.
    """
    homogeneous_pixel = np.array([
        [pixel_coodinate[0]],
        [pixel_coodinate[1]],
        [1]
    ])
    # Back-project through the inverse intrinsics, then normalize.
    # (The original also normalized the homogeneous vector beforehand; that
    # step was redundant because the result is re-normalized here, so it has
    # been removed -- the returned vector is mathematically identical.)
    cam_to_pixel = inv_cam_matrix @ homogeneous_pixel
    cam_to_pixel = cam_to_pixel / np.linalg.norm(cam_to_pixel)
    return cam_to_pixel * distance_from_camera
def estimate_plane_normal(points):
    """Estimate the unit normal of a plane fitted through 3D *points*.

    Every 3-point subset contributes one cross-product normal, flipped so
    its z-component is non-positive (pointing back toward the camera); the
    normals are averaged and the mean re-normalized to unit length.
    """
    normals = []
    for p0, p1, p2 in combinations(points, 3):
        edge_a = p1 - p0
        edge_b = p2 - p0
        candidate = np.cross(edge_a, edge_b)
        if candidate[2] > 0:
            candidate = -1 * candidate
        normals.append(candidate)
    mean_normal = np.mean(normals, axis=0)
    return mean_normal / np.linalg.norm(mean_normal)
# ====== Enable Streams ======
config.enable_device(device)
# # For worse FPS, but better resolution:
# stream_res_x = 1280
# stream_res_y = 720
# # For better FPS. but worse resolution:
stream_res_x = 640
stream_res_y = 480
stream_fps = 30
config.enable_stream(rs.stream.depth, stream_res_x, stream_res_y, rs.format.z16, stream_fps)
config.enable_stream(rs.stream.color, stream_res_x, stream_res_y, rs.format.bgr8, stream_fps)
profile = pipeline.start(config)
# Align depth frames to the color stream so pixel coordinates match across both.
align_to = rs.stream.color
align = rs.align(align_to)
# ====== Get depth Scale ======
# depth_scale converts raw z16 depth units to meters; used by get_pixel_depth().
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print(f"\tDepth Scale for Camera SN {device} is: {depth_scale}")
# ====== Set clipping distance ======
# Pixels farther than 2 m are replaced with the grey background color.
clipping_distance_in_meters = 2
clipping_distance = clipping_distance_in_meters / depth_scale
print(f"\tConfiguration Successful for SN {device}")
# ====== Get and process images ======
print(f"Starting to capture images on SN: {device}")
# Main capture loop: grab aligned RGB-D frames, estimate head pose from the
# MediaPipe landmarks plus depth, and stream the nose vector over the socket.
while True:
    start_time = dt.datetime.today().timestamp()

    # Get and align frames
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    if not aligned_depth_frame or not color_frame:
        continue

    # Process images
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    depth_image_flipped = cv2.flip(depth_image,1)
    color_image = np.asanyarray(color_frame.get_data())
    depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #Depth image is 1 channel, while color image is 3
    background_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), background_removed_color, color_image)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    images = cv2.flip(background_removed,1)
    color_image = cv2.flip(color_image,1)
    color_images_rgb = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)

    img_h, img_w, img_c = color_images_rgb.shape

    # Camera intrinsic matrix (from the RealSense profile) and its inverse.
    intrinsics = aligned_frames.get_profile().as_video_stream_profile().get_intrinsics()
    cam_matrix = np.array( [
        [intrinsics.fx, 0, img_w/2],
        [0, intrinsics.fy, img_h/2],
        [0, 0, 1]
    ])
    inv_cam_matrix = np.linalg.inv(cam_matrix)
    # the distortion paramaters (assumed zero)
    dist_matrix = np.zeros((4, 1), dtype=np.float64)

    face_3d = []
    face_2d = []

    # Process face
    results = face_mesh.process(color_images_rgb)
    if results.multi_face_landmarks:
        # NOTE(review): face_2d/face_3d are reset once per frame but converted
        # to arrays inside this loop, so this only works for a single face
        # (FaceMesh defaults to one face) -- verify before raising max_num_faces.
        for face_landmarks in results.multi_face_landmarks:
            for idx, lm in enumerate(face_landmarks.landmark):
                # Landmarks used for PnP: nose tip (1), eye corners (33, 263),
                # mouth corners (61, 291), chin (199).
                if idx in [1, 33, 61, 199, 263, 291]:
                    x, y = int(lm.x * img_w), int(lm.y * img_h)
                    cv2.circle(color_images_rgb, (x, y), 10, (0, 0, 255), -1)
                    face_2d.append([x, y])
                    landmark_distance = get_pixel_depth(
                        depth_image_flipped,
                        [x, y],
                        img_w,
                        img_h
                    )
                    cam_to_landmark = get_camera_to_pixel_vector(
                        [x, y],
                        inv_cam_matrix,
                        landmark_distance
                    )
                    face_3d.append(cam_to_landmark.flatten())
                    if idx == 1:
                        nose_pixel = (x, y)
                        cam_to_nose = cam_to_landmark
                        nose_distance = landmark_distance
            face_2d = np.array(face_2d, dtype=np.float64)
            face_3d = np.array(face_3d, dtype=np.float64)
            nose_landmark = face_landmarks.landmark[1]

            # solve PnP
            success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
            # get rotational matrix
            rmat, jac = cv2.Rodrigues(rot_vec)
            # get angles
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
            x_degrees = angles[0] * 360
            y_degrees = angles[1] * 360
            z_degrees = angles[2] * 360

            # see where the user's head is tilting
            # BUGFIX: these comparisons previously tested `x`/`y`, which at
            # this point hold the PIXEL coordinates of the last landmark
            # processed, not the head-pose angles -- the labels were wrong.
            if y_degrees < -10:
                text = "Looking Left"
            elif y_degrees > 10:
                text = "Looking Right"
            elif x_degrees < -10:
                text = "Looking Down"
            elif x_degrees > 10:
                text = "Looking Up"
            else:
                text = "Looking Forward"

            # display the nose direction
            p2 = (int(nose_pixel[0] + y_degrees * 10), int(nose_pixel[1] - x_degrees * 10))
            cv2.line(color_images_rgb, (int(nose_pixel[0]), int(nose_pixel[1])), p2, (255, 0, 0), 3)
            face_direction = estimate_plane_normal(face_3d)
            print(nose_distance, end="\t")
            ##vector of Nose in camera local space
            print(cam_to_nose.flatten(), end="\t")
            ##Normal of the face in camera local space
            print(face_direction.flatten(), end="\t")
            print(nose_pixel)

            # add the text on the image
            cv2.putText(color_images_rgb, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(color_images_rgb, "x: "+str(np.round(x_degrees, 2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(color_images_rgb, "y: "+str(np.round(y_degrees, 2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(color_images_rgb, "z: "+str(np.round(z_degrees, 2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(color_images_rgb, "Distance: "+ str(np.round(nose_distance, 2)), (500, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            xdiff = 0.5-nose_landmark.x
            ydiff = 0.5-nose_landmark.y
            cv2.putText(color_images_rgb, "xdiff: "+str(np.round(xdiff, 2)), (500, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            images = cv2.putText(color_images_rgb, "ydiff: "+str(np.round(ydiff, 2)), (500, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
        if bSocket:
            cam_to_nose = cam_to_nose.flatten()
            try:
                sendData = str([
                    cam_to_nose[0], cam_to_nose[1], cam_to_nose[2],
                    face_direction[0], face_direction[1], face_direction[2],
                    xdiff, ydiff]
                )
                mysocket.send(sendData.encode())
            except:
                # best-effort streaming; a dropped frame is acceptable
                pass

    # Display FPS
    time_diff = dt.datetime.today().timestamp() - start_time
    # Guard against a zero-length interval (was a potential ZeroDivisionError).
    fps = int(1 / time_diff) if time_diff > 0 else 0
    org3 = (20, org[1] + 60)
    images = cv2.putText(images, f"FPS: {fps}", org3, font, fontScale, color, thickness, cv2.LINE_AA)
    name_of_window = 'SN: ' + str(device)

    # Display images
    cv2.namedWindow(name_of_window, cv2.WINDOW_AUTOSIZE)
    output_bgr = cv2.cvtColor(color_images_rgb, cv2.COLOR_RGB2BGR)
    cv2.imshow(name_of_window, output_bgr)
    key = cv2.waitKey(1)
    # Press esc or 'q' to close the image window
    if key & 0xFF == ord('q') or key == 27:
        print(f"User pressed break key for SN: {device}")
        break

print(f"Application Closing")
pipeline.stop()
print(f"Application Closed.")
| 11,623 |
Python
| 33.906907 | 155 | 0.593564 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/look-forward.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2022, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
"""
# Notice
# 1. Changes to this file on Studio will not be preserved
# 2. The next conversion will overwrite the file with the same name
"""
import sys
import math
import time
import datetime
import random
import traceback
import threading
# IKFast for xarm 7
import pyikfast
import numpy as np
"""
# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK
# git clone [email protected]:xArm-Developer/xArm-Python-SDK.git
# cd xArm-Python-SDK
# python setup.py install
"""
try:
from xarm.tools import utils
except:
pass
from xarm import version
from xarm.wrapper import XArmAPI
def pprint(*args, **kwargs):
    """Print *args* prefixed with a timestamp and the calling line number.

    Falls back to the builtin print() if the call stack cannot be inspected.
    """
    try:
        caller = traceback.extract_stack(limit=2)[0]
        stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print('[{}][{}] {}'.format(stamp, caller[1], ' '.join(map(str, args))))
    except:
        print(*args, **kwargs)
frontForwardAngle = [0, 2.5, 0, 37.3, 0, -57.3, 0]  # joint angles (deg): head-forward, extended pose
frontBackAngle = [0.0,-45.0,0.0,0.0,0.0,-45.0,0.0]  # joint angles (deg): head-forward, retracted pose

pprint('xArm-Python-SDK Version:{}'.format(version.__version__))

# Connect, clear any latched errors, and put the arm in position-control mode.
arm = XArmAPI('192.168.4.15')
arm.clean_warn()
arm.clean_error()
arm.motion_enable(True)
arm.set_mode(0)
arm.set_state(0)
time.sleep(1)

variables = {}
params = {'speed': 50, 'acc': 2000, 'angle_speed': 20, 'angle_acc': 500, 'events': {}, 'variables': variables, 'callback_in_thread': True, 'quit': False}
params['angle_acc'] = 50
params['angle_speed'] = 1000

# Register error/warn changed callback
def error_warn_change_callback(data):
    if data and data['error_code'] != 0:
        params['quit'] = True
        pprint('err={}, quit'.format(data['error_code']))
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.register_error_warn_changed_callback(error_warn_change_callback)

# Register state changed callback
def state_changed_callback(data):
    if data and data['state'] == 4:
        if arm.version_number[0] >= 1 and arm.version_number[1] >= 1 and arm.version_number[2] > 0:
            params['quit'] = True
            pprint('state=4, quit')
arm.release_state_changed_callback(state_changed_callback)
arm.register_state_changed_callback(state_changed_callback)

# Register counter value changed callback
if hasattr(arm, 'register_count_changed_callback'):
    def count_changed_callback(data):
        if not params['quit']:
            pprint('counter val: {}'.format(data['count']))
    arm.register_count_changed_callback(count_changed_callback)

# Register connect changed callback
def connect_changed_callback(data):
    if data and not data['connected']:
        params['quit'] = True
        pprint('disconnect, connected={}, reported={}, quit'.format(data['connected'], data['reported']))
# BUGFIX: previously released error_warn_change_callback against the connect
# slot (copy-paste); release the matching callback instead.
arm.release_connect_changed_callback(connect_changed_callback)
arm.register_connect_changed_callback(connect_changed_callback)
# Rotation: move to the extended pose, then retract while still facing forward.
if not params['quit']:
    if arm.error_code == 0 and not params['quit']:
        # Extended "look forward" pose.
        code = arm.set_servo_angle(angle=frontForwardAngle, speed=params['angle_speed'], mvacc=params['angle_acc'], wait=True, radius=-1.0)
        if code != 0:
            params['quit'] = True
            pprint('set_servo_angle, code={}'.format(code))
        # look forward but retracted
        # BUG FIX: the keyword was misspelled 'peed', which raised a
        # TypeError at runtime; it is 'speed'. The return code is now also
        # checked, matching the move above.
        code = arm.set_servo_angle(angle=frontBackAngle, speed=params['angle_speed'], mvacc=params['angle_acc'], wait=True, radius=-1.0)
        if code != 0:
            params['quit'] = True
            pprint('set_servo_angle, code={}'.format(code))
        # NOTE: a large body of commented-out scratch code (pyikfast FK/IK
        # experiments, relative set_position / set_tool_position tests) was
        # removed here; recover it from version control if needed again.
# Release all event callbacks registered above, each with the callback that
# was actually registered for that event.
if hasattr(arm, 'release_count_changed_callback'):
    arm.release_count_changed_callback(count_changed_callback)
# BUG FIX: the original passed state_changed_callback here.
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.release_state_changed_callback(state_changed_callback)
# BUG FIX: the original passed error_warn_change_callback here.
arm.release_connect_changed_callback(connect_changed_callback)
| 6,835 |
Python
| 32.674877 | 162 | 0.685296 |
MatthewDZane/XArmFollowTarget/scripts/XArm/client/face-pose-client.py
|
# from this video:
# https://www.youtube.com/watch?v=-toNMaS4SeQ
import cv2
import mediapipe as mp
import numpy as np
import time
import socket
import sys
# Stream head-pose data over TCP unless '--no-socket' is passed on the
# command line (useful for testing the vision pipeline standalone).
bSocket = True
if (len(sys.argv) > 1):
    print(sys.argv)
    if sys.argv[1] == "--no-socket":
        bSocket = False
if bSocket:
    # open socket to omniverse machine
    # NOTE(review): receiver address/port are hard-coded; confirm they match
    # the Omniverse-side listener before running.
    mysocket = socket.socket()
    mysocket.connect(('192.168.4.206',12346)) # robert's local machine
    # mysocket.connect(('192.168.1.62',12346))
    # mysocket.connect(('127.0.0.1',12346))
def close_socket(thissocket):
    """Shut down and close *thissocket*, ignoring socket errors.

    Errors (e.g. an already-disconnected peer) are swallowed deliberately:
    this is best-effort cleanup on shutdown.
    """
    try:
        thissocket.shutdown(socket.SHUT_RDWR)
        thissocket.close()
        thissocket = None  # local rebind only; caller's reference is unaffected
    except socket.error as e:
        pass
    # BUG FIX: this print sat at module level in the original, so it ran
    # once at import time; it belongs here, after the socket is closed.
    print("socket is closed")
# MediaPipe FaceMesh model plus drawing helpers for the landmark overlay.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
# cap = cv2.VideoCapture(0)
# Camera index 1 — presumably an external webcam on this rig; adjust per machine.
cap = cv2.VideoCapture(1)
# Main loop: estimate head pose from webcam frames with MediaPipe + solvePnP
# and stream [x, y, z, xdiff, ydiff] to the Omniverse machine.
try:
    while cap.isOpened():
        success, image = cap.read()
        # Robustness fix: skip frames the camera failed to deliver;
        # cv2.flip()/cvtColor() would otherwise crash on a None image.
        if not success:
            continue
        start = time.time()
        # Flip both axes (camera appears to be mounted upside-down), then
        # convert BGR -> RGB for MediaPipe.
        image = cv2.cvtColor(cv2.flip(image, -1), cv2.COLOR_BGR2RGB)
        # Mark the frame read-only while MediaPipe processes it (performance).
        image.flags.writeable = False
        results = face_mesh.process(image)
        image.flags.writeable = True
        # Back to BGR for OpenCV drawing/display.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        img_h, img_w, img_c = image.shape
        face_3d = []
        face_2d = []
        nose_norm = []
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                # Collect a fixed subset of landmarks (eye corners, nose tip,
                # mouth corners, chin) for the PnP head-pose estimate.
                for idx, lm in enumerate(face_landmarks.landmark):
                    if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
                        if idx == 1:
                            # Nose tip: keep pixel, normalized and pseudo-3D coords.
                            nose_2d = (lm.x * img_w, lm.y * img_h)
                            nose_norm = (lm.x, lm.y)
                            nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
                        x, y = int(lm.x * img_w), int(lm.y * img_h)
                        cv2.circle(image, (x, y), 10, (0, 0, 255), -1)
                        face_2d.append([x, y])
                        face_3d.append([x, y, lm.z])
                face_2d = np.array(face_2d, dtype=np.float64)
                face_3d = np.array(face_3d, dtype=np.float64)
                # Approximate pinhole intrinsics: focal length ~ image width,
                # principal point at the image center, no lens distortion.
                focal_length = 1 * img_w
                cam_matrix = np.array([
                    [focal_length, 0, img_h/2],
                    [0, focal_length, img_w/2],
                    [0, 0, 1]
                ])
                dist_matrix = np.zeros((4, 1), dtype=np.float64)
                success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
                # Rotation vector -> rotation matrix -> Euler-like angles.
                rmat, jac = cv2.Rodrigues(rot_vec)
                angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
                x = angles[0] * 360
                y = angles[1] * 360
                z = angles[2] * 360
                # Classify the head orientation for the on-screen label.
                if y < -10:
                    text = "Looking Left"
                elif y > 10:
                    text = "Looking Right"
                elif x < -10:
                    text = "Looking Down"
                elif x > 10:
                    text = "Looking Up"
                else:
                    text = "Looking Forward"
                # Draw a line from the nose tip in the gaze direction.
                nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
                p1 = (int(nose_2d[0]), int(nose_2d[1]))
                p2 = (int(nose_2d[0] + y * 10), int(nose_2d[1] - x * 10))
                cv2.line(image, p1, p2, (255, 0, 0), 3)
                cv2.putText(image, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                cv2.putText(image, "x: "+str(np.round(x, 2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                cv2.putText(image, "y: "+str(np.round(y, 2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                cv2.putText(image, "z: "+str(np.round(z, 2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                # Offset of the nose tip from the image center, in
                # normalized image coordinates.
                xdiff = 0.5-nose_norm[0]
                ydiff = 0.5-nose_norm[1]
                cv2.putText(image, "xdiff: "+str(np.round(xdiff, 2)), (500, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                cv2.putText(image, "ydiff: "+str(np.round(ydiff, 2)), (500, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                end = time.time()
                totalTime = end-start
                fps = 1/totalTime
                # print("FPS: ", fps)
                cv2.putText(image, f'FPS: {int(fps)}', (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
                mp_drawing.draw_landmarks(
                    image=image,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_CONTOURS,
                    landmark_drawing_spec = drawing_spec,
                    connection_drawing_spec=drawing_spec)
        if bSocket:
            try:
                # x/y/z and xdiff/ydiff are only updated when a face was
                # detected; stale or missing values are tolerated here.
                print("sending:", [x, y, z, xdiff, ydiff])
                sendData = str([x, y, z, xdiff, ydiff])
                mysocket.send(sendData.encode())
            except Exception:
                # Best-effort send; narrowed from the original bare
                # 'except:' so Ctrl-C is not swallowed here.
                pass
        cv2.imshow('Head Pose Estimation', image)
        if cv2.waitKey(5) & 0xFF == 27:
            break
except KeyboardInterrupt:
    print("quitting")
if bSocket:
    close_socket(mysocket)
cap.release()
| 4,957 |
Python
| 24.958115 | 114 | 0.616905 |
MatthewDZane/XArmFollowTarget/scripts/XArm/XArm/XArm7/xarm7_rmpflow_common.yaml
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
joint_limit_buffers: [.01, .03, .01, .01, .01, .01, .01]
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 1.
cspace_trajectory_rmp:
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp:
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp:
max_velocity: 2. # 4. # max_xd
velocity_damping_region: 1.5
damping_gain: 1000.0
metric_weight: 100. # metric_scalar
target_rmp:
accel_p_gain: 50. #100.
accel_d_gain: 85.
accel_norm_eps: .075
metric_alpha_length_scale: .05
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
proximity_metric_boost_length_scale: .02
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false # Values >= .5 are true and < .5 are false
axis_target_rmp:
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
proximity_metric_boost_length_scale: .08
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp:
damping_gain: 50.
damping_std_dev: .04
damping_robustness_eps: 1e-2
damping_velocity_gate_length_scale: .01
repulsion_gain: 800.
repulsion_std_dev: .01
metric_modulation_radius: .5
metric_scalar: 10000. # Real value should be this.
#metric_scalar: 0. # Turns off collision avoidance.
metric_exploder_std_dev: .02
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 100.
canonical_resolve:
max_acceleration_norm: 50.
projection_tolerance: .01
verbose: false
body_cylinders:
- name: base_stem
pt1: [0,0,.333]
pt2: [0,0,0.]
radius: .05
# Each arm is approx. 1m from (arm) base to gripper center.
# .1661 between links (approx .15)
body_collision_controllers:
- name: link7
radius: .05
- name: link5
radius: .05
| 3,017 |
YAML
| 30.768421 | 78 | 0.618827 |
MatthewDZane/XArmFollowTarget/scripts/XArm/XArm/XArm7/xarm7_descriptor.yaml
|
api_version: 1.0
cspace:
- joint1
- joint2
- joint3
- joint4
- joint5
- joint6
- joint7
root_link: world
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]
cspace_to_urdf_rules: []
composite_task_spaces: []
#collision_spheres:
# - panda_link0:
# - "center": [0.0, 0.0, 0.05]
# "radius": 0.08
| 348 |
YAML
| 16.449999 | 44 | 0.534483 |
MatthewDZane/XArmFollowTarget/scripts/XArm/XArm/XArm5/xarm5_descriptor.yaml
|
api_version: 1.0
cspace:
- joint1
- joint2
- joint3
- joint4
- joint5
root_link: world
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00,
]
cspace_to_urdf_rules: []
composite_task_spaces: []
| 207 |
YAML
| 13.857142 | 33 | 0.57971 |
MatthewDZane/XArmFollowTarget/scripts/XArm/XArm/XArm5/xarm5_rmpflow_common.yaml
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
joint_limit_buffers: [.01, .03, .01, .01, .01]
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 1.
cspace_trajectory_rmp:
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp:
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp:
max_velocity: 2. # 4. # max_xd
velocity_damping_region: 1.5
damping_gain: 1000.0
metric_weight: 100. # metric_scalar
target_rmp:
accel_p_gain: 50. #100.
accel_d_gain: 85.
accel_norm_eps: .075
metric_alpha_length_scale: .05
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
proximity_metric_boost_length_scale: .02
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false # Values >= .5 are true and < .5 are false
axis_target_rmp:
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
proximity_metric_boost_length_scale: .08
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp:
damping_gain: 50.
damping_std_dev: .04
damping_robustness_eps: 1e-2
damping_velocity_gate_length_scale: .01
repulsion_gain: 800.
repulsion_std_dev: .01
metric_modulation_radius: .5
metric_scalar: 10000. # Real value should be this.
#metric_scalar: 0. # Turns off collision avoidance.
metric_exploder_std_dev: .02
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 100.
canonical_resolve:
max_acceleration_norm: 50.
projection_tolerance: .01
verbose: false
body_cylinders:
- name: base_stem
pt1: [0,0,.333]
pt2: [0,0,0.]
radius: .05
# Each arm is approx. 1m from (arm) base to gripper center.
# .1661 between links (approx .15)
body_collision_controllers:
- name: link5
radius: .05
| 2,971 |
YAML
| 30.956989 | 78 | 0.62134 |
MatthewDZane/XArmFollowTarget/scripts/XArm/sandbox/look-forward.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2022, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
"""
# Notice
# 1. Changes to this file on Studio will not be preserved
# 2. The next conversion will overwrite the file with the same name
"""
import sys
import math
import time
import datetime
import random
import traceback
import threading
# IKFast for xarm 7
import numpy as np
"""
# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK
# git clone [email protected]:xArm-Developer/xArm-Python-SDK.git
# cd xArm-Python-SDK
# python setup.py install
"""
try:
from xarm.tools import utils
except:
pass
from xarm import version
from xarm.wrapper import XArmAPI
def pprint(*args, **kwargs):
    """Print *args* prefixed with a timestamp and the caller's line number.

    Falls back to a plain print() if the call stack cannot be inspected.
    """
    try:
        stack_tuple = traceback.extract_stack(limit=2)[0]
        print('[{}][{}] {}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), stack_tuple[1], ' '.join(map(str, args))))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed by a logging helper.
        print(*args, **kwargs)
# Joint-angle presets for the two poses used below (values look like
# degrees, the xArm SDK default — confirm against set_servo_angle docs).
frontForwardAngle = [0, 2.5, 0, 37.3, 0, -57.3, 0]
frontBackAngle = [0.0,-45.0,0.0,0.0,0.0,-45.0,0.0]
pprint('xArm-Python-SDK Version:{}'.format(version.__version__))
# Connect to the controller at its fixed LAN address and reset it to a
# clean, motion-enabled state.
arm = XArmAPI('192.168.4.15')
arm.clean_warn()
arm.clean_error()
arm.motion_enable(True)
# NOTE(review): presumably mode 0 / state 0 mean position-control mode and
# "ready" — confirm against the xArm SDK documentation.
arm.set_mode(0)
arm.set_state(0)
# Give the controller a moment to settle before commanding motion.
time.sleep(1)
variables = {}
# Shared parameter dict for the moves below; 'quit' is the shutdown flag
# flipped by the callbacks registered later.
params = {'speed': 50, 'acc': 2000, 'angle_speed': 20, 'angle_acc': 500, 'events': {}, 'variables': variables, 'callback_in_thread': True, 'quit': False}
params['angle_acc'] = 50
params['angle_speed'] = 1000
# Register error/warn changed callback
def error_warn_change_callback(data):
    """Set the shared quit flag when the controller reports an error code."""
    if data and data['error_code'] != 0:
        params['quit'] = True
        pprint('err={}, quit'.format(data['error_code']))
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.register_error_warn_changed_callback(error_warn_change_callback)
# Register state changed callback
def state_changed_callback(data):
    """Set the shared quit flag when the arm enters state 4 (on firmware > 1.1.0)."""
    if data and data['state'] == 4:
        if arm.version_number[0] >= 1 and arm.version_number[1] >= 1 and arm.version_number[2] > 0:
            params['quit'] = True
            pprint('state=4, quit')
arm.release_state_changed_callback(state_changed_callback)
arm.register_state_changed_callback(state_changed_callback)
# Register counter value changed callback (only on SDK versions that have it)
if hasattr(arm, 'register_count_changed_callback'):
    def count_changed_callback(data):
        """Log the controller counter value while the script is running."""
        if not params['quit']:
            pprint('counter val: {}'.format(data['count']))
    arm.register_count_changed_callback(count_changed_callback)
# Register connect changed callback
def connect_changed_callback(data):
    """Set the shared quit flag when the connection to the controller drops."""
    if data and not data['connected']:
        params['quit'] = True
        pprint('disconnect, connected={}, reported={}, quit'.format(data['connected'], data['reported']))
# BUG FIX: the original released error_warn_change_callback here (a
# copy-paste slip); release the connect callback that is registered below.
arm.release_connect_changed_callback(connect_changed_callback)
arm.register_connect_changed_callback(connect_changed_callback)
# Rotation: move to the extended pose, then retract while still facing forward.
if not params['quit']:
    if arm.error_code == 0 and not params['quit']:
        # Extended "look forward" pose.
        code = arm.set_servo_angle(angle=frontForwardAngle, speed=params['angle_speed'], mvacc=params['angle_acc'], wait=True, radius=-1.0)
        if code != 0:
            params['quit'] = True
            pprint('set_servo_angle, code={}'.format(code))
        # look forward but retracted
        # BUG FIX: the keyword was misspelled 'peed', which raised a
        # TypeError at runtime; it is 'speed'. The return code is now also
        # checked, matching the move above.
        code = arm.set_servo_angle(angle=frontBackAngle, speed=params['angle_speed'], mvacc=params['angle_acc'], wait=True, radius=-1.0)
        if code != 0:
            params['quit'] = True
            pprint('set_servo_angle, code={}'.format(code))
        # NOTE: a large body of commented-out scratch code (pyikfast FK/IK
        # experiments, relative set_position / set_tool_position tests) was
        # removed here; recover it from version control if needed again.
# Release all event callbacks registered above, each with the callback that
# was actually registered for that event.
if hasattr(arm, 'release_count_changed_callback'):
    arm.release_count_changed_callback(count_changed_callback)
# BUG FIX: the original passed state_changed_callback here.
arm.release_error_warn_changed_callback(error_warn_change_callback)
arm.release_state_changed_callback(state_changed_callback)
# BUG FIX: the original passed error_warn_change_callback here.
arm.release_connect_changed_callback(connect_changed_callback)
| 6,819 |
Python
| 32.762376 | 162 | 0.684851 |
MatthewDZane/XArmFollowTarget/scripts/XArm/sandbox/face-pose-mediapipe.py
|
# from this video:
# https://www.youtube.com/watch?v=-toNMaS4SeQ
import cv2
import mediapipe as mp
import numpy as np
import time
# MediaPipe FaceMesh model plus drawing helpers for the landmark overlay.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
# Default webcam.
cap = cv2.VideoCapture(0)
# Main loop: estimate head pose from webcam frames with MediaPipe + solvePnP
# and display the result (no socket output in this standalone version).
while cap.isOpened():
    success, image = cap.read()
    # Robustness fix: skip frames the camera failed to deliver;
    # cv2.flip()/cvtColor() would otherwise crash on a None image.
    if not success:
        continue
    start = time.time()
    # Mirror the frame and convert BGR -> RGB for MediaPipe.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # Mark the frame read-only while MediaPipe processes it (performance).
    image.flags.writeable = False
    results = face_mesh.process(image)
    image.flags.writeable = True
    # Back to BGR for OpenCV drawing/display.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    img_h, img_w, img_c = image.shape
    face_3d = []
    face_2d = []
    nose_norm = []
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Collect a fixed subset of landmarks (eye corners, nose tip,
            # mouth corners, chin) for the PnP head-pose estimate.
            for idx, lm in enumerate(face_landmarks.landmark):
                if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
                    if idx == 1:
                        # Nose tip: keep pixel, normalized and pseudo-3D coords.
                        nose_2d = (lm.x * img_w, lm.y * img_h)
                        nose_norm = (lm.x, lm.y)
                        nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
                    x, y = int(lm.x * img_w), int(lm.y * img_h)
                    face_2d.append([x, y])
                    face_3d.append([x, y, lm.z])
            face_2d = np.array(face_2d, dtype=np.float64)
            face_3d = np.array(face_3d, dtype=np.float64)
            # Approximate pinhole intrinsics: focal length ~ image width,
            # principal point at the image center, no lens distortion.
            focal_length = 1 * img_w
            cam_matrix = np.array([
                [focal_length, 0, img_h/2],
                [0, focal_length, img_w/2],
                [0, 0, 1]
            ])
            dist_matrix = np.zeros((4, 1), dtype=np.float64)
            success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
            # Rotation vector -> rotation matrix -> Euler-like angles.
            rmat, jac = cv2.Rodrigues(rot_vec)
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
            x = angles[0] * 360
            y = angles[1] * 360
            z = angles[2] * 360
            # Classify the head orientation for the on-screen label.
            if y < -10:
                text = "Looking Left"
            elif y > 10:
                text = "Looking Right"
            elif x < -10:
                text = "Looking Down"
            elif x > 10:
                text = "Looking Up"
            else:
                text = "Looking Forward"
            # Draw a line from the nose tip in the gaze direction.
            nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
            p1 = (int(nose_2d[0]), int(nose_2d[1]))
            p2 = (int(nose_2d[0] + y * 10), int(nose_2d[1] - x * 10))
            cv2.line(image, p1, p2, (255, 0, 0), 3)
            cv2.putText(image, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "x: "+str(np.round(x, 2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "y: "+str(np.round(y, 2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "z: "+str(np.round(z, 2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            # Offset of the nose tip from the image center, in normalized
            # image coordinates (note: sign of xdiff differs from the
            # socket-client variant of this script).
            xdiff = nose_norm[0]-0.5
            ydiff = 0.5-nose_norm[1]
            cv2.putText(image, "xdiff: "+str(np.round(xdiff, 2)), (500, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "ydiff: "+str(np.round(ydiff, 2)), (500, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            end = time.time()
            totalTime = end-start
            fps = 1/totalTime
            print("FPS: ", fps)
            cv2.putText(image, f'FPS: {int(fps)}', (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec = drawing_spec,
                connection_drawing_spec=drawing_spec)
            # try:
            #     print("sending:", [x, y, z, xdiff, ydiff])
            #     sendData = str([x, y, z, nose_norm[0], nose_norm[1]])
            #     mysocket.send(sendData.encode())
            # except:
            #     pass
    cv2.imshow('Head Pose Estimation', image)
    if cv2.waitKey(5) & 0xFF == 27:
        break
cap.release()
| 4,046 |
Python
| 25.801324 | 113 | 0.623826 |
MatthewDZane/XArmFollowTarget/config/extension.toml
|
[core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "XArm Follow Target"
description = ""
authors = ["NVIDIA"]
repository = ""
keywords = []
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
[[python.module]]
name = "scripts"
| 419 |
TOML
| 15.799999 | 34 | 0.658711 |
MatthewDZane/XArmFollowTarget/docs/CHANGELOG.md
|
# Changelog
## [0.1.0] - 2023-04-27
### Added
- Initial version of XArm Follow Target Extension
| 99 |
Markdown
| 11.499999 | 49 | 0.666667 |
MatthewDZane/XArmFollowTarget/docs/README.md
|
# Usage
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
| 129 |
Markdown
| 24.999995 | 118 | 0.736434 |
MatthewDZane/XArmFollowTarget/data/XArm/xarm_rmpflow_common.yaml
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
joint_limit_buffers: [.01, .03, .01, .01, .01, .01, .01]
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 1.
cspace_trajectory_rmp:
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp:
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp:
max_velocity: 4. # max_xd
velocity_damping_region: 1.5
damping_gain: 1000.0
metric_weight: 100. # metric_scalar
target_rmp:
accel_p_gain: 100.
accel_d_gain: 85.
accel_norm_eps: .075
metric_alpha_length_scale: .05
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
proximity_metric_boost_length_scale: .02
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false # Values >= .5 are true and < .5 are false
axis_target_rmp:
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
proximity_metric_boost_length_scale: .08
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp:
damping_gain: 50.
damping_std_dev: .04
damping_robustness_eps: 1e-2
damping_velocity_gate_length_scale: .01
repulsion_gain: 800.
repulsion_std_dev: .01
metric_modulation_radius: .5
metric_scalar: 10000. # Real value should be this.
#metric_scalar: 0. # Turns off collision avoidance.
metric_exploder_std_dev: .02
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 100.
canonical_resolve:
max_acceleration_norm: 50.
projection_tolerance: .01
verbose: false
body_cylinders:
- name: base_stem
pt1: [0,0,.333]
pt2: [0,0,0.]
radius: .05
- name: base_tree
pt1: [0,0,.333]
pt2: [0,0,.333]
radius: .15
# Each arm is approx. 1m from (arm) base to gripper center.
# .1661 between links (approx .15)
body_collision_controllers:
- name: link7
radius: .05
- name: link5
radius: .05
| 3,091 |
YAML
| 30.232323 | 78 | 0.614688 |
MatthewDZane/XArmFollowTarget/data/XArm/xarm_descriptor.yaml
|
api_version: 1.0
cspace:
- joint1
- joint2
- joint3
- joint4
- joint5
- joint6
- joint7
root_link: world
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]
cspace_to_urdf_rules: []
composite_task_spaces: []
#collision_spheres:
# - panda_link0:
# - "center": [0.0, 0.0, 0.05]
# "radius": 0.08
| 348 |
YAML
| 16.449999 | 44 | 0.534483 |
natestrong/omni-ext-hello-world/README.md
|
# Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will improve the Python development experience.
Look for the "nstrong.hello.world" extension in the extension manager and enable it. Try applying changes to any Python files; the extension will hot-reload and you can observe the results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable nstrong.hello.world
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script to do this is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice `exts` is repo subfolder with extensions. More information can be found in "Git URL as Extension Search Paths" section of developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,043 |
Markdown
| 37.566037 | 258 | 0.75722 |
natestrong/omni-ext-hello-world/tools/scripts/link_app.py
|
import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
    """Query the local Omniverse Launcher for installed Kit apps.

    Returns a dict mapping app slug -> (display name, install root).
    Exits the process if the Launcher endpoint cannot be reached.
    """
    pool = urllib3.PoolManager()
    try:
        response = pool.request("GET", "http://127.0.0.1:33480/components")
    except Exception as e:
        print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
        sys.exit(1)
    apps = {}
    components = json.loads(response.data.decode("utf-8"))
    for component in components:
        latest = component.get("installedVersions", {}).get("latest", "")
        if not latest:
            continue
        # Find the settings entry for the latest installed version and
        # record its launch root.
        for setting in component.get("settings", []):
            if setting.get("version", "") == latest:
                install_root = setting.get("launch", {}).get("root", "")
                apps[component["slug"]] = (component["name"], install_root)
                break
    return apps
def create_link(src, dst):
    """Create a folder link from *src* to *dst* via packman (symlink/junction)."""
    print(f"Creating a link '{src}' -> '{dst}'")
    packmanapi.link(src, dst)
# Preferred app slugs, in order, when the user does not pick one explicitly.
APP_PRIORITIES = ["code", "create", "view"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
    parser.add_argument(
        "--path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
        required=False,
    )
    parser.add_argument(
        "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
    )
    args = parser.parse_args()

    path = args.path
    if not path:
        # No explicit path given: discover installed apps through the Launcher.
        print("Path is not specified, looking for Omniverse Apps...")
        apps = find_omniverse_apps()
        if len(apps) == 0:
            print(
                "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
            )
            sys.exit(0)

        print("\nFound following Omniverse Apps:")
        for i, slug in enumerate(apps):
            name, root = apps[slug]
            print(f"{i}: {name} ({slug}) at: '{root}'")

        if args.app:
            # The user named an app slug; validate it against what was found.
            selected_app = args.app.lower()
            if selected_app not in apps:
                choices = ", ".join(apps.keys())
                print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
                sys.exit(0)
        else:
            # Auto-select by priority (code > create > view), else the first app found.
            selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
            if not selected_app:
                selected_app = next(iter(apps))

        print(f"\nSelected app: {selected_app}")
        _, path = apps[selected_app]

    if not os.path.exists(path):
        print(f"Provided path doesn't exist: {path}")
    else:
        # Link <repo>/app -> the selected Kit app's install root.
        SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
        create_link(f"{SCRIPT_ROOT}/../../app", path)
        print("Success!")
| 2,814 |
Python
| 32.117647 | 133 | 0.562189 |
natestrong/omni-ext-hello-world/tools/packman/config.packman.xml
|
<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 |
XML
| 34.333328 | 123 | 0.691943 |
natestrong/omni-ext-hello-world/tools/packman/bootstrap/install_package.py
|
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")


class TemporaryDirectory:
    """Context manager: create a temporary directory on enter, delete it on exit.

    (Predates reliance on ``tempfile.TemporaryDirectory``; kept so the
    bootstrap works on minimal Python environments.)
    """

    def __init__(self):
        self.path = None  # set on __enter__

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Remove temporary data created. (Parameters renamed from
        # type/value/traceback to avoid shadowing the `type` builtin.)
        shutil.rmtree(self.path)


def install_package(package_src_path, package_dst_path):
    """Extract the zip archive *package_src_path* into *package_dst_path*.

    The archive is first extracted to a temporary directory, then copied
    recursively so that missing intermediate folders (package name and
    version) are created. If the destination already exists, the
    installation is aborted with a warning instead of overwriting it.
    """
    with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
        zip_file.extractall(temp_dir)
        # Recursive copy is needed because both package name and version
        # folder could be missing in the target directory.
        try:
            shutil.copytree(temp_dir, package_dst_path)
        except OSError:
            # Destination already present (or copy failed): leave it untouched.
            logger.warning("Directory %s already present, packaged installation aborted", package_dst_path)
        else:
            logger.info("Package successfully installed to %s", package_dst_path)
# CLI entry point: install_package.py <package_zip_path> <destination_dir>
install_package(sys.argv[1], sys.argv[2])
| 1,844 |
Python
| 33.166666 | 108 | 0.703362 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/nstrong/hello/world/extension.py
|
import omni.ext
import omni.ui as ui
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    """Example public API: log the call, then return ``x`` raised to itself."""
    print("[nstrong.hello.world] some_public_function was called with x: ", x)
    result = pow(x, x)
    return result
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class NstrongHelloWorldExtension(omni.ext.IExt):
    """Example extension: a window with a click counter.

    Any class derived from ``omni.ext.IExt`` in the top-level module
    (declared in ``python.modules`` of ``extension.toml``) is instantiated
    when the extension is enabled; ``on_startup``/``on_shutdown`` bracket
    its lifetime.
    """

    def on_startup(self, ext_id):
        # ext_id is the current extension id; it can be used with the
        # extension manager to query e.g. the extension's location on disk.
        print("[nstrong.hello.world] nstrong hello world startup")

        self._count = 0
        self._window = ui.Window("My Window", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                counter_label = ui.Label("")

                def _increment():
                    self._count += 1
                    counter_label.text = f"count: {self._count}"

                def _reset():
                    self._count = 0
                    counter_label.text = "empty"

                # Start from the "empty" state.
                _reset()

                with ui.HStack():
                    ui.Button("Add", clicked_fn=_increment)
                    ui.Button("Reset", clicked_fn=_reset)

    def on_shutdown(self):
        print("[nstrong.hello.world] nstrong hello world shutdown")
| 1,583 |
Python
| 34.999999 | 119 | 0.607707 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/nstrong/hello/world/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/nstrong/hello/world/tests/__init__.py
|
from .test_hello_world import *
| 31 |
Python
| 30.999969 | 31 | 0.774194 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/nstrong/hello/world/tests/test_hello_world.py
|
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulates UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import nstrong.hello.world
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    """API and UI tests for the nstrong.hello.world extension."""

    async def setUp(self):
        # No per-test fixtures required.
        pass

    async def tearDown(self):
        # Nothing to clean up.
        pass

    async def test_hello_public_function(self):
        # some_public_function(x) returns x ** x.
        self.assertEqual(nstrong.hello.world.some_public_function(4), 256)

    async def test_window_button(self):
        # Locate the label and both buttons inside the extension's window.
        label = ui_test.find("My Window//Frame/**/Label[*]")
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")

        # Reset puts the label back into its initial "empty" state.
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")

        # Every click on Add increments the displayed counter by one.
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
| 1,674 |
Python
| 34.638297 | 142 | 0.682198 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "nstrong hello world"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import nstrong.hello.world".
[[python.module]]
name = "nstrong.hello.world"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,583 |
TOML
| 31.999999 | 118 | 0.74542 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/docs/CHANGELOG.md
|
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 |
Markdown
| 18.888887 | 80 | 0.702247 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/docs/README.md
|
# Python Extension Example [nstrong.hello.world]
This is an example of pure python Kit extension. It is intended to be copied and serve as a template to create new extensions.
| 178 |
Markdown
| 34.799993 | 126 | 0.786517 |
natestrong/omni-ext-hello-world/exts/nstrong.hello.world/docs/index.rst
|
nstrong.hello.world
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: nstrong.hello.world
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 339 |
reStructuredText
| 15.190475 | 43 | 0.619469 |
Vadim-Karpenko/omniverse-camera-view-optimizer/CHANGELOG.md
|
# Change Log
All notable changes to this project will be documented in this file.
## [1.0.3] - 2022-10-05
Fixed size slider
## [1.0.2] - 2022-10-04
Improve README.md for Community Tab
## [1.0.1] - 2022-10-03
Convert icon and preview_image to png format
## [1.0.0] - 2022-10-01
Initial release.
| 305 |
Markdown
| 15.999999 | 68 | 0.672131 |
Vadim-Karpenko/omniverse-camera-view-optimizer/README.md
|
# Camera View Optimizer

### About
Omniverse extensions that allow you to quickly hide/remove objects that are not visible to the camera, to improve performance and decrease VRAM usage.
## Quick links
* [Installation](#installation)
* [How to use](#how-to-use)
* [Linking with an Omniverse app](#linking-with-an-omniverse-app)
* [Contributing](#contributing)
* [Changelog](CHANGELOG.md)
## Installation
To add this extension to your Omniverse app:
### From Community tab
1. Go to **Extension Manager** (Window - Extensions) — Community tab
2. Search for **Camera View Optimizer** extension and enable it
### Manual
1. Go to **Extension Manager** (Window - Extensions) — **Gear Icon** — **Extension Search Path**
2. Add this as a search path:
```
git://github.com/Vadim-Karpenko/omniverse-camera-view-optimizer?branch=main&dir=exts
```
3. Search for **Camera View Optimizer** extension and enable it
A new window will appear next to the Properties tab:

## How to use
- Open a scene you want to optimize
- Open an extension window
- The current view in the viewport is used to scan for visible objects, so make sure your camera is positioned correctly.
- Make sure settings are set correctly. Hover over each option to read about what it does.
- Click **Optimize** button.
**Hide if contains in title** and **Show if contains in title** fields support a regular expressions (regex) that allows you to filter any object based on its title, with any pattern, simple or complex.
Regex examples:
- `(^Abs.*)` - all names starting with `Abs`
- `(.*nt$)` - all names ending with `nt`
- `(.*ph.*)` - contains `ph`
- `(.*\d.*)` - contains any number
- `(^((?!tree).)*$)` - any title that does not contain `tree`.
You can also combine multiple conditions:
- `(.*El.*|t1$)` - Has `El` **OR** ends with `t1`
- `(.*El.*)(t1$)` - Has `El` **AND** ends with `t1`
## Linking with an Omniverse app
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```bash
> link_app.bat
```
There is also an analogous `link_app.sh` for Linux. If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```bash
> link_app.bat --app code
```
You can also just pass a path to create link to:
```bash
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2022.1.3"
```
## Contributing
Feel free to create a new issue if you run into any problems. Pull requests are welcomed.
| 2,740 |
Markdown
| 33.696202 | 202 | 0.712774 |
Vadim-Karpenko/omniverse-camera-view-optimizer/tools/scripts/link_app.py
|
import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
    """Return {slug: (name, install_root)} for apps known to the local Omniverse Launcher.

    Exits the process with status 1 when the Launcher's local HTTP endpoint
    (port 33480) cannot be reached.
    """
    http = urllib3.PoolManager()
    try:
        r = http.request("GET", "http://127.0.0.1:33480/components")
    except Exception as e:
        print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
        sys.exit(1)

    apps = {}
    for x in json.loads(r.data.decode("utf-8")):
        latest = x.get("installedVersions", {}).get("latest", "")
        if latest:
            # Locate the settings entry for the latest installed version.
            for s in x.get("settings", []):
                if s.get("version", "") == latest:
                    root = s.get("launch", {}).get("root", "")
                    apps[x["slug"]] = (x["name"], root)
                    break
    return apps


def create_link(src, dst):
    """Create a folder link from *src* to *dst* via packman (symlink/junction)."""
    print(f"Creating a link '{src}' -> '{dst}'")
    packmanapi.link(src, dst)


# Preferred app slugs, in order, when the user does not pick one explicitly.
APP_PRIORITIES = ["code", "create", "view"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
    parser.add_argument(
        "--path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
        required=False,
    )
    parser.add_argument(
        "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
    )
    args = parser.parse_args()

    path = args.path
    if not path:
        # No explicit path given: discover installed apps through the Launcher.
        print("Path is not specified, looking for Omniverse Apps...")
        apps = find_omniverse_apps()
        if len(apps) == 0:
            print(
                "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
            )
            sys.exit(0)

        print("\nFound following Omniverse Apps:")
        for i, slug in enumerate(apps):
            name, root = apps[slug]
            print(f"{i}: {name} ({slug}) at: '{root}'")

        if args.app:
            # The user named an app slug; validate it against what was found.
            selected_app = args.app.lower()
            if selected_app not in apps:
                choices = ", ".join(apps.keys())
                print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
                sys.exit(0)
        else:
            # Auto-select by priority (code > create > view), else the first app found.
            selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
            if not selected_app:
                selected_app = next(iter(apps))

        print(f"\nSelected app: {selected_app}")
        _, path = apps[selected_app]

    if not os.path.exists(path):
        print(f"Provided path doesn't exist: {path}")
    else:
        # Link <repo>/app -> the selected Kit app's install root.
        SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
        create_link(f"{SCRIPT_ROOT}/../../app", path)
        print("Success!")
| 2,813 |
Python
| 32.5 | 133 | 0.562389 |
Vadim-Karpenko/omniverse-camera-view-optimizer/tools/packman/config.packman.xml
|
<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 |
XML
| 34.333328 | 123 | 0.691943 |
Vadim-Karpenko/omniverse-camera-view-optimizer/tools/packman/bootstrap/install_package.py
|
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")


class TemporaryDirectory:
    """Context manager: create a temporary directory on enter, delete it on exit."""

    def __init__(self):
        self.path = None  # set on __enter__

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, type, value, traceback):
        # Remove temporary data created
        shutil.rmtree(self.path)


def install_package(package_src_path, package_dst_path):
    """Extract the zip at *package_src_path* and copy it into *package_dst_path*.

    If the destination already exists, the installation is aborted with a
    warning instead of overwriting it.
    """
    with zipfile.ZipFile(
        package_src_path, allowZip64=True
    ) as zip_file, TemporaryDirectory() as temp_dir:
        zip_file.extractall(temp_dir)
        # Recursively copy (temp_dir will be automatically cleaned up on exit)
        try:
            # Recursive copy is needed because both package name and version folder could be missing in
            # target directory:
            shutil.copytree(temp_dir, package_dst_path)
        except OSError as exc:
            logger.warning(
                "Directory %s already present, packaged installation aborted" % package_dst_path
            )
        else:
            logger.info("Package successfully installed to %s" % package_dst_path)


# CLI entry point: install_package.py <package_zip_path> <destination_dir>
install_package(sys.argv[1], sys.argv[2])
| 1,888 |
Python
| 31.568965 | 103 | 0.68697 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/karpenko/camera_view_optimizer/ext/style.py
|
import omni.ui as ui
from omni.ui import color as cl
# The main style dict applied to the extension's window (see extension.py).
cvo_window_style = {
    # Styling for the window title label created with name="main_label".
    "Label::main_label": {
        "alignment": ui.Alignment.LEFT_CENTER,
        "color": cl("#a1a1a1"),  # light grey
        "font_size": 24,
    },
}
| 236 |
Python
| 20.545453 | 46 | 0.584746 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/karpenko/camera_view_optimizer/ext/extension.py
|
import asyncio
import math
import re
import omni.ext
import omni.kit.commands
import omni.kit.viewport_legacy
import omni.physx.scripts.utils as core_utils
import omni.ui as ui
import omni.usd
from omni.kit.viewport.utility import (get_active_viewport_camera_string,
get_active_viewport_window,
get_ui_position_for_prim)
from pxr import Gf, Sdf, Usd, UsdGeom
from .style import cvo_window_style
class CameraViewOptimizer(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
    def on_startup(self, ext_id):
        """Acquire the USD context/stage, build the window and dock it.

        :param ext_id: id of this extension (unused beyond logging here).
        """
        print("[karpenko.camera_view_optimizer.ext] CameraViewOptimizer startup")
        self._usd_context = omni.usd.get_context()
        self.stage = self._usd_context.get_stage()
        self.render_main_window()
        # show the window in the usual way if the stage is loaded
        if self.stage:
            self._window.deferred_dock_in("Property")
        else:
            # otherwise, show the window after the stage is loaded
            self._setup_window_task = asyncio.ensure_future(self._dock_window())
def on_shutdown(self):
"""
It deletes all the variables that were created in the extension
"""
print("[karpenko.camera_view_optimizer.ext] CameraViewOptimizer shutdown")
self._fov_slider = None
self._max_size_slider = None
self._max_distance_field = None
self._hide_objects_field = None
self._show_objects_field = None
self._base_path_field = None
self._delete_objects = None
self._button_optimize = None
self._button_show_all = None
self._button_delete_hidden = None
    def check_stage(self):
        """
        Lazily (re)acquire the USD stage from the context when it is missing
        or was not yet loaded at startup.
        """
        if not hasattr(self, "stage") or not self.stage:
            self._usd_context = omni.usd.get_context()
            self.stage = self._usd_context.get_stage()
    def optimize(self):
        """
        Hide every object that is not visible from the active viewport camera.

        All scene changes (focal-length tweak, hiding, restore) are grouped
        into a single undo step.
        """
        self.check_stage()
        if not self.stage:
            return
        window = get_active_viewport_window()
        # Getting the camera path and prim from the stage.
        camera_path = get_active_viewport_camera_string()
        if not camera_path:
            return
        camera_prim = self.stage.GetPrimAtPath(camera_path)
        # get camera transform
        camera_translate = camera_prim.GetAttribute("xformOp:translate").Get()
        # It's a helper class that allows to get camera forward vector.
        camera_th = core_utils.CameraTransformHelper(camera_prim)
        # Get the vector of the camera forward direction.
        cameraForward = camera_th.get_forward()
        # It's creating a new translate location approximately 10 units forward from the camera.
        # It will be used to determine if an object is in front of the camera or not by comparing distances
        # between the camera and the object and the camera and the new translate location.
        camera_new_location = Gf.Vec3d(
            camera_translate[0]+(cameraForward[0] * 10),
            camera_translate[1]+(cameraForward[1] * 10),
            camera_translate[2]+(cameraForward[2] * 10)
        )
        # It's getting all objects from the stage (visible or not).
        all_objects = self.get_all_objects()
        # return if there are no objects
        if not all_objects:
            return
        # It's creating a list of objects that are not visible from the camera.
        not_visible = []
        # Get the camera focal length
        focal_length_param = Sdf.Path(f'{camera_path}.focalLength')
        focal_length = self.stage.GetPrimAtPath(Sdf.Path(camera_path)).GetAttribute('focalLength').Get()
        max_size = self._max_size_slider.model.as_float
        with omni.kit.undo.group():
            if all_objects:
                # Changing the value of the focal length parameter of the camera so we can scan more objects.
                omni.kit.commands.execute(
                    'ChangePropertyCommand',
                    prop_path=focal_length_param,
                    value=self._fov_slider.model.as_float,
                    prev=focal_length,
                    timecode=Usd.TimeCode.Default(),
                )
            for prim in all_objects:
                # Getting the position of the prim in the window.
                # Because it is a screen-space calculation, for some reason it returns is_visible=True when objects
                # is right behind the camera.
                ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath(), alignment=0)
                # additional check for visibility with different origin points of the object
                if not is_visible:
                    ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath(), alignment=1)
                if not is_visible:
                    ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath(), alignment=2)
                if not is_visible:
                    ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath(), alignment=3)
                if not is_visible:
                    ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath(), alignment=4)
                # Getting the position of the prim in the world.
                prim_translate = prim.GetAttribute("xformOp:translate").Get()
                # Calculating the distance between the camera and the prim.
                distance_to_camera = self.get_distance_between_translations(camera_translate, prim_translate)
                # Calculating the distance between the camera and the new translate location.
                distance_to_cameraforward = self.get_distance_between_translations(camera_new_location, prim_translate)
                # If the distance between the camera and the prim is less than the distance between
                # the camera and the new translate location, which is 10 units forward from first location,
                # it means that the prim is behind the camera.
                # NOTE(review): the +6 slack appears to be an empirical tolerance — confirm.
                if is_visible:
                    if distance_to_cameraforward > distance_to_camera + 6:
                        is_visible = False
                is_distant = False
                # Hide if the object is too distant
                if distance_to_camera > self._max_distance_field.model.as_float:
                    is_visible = False
                    is_distant = True
                # Checking if the ignore_size_distant_objects is true and if the object is distant.
                if self._ignore_size_distant_objects.model.as_bool and is_distant:
                    pass
                else:
                    prim_size = self.get_prim_size(prim)
                    # If one of the dimensions of the prim is bigger than the limit, we will not consider to hide it.
                    if not is_visible and max_size != 0:
                        if max_size < prim_size[0] or max_size < prim_size[1] or max_size < prim_size[2]:
                            is_visible = True
                # Checking if the prim name matches the show or hide pattern
                # (case-insensitive substring OR regex match).
                prim_name = prim.GetName()
                show_pattern = self._show_objects_field.model.as_string
                hide_pattern = self._hide_objects_field.model.as_string
                if show_pattern and (show_pattern.lower() in prim_name.lower() or re.match(show_pattern, prim_name)):
                    is_visible = True
                elif hide_pattern and (hide_pattern.lower() in prim_name.lower() or re.match(hide_pattern, prim_name)):
                    is_visible = False
                # Final checking if the prim is visible or not.
                if not is_visible:
                    not_visible.append({
                        "prim": prim,
                        "prim_path": prim.GetPath(),
                        "type": prim.GetTypeName(),
                    })
            # Creating a list of object types that are not allowed to be hidden.
            not_allowed_types = []
            # Check if we should hide lights as well
            if not self._hide_lights.model.as_bool:
                not_allowed_types.extend([
                    "DistantLight",
                    "SphereLight",
                    "DiskLight",
                    "RectLight",
                    "CylinderLight",
                    "ConeLight"]
                )
            if not_visible:
                omni.kit.commands.execute(
                    'HideSelectedPrimsCommand',
                    selected_paths=[i["prim_path"] for i in not_visible if i["type"] not in not_allowed_types],
                )
            if focal_length:
                # Changing the focal length of the camera back to the value before the scan.
                omni.kit.commands.execute(
                    'ChangePropertyCommand',
                    prop_path=focal_length_param,
                    value=focal_length,
                    prev=self._fov_slider.model.as_float,
                    timecode=Usd.TimeCode.Default(),
                )
def get_distance_between_translations(self, pos1, pos2):
"""
It returns the distance between two translations
:param pos1: The position of the first object
:param pos2: The position of the object you want to measure the distance to
:return: The distance between two points in 3D space.
"""
if pos1 and pos2:
return math.sqrt(
(pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2 + (pos1[2] - pos2[2]) ** 2
)
return 0
    def get_prim_size(self, prim):
        """
        Return the world-space size of *prim*'s axis-aligned bounding box.

        :param prim: The prim you want to get the size of
        :return: The (x, y, z) size of the aligned bounding-box range.
        """
        bbox_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
        bbox_cache.Clear()
        prim_bbox = bbox_cache.ComputeWorldBound(prim)
        prim_range = prim_bbox.ComputeAlignedRange()
        return prim_range.GetSize()
def render_main_window(self):
self._window = ui.Window("Camera View Omptimizer", width=300, height=300)
with self._window.frame:
with ui.VStack(style=cvo_window_style):
ui.Label("Camera View Optimizer", height=0, name="main_label")
ui.Spacer(height=10)
ui.Separator(height=6)
ui.Spacer(height=10)
with ui.ScrollingFrame(height=ui.Percent(63)):
with ui.VStack():
with ui.VStack():
with ui.HStack(height=0):
tooltip = "When checking the visibility of the object, the camera FOV will be set " \
"to the value specified in the slider and then back once scan is complete"
ui.Label("Scan FOV (mm):", elided_text=True, tooltip=tooltip)
# Slider for the FOV value of the camera
self._fov_slider = ui.IntSlider(
min=1,
max=300,
step=1,
tooltip=tooltip,
)
self._fov_slider.model.set_value(4)
ui.Spacer(height=10)
with ui.HStack(height=0):
tooltip = "Ignore object if one of the three dimensions is greater than values provided," \
"useful for walls, floors, etc."
ui.Label("Max object size:", elided_text=True, tooltip=tooltip)
# Slider for the max object size
self._max_size_slider = ui.IntField(
min=0,
max=100000,
step=1,
tooltip=tooltip,
)
self._max_size_slider.model.set_value(150)
ui.Spacer(height=10)
with ui.VStack():
tooltip = "Should the extension hide lights?"
# Available types of objects to hide
with ui.HStack(height=0):
ui.Label("Process Lights:", elided_text=True, tooltip=tooltip, width=ui.Percent(50))
self._hide_lights = ui.CheckBox(tooltip=tooltip, width=ui.Percent(5))
ui.Line(name="default", width=ui.Percent(45))
ui.Spacer(height=10)
# max distance int field
with ui.VStack():
tooltip = "Max distance of the object from the camera. If the object is further than this" \
" value, it will be hidden even if it is visible to the camera."
with ui.HStack(height=0):
ui.Label("Max distance:", tooltip=tooltip)
self._max_distance_field = ui.IntField(tooltip=tooltip)
self._max_distance_field.model.set_value(10000)
ui.Spacer(height=10)
# ignore size settings for distant objects
with ui.VStack():
tooltip = "No matter the size of the object, if it is too far away, it will be hidden"
with ui.HStack(height=0):
ui.Label(
"Ignore size for distant objects:",
elided_text=True,
tooltip=tooltip,
width=ui.Percent(50)
)
self._ignore_size_distant_objects = ui.CheckBox(tooltip=tooltip, width=ui.Percent(5))
self._ignore_size_distant_objects.model.set_value(False)
ui.Line(name="default", width=ui.Percent(45))
ui.Spacer(height=10)
# String field to hide objects by title
with ui.VStack():
tooltip = "Hide objects by title. Partial text match and regex is supported."
with ui.HStack(height=0):
ui.Label("Hide if contains in title:", elided_text=True, tooltip=tooltip)
self._hide_objects_field = ui.StringField(tooltip=tooltip)
ui.Spacer(height=10)
# string field to show objects by title
with ui.VStack():
tooltip = "Show objects by title. Partial text match and regex is supported."
with ui.HStack(height=0):
ui.Label("Show if contains in title:", elided_text=True, tooltip=tooltip)
self._show_objects_field = ui.StringField(tooltip=tooltip)
ui.Spacer(height=10)
# base path where to search for objects
with ui.VStack():
with ui.HStack(height=0):
tooltip = "Base path where to search for objects. If empty or invalid, " \
"DefaultPrim will be used, so make sure this is value is correct."
ui.Label("Base path:", elided_text=True, tooltip=tooltip)
self._base_path_field = ui.StringField(tooltip=tooltip)
if self.stage:
self._base_path_field.model.set_value(
self.stage.GetDefaultPrim().GetPath().pathString
)
ui.Spacer()
with ui.VStack(height=40):
with ui.HStack(height=40):
self._button_show_all = ui.Button(
"Show all",
height=40,
clicked_fn=self.show_all,
)
self._button_delete_hidden = ui.Button(
"Delete hidden",
height=40,
clicked_fn=self.delete_hidden,
)
with ui.VStack(height=40):
# Button to execute the extension
self._button_optimize = ui.Button(
"Optimize",
height=40,
clicked_fn=self.optimize,
)
    async def _dock_window(self):
        """
        Wait (up to 3 frames) for the Property window to appear, then dock
        this extension's window next to it and populate the base-path field
        once the stage is available.
        """
        property_win = None
        frames = 3
        while frames > 0:
            if not property_win:
                property_win = ui.Workspace.get_window("Property")
            if property_win:
                break  # early out
            frames = frames - 1
            await omni.kit.app.get_app().next_update_async()
        # Dock to property window after 5 frames. It's enough for window to appear.
        for _ in range(5):
            await omni.kit.app.get_app().next_update_async()
        if property_win:
            self._window.deferred_dock_in("Property")
        self._setup_window_task = None
        # Give the stage a few more frames to load before reading its default prim.
        for _ in range(10):
            await omni.kit.app.get_app().next_update_async()
        self.check_stage()
        if self.stage:
            self._base_path_field.model.set_value(self.stage.GetDefaultPrim().GetPath().pathString)
def get_default_prim(self):
"""
If the base path field is empty, return the default prim, otherwise return the prim at the path in the base path
field
:return: The default prim.
"""
self.check_stage()
if not self.stage:
return
if self._base_path_field.model.as_string == "":
return self.stage.GetDefaultPrim()
else:
custom_default_prim = self.stage.GetPrimAtPath(self._base_path_field.model.as_string)
if not custom_default_prim:
return self.stage.GetDefaultPrim()
else:
return custom_default_prim
    def get_all_objects(self, only_visible=False):
        """
        It returns a list of all the objects in the scene, and if the only_visible parameter is set to True,
        it will only return objects that are visible
        :param only_visible: If True, only visible objects will be returned, defaults to False (optional)
        :return: A list of objects
        """
        if not self.stage:
            return []
        valid_objects = []
        default_prim = self.get_default_prim()
        for obj in self.get_all_children_of_prim(default_prim):
            if obj:
                if only_visible:
                    # Treat any value other than "invisible" (including a
                    # missing attribute value) as visible.
                    visibility_attr = obj.GetAttribute("visibility")
                    if visibility_attr:
                        visibility = visibility_attr.Get()
                        if visibility != "invisible":
                            valid_objects.append(obj)
                else:
                    valid_objects.append(obj)
        return valid_objects
def get_all_children_of_prim(self, prim):
"""
It takes a prim as an argument and returns a list of all the prims that are children of that prim
:param prim: The prim you want to get the children of
:return: A list of all the children of the prim.
"""
children = []
for child in prim.GetAllChildren():
if child.GetTypeName() != "Scope":
children.append(child)
children.extend(self.get_all_children_of_prim(child))
return children
def get_all_hidden_objects(self):
"""
It returns a list of all the hidden objects in the scene
:return: A list of objects
"""
if not self.stage:
return []
valid_objects = []
default_prim = self.get_default_prim()
for obj in self.get_all_children_of_prim(default_prim):
if obj:
visibility_attr = obj.GetAttribute("visibility")
if visibility_attr:
visibility = visibility_attr.Get()
if visibility == "invisible":
valid_objects.append(obj)
return valid_objects
def show_all(self):
"""
It gets all the hidden objects in the scene, and if there are any, it shows them
:return: A list of objects that were hidden.
"""
objects_to_show = self.get_all_hidden_objects()
if objects_to_show:
omni.kit.commands.execute(
'ShowSelectedPrimsCommand',
selected_paths=[i.GetPath() for i in objects_to_show],
)
return objects_to_show
def delete_hidden(self):
"""
It gets all the hidden objects in the scene, and if there are any, it deletes them
:return: A list of objects that were deleted.
"""
objects_to_delete = self.get_all_hidden_objects()
if objects_to_delete:
omni.kit.commands.execute(
'DeletePrims',
paths=[i.GetPath() for i in objects_to_delete],
)
return objects_to_delete
| 22,251 |
Python
| 44.691992 | 120 | 0.522673 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/karpenko/camera_view_optimizer/ext/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/karpenko/camera_view_optimizer/ext/commands/__init__.py
|
from .usd_commands import *
| 27 |
Python
| 26.999973 | 27 | 0.777778 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/karpenko/camera_view_optimizer/ext/commands/usd_commands.py
|
from typing import List
import omni.kit.commands
import omni.timeline
import omni.usd
from pxr import UsdGeom
class HideSelectedPrimsCommand(omni.kit.commands.Command):
    """
    Undoable command that hides the selected primitives.

    Args:
        selected_paths (List[str]): Prim paths.
    """

    def __init__(self, selected_paths: List[str]):
        """
        Snapshot the current stage and a private copy of the target paths.

        :param selected_paths: Paths of the prims this command operates on.
        :type selected_paths: List[str]
        """
        self._stage = omni.usd.get_context().get_stage()
        self._selected_paths = list(selected_paths)

    def _hide(self):
        """Make every targeted prim invisible."""
        for path in self._selected_paths:
            UsdGeom.Imageable(self._stage.GetPrimAtPath(path)).MakeInvisible()

    def _show(self):
        """Make every targeted prim visible."""
        for path in self._selected_paths:
            UsdGeom.Imageable(self._stage.GetPrimAtPath(path)).MakeVisible()

    def do(self):
        """Execute: hide the prims."""
        self._hide()

    def undo(self):
        """Revert: show the prims again."""
        self._show()
class ShowSelectedPrimsCommand(omni.kit.commands.Command):
    """
    Undoable command that shows the selected primitives.

    Args:
        selected_paths (List[str]): Prim paths.
    """

    def __init__(self, selected_paths: List[str]):
        """
        Snapshot the current stage and a private copy of the target paths.

        :param selected_paths: Paths of the prims this command operates on.
        :type selected_paths: List[str]
        """
        self._stage = omni.usd.get_context().get_stage()
        self._selected_paths = list(selected_paths)

    def _hide(self):
        """Make every targeted prim invisible."""
        for path in self._selected_paths:
            UsdGeom.Imageable(self._stage.GetPrimAtPath(path)).MakeInvisible()

    def _show(self):
        """Make every targeted prim visible."""
        for path in self._selected_paths:
            UsdGeom.Imageable(self._stage.GetPrimAtPath(path)).MakeVisible()

    def do(self):
        """Execute: show the prims."""
        self._show()

    def undo(self):
        """Revert: hide the prims again."""
        self._hide()
| 2,766 |
Python
| 28.126315 | 105 | 0.605929 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.3"
# The title and description fields are primarily for displaying extension info in UI
title = "Camera View Optimizer"
description="Omniverse extensions that allow you to quickly hide/delete objects that are not visible to the camera to save performance and VRAM."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/Vadim-Karpenko/omniverse-camera-view-optimizer"
# One of categories for UI.
category = "Lighting & Rendering"
# Keywords for the extension
keywords = ["camera", "view", "optimizer", "optimization", "performance", "vram", "memory", "hide", "delete", "visibility", "culling"]
authors = ["Vadim Karpenko"]
icon = "data/icon.png"
preview_image = "data/preview_image.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import karpenko.camera_view_optimizer.ext".
[[python.module]]
name = "karpenko.camera_view_optimizer.ext"
[[python.module]]
name = "karpenko.camera_view_optimizer.ext.commands"
| 1,223 |
TOML
| 32.08108 | 145 | 0.744072 |
Vadim-Karpenko/omniverse-camera-view-optimizer/exts/karpenko.camera_view_optimizer.ext/docs/README.md
|
## Camera View Optimizer
An Omniverse extension that allows you to quickly hide or remove objects that are not visible to the camera, improving performance and decreasing VRAM usage.
| 175 |
Markdown
| 86.999957 | 150 | 0.822857 |
mrsbCN/orbit.dexterous_hand/pyproject.toml
|
[build-system]
requires = ["setuptools", "toml"]
build-backend = "setuptools.build_meta"
[tool.isort]
atomic = true
profile = "black"
line_length = 120
py_version = 310
skip_glob = ["docs/*", "logs/*", "_orbit/*", "_isaac_sim/*"]
group_by_package = true
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"ORBITPARTY",
"FIRSTPARTY",
"LOCALFOLDER",
]
extra_standard_library = [
"numpy",
"h5py",
"open3d",
"torch",
"tensordict",
"bpy",
"matplotlib",
"gymnasium",
"gym",
"scipy",
"hid",
"yaml",
"prettytable",
"toml",
"trimesh",
"tqdm",
]
known_thirdparty = [
"omni.isaac.core",
"omni.replicator.isaac",
"omni.replicator.core",
"pxr",
"omni.kit.*",
"warp",
"carb",
]
known_orbitparty = [
"omni.isaac.orbit",
"omni.isaac.orbit_tasks",
"omni.isaac.orbit_assets"
]
# Modify the following to include the package names of your first-party code
known_firstparty = "orbit.ext_template"
known_local_folder = "config"
[tool.pyright]
exclude = [
"**/__pycache__",
"**/_isaac_sim",
"**/_orbit",
"**/docs",
"**/logs",
".git",
".vscode",
]
typeCheckingMode = "basic"
pythonVersion = "3.10"
pythonPlatform = "Linux"
enableTypeIgnoreComments = true
# This is required as the CI pre-commit does not download the module (i.e. numpy, torch, prettytable)
# Therefore, we have to ignore missing imports
reportMissingImports = "none"
# This is required to ignore for type checks of modules with stubs missing.
reportMissingModuleSource = "none" # -> most common: prettytable in mdp managers
reportGeneralTypeIssues = "none" # -> raises 218 errors (usage of literal MISSING in dataclasses)
reportOptionalMemberAccess = "warning" # -> raises 8 errors
reportPrivateUsage = "warning"
| 1,823 |
TOML
| 20.458823 | 103 | 0.633022 |
mrsbCN/orbit.dexterous_hand/setup.py
|
"""Installation script for the 'orbit.ext_template' python package."""
import os
import toml
from setuptools import setup
# Obtain the extension data from the extension.toml file
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the extension.toml file
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
# NOTE: Add dependencies
"psutil",
]
# Installation operation
setup(
# TODO: Change your package naming
# -----------------------------------------------------------------
name="orbit.ext_template",
packages=["orbit.ext_template"],
# -----------------------------------------------------------------
author=EXTENSION_TOML_DATA["package"]["author"],
maintainer=EXTENSION_TOML_DATA["package"]["maintainer"],
maintainer_email=EXTENSION_TOML_DATA["package"]["maintainer_email"],
url=EXTENSION_TOML_DATA["package"]["repository"],
version=EXTENSION_TOML_DATA["package"]["version"],
description=EXTENSION_TOML_DATA["package"]["description"],
keywords=EXTENSION_TOML_DATA["package"]["keywords"],
install_requires=INSTALL_REQUIRES,
license="BSD-3-Clause",
include_package_data=True,
python_requires=">=3.10",
classifiers=[
"Natural Language :: English",
"Programming Language :: Python :: 3.10",
"Isaac Sim :: 2023.1.0-hotfix.1",
"Isaac Sim :: 2023.1.1",
],
zip_safe=False,
)
| 1,524 |
Python
| 32.888888 | 89 | 0.622703 |
mrsbCN/orbit.dexterous_hand/README.md
|
# Extension Template for Orbit
[](https://docs.omniverse.nvidia.com/isaacsim/latest/overview.html)
[](https://isaac-orbit.github.io/orbit/)
[](https://docs.python.org/3/whatsnew/3.10.html)
[](https://releases.ubuntu.com/20.04/)
[](https://pre-commit.com/)
[](https://opensource.org/license/mit)
## Overview
This repository serves as a template for building projects or extensions based on Orbit. It allows you to develop in an isolated environment, outside of the core Orbit repository.
- **Project Template**
Ensures access to `Isaac Sim` and `Orbit` functionalities, which can be used as a project template.
- **Omniverse Extension**
Can be used as an Omniverse extension, ideal for projects that leverage the Omniverse platform's graphical user interface.
**Key Features:**
- `Isolation` Work outside the core Orbit repository, ensuring that your development efforts remain self-contained.
- `Flexibility` This template is set up to allow your code to be run as an extension in Omniverse.
**Keywords:** extension, template, orbit
### License
The source code is released under a [BSD 3-Clause license](https://opensource.org/licenses/BSD-3-Clause).
**Author: The ORBIT Project Developers<br />
Affiliation: [The AI Institute](https://theaiinstitute.com/)<br />
Maintainer: Nico Burger, [email protected]**
## Setup
Depending on the use case defined [above](#overview), follow the instructions to set up your extension template. Start with the [Basic Setup](#basic-setup), which is required for either use case.
### Basic Setup
#### Dependencies
This template depends on Isaac Sim and Orbit. For detailed instructions on how to install these dependencies, please refer to the [installation guide](https://isaac-orbit.github.io/orbit/source/setup/installation.html).
- [Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html)
- [Orbit](https://isaac-orbit.github.io/orbit/)
#### Configuration
Decide on a name for your project or extension. This guide will refer to this name as `<your_extension_name>`.
- Create a new repository based off this template [here](https://github.com/new?owner=isaac-orbit&template_name=orbit.ext_template&template_owner=isaac-orbit). Name your forked repository using the following convention: `"orbit.<your_extension_name>"`.
- Clone your forked repository to a location **outside** the orbit repository.
```bash
git clone <your_repository_url>
```
- To tailor this template to your needs, customize the settings in `config/extension.toml` and `setup.py` by completing the sections marked with TODO.
- Rename your source folder.
```bash
cd orbit.<your_extension_name>
mv orbit/ext_template orbit/<your_extension_name>
```
- Define the following environment variable to specify the path to your Orbit installation:
```bash
# Set the ORBIT_PATH environment variable to point to your Orbit installation directory
export ORBIT_PATH=<your_orbit_path>
```
#### Set Python Interpreter
Although using a virtual environment is optional, we recommend using `conda` (detailed instructions [here](https://isaac-orbit.github.io/orbit/source/setup/installation.html#setting-up-the-environment)). If you decide on using Isaac Sim's bundled Python, you can skip these steps.
- If you haven't already: create and activate your `conda` environment, followed by installing extensions inside Orbit:
```bash
# Create conda environment
${ORBIT_PATH}/orbit.sh --conda
# Activate conda environment
conda activate orbit
# Install all Orbit extensions in orbit/source/extensions
${ORBIT_PATH}/orbit.sh --install
```
- Set your `conda` environment as the default interpreter in VSCode by opening the command palette (`Ctrl+Shift+P`), choosing `Python: Select Interpreter` and selecting your `conda` environment.
Once you are in the virtual environment, you do not need to use `${ORBIT_PATH}/orbit.sh -p` to run python scripts. You can use the default python executable in your environment by running `python` or `python3`. However, for the rest of the documentation, we will assume that you are using `${ORBIT_PATH}/orbit.sh -p` to run python scripts.
#### Set up IDE
To setup the IDE, please follow these instructions:
1. Open the `orbit.<your_extension_template>` directory on Visual Studio Code IDE
2. Run VSCode Tasks, by pressing `Ctrl+Shift+P`, selecting `Tasks: Run Task` and running the `setup_python_env` in the drop down menu. When running this task, you will be prompted to add the absolute path to your Orbit installation.
If everything executes correctly, it should create a file .python.env in the .vscode directory. The file contains the python paths to all the extensions provided by Isaac Sim and Omniverse. This helps in indexing all the python modules for intelligent suggestions while writing code.
### Setup as Project Template
From within this repository, install your extension as a Python package to the Isaac Sim Python executable.
```bash
${ORBIT_PATH}/orbit.sh -p -m pip install --upgrade pip
${ORBIT_PATH}/orbit.sh -p -m pip install -e .
```
### Setup as Omniverse Extension
To enable your extension, follow these steps:
1. **Add the search path of your repository** to the extension manager:
- Navigate to the extension manager using `Window` -> `Extensions`.
- Click on the **Hamburger Icon** (☰), then go to `Settings`.
- In the `Extension Search Paths`, enter the path that goes up to your repository's location without actually including the repository's own directory. For example, if your repository is located at `/home/code/orbit.ext_template`, you should add `/home/code` as the search path.
- If not already present, in the `Extension Search Paths`, enter the path that leads to your local Orbit directory. For example: `/home/orbit/source/extensions`
- Click on the **Hamburger Icon** (☰), then click `Refresh`.
2. **Search and enable your extension**:
- Find your extension under the `Third Party` category.
- Toggle it to enable your extension.
### Installing Dependencies
To ensure that your program works as expected, please add your extensions's dependencies to the appropriate configuration file. Below are links for how to specify extension dependencies on ``IsaacSim`` and ``Orbit`` extensions, ``pip`` packages, ``apt`` packages, and [rosdep packages](https://docs.ros.org/en/humble/Tutorials/Intermediate/Rosdep.html).
- [Extensions](https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/extensions_advanced.html#dependencies-section)
- [pip packages](https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires)
- [apt packages](https://isaac-orbit.github.io/orbit/source/setup/developer.html#extension-dependency-management)
- [rosdep packages](https://isaac-orbit.github.io/orbit/source/setup/developer.html#extension-dependency-management)
## Usage
### Project Template
We provide an example for training and playing a policy for ANYmal on flat terrain. Install [RSL_RL](https://github.com/leggedrobotics/rsl_rl) outside of the orbit repository, e.g. `home/code/rsl_rl`.
```bash
git clone https://github.com/leggedrobotics/rsl_rl.git
cd rsl_rl
${ORBIT_PATH}/orbit.sh -p -m pip install -e .
```
Train a policy.
```bash
cd <path_to_your_extension>
${ORBIT_PATH}/orbit.sh -p scripts/rsl_rl/train.py --task Template-Velocity-Flat-Anymal-D-v0 --num_envs 4096 --headless
```
Play the trained policy.
```bash
${ORBIT_PATH}/orbit.sh -p scripts/rsl_rl/play.py --task Template-Velocity-Flat-Anymal-D-Play-v0 --num_envs 16
```
### Omniverse Extension
We provide an example UI extension that will load upon enabling your extension defined in `orbit/ext_template/ui_extension_example.py`. For more information on UI extensions, enable and check out the source code of the `omni.isaac.ui_template` extension and refer to the introduction on [Isaac Sim Workflows 1.2.3. GUI](https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/tutorial_intro_workflows.html#gui).
## Pre-Commit
Pre-committing involves using a framework to automate the process of enforcing code quality standards before code is actually committed to a version control system, like Git. This process involves setting up hooks that run automated checks, such as code formatting, linting (checking for programming errors, bugs, stylistic errors, and suspicious constructs), and running tests. If these checks pass, the commit is allowed; if not, the commit is blocked until the issues are resolved. This ensures that all code committed to the repository adheres to the defined quality standards, leading to a cleaner, more maintainable codebase. To do so, we use the [pre-commit](https://pre-commit.com/) module. Install the module using:
```bash
pip install pre-commit
```
Run the pre-commit with:
```bash
pre-commit run --all-files
```
## Finalize
You are all set and no longer need the template instructions.
- The `orbit/ext_template` and `scripts/rsl_rl` directories act as a reference template for your convenience. Delete them if no longer required.
- When ready, use this `README.md` as a template and customize where appropriate.
## Docker
For docker usage, we require the following dependencies to be set up:
- [Docker and Docker Compose](https://isaac-orbit.github.io/orbit/source/deployment/docker.html#docker-and-docker-compose)
- [Isaac Sim Container](https://isaac-orbit.github.io/orbit/source/deployment/docker.html#obtaining-the-isaac-sim-container)
Clone this template into the `${ORBIT_PATH}/source/extensions` directory, and set it up as described
above in [Configuration](#configuration) (no other steps in Setup section required). Once done, start and enter your
container with:
```bash
# start container
${ORBIT_PATH}/docker/container.sh start
# enter container
${ORBIT_PATH}/docker/container.sh enter
```
More information on working with Docker in combination with Orbit can be found [here](https://isaac-orbit.github.io/orbit/source/deployment/index.html).
## Troubleshooting
### Docker Container
When running within a docker container, the following error has been encountered: `ModuleNotFoundError: No module named 'orbit'`. To mitigate, please comment out the docker specific environment definitions in `.vscode/launch.json` and run the following:
```bash
echo -e "\nexport PYTHONPATH=\$PYTHONPATH:/workspace/orbit.<your_extension_name>" >> ~/.bashrc
source ~/.bashrc
```
## Bugs & Feature Requests
Please report bugs and request features using the [Issue Tracker](https://github.com/isaac-orbit/orbit.ext_template/issues).
| 11,013 |
Markdown
| 47.519824 | 724 | 0.763734 |
mrsbCN/orbit.dexterous_hand/scripts/rsl_rl/play.py
|
"""Script to play a checkpoint if an RL agent from RSL-RL."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# local imports
import cli_args # isort: skip
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
import torch
from rsl_rl.runners import OnPolicyRunner
import omni.isaac.orbit_tasks # noqa: F401
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlVecEnvWrapper,
export_policy_as_onnx,
)
# Import extensions to set up environment tasks
import orbit.ext_template.tasks # noqa: F401 TODO: import orbit.<your_extension_name>
def main():
    """Play a trained RSL-RL policy.

    Loads the CLI-selected (or latest) checkpoint for ``args_cli.task``,
    exports the policy to ONNX next to the checkpoint, then steps the policy
    in inference mode until the simulator is closed.
    """
    # parse configuration
    env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)
    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)
    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
    print(f"[INFO]: Loading model checkpoint from: {resume_path}")
    # load previously trained model (log_dir=None: playing does not write training logs)
    ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    ppo_runner.load(resume_path)
    # obtain the trained policy for inference
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)
    # export policy to onnx, next to the checkpoint
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_onnx(ppo_runner.alg.actor_critic, export_model_dir, filename="policy.onnx")
    # reset environment
    obs, _ = env.get_observations()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, _, _ = env.step(actions)
    # close the simulator
    env.close()
if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
| 3,360 |
Python
| 32.949495 | 101 | 0.704762 |
mrsbCN/orbit.dexterous_hand/scripts/rsl_rl/cli_args.py
|
from __future__ import annotations
import argparse
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg
def add_rsl_rl_args(parser: argparse.ArgumentParser):
    """Add RSL-RL arguments to the parser.

    Args:
        parser: The parser to add the arguments to.
    """
    # keep all RSL-RL options grouped together in --help output
    group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
    # (flag, extra kwargs) for every CLI option; all of them default to None.
    options = [
        # -- experiment arguments
        ("--experiment_name", {"type": str, "help": "Name of the experiment folder where logs will be stored."}),
        ("--run_name", {"type": str, "help": "Run name suffix to the log directory."}),
        # -- load arguments
        # NOTE: type=bool treats any non-empty string (even "False") as True.
        ("--resume", {"type": bool, "help": "Whether to resume from a checkpoint."}),
        ("--load_run", {"type": str, "help": "Name of the run folder to resume from."}),
        ("--checkpoint", {"type": str, "help": "Checkpoint file to resume from."}),
        # -- logger arguments
        ("--logger", {"type": str, "choices": {"wandb", "tensorboard", "neptune"}, "help": "Logger module to use."}),
        ("--log_project_name", {"type": str, "help": "Name of the logging project when using wandb or neptune."}),
    ]
    for flag, kwargs in options:
        group.add_argument(flag, default=None, **kwargs)
def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
    """Parse configuration for RSL-RL agent based on inputs.

    Args:
        task_name: The name of the environment.
        args_cli: The command line arguments.

    Returns:
        The parsed configuration for RSL-RL agent based on inputs.
    """
    from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry

    # start from the task's registered default configuration
    rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")
    # apply every CLI value that was explicitly provided
    overrides = {
        "seed": args_cli.seed,
        "resume": args_cli.resume,
        "load_run": args_cli.load_run,
        "load_checkpoint": args_cli.checkpoint,
        "run_name": args_cli.run_name,
        "logger": args_cli.logger,
    }
    for attr, value in overrides.items():
        if value is not None:
            setattr(rslrl_cfg, attr, value)
    # set the project name for wandb and neptune
    if rslrl_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
        rslrl_cfg.wandb_project = args_cli.log_project_name
        rslrl_cfg.neptune_project = args_cli.log_project_name
    return rslrl_cfg
| 2,858 |
Python
| 39.842857 | 117 | 0.686494 |
mrsbCN/orbit.dexterous_hand/scripts/rsl_rl/train.py
|
"""Script to train RL agent with RSL-RL."""
"""Launch Isaac Sim Simulator first."""
import argparse
import os
from omni.isaac.orbit.app import AppLauncher
# local imports
import cli_args # isort: skip
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import os
import torch
from datetime import datetime
from rsl_rl.runners import OnPolicyRunner
import omni.isaac.orbit_tasks # noqa: F401
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper
# Import extensions to set up environment tasks
import orbit.ext_template.tasks # noqa: F401 TODO: import orbit.<your_extension_name>
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def main():
    """Train with RSL-RL agent.

    Builds the task environment from CLI overrides, wraps it for RSL-RL
    (optionally recording videos), resumes from a prior checkpoint when
    requested, dumps the resolved configs into the run's log directory, and
    runs the learning loop for ``agent_cfg.max_iterations`` iterations.
    """
    # parse configuration
    env_cfg: RLTaskEnvCfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)
    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)
    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)
    # create runner from rsl-rl
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # save resume path before creating a new log_dir
    if agent_cfg.resume:
        # get path to previous checkpoint (resolved against existing runs, not this new one)
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)
    # set seed of the environment
    env.seed(agent_cfg.seed)
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)
    # run training
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)
    # close the simulator
    env.close()
if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
| 4,801 |
Python
| 38.360655 | 117 | 0.70277 |
mrsbCN/orbit.dexterous_hand/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
# Description
category = "orbit"
readme = "README.md"
# TODO: Change your package specifications
# -----------------------------------------------------------------
title = "Extension Template"
author = "ORBIT Project Developers"
maintainer = "Nico Burger"
maintainer_email = "[email protected]"
description="Extension Template for Orbit"
repository = "https://github.com/isaac-orbit/orbit.ext_template.git"
keywords = ["extension", "template", "orbit"]
# -----------------------------------------------------------------
[dependencies]
"omni.isaac.orbit" = {}
"omni.isaac.orbit_assets" = {}
"omni.isaac.orbit_tasks" = {}
# NOTE: Add additional dependencies here
[[python.module]]
# TODO: Change your package name
# -----------------------------------------------------------------
name = "orbit.ext_template"
# -----------------------------------------------------------------
[orbit_settings]
# TODO: Uncomment and list any apt dependencies here.
# If none, leave it commented out.
# apt_deps = ["example_package"]
# TODO: Uncomment and provide path to a ros_ws
# with rosdeps to be installed. If none,
# leave it commented out.
# ros_ws = "path/from/extension_root/to/ros_ws"
| 1,291 |
TOML
| 31.299999 | 68 | 0.564679 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/__init__.py
|
"""
Python module serving as a project/extension template.
"""
# Register Gym environments.
from .tasks import *
# Register UI extensions.
from .ui_extension_example import *
| 177 |
Python
| 16.799998 | 54 | 0.745763 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/ui_extension_example.py
|
import omni.ext
import omni.ui as ui
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    """Log the received argument and return ``x`` raised to the power ``x``."""
    print("[orbit.ext_template] some_public_function was called with x: ", x)
    result = x ** x
    return result
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class ExampleExtension(omni.ext.IExt):
    """Minimal example UI extension: a window with a click counter and a reset button."""
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        """Build the window and wire the two button callbacks. Called when the extension is enabled."""
        print("[orbit.ext_template] startup")
        self._count = 0
        self._window = ui.Window("My Window", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                label = ui.Label("")
                # closure over `self` and `label`: increment the counter and refresh the text
                def on_click():
                    self._count += 1
                    label.text = f"count: {self._count}"
                # closure: zero the counter; also invoked once below to initialize the label
                def on_reset():
                    self._count = 0
                    label.text = "empty"
                on_reset()
                with ui.HStack():
                    ui.Button("Add", clicked_fn=on_click)
                    ui.Button("Reset", clicked_fn=on_reset)
    def on_shutdown(self):
        """Called when the extension is disabled; only logs (window teardown is left to the UI framework)."""
        print("[orbit.ext_template] shutdown")
| 1,527 |
Python
| 34.534883 | 119 | 0.599214 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/__init__.py
|
"""Package containing task implementations for various robotic environments."""
import os
import toml
# Conveniences to other module directories via relative paths
ORBIT_TASKS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""
ORBIT_TASKS_METADATA = toml.load(os.path.join(ORBIT_TASKS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""
# Configure the module-level variables
__version__ = ORBIT_TASKS_METADATA["package"]["version"]
##
# Register Gym environments.
##
from omni.isaac.orbit_tasks.utils import import_packages
# The blacklist is used to prevent importing configs from sub-packages
_BLACKLIST_PKGS = ["utils"]
# Import all configs in this package
import_packages(__name__, _BLACKLIST_PKGS)
| 845 |
Python
| 31.53846 | 95 | 0.745562 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/__init__.py
|
"""Locomotion environments for legged robots."""
from .velocity import * # noqa
| 82 |
Python
| 19.749995 | 48 | 0.719512 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/velocity_env_cfg.py
|
from __future__ import annotations
import math
from dataclasses import MISSING
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RandomizationTermCfg as RandTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise
import orbit.ext_template.tasks.locomotion.velocity.mdp as mdp
##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with a legged robot."""
    # ground terrain: procedurally generated rough terrain with friction-only contacts
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="generator",
        terrain_generator=ROUGH_TERRAINS_CFG,
        max_init_terrain_level=5,
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="multiply",
            restitution_combine_mode="multiply",
            static_friction=1.0,
            dynamic_friction=1.0,
        ),
        visual_material=sim_utils.MdlFileCfg(
            mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
            project_uvw=True,
        ),
        debug_vis=False,
    )
    # robots: the articulation must be supplied by a robot-specific derived config (MISSING sentinel)
    robot: ArticulationCfg = MISSING
    # sensors: height ray-caster 20 m above the base, yaw-aligned, 1.6 m x 1.0 m grid at 0.1 m resolution
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=False,
        mesh_prim_paths=["/World/ground"],
    )
    # contact sensing on every robot body; air time tracked for the feet-air-time reward
    contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True)
    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )
    sky_light = AssetBaseCfg(
        prim_path="/World/skyLight",
        spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0),
    )
##
# MDP settings
##
@configclass
class CommandsCfg:
    """Command specifications for the MDP."""
    # planar velocity command sampled uniformly from the ranges below, resampled every 10 s;
    # heading_command=True converts a sampled heading into a yaw-rate via the stiffness gain
    base_velocity = mdp.UniformVelocityCommandCfg(
        asset_name="robot",
        resampling_time_range=(10.0, 10.0),
        rel_standing_envs=0.02,
        rel_heading_envs=1.0,
        heading_command=True,
        heading_control_stiffness=0.5,
        debug_vis=True,
        ranges=mdp.UniformVelocityCommandCfg.Ranges(
            lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi)
        ),
    )
@configclass
class ActionsCfg:
    """Action specifications for the MDP."""
    # joint position targets for all joints (regex ".*"), scaled by 0.5 about the default joint pose
    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)
@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""
    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""
        # observation terms (order preserved)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
        projected_gravity = ObsTerm(
            func=mdp.projected_gravity,
            noise=Unoise(n_min=-0.05, n_max=0.05),
        )
        velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"})
        joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
        actions = ObsTerm(func=mdp.last_action)
        # height scan is clipped to [-1, 1] after noise is applied
        height_scan = ObsTerm(
            func=mdp.height_scan,
            params={"sensor_cfg": SceneEntityCfg("height_scanner")},
            noise=Unoise(n_min=-0.1, n_max=0.1),
            clip=(-1.0, 1.0),
        )
        def __post_init__(self):
            # enable noise corruption (presumably applies the per-term Unoise configs above — see ObsGroup docs)
            self.enable_corruption = True
            # flatten all terms into a single observation vector
            self.concatenate_terms = True
    # observation groups
    policy: PolicyCfg = PolicyCfg()
@configclass
class RandomizationCfg:
    """Configuration for randomization."""
    # startup: applied once when the simulation starts (mode="startup")
    physics_material = RandTerm(
        func=mdp.randomize_rigid_body_material,
        mode="startup",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
            "static_friction_range": (0.8, 0.8),
            "dynamic_friction_range": (0.6, 0.6),
            "restitution_range": (0.0, 0.0),
            "num_buckets": 64,
        },
    )
    add_base_mass = RandTerm(
        func=mdp.add_body_mass,
        mode="startup",
        params={"asset_cfg": SceneEntityCfg("robot", body_names="base"), "mass_range": (-5.0, 5.0)},
    )
    # reset: applied at every environment reset (mode="reset")
    # NOTE: zero force/torque ranges make this a placeholder; PLAY configs set it to None entirely
    base_external_force_torque = RandTerm(
        func=mdp.apply_external_force_torque,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names="base"),
            "force_range": (0.0, 0.0),
            "torque_range": (-0.0, 0.0),
        },
    )
    reset_base = RandTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (-0.5, 0.5),
                "y": (-0.5, 0.5),
                "z": (-0.5, 0.5),
                "roll": (-0.5, 0.5),
                "pitch": (-0.5, 0.5),
                "yaw": (-0.5, 0.5),
            },
        },
    )
    reset_robot_joints = RandTerm(
        func=mdp.reset_joints_by_scale,
        mode="reset",
        params={
            "position_range": (0.5, 1.5),
            "velocity_range": (0.0, 0.0),
        },
    )
    # interval: applied repeatedly, here every 10-15 simulated seconds
    push_robot = RandTerm(
        func=mdp.push_by_setting_velocity,
        mode="interval",
        interval_range_s=(10.0, 15.0),
        params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}},
    )
@configclass
class RewardsCfg:
    """Reward terms for the MDP."""
    # -- task: track the commanded planar linear / yaw velocity (exponential kernels, std=0.5)
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    track_ang_vel_z_exp = RewTerm(
        func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    # -- penalties (negative weights)
    lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
    ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05)
    dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5)
    dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7)
    action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
    # feet matched by regex ".*FOOT" on the contact sensor
    feet_air_time = RewTerm(
        func=mdp.feet_air_time,
        weight=0.125,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
            "command_name": "base_velocity",
            "threshold": 0.5,
        },
    )
    undesired_contacts = RewTerm(
        func=mdp.undesired_contacts,
        weight=-1.0,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
    )
    # -- optional penalties (zero weight here; derived configs enable them, e.g. the flat-terrain variant)
    flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0)
    dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0)
@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""
    # end the episode when its time budget is exhausted
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # terminate when the base registers a contact force above the threshold
    base_contact = DoneTerm(
        func=mdp.illegal_contact,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
    )
@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""
    # advance/regress terrain difficulty based on the distance walked (see mdp.terrain_levels_vel)
    terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)
##
# Environment configuration
##
@configclass
class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
    """Configuration for the locomotion velocity-tracking environment."""
    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    randomization: RandomizationCfg = RandomizationCfg()
    curriculum: CurriculumCfg = CurriculumCfg()
    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 4  # one control action every 4 physics steps (0.02 s at dt=0.005)
        self.episode_length_s = 20.0
        # simulation settings
        self.sim.dt = 0.005
        self.sim.disable_contact_processing = True
        self.sim.physics_material = self.scene.terrain.physics_material
        # update sensor update periods
        # we tick all the sensors based on the smallest update period (physics update period)
        if self.scene.height_scanner is not None:
            self.scene.height_scanner.update_period = self.decimation * self.sim.dt
        if self.scene.contact_forces is not None:
            self.scene.contact_forces.update_period = self.sim.dt
        # check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator
        # this generates terrains with increasing difficulty and is useful for training
        if getattr(self.curriculum, "terrain_levels", None) is not None:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = True
        else:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = False
| 10,526 |
Python
| 32.740385 | 118 | 0.625214 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/__init__.py
|
"""Locomotion environments with velocity-tracking commands.
These environments are based on the `legged_gym` environments provided by Rudin et al.
Reference:
https://github.com/leggedrobotics/legged_gym
"""
| 213 |
Python
| 25.749997 | 86 | 0.779343 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/mdp/__init__.py
|
"""This sub-module contains the functions that are specific to the locomotion environments."""
from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403
from .curriculums import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
| 247 |
Python
| 34.428567 | 94 | 0.728745 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/mdp/curriculums.py
|
"""Common functions that can be used to create curriculum for the learning environment.
The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.terrains import TerrainImporter
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def terrain_levels_vel(
    env: RLTaskEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Curriculum based on the distance the robot walked when commanded to move at a desired velocity.
    This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the
    difficulty when the robot walks less than half of the distance required by the commanded velocity.
    .. note::
        It is only possible to use this term with the terrain type ``generator``. For further information
        on different terrain types, check the :class:`omni.isaac.orbit.terrains.TerrainImporter` class.
    Returns:
        The mean terrain level for the given environment ids.
    """
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    terrain: TerrainImporter = env.scene.terrain
    command = env.command_manager.get_command("base_velocity")
    # compute the distance the robot walked (xy-plane displacement from its env origin)
    distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1)
    # robots that walked far enough progress to harder terrains
    # ("far enough" = half the terrain tile size along x)
    move_up = distance > terrain.cfg.terrain_generator.size[0] / 2
    # robots that walked less than half of their required distance go to simpler terrains
    move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5
    # an env never moves both ways: moving up wins
    move_down *= ~move_up
    # update terrain levels (side effect on the terrain importer)
    terrain.update_env_origins(env_ids, move_up, move_down)
    # return the mean terrain level
    return torch.mean(terrain.terrain_levels.float())
| 2,253 |
Python
| 43.196078 | 112 | 0.742565 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/mdp/rewards.py
|
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import ContactSensor
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def feet_air_time(env: RLTaskEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> torch.Tensor:
    """Reward feet that stay airborne longer than ``threshold`` seconds.
    For every foot that has just made contact this control step, the agent is
    credited the foot's last air time minus ``threshold``, encouraging it to lift
    its feet and take real steps. The term is gated to zero whenever the
    commanded planar velocity is negligible (norm <= 0.1).
    """
    sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # mask of feet that transitioned from air to contact during this control step
    just_landed = sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
    airborne_duration = sensor.data.last_air_time[:, sensor_cfg.body_ids]
    # credit (air time - threshold) only for feet that just landed
    reward = torch.sum((airborne_duration - threshold) * just_landed, dim=1)
    # gate: no reward when the velocity command is (near) zero
    planar_cmd = env.command_manager.get_command(command_name)[:, :2]
    reward *= torch.norm(planar_cmd, dim=1) > 0.1
    return reward
def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
    """Reward single-stance walking for bipeds.
    While exactly one foot is on the ground, the reward equals the shorter of
    the two feet's current mode durations (contact time for the stance foot,
    air time for the swing foot), capped at ``threshold``. Outside single
    stance, or when the commanded planar velocity is negligible (norm <= 0.1),
    the reward is zero.
    """
    sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    air_time = sensor.data.current_air_time[:, sensor_cfg.body_ids]
    contact_time = sensor.data.current_contact_time[:, sensor_cfg.body_ids]
    # per-foot: duration of its current mode (contact vs. air)
    foot_in_contact = contact_time > 0.0
    mode_time = torch.where(foot_in_contact, contact_time, air_time)
    # exactly one foot on the ground?
    single_stance = torch.sum(foot_in_contact.int(), dim=1) == 1
    # min over feet of the mode time, but only while in single stance; capped at threshold
    gated_time = torch.where(single_stance.unsqueeze(-1), mode_time, 0.0)
    reward = torch.clamp(torch.min(gated_time, dim=1)[0], max=threshold)
    # no reward for (near-)zero velocity commands
    reward = reward * (torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1)
    return reward
| 2,472 |
Python
| 45.660376 | 119 | 0.716019 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/__init__.py
|
"""Configurations for velocity-based locomotion environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
| 240 |
Python
| 47.199991 | 94 | 0.775 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/rough_env_cfg.py
|
from omni.isaac.orbit.utils import configclass
from orbit.ext_template.tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_D_CFG # isort: skip
@configclass
class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    """Rough-terrain velocity-tracking environment specialized for the ANYmal-D robot."""
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch robot to anymal-d
        self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg):
    """Play/demo variant: small scene, fewer terrains, no observation corruption or pushes."""
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.randomization.base_external_force_torque = None
        self.randomization.push_robot = None
| 1,485 |
Python
| 34.380952 | 103 | 0.682828 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py
|
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import AnymalDRoughEnvCfg
@configclass
class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg):
    """Flat-terrain variant of the ANYmal-D environment: plane ground, no height scan or terrain curriculum."""
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # override rewards (enable flat-orientation penalty; retune torque and air-time terms)
        self.rewards.flat_orientation_l2.weight = -5.0
        self.rewards.dof_torques_l2.weight = -2.5e-5
        self.rewards.feet_air_time.weight = 0.5
        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None
@configclass
class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg):
    """Play/demo variant of the flat-terrain ANYmal-D environment.

    Mirrors :class:`AnymalDFlatEnvCfg` but with a small scene and all
    randomization/observation corruption disabled.

    FIX: added the missing ``@configclass`` decorator — every sibling config
    class in these files (e.g. ``AnymalDRoughEnvCfg_PLAY``) is decorated, and
    leaving it off is inconsistent with the orbit config-class convention.
    """

    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.randomization.base_external_force_torque = None
        self.randomization.push_robot = None
| 1,259 |
Python
| 31.307692 | 60 | 0.648133 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/__init__.py
|
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
# Each task is registered twice: a training variant and a "-Play-" variant with the
# small, randomization-free scene. The entry point is the generic RLTaskEnv; the
# concrete env/agent configs are resolved lazily from the kwargs entry points.
gym.register(
    id="Template-Velocity-Flat-Anymal-D-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
    },
)
gym.register(
    id="Template-Velocity-Flat-Anymal-D-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
    },
)
gym.register(
    id="Template-Velocity-Rough-Anymal-D-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
    },
)
gym.register(
    id="Template-Velocity-Rough-Anymal-D-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
    },
)
| 1,351 |
Python
| 27.166666 | 77 | 0.680977 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py
|
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    """PPO runner and hyper-parameter configuration for ANYmal-D on rough terrain (rsl-rl)."""
    num_steps_per_env = 24
    max_iterations = 1500
    save_interval = 50
    experiment_name = "anymal_d_rough"
    empirical_normalization = False
    # actor-critic MLPs: three ELU hidden layers each
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[512, 256, 128],
        critic_hidden_dims=[512, 256, 128],
        activation="elu",
    )
    # PPO with clipped objective and KL-adaptive learning-rate schedule
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.005,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-3,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )
@configclass
class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg):
    """Flat-terrain variant: shorter training run and smaller MLPs than the rough config."""
    def __post_init__(self):
        super().__post_init__()
        self.max_iterations = 300
        self.experiment_name = "anymal_d_flat"
        self.policy.actor_hidden_dims = [128, 128, 128]
        self.policy.critic_hidden_dims = [128, 128, 128]
| 1,294 |
Python
| 26.553191 | 58 | 0.636012 |
mrsbCN/orbit.dexterous_hand/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/__init__.py
|
from . import rsl_rl_cfg # noqa: F401, F403
| 45 |
Python
| 21.999989 | 44 | 0.666667 |
mrsbCN/orbit.dexterous_hand/docs/CHANGELOG.rst
|
Changelog
---------
0.1.0 (2024-01-29)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Created an initial template for building an extension or project based on Orbit
| 155 |
reStructuredText
| 13.181817 | 81 | 0.593548 |
abmoRobotics/MAPs/README.md
|
# Materials Acceleration Platforms: autonomous experimentation
# Dependencies
1. First install ROS 2 Foxy Fitzroy using [official toturial](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) for Ubuntu Focal.
2. Clone repository
```
git clone [email protected]:abmoRobotics/MAPs.git
```
3. Source the ROS 2 environment
```
source /opt/ros/foxy/setup.bash
```
4. Install the required dependencies
```
cd MAPs/ros2_ws/
```
```
rosdep install --from-paths src -y
```
# How to run
| 476 |
Markdown
| 14.9 | 150 | 0.72479 |
abmoRobotics/MAPs/ros2_ws/asdf.py
|
import heapq
class ValueIDManager:
    """Maintain a bidirectional mapping between values and small integer IDs.

    IDs are handed out in insertion order; IDs freed by a removal are kept in a
    min-heap and reused (smallest first) before a fresh ID is minted. The
    ``index_to_entry`` dict additionally exposes the current entries under a
    dense insertion-order index ``0..n-1`` and is re-compacted on every removal.

    Methods:
        add_value(value): add a value, assigning the lowest available ID.
        remove_by_value(value): remove an entry; returns its dense index (or None).
        remove_by_id(id_): remove an entry by its ID.
        get_id(value) / get_value(id_): lookups with a fallback message string.
        sync_array(arr): make the stored values match ``arr``; returns the dense
            indices (not IDs) of the entries that were removed.
    """
    def __init__(self):
        self.id_to_value = {}     # id -> value (insertion-ordered)
        self.value_to_id = {}     # value -> id
        self.available_ids = []   # min-heap of freed IDs, reused smallest-first
        self.index_to_entry = {}  # dense insertion-order index -> (value, id)
        self.next_id = 0          # next fresh ID when no freed ID is available
    def _compact_index(self, del_index):
        """Shift entries after ``del_index`` one slot left so indices stay dense."""
        for index in range(del_index, len(self.index_to_entry) - 1):
            self.index_to_entry[index] = self.index_to_entry[index + 1]
        # drop the now-duplicated last slot
        del self.index_to_entry[len(self.index_to_entry) - 1]
    def remove_by_id(self, id_):
        """Remove the entry with the given ID and recycle the ID."""
        if id_ in self.id_to_value:
            del_value = self.id_to_value[id_]
            # position in insertion order (id_to_value preserves it, mirroring index_to_entry)
            del_index = list(self.id_to_value.keys()).index(id_)
            heapq.heappush(self.available_ids, id_)
            del self.id_to_value[id_]
            del self.value_to_id[del_value]
            # BUGFIX: index_to_entry was previously left stale on this path,
            # desynchronizing it from id_to_value/value_to_id (remove_by_value
            # already compacted it; this path did not).
            self._compact_index(del_index)
            print(f"ID: {id_} removed.")
        else:
            print(f"No such ID: {id_} in the database.")
    def add_value(self, value):
        """Add ``value`` with the lowest available ID (reusing freed IDs first)."""
        if value in self.value_to_id:
            print(f"Value: {value} already exists.")
        else:
            if self.available_ids:
                new_id = heapq.heappop(self.available_ids)
            else:
                new_id = self.next_id
                self.next_id += 1
            self.id_to_value[new_id] = value
            self.value_to_id[value] = new_id
            # append at the next dense index
            self.index_to_entry[len(self.index_to_entry)] = (value, new_id)
            print(f"Value: {value} added with ID: {new_id}.")
    def remove_by_value(self, value):
        """Remove ``value``; return the dense index it occupied, or None if absent."""
        if value in self.value_to_id:
            del_id = self.value_to_id[value]
            del_index = list(self.id_to_value.keys()).index(del_id)
            heapq.heappush(self.available_ids, del_id)
            del self.value_to_id[value]
            del self.id_to_value[del_id]
            self._compact_index(del_index)
            print(f"Value: {value} removed.")
            return del_index
        else:
            print(f"No such value: {value} in the database.")
            return None
    def get_id(self, value):
        """Return the ID for ``value``, or a not-found message string."""
        return self.value_to_id.get(value, "No such value in the database.")
    def get_value(self, id_):
        """Return the value for ``id_``, or a not-found message string."""
        return self.id_to_value.get(id_, "No such ID in the database.")
    def sync_array(self, arr):
        """Make the stored values equal to ``arr``.

        Missing values are added; stored values absent from ``arr`` are removed.
        Returns the dense indices (not IDs) of the removed entries, in removal order.
        """
        removed_indices = []
        for value in arr:
            if value not in self.value_to_id:
                self.add_value(value)
        for value in list(self.value_to_id.keys()):  # copy: we mutate while iterating
            if value not in arr:
                removed_index = self.remove_by_value(value)
                if removed_index is not None:
                    removed_indices.append(removed_index)
        return removed_indices
# --- Ad-hoc demo / smoke test of ValueIDManager (prints intermediate state) ---
# Initialize manager
value_id_manager = ValueIDManager()
# Add value
value_id_manager.add_value(3.3)
# Add another value
value_id_manager.add_value(5.5)
# Sync with new array
value_id_manager.sync_array([3.3, 5.5, 7.7, 0.7, 0.0, 7.0, 7.3, 0.3])
# Check state after sync
print(value_id_manager.id_to_value)
print(value_id_manager.value_to_id)
value_id_manager.remove_by_value(7.0)
print(value_id_manager.id_to_value)
print(value_id_manager.value_to_id)
print(value_id_manager.available_ids)
value_id_manager.remove_by_id(1)
print(value_id_manager.available_ids)
value_id_manager.add_value(11.0)
print(value_id_manager.available_ids)
print(value_id_manager.id_to_value)
print(value_id_manager.value_to_id)
# Remove extras not in new array
print(value_id_manager.index_to_entry)
print(value_id_manager.sync_array([3.3, 5.5, 7.7, 0.7, 0.0, 7.0, 7.3,11.0]))
print(value_id_manager.index_to_entry)
| 4,347 |
Python
| 34.064516 | 119 | 0.599034 |
abmoRobotics/MAPs/ros2_ws/plot_time_functions3.py
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import pandas as pd
# Define the additional functions as shown in the previous response
def linear_decay_priority(task_deadline_seconds, total_time_seconds):
    """Priority that rises linearly as the remaining time shrinks."""
    return total_time_seconds - task_deadline_seconds
def logistic_decay_priority(task_deadline_seconds, midpoint_seconds, steepness):
    """Logistic (sigmoid) decay: close to 1 well before the midpoint, close to 0 well after."""
    sigmoid = 1 / (1 + math.exp(-steepness * (task_deadline_seconds - midpoint_seconds)))
    return 1 - sigmoid
def time_remaining_squared_priority(task_deadline_seconds):
    """Quadratic score: grows with the square of the remaining seconds."""
    return task_deadline_seconds ** 2
def harmonic_priority(task_creation_seconds, task_deadline_seconds, alpha=1):
    """Smoothed ratio of time remaining to the deadline (``alpha`` guards the 0/0 case)."""
    remaining = task_deadline_seconds - task_creation_seconds
    return (remaining + alpha) / (task_deadline_seconds + alpha)
def inverse_time_remaining_priority(task_deadline_seconds, epsilon=1e-9):
    """Reciprocal of the remaining time; ``epsilon`` avoids division by zero at the deadline."""
    return 1 / (task_deadline_seconds + epsilon)
# Create a range of input seconds
input_seconds = np.arange(0, 200, 1)
# Calculate priority scores for each function
linear_decay_scores = [linear_decay_priority(x, 200) for x in input_seconds]
logistic_decay_scores = [logistic_decay_priority(x, 100, 0.06) for x in input_seconds]
time_remaining_squared_scores = [time_remaining_squared_priority(x) for x in input_seconds]
harmonic_priority_scores = [harmonic_priority(36, x) for x in input_seconds]
inverse_time_remaining_scores = [inverse_time_remaining_priority(x) for x in input_seconds]
# NOTE(review): the commented block below is a retained alternative per-axes subplot version
# # Create a dictionary to store the functions and their scores
# function_dict = {"Linear Decay": linear_decay_scores,
#                  "Logistic Decay": logistic_decay_scores,
#                  "Time Remaining Squared": time_remaining_squared_scores,
#                  "Harmonic Priority": harmonic_priority_scores,
#                  "Inverse Time Remaining": inverse_time_remaining_scores}
# # Create separate plots for each function
# fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14, 16))
# axes = axes.flatten()
# for idx, (function_name, scores) in enumerate(function_dict.items()):
#     ax = axes[idx]
#     sns.lineplot(x=input_seconds, y=scores, ax=ax)
#     ax.set_title(function_name)
#     ax.set_xlabel("Input Seconds")
#     ax.set_ylabel("Priority Score")
# # Remove the extra empty subplot
# axes[-1].axis("off")
# plt.tight_layout()
# plt.show()
# Combine the input seconds and priority scores into a single dataset
data = np.column_stack((input_seconds, linear_decay_scores, logistic_decay_scores,
time_remaining_squared_scores, harmonic_priority_scores, inverse_time_remaining_scores))
columns = ["Input Seconds", "Linear Decay", "Logistic Decay", "Time Remaining Squared",
"Harmonic Priority", "Inverse Time Remaining"]
data = pd.DataFrame(data, columns=columns)
# Melt the dataset into a long format suitable for Seaborn line plots
data_melted = data.melt(id_vars="Input Seconds", var_name="Function", value_name="Priority Score")
# Plot the functions using Seaborn's relplot
g = sns.relplot(data=data_melted, x="Input Seconds", y="Priority Score", col="Function", kind="line", col_wrap=2, facet_kws={'ylim': (-0.1, 1.1)})
g.fig.subplots_adjust(top=0.9)  # Adjust the top of the subplots to create space for the title
g.fig.suptitle("Additional Temporal Priority Score Functions")
plt.show()
| 3,473 |
Python
| 41.365853 | 146 | 0.720703 |
abmoRobotics/MAPs/ros2_ws/tester.py
|
# SECURITY: hard-coded API keys committed to source control — these should be
# revoked and loaded from an environment variable or secrets store instead.
#api_key = 'apk-14166daf4872c6537ffc7e1b30afacdfc519606b7eefd932c5ad4ab5d9693f480b1a254e31ff6781937a14acc1fa4e7410d0460299a4ec3d2681fe801f79f1f81912b3826ca0cfe0c596eeb683ec32eb'
api_key = 'apk-110d39e19561e2e1538f8cb18288c7543ccfbd5f6d2881f2708c64401d0f620a302f360f2123e2b9f867f56a7c5c4479a5d07c540b32d77172c5f572d8488192878aa2be3e15dd73407656a9a2d7dbf5'
from rxn4chemistry import RXN4ChemistryWrapper
import time
# RXN for Chemistry client authenticated with the key above
rxn4chemistry_wrapper = RXN4ChemistryWrapper(api_key=api_key)
#rxn4chemistry_wrapper.list_all_projects()
#rxn4chemistry_wrapper.set_project('5f9b3e2a0c3e4f0001d8e4e4')
#print(rxn4chemistry_wrapper.list_all_projects()['response']['payload']['content'][0]['name'])
# select the 'MAPs' project by name from the account's project list
projects = rxn4chemistry_wrapper.list_all_projects()['response']['payload']['content']
time.sleep(1)  # presumably to respect API rate limits — confirm the required delay
for project in projects:
    if project['name'] == 'MAPs':
        rxn4chemistry_wrapper.set_project(project['id'])
        print(project['name'])
print(rxn4chemistry_wrapper.project_id)
time.sleep(1)
# request a retrosynthesis prediction for ethanol (SMILES 'CCO')
response = rxn4chemistry_wrapper.predict_automatic_retrosynthesis(
    'CCO'
)
time.sleep(1)
print(response)
| 1,092 |
Python
| 35.433332 | 177 | 0.818681 |
abmoRobotics/MAPs/ros2_ws/README.md
|
## ROS2 workspace
| 18 |
Markdown
| 8.499996 | 17 | 0.722222 |
abmoRobotics/MAPs/ros2_ws/plot_time_functions.py
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import pandas as pd
# Define the functions as shown in the previous response
# Time remaining until deadline
def time_remaining_priority(task_deadline_seconds):
    """Priority is simply the number of seconds left until the deadline."""
    remaining = task_deadline_seconds
    return remaining
# Elapsed time since creation
def elapsed_time_priority(task_creation_seconds):
    """Priority is the negated elapsed time: older tasks score lower."""
    elapsed = task_creation_seconds
    return -elapsed
# Proportional urgency (time remaining divided by total time allotted)
def proportional_urgency_priority(task_creation_seconds, task_deadline_seconds):
    """Fraction of the allotted time that is still remaining.

    Treats `task_deadline_seconds` as the total time allotted, so the
    result is 1.0 at creation time 0 and shrinks toward 0 at the deadline.
    """
    return (task_deadline_seconds - task_creation_seconds) / task_deadline_seconds
# Weighted combination of time remaining and elapsed time
def weighted_combination_priority(task_creation_seconds, task_deadline_seconds, weight=0.2):
    """Weighted mix of remaining time (rewarded) and elapsed time (penalised).

    `weight` scales the remaining-time term; (1 - weight) scales the
    elapsed-time term, which is subtracted.
    """
    remaining = task_deadline_seconds - task_creation_seconds
    return weight * remaining - (1 - weight) * task_creation_seconds
# Exponential decay of importance
def exponential_decay_priority(task_deadline_seconds, half_life_seconds=20):
    """Half-life decay: the score halves every `half_life_seconds` seconds."""
    # ln(2) / half-life gives the continuous decay rate of the exponential.
    rate = math.log(2) / half_life_seconds
    return math.exp(-rate * task_deadline_seconds)
# Sweep the "seconds" input over [0, 200) in 1-second steps.
input_seconds = np.arange(0, 200, 1)
# Evaluate every priority function over the sweep; where a function needs a
# deadline / total time, it is fixed at 200 s.
time_remaining_scores = [time_remaining_priority(x) for x in input_seconds]
elapsed_time_scores = [elapsed_time_priority(x) for x in input_seconds]
proportional_urgency_scores = [proportional_urgency_priority(x, 200) for x in input_seconds]
weighted_combination_scores = [weighted_combination_priority(x, 200) for x in input_seconds]
exponential_decay_scores = [exponential_decay_priority(x) for x in input_seconds]
# Earlier variant (kept for reference): one combined figure overlaying all
# functions in a single melted-DataFrame line plot.
# # Combine the input seconds and priority scores into a single dataset
# data = np.column_stack((input_seconds, time_remaining_scores, elapsed_time_scores,
#                         proportional_urgency_scores, weighted_combination_scores, exponential_decay_scores))
# columns = ["Input Seconds", "Time Remaining", "Elapsed Time", "Proportional Urgency",
#            "Weighted Combination", "Exponential Decay"]
# data = pd.DataFrame(data, columns=columns)
# # Melt the dataset into a long format suitable for Seaborn line plots
# data_melted = data.melt(id_vars="Input Seconds", var_name="Function", value_name="Priority Score")
# # Plot the functions using Seaborn line plot
# plt.figure(figsize=(10, 6))
# sns.lineplot(data=data_melted, x="Input Seconds", y="Priority Score", hue="Function")
# plt.title("Temporal Priority Score Functions")
# plt.show()
# Map each function's display name to its score series (this also fixes the
# subplot order below).
function_dict = {"Time Remaining": time_remaining_scores,
                 "Elapsed Time": elapsed_time_scores,
                 "Proportional Urgency": proportional_urgency_scores,
                 "Weighted Combination": weighted_combination_scores,
                 "Exponential Decay": exponential_decay_scores}
# One subplot per function: a 3x2 grid gives six axes for five functions; the
# spare sixth axis is hidden after the loop.
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14, 16))
axes = axes.flatten()
sns.set_theme(style="darkgrid")
for idx, (function_name, scores) in enumerate(function_dict.items()):
    ax = axes[idx]
    sns.lineplot(x=input_seconds, y=scores, ax=ax)
    ax.set_title(function_name)
    ax.set_xlabel("Input Seconds")
    ax.set_ylabel("Priority Score")
# Remove the extra (sixth) empty subplot.
axes[-1].axis("off")
plt.tight_layout()
plt.show()
| 3,518 |
Python
| 41.914634 | 110 | 0.737635 |
abmoRobotics/MAPs/ros2_ws/plot_time_functions2.py
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import pandas as pd
# Define the additional functions as shown in the previous response
def linear_decay_priority(task_deadline_seconds, total_time_seconds):
    """Linear ramp: score grows by one for every second of remaining time lost."""
    remaining = task_deadline_seconds
    return total_time_seconds - remaining
def logistic_decay_priority(task_deadline_seconds, midpoint_seconds, steepness):
    """Reversed sigmoid: ~1 well before the midpoint, 0.5 at it, ~0 after.

    `steepness` controls how sharply the score drops around
    `midpoint_seconds`.
    """
    sigmoid = 1 / (1 + math.exp(-steepness * (task_deadline_seconds - midpoint_seconds)))
    return -sigmoid + 1
def time_remaining_squared_priority(task_deadline_seconds):
    """Quadratic emphasis: score is the square of the remaining seconds."""
    remaining = task_deadline_seconds
    return remaining ** 2
def harmonic_priority(task_creation_seconds, task_deadline_seconds, alpha=1):
    """Smoothed ratio of remaining time to the deadline.

    `alpha` is added to both numerator and denominator, keeping the result
    finite when the remaining time reaches zero.
    """
    remaining = task_deadline_seconds - task_creation_seconds
    return (remaining + alpha) / (task_deadline_seconds + alpha)
def inverse_time_remaining_priority(task_deadline_seconds, epsilon=1e-9):
    """Reciprocal of the remaining time; `epsilon` avoids division by zero."""
    return 1 / (task_deadline_seconds + epsilon)
# Sweep the "seconds" input over [0, 200) in 1-second steps.
input_seconds = np.arange(0, 200, 1)
# Evaluate each priority function over the sweep (total time 200 s, logistic
# midpoint 100 s with steepness 0.06).
linear_decay_scores = [linear_decay_priority(x, 200) for x in input_seconds]
logistic_decay_scores = [logistic_decay_priority(x, 100, 0.06) for x in input_seconds]
time_remaining_squared_scores = [time_remaining_squared_priority(x) for x in input_seconds]
# NOTE(review): here the creation time is fixed at 36 s while the *deadline*
# sweeps, which yields negative remaining time (and scores) for x < 36 --
# confirm the argument order is intended.
harmonic_priority_scores = [harmonic_priority(36, x) for x in input_seconds]
inverse_time_remaining_scores = [inverse_time_remaining_priority(x) for x in input_seconds]
# Map each function's display name to its score series (fixes subplot order).
function_dict = {"Linear Decay": linear_decay_scores,
                 "Logistic Decay": logistic_decay_scores,
                 "Time Remaining Squared": time_remaining_squared_scores,
                 "Harmonic Priority": harmonic_priority_scores,
                 "Inverse Time Remaining": inverse_time_remaining_scores}
# One subplot per function: a 3x2 grid gives six axes for five functions; the
# spare sixth axis is hidden after the loop.
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14, 16))
axes = axes.flatten()
for idx, (function_name, scores) in enumerate(function_dict.items()):
    ax = axes[idx]
    sns.lineplot(x=input_seconds, y=scores, ax=ax)
    ax.set_title(function_name)
    ax.set_xlabel("Input Seconds")
    ax.set_ylabel("Priority Score")
# Remove the extra (sixth) empty subplot.
axes[-1].axis("off")
plt.tight_layout()
plt.show()
| 2,445 |
Python
| 37.218749 | 91 | 0.729652 |
abmoRobotics/MAPs/ros2_ws/src/README.md
|
## Where all ROS2 packages are located
| 39 |
Markdown
| 18.999991 | 38 | 0.769231 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/setup.py
|
"""Packaging manifest for the ``orchestrator`` ROS 2 Python package."""
from setuptools import setup
import os
from glob import glob

package_name = 'orchestrator'
module_utils = 'utils'

# Resource index marker, package manifest, launch files and YAML configs are
# all installed into the package's share directory.
_data_files = [
    ('share/ament_index/resource_index/packages',
     ['resource/' + package_name]),
    ('share/' + package_name, ['package.xml']),
    (os.path.join('share', package_name), glob('launch/*.launch.py')),
    (os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
]

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name, module_utils],
    data_files=_data_files,
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='Anton, Mads',
    maintainer_email='[email protected], [email protected]',
    description='TODO: Package description',
    license='TODO: License declaration',
    tests_require=['pytest'],
    # Console entry points exposed as `ros2 run orchestrator <name>`.
    entry_points={
        'console_scripts': [
            'shuttle = orchestrator.shuttle:main',
            'gui = orchestrator.pyside_gui:main',
            'manipulator = orchestrator.manipulator:main',
            'task_planner = orchestrator.task_planner:main',
            'spawn_manager = orchestrator.spawn_manager:main',
        ],
    },
)
| 1,161 |
Python
| 31.277777 | 79 | 0.615848 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/test/test_flake8.py
|
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run the ament flake8 linter over the package and fail on any finding."""
    return_code, errors = main_with_errors(argv=[])
    report = 'Found %d code style errors / warnings:\n' % len(errors) + '\n'.join(errors)
    assert return_code == 0, report
| 884 |
Python
| 33.03846 | 74 | 0.725113 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/test/test_pep257.py
|
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament pep257 docstring-style linter over the package sources."""
    return_code = main(argv=['.', 'test'])
    assert return_code == 0, 'Found code style errors / warnings'
| 803 |
Python
| 32.499999 | 74 | 0.743462 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/test/test_copyright.py
|
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
import pytest
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Verify every source file carries the expected copyright/license header."""
    return_code = main(argv=['.', 'test'])
    assert return_code == 0, 'Found errors'
| 790 |
Python
| 31.958332 | 74 | 0.74557 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/launch/orchestrator.launch.py
|
from launch_ros.actions import Node
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, RegisterEventHandler, ExecuteProcess
from launch.substitutions import LaunchConfiguration
from launch.actions import IncludeLaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch_ros.substitutions import FindPackageShare
from launch.launch_description_sources import PythonLaunchDescriptionSource
import os
from utils import save_yaml
from launch.event_handlers import (OnExecutionComplete, OnProcessExit,
OnProcessIO, OnProcessStart, OnShutdown)
def generate_launch_description():
    """Assemble the orchestrator bringup.

    Declares the launch arguments, includes the ``acopos_bringup`` launch
    file, and starts the shuttle, manipulator, task-planner, GUI and
    spawn-manager nodes.

    Returns:
        LaunchDescription: the complete launch description.
    """
    # Launch-time configuration handles (resolved when launch runs).
    # NOTE: unused handles for 'gui' and 'isaac_sim' were removed; the
    # corresponding arguments are still declared below.
    shuttle = LaunchConfiguration('sim_shuttle')
    no_shuttles = LaunchConfiguration('num_of_shuttles')
    manipulator = LaunchConfiguration('sim_manipulator')
    no_manipulators = LaunchConfiguration('num_of_manipulators')
    no_table = LaunchConfiguration('number_of_tabels')
    manipulator_position = LaunchConfiguration('manipulator_position')
    table_position = LaunchConfiguration('table_position')
    # Parameter files shared across nodes.
    global_parameters = os.path.join(
        get_package_share_directory('acopos_bringup'),
        'config',
        'global_params.yaml'
    )
    config = os.path.join(
        get_package_share_directory('orchestrator'),
        'config',
        'initial_position.yaml'
    )
    # Declare arguments
    gui_launch_arg = DeclareLaunchArgument(
        'gui',
        description="If you want a GUI with statistiscs and feedback.",
        default_value='False',
        choices=['True', 'False']
    )
    isaac_sim_launch_arg = DeclareLaunchArgument(
        'isaac_sim',
        description="If you want the simuleated environment therefore also no shuttles or manipulators.",
        default_value='True',
        choices=['True', 'False']
    )
    shuttle_launch_arg = DeclareLaunchArgument(
        'sim_shuttle',
        description="If you want the simulated environment with the shuttles.",
        default_value='True',
        choices=['True', 'False']
    )
    manipulator_launch_arg = DeclareLaunchArgument(
        'sim_manipulator',
        description="If you want the simulated environment with the manipulators.",
        default_value='True',
        choices=['True', 'False']
    )
    # NOTE(review): declared but never added to the LaunchDescription below,
    # so 'use_physical_setup' is currently unusable -- confirm whether it
    # should be appended to the returned list.
    physical_launch_arg = DeclareLaunchArgument(
        'use_physical_setup',
        description="If you want a physical setup or not.",
        default_value='False',
        choices=['True', 'False']
    )
    num_of_shuttle_launch_arg = DeclareLaunchArgument(
        'num_of_shuttles',
        description="This will decalare the number of shuttles you want on the tabel.",
        default_value="5"
    )
    num_of_manipulators_launch_arg = DeclareLaunchArgument(
        'num_of_manipulators',
        description="This will decalare the number of manipulators you want to the tabel.",
        default_value="6"
    )
    manipulator_position_launch_arg = DeclareLaunchArgument(
        'manipulator_position',
        description="This will decalare the nmanipulators position.",
        default_value="5"
    )
    table_position_launch_arg = DeclareLaunchArgument(
        'table_position',
        description="This will decalare the number of tabels you want.",
        default_value="6"
    )
    # BUG FIX: a duplicate DeclareLaunchArgument for 'num_of_tabels' used to
    # be bound to this same variable and was then shadowed by this
    # 'number_of_tabels' declaration, making it dead code; the unused
    # duplicate has been removed.
    num_of_tabels_launch_arg = DeclareLaunchArgument(
        'number_of_tabels',
        description="This will decalare the number of tabels you want.",
        default_value="6"
    )
    # Shuttle controller node.
    shuttle_node = Node(
        package='orchestrator',
        namespace='shuttle',
        executable='shuttle',
        name='shuttle',
        output="screen",
        emulate_tty=True,
        parameters=[
            {"num_of_shuttles": no_shuttles},
            {"sim_shuttle": shuttle}
        ]
    )
    # Manipulator controller node; reads initial poses and global parameters.
    manipulator_node = Node(
        package='orchestrator',
        namespace='manipulator',
        executable='manipulator',
        name='manipulator',
        output="screen",
        emulate_tty=True,
        parameters=[config,
                    global_parameters,
                    {"num_of_manipulators": no_manipulators},
                    {"sim_manipulators": manipulator}
                    ]
    )
    task_planner_node = Node(
        package='orchestrator',
        namespace='task_planner',
        executable='task_planner',
        name='sim'
    )
    # GUI node; receives counts and layout positions as parameters.
    gui_node = Node(
        package='orchestrator',
        executable='gui',
        name='gui',
        output="screen",
        emulate_tty=True,
        parameters=[config,
                    global_parameters,
                    {"num_of_shuttles": no_shuttles},
                    {"num_of_manipulators": no_manipulators},
                    {"num_of_tabels": no_table},
                    {"table_position": table_position},
                    {"robot_position": manipulator_position}
                    ]
    )
    spawn_node = Node(
        package='orchestrator',
        executable='spawn_manager',
        name='spawn_manager',
        output="screen",
        emulate_tty=True,
        parameters=[])
    return LaunchDescription([
        # Bring up the acopos stack first.
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource([
                FindPackageShare("acopos_bringup"), '/launch', '/acopos.launch.py'])
        ),
        gui_launch_arg,
        isaac_sim_launch_arg,
        shuttle_launch_arg,
        manipulator_launch_arg,
        num_of_shuttle_launch_arg,
        num_of_manipulators_launch_arg,
        manipulator_position_launch_arg,
        num_of_tabels_launch_arg,
        table_position_launch_arg,
        spawn_node,
        gui_node,
        shuttle_node,
        manipulator_node,
        task_planner_node
    ])
| 6,253 |
Python
| 31.743455 | 105 | 0.610587 |
abmoRobotics/MAPs/ros2_ws/src/orchestrator/orchestrator/task_plannerAnton.py
|
import rclpy
from rclpy.node import Node
import time
import heapq
from typing import List
class Station():
    """A single processing station with an availability flag."""

    def __init__(self) -> None:
        # A freshly created station starts out free for assignment.
        self.available = True

    def get_position(self):
        """Placeholder: position lookup is not implemented yet."""
        pass

    def set_position(self):
        """Placeholder: position update is not implemented yet."""
        pass

    def get_availibility(self):
        """Return True while the station is free."""
        return self.available

    def set_availibility(self, available: bool):
        """Mark the station free (True) or busy (False)."""
        self.available = available
class Task():
    """A single task, stamped with its wall-clock creation time.

    The timestamp lets schedulers compute how long the task has waited.
    """

    def __init__(self) -> None:
        # BUG FIX: the original bound a *local* variable (``creation_time``)
        # instead of an attribute, so get_creation_time() raised
        # AttributeError on every call.
        self.creation_time = time.time()

    def get_creation_time(self):
        """Return the creation timestamp as epoch seconds (float)."""
        return self.creation_time
class Shuttle():
    """A single transport shuttle with an availability flag."""

    def __init__(self) -> None:
        # A freshly created shuttle starts out free for assignment.
        self.available = True

    def get_position(self):
        """Placeholder: position lookup is not implemented yet."""
        pass

    def set_position(self):
        """Placeholder: position update is not implemented yet."""
        pass

    def get_availibility(self):
        """Return True while the shuttle is free."""
        return self.available

    def set_availibility(self, available: bool):
        """Mark the shuttle free (True) or busy (False)."""
        self.available = available
class StationManager():
    """Registry that keeps track of every known station."""

    def __init__(self) -> None:
        # Registered stations, in insertion order.
        self.stations = []

    def add_station(self, station: Station):
        """Register *station* with the manager."""
        self.stations.append(station)

    def remove_station(self, station: Station):
        """Deregister *station* (raises ValueError if it was never added)."""
        self.stations.remove(station)

    def get_stations(self):
        """Return the live (mutable) list of registered stations."""
        return self.stations

    def set_status(self, station: Station, status: bool):
        """Update *station*'s availability flag through its own setter."""
        station.set_availibility(status)
class TaskManager():
    """Registry that keeps track of all pending tasks."""

    def __init__(self) -> None:
        # Pending tasks, in insertion order.
        self.tasks = []

    def add_task(self, task: Task):
        """Queue *task* with the manager."""
        self.tasks.append(task)

    def remove_task(self, task: Task):
        """Drop *task* (raises ValueError if it was never queued)."""
        self.tasks.remove(task)

    def get_tasks(self):
        """Return the live (mutable) list of pending tasks."""
        return self.tasks
class ShuttleManager():
    """Registry that keeps track of every known shuttle."""

    def __init__(self) -> None:
        # Registered shuttles, in insertion order.
        self.shuttles = []

    def add_shuttle(self, shuttle: Shuttle):
        """Register *shuttle* with the manager."""
        self.shuttles.append(shuttle)

    def remove_shuttle(self, shuttle: Shuttle):
        """Deregister *shuttle* (raises ValueError if it was never added)."""
        self.shuttles.remove(shuttle)

    def get_shuttles(self):
        """Return the live (mutable) list of registered shuttles."""
        return self.shuttles

    def get_available_shuttles(self):
        """Not implemented yet; currently returns None."""
        pass

    def set_status(self, shuttle: Shuttle, status: bool):
        """Update *shuttle*'s availability flag through its own setter."""
        shuttle.set_availibility(status)
class TaskPlanner():
    """Plans and orchestrates tasks for the manipulators and shuttles."""

    def __init__(self) -> None:
        self.stations = StationManager()
        self.tasks = TaskManager()
        self.shuttles = ShuttleManager()
        self.active_tasks: List[Task] = []
        # Relative weights of the spatial vs. temporal terms in heuristic().
        self.heuristic_spatial_weight = 0.5
        self.heuristic_temporal_weight = 0.5

    def main(self):
        """One planning step: try to dispatch the next pending task."""
        # 1. Assign the next task to the available shuttle and manipulator
        self.next_task()
        # 2. Check if task is done (not implemented yet)
        # 3. Check if sub-task is done (not implemented yet)

    def timer_callback(self):
        """Periodic entry point (intended to be driven by a timer)."""
        self.main()

    def priority_scores(self, task: Task):
        """Return the best ``(score, station, shuttle)`` pairing for *task*.

        Lower scores are better; the cheapest pairing is popped from a
        min-heap.  (BUG FIX: the original was annotated ``-> float`` even
        though it returns a 3-tuple.)
        """
        # 1. Get the requirements of the task (not modelled yet)
        # 2. Get the available stations and shuttles that can fulfill them
        stations = self.get_available_stations()
        shuttles = self.get_available_shuttles()
        # 3. Score every station/shuttle pairing and keep them in a min-heap.
        # NOTE(review): on equal scores heapq falls back to comparing the
        # Station objects, which are not orderable -- consider pushing a
        # monotonic counter as a tiebreaker.
        priority_scores_heap = []
        for station in stations:
            for shuttle in shuttles:
                score = self.heuristic(task, station, shuttle)
                heapq.heappush(priority_scores_heap, (score, station, shuttle))
        # 4. The heap root is the lowest-scoring (best) combination.
        score, station, shuttle = heapq.heappop(priority_scores_heap)
        return score, station, shuttle

    def heuristic(self, task: Task, station: Station, shuttle: Shuttle, materialShuttle: Shuttle = None) -> float:
        """Weighted sum of a spatial and a temporal term (lower is better)."""
        ws = self.heuristic_spatial_weight
        wt = self.heuristic_temporal_weight
        # 1. Spatial term: positional difference between station and shuttle
        # (plus the material shuttle's leg when one is involved).
        # NOTE(review): Station/Shuttle.get_position() are stubs returning
        # None in this file, so this arithmetic assumes numeric positions --
        # confirm once positions are implemented.
        if materialShuttle is not None:
            station_position = station.get_position()
            spatial_priority_score = (station_position - shuttle.get_position()) + (station_position - materialShuttle.get_position())
        else:
            spatial_priority_score = station.get_position() - shuttle.get_position()
        # 2. Temporal term: seconds the task has been waiting since creation.
        temporal_priority_score = time.time() - task.get_creation_time()
        return ws * spatial_priority_score + wt * temporal_priority_score

    def next_task(self):
        """Find the next task to execute and reserve resources for it."""
        # Bail out early when there is nothing to schedule or no free resources.
        tasks = self.get_tasks()
        if not tasks:
            return None
        stations = self.get_available_stations()
        if not stations:
            return None
        shuttles = self.get_available_shuttles()
        if not shuttles:
            return None
        # (Manipulator availability is not checked yet.)
        # Pick the task whose best pairing has the lowest score.
        min_score = float('inf')
        min_station = None
        min_shuttle = None
        min_task = None
        for task in tasks:
            score, station, shuttle = self.priority_scores(task)
            if score < min_score:
                min_score = score
                min_station = station
                min_shuttle = shuttle
                min_task = task
        # Reserve the chosen station and shuttle ...
        self.stations.set_status(min_station, False)
        self.shuttles.set_status(min_shuttle, False)
        # (Manipulator reservation not implemented yet.)
        # ... and move the task from pending to active.
        self.active_tasks.append(min_task)
        self.tasks.remove_task(min_task)

    def assign_task(self, task: Task):
        """Assign a task to a shuttle and manipulator (not implemented yet)."""

    def get_tasks(self):
        """Return the pending tasks, or None when there are none."""
        return self.tasks.get_tasks() or None

    def get_available_stations(self):
        """Return the known stations, or None when there are none."""
        return self.stations.get_stations() or None

    def get_available_shuttles(self):
        """Return the known shuttles, or None when there are none.

        BUG FIX: the original ended with a bare ``None`` expression instead
        of ``return None`` (same behaviour, but clearly a typo).
        """
        return self.shuttles.get_shuttles() or None
| 7,100 |
Python
| 27.748988 | 134 | 0.60507 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.