file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---
DigitalBotLab/App/kit-app/source/extensions/omni.hello.world/omni/hello/world/tests/__init__.py | # Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .test_hello_world import *
| 617 | Python | 37.624998 | 74 | 0.768233 |
DigitalBotLab/App/kit-app/source/extensions/omni.hello.world/omni/hello/world/tests/test_hello_world.py | # Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the extension's python module we are testing with an absolute import path, as if we were an external user (another extension)
import omni.hello.world
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = omni.hello.world.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 2,253 | Python | 35.950819 | 142 | 0.70395 |
DigitalBotLab/App/kit-app/source/extensions/omni.hello.world/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
DigitalBotLab/App/kit-app/source/extensions/omni.hello.world/docs/README.md | # Simple UI Extension Template
The simplest python extension example. Use it as a starting point for your extensions.
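For orientation, an extension built from this template usually boils down to a single class deriving from `omni.ext.IExt` that builds a window in `on_startup`. The sketch below is illustrative only; the class name, window title, and label text are placeholders, not part of the template itself.

```python
import omni.ext
import omni.ui as ui


class MyExtension(omni.ext.IExt):
    """Hypothetical minimal extension: opens a small window with one label."""

    def on_startup(self, ext_id):
        # Called when the extension is enabled; build a tiny UI window.
        self._window = ui.Window("My Window", width=300, height=200)
        with self._window.frame:
            with ui.VStack():
                ui.Label("Hello from the template")

    def on_shutdown(self):
        # Called when the extension is disabled; release UI resources.
        self._window = None
```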
| 119 | Markdown | 28.999993 | 86 | 0.806723 |
DigitalBotLab/App/kit-app/source/launcher/description.toml | name = "USD Explorer" # displayed application name
shortName = "USD Explorer" # displayed application name in smaller card and library view
version = "${version}" # version must be semantic
kind = "app" # enum of "app", "connector", and "experience" for now
latest = true # boolean for if this version is the latest version
slug = "my_company.usd_explorer" # unique identifier for component, all lower case, persists between versions
productArea = "My Company" # displayed before application name in launcher
category = "Apps" # category of content
channel = "beta" # 3 filter types [ "alpha", "beta", "release "]
enterpriseStatus = false # set true if you want this package to show in enterprise launcher
#values for filtering content, not implemented yet
tags = [
"Manufacturing",
"Product Design",
"Scene Composition",
"Visualization",
"Rendering"
]
# string array; each entry is a new line, keep each line under 256 characters and use at most 4 lines
description = [
"My Company USD Explorer is an Omniverse app for Reviewing and Constructing large facilities such as factories, warehouses and more. It is built using NVIDIA Omniverse™ Kit. The Scene Description and in-memory model is based on Pixar's USD. Omniverse USD Composer takes advantage of the advanced workflows of USD like Layers, Variants, Instancing and much more.",
"When connected to a Omniverse Nucleus server, worlds can be authored LIVE across multiple Omniverse applications, machines and users for advanced collaborative workflows."
]
#array of links for more info on product
[[links]]
title = "Tutorials"
url = "http://omniverse.nvidia.com/tutorials"
[[links]]
title = "Forums"
url = "https://forums.developer.nvidia.com/c/omniverse/300"
[developer]
#name of developer
name = 'My Company'
# hyperlink on developer name (can be left as empty string)
url = 'https://www.my-company.com/'
[publisher]
#name of publisher
name = 'My Company'
# hyperlink on publisher name (can be left as empty string)
url = 'https://www.my-company.com/'
[url]
windows-x86_64 = 'windows-x86_64/package.zip'
linux-x86_64 = 'linux-x86_64/package.zip'
| 2,246 | TOML | 43.939999 | 363 | 0.704809 |
DigitalBotLab/App/kit-app/source/launcher/requirements.toml | # Optional note that will be shown below system requirements.
# Supports markdown.
note = "Note: Omniverse is built to run on any RTX-powered machine. For ideal performance, we recommend using GeForce RTX™ 2080, Quadro RTX™ 5000, or higher. For latest drivers, visit [NVIDIA Driver Downloads](https://www.nvidia.com/Download/index.aspx). For Quadro, select 'Quadro New Feature Driver (QNF)."
# System requirements specs.
# Supports line breaks.
[minimum]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "4"
ram = "16 GB"
storage = "512 GB SSD"
vram = "6 GB"
gpu = "Any RTX GPU"
[recommended]
cpuNames = "Intel I7\nAMD Ryzen"
cpuCores = "8"
ram = "32 GB"
storage = "512 GB M.2 SSD"
vram = "8 GB"
gpu = "GeForce RTX 2080\nQuadro RTX 5000"
| 734 | TOML | 33.999998 | 308 | 0.723433 |
DigitalBotLab/App/kit-app/source/launcher/launcher.toml | ## install and launch instructions by environment
[defaults.windows-x86_64]
url = ""
entrypoint = "${productRoot}/omni.usd_explorer.bat"
args = ["--/app/environment/name='launcher'"]
[defaults.windows-x86_64.open]
command = "${productRoot}/omni.usd_explorer.bat"
args = ['--exec "open_stage.py ${file}"', "--/app/environment/name='launcher'"]
[defaults.windows-x86_64.environment]
[defaults.windows-x86_64.install]
pre-install = ""
pre-install-args = []
install = "${productRoot}/pull_kit_sdk.bat"
install-args = []
post-install = "" # "${productRoot}/omni.usd_explorer.warmup.bat"
post-install-args = ["--/app/environment/name='launcher_warmup'"]
[defaults.windows-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
[defaults.linux-x86_64]
url = ""
entrypoint = "${productRoot}/omni.usd_explorer.sh"
args = ["--/app/environment/name='launcher'"]
[defaults.linux-x86_64.environment]
[defaults.linux-x86_64.install]
pre-install = ""
pre-install-args = []
install = "${productRoot}/pull_kit_sdk.sh"
install-args = []
post-install = "" # "${productRoot}/omni.usd_explorer.warmup.sh"
post-install-args = ["--/app/environment/name='launcher_warmup'"]
[defaults.linux-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
| 1,400 | TOML | 27.019999 | 79 | 0.696429 |
DigitalBotLab/AssetProvider/README.md | # <img src="Images/logo.png" alt="Logo" width="50" height="50"> Digital Bot Lab: AssetProvider


# Experience thousands of robots using Nvidia Omniverse

The Digital Bot Lab's Asset Provider Extension is a cutting-edge solution designed to seamlessly connect our extensive digital robot collection from `ROS` with the powerful `NVIDIA Omniverse platform`. With our connector, users can effortlessly import digital robots in `.usd` format, enabling them to leverage the full potential of Omniverse applications.
## 1. Get Started
Experience the future of robotics with the Digital Bot Lab's Insiderobo Connector, where the connection between digital robots and Omniverse becomes effortless and transformative.

### 1.1 Install Omniverse USD Composer
This project is currently targeted for `Omniverse USD Composer`. Please follow the instructions to install it first:
[USD Composer Overview](https://docs.omniverse.nvidia.com/composer/latest/index.html#:~:text=NVIDIA%20Omniverse%E2%84%A2%20USD%20Composer,is%20based%20on%20Pixar's%20USD.)
### 1.2 Import the extension
To install the extension to Omniverse USD Composer:
First, clone the repository:
```bash
git clone https://github.com/DigitalBotLab/AssetProvider
```
Now open `Omniverse USD Composer` and go to `Menu Bar` -> `Window` -> `Extensions` -> `Options` -> `Settings`.
Add `<path_to_this_repository>/AssetProvider/dbl-exts-asset/exts` to the extension search path settings.

### 1.3 Enable the extension

At the `Third Party` filter, enable our extension.
Now, after opening the `Window` -> `Asset Store (Beta)` tab, you can see this extension by filtering the asset provider to `DIGITAL BOT LAB`.
## 2. Format: USD
Our digital robots are meticulously crafted and well-configured in .usd format, complete with physics, rigid bodies, and joints. This ensures a realistic and immersive experience when interacting with the robots within Omniverse.
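As a rough sketch of what consuming such an asset can look like from Python (the local file path and prim path below are hypothetical, and `omni.isaac.core` is assumed to be available, as it is in Isaac-Sim-based Omniverse apps):

```python
# Minimal sketch: reference a downloaded robot .usd into the current stage and
# wrap it as a Robot so its articulation (joints, rigid bodies) can be driven.
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.robots.robot import Robot

robot_usd = "C:/assets/dbl_robot.usd"   # hypothetical path to an asset downloaded from the store
prim_path = "/World/dbl_robot"          # hypothetical prim path for the robot in the stage

add_reference_to_stage(usd_path=robot_usd, prim_path=prim_path)
robot = Robot(prim_path=prim_path, name="dbl_robot")
robot.initialize()                      # typically called once the simulation/timeline is playing
```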
## 3. ROS <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="Ros" width="70" height="70">
The Insiderobo Connector is built upon the foundation of the Robot Operating System (ROS), an open-source framework that empowers researchers and developers to easily build and reuse code across various robotics applications. This integration allows for enhanced collaboration, accelerated development, and seamless integration of digital robots into the Omniverse ecosystem.
## 4. License
Our project adheres to the Robot Operating System (ROS) framework, which enables us to develop and integrate robotic systems efficiently. We are proud to announce that our project is released under the BSD 3.0 license. This license ensures that our software is open-source, allowing users to freely use, modify, and distribute it while maintaining the necessary attribution and disclaimer requirements. By embracing ROS and the BSD 3.0 license, we aim to foster collaboration and innovation within the robotics community.
| 3,228 | Markdown | 50.253967 | 521 | 0.784077 |
DigitalBotLab/AssetProvider/dbl-exts-asset/README.md | # Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will make the Python experience better.
Look for "omni.assetprovider.digitalbotlab" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable company.hello.world
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder with extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,056 | Markdown | 37.81132 | 258 | 0.758755 |
DigitalBotLab/AssetProvider/dbl-exts-asset/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
DigitalBotLab/AssetProvider/dbl-exts-asset/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
DigitalBotLab/AssetProvider/dbl-exts-asset/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/config/extension.toml | [package]
authors = ["Digital Bot Lab"]
category = "services"
changelog = "docs/CHANGELOG.md"
version = "1.0.0"
title = "Digital Bot Lab Asset Provider"
description="Asset provider for Evermotion"
readme = "docs/README.md"
keywords = ["asset", "provider", "robot", "search", "digitalbotlab"]
icon = "data/logo.png"
preview_image = "data/preview.png"
repository = ""
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import omni.assetprovider.digitalbotlab".
[[python.module]]
name = "omni.assetprovider.digitalbotlab"
| 605 | TOML | 27.857142 | 105 | 0.722314 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/config/extension.gen.toml | [package]
exchange = true
partner = true | 40 | TOML | 12.666662 | 15 | 0.75 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/constants.py | IN_RELEASE = True
SETTING_ROOT = "/exts/omni.assetprovider.template/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
STORE_URL = "http://api.digitalbotlab.com/api/omniverse/assets" if IN_RELEASE else "http://localhost:8000/api/omniverse/assets"
THUMBNAIL_URL = "http://api.digitalbotlab.com/image/" if IN_RELEASE else "http://localhost:8000/image/"
DBL_ASSETPROVIDER_INTRO = "\n The Digital Bot Lab's Insiderobo Connector is \n a cutting-edge solution designed to seamlessly \n connect our extensive digital robot collection \n with the powerful NVIDIA Omniverse platform. \n\n Learn more about us: https://digitalbotlab.com/ \n Learn more about Omniverse: https://www.nvidia.com/en-us/omniverse/ \n Learn more about Insiderobo Connector: https://digitalbotlab.com/omniverse/asset-provider \n \n Contact us: [email protected]" | 836 | Python | 75.090902 | 482 | 0.773923 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/extension.py | import importlib
import carb
import carb.settings
import carb.tokens
import omni.ui as ui
import omni.ext
from omni.services.browser.asset import get_instance as get_asset_services
from .model import DBLAssetProvider
from .constants import SETTING_STORE_ENABLE, IN_RELEASE, DBL_ASSETPROVIDER_INTRO
import aiohttp
import asyncio
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniAssetproviderDigitalbotlabExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.assetprovider.digitalbotlab] omni assetprovider digitalbotlab startup")
self._asset_provider = DBLAssetProvider()
self._asset_service = get_asset_services()
self._asset_service.register_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, True)
print("what", carb.settings.get_settings().get(SETTING_STORE_ENABLE))
self._window = ui.Window("Digital Bot Lab: AssetProvider", width=300, height=300)
with self._window.frame:
with ui.VStack():
ui.ImageWithProvider(
f"{EXTENSION_FOLDER_PATH}/data/logo.png",
width=30,
height=30,
)
ui.Label("Introduction:", height = 20)
#intro_field = ui.StringField(multiline = True, readonly = True)
model = ui.SimpleStringModel(DBL_ASSETPROVIDER_INTRO)
field = ui.StringField(model, multiline=True, readonly=True, height=200)
# intro_field.model.set_value()
with ui.VStack(visible= not IN_RELEASE):
ui.Button("debug_authenticate", height = 20, clicked_fn = self.debug_authenticate)
ui.Button("debug_token", height = 20, clicked_fn = self.debug_token)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
def on_shutdown(self):
print("[omni.assetprovider.digitalbotlab] omni assetprovider digitalbotlab shutdown")
self._asset_service.unregister_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, False)
self._asset_provider = None
self._asset_service = None
def debug_authenticate(self):
async def authenticate():
params = {"email": "[email protected]", "password": "97654321abc"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/auth/signin", json=params) as response:
self._auth_params = await response.json()
print("auth_params", self._auth_params)
self.token = self._auth_params["token"]
asyncio.ensure_future(authenticate())
def debug_token(self):
async def verify_token():
params = {"token": self.token, "asset": "test"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/omniverse/download", json=params) as response:
response = await response.json()
print("response", response)
asyncio.ensure_future(verify_token())
def debug(self):
print("debug")
STORE_URL = "http://localhost:8000/api/omniverse/assets"
params = {}
params["page"] = 1
async def search():
# Uncomment once valid Store URL has been provided
async with aiohttp.ClientSession() as session:
async with session.get(f"{STORE_URL}", params=params) as resp:
result = await resp.read()
result = await resp.json()
items = result
print("items", items)
asyncio.ensure_future(search()) | 4,414 | Python | 40.650943 | 119 | 0.614182 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Dict, List, Optional, Union, Tuple
import aiohttp
from omni.services.browser.asset import BaseAssetStore, AssetModel, SearchCriteria, ProviderModel
from .constants import SETTING_STORE_ENABLE, STORE_URL, THUMBNAIL_URL
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.joinpath("data")
# The name of your company
PROVIDER_ID = "Digital Bot Lab"
class DBLAssetProvider(BaseAssetStore):
"""
Asset provider implementation.
"""
def __init__(self, ov_app="Kit", ov_version="na") -> None:
super().__init__(PROVIDER_ID)
self._ov_app = ov_app
self._ov_version = ov_version
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
""" Searches the asset store.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `search` function that will wrap this function in a timeout.
"""
params = {}
# Setting for filter search criteria
if search_criteria.filter.categories:
            # Convert category paths into lower-case keyword filters
categories = search_criteria.filter.categories
for category in categories:
if category.startswith("/"):
category = category[1:]
category_keywords = category.split("/")
params["filter[categories]"] = ",".join(category_keywords).lower()
# Setting for keywords search criteria
if search_criteria.keywords:
params["keywords"] = ",".join(search_criteria.keywords)
# Setting for page number search criteria
if search_criteria.page.number:
params["page"] = search_criteria.page.number
# Setting for max number of items per page
if search_criteria.page.size:
params["page_size"] = search_criteria.page.size
items = []
print("[model] params", params)
# Uncomment once valid Store URL has been provided
async with aiohttp.ClientSession() as session:
async with session.get(f"{STORE_URL}", params=params) as resp:
result = await resp.read()
result = await resp.json()
items = result
print("[model] items", items)
assets: List[AssetModel] = []
# Create AssetModel based off of JSON data
for item in items:
thumbnail = item.get("thumbnail", "")
thumbnail_name = thumbnail.split("/")[-1]
assets.append(
AssetModel(
identifier=item.get("id", ""),
name=item.get("name", ""),
published_at=item.get("pub_at", ""),
categories=[item.get("manufacturer", "robot")],
tags=item.get("searchField", []),
vendor=PROVIDER_ID,
product_url=item.get("url", ""),
download_url=item.get("download_url", ""),
price=item.get("price", 0),
thumbnail= THUMBNAIL_URL + thumbnail_name #"http://localhost:8000/image/258.png", #item.get("thumbnail", ""),
)
)
# Are there more assets that we can load?
print("[model] assets", len(assets), search_criteria.page.size)
more = True
if search_criteria.page.size and len(assets) < search_criteria.page.size:
more = False
return (assets, more)
# Uncomment once valid Store URL has been provided
async with aiohttp.ClientSession() as session:
async with session.get(f"{STORE_URL}", params=params) as resp:
result = await resp.read()
result = await resp.json()
items = result
print("[model] items", items)
assets: List[AssetModel] = []
# Create AssetModel based off of JSON data
for item in items:
assets.append(
AssetModel(
identifier=item.get("identifier", ""),
name=item.get("name", ""),
published_at=item.get("pub_at", ""),
categories=item.get("categories", []),
tags=item.get("tags", []),
vendor=PROVIDER_ID,
product_url=item.get("url", ""),
download_url=item.get("download_url", ""),
price=item.get("price", 0),
thumbnail=item.get("thumbnail", ""),
)
)
# Are there more assets that we can load?
more = True
if search_criteria.page.size and len(assets) < search_criteria.page.size:
more = False
return (assets, more)
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(
name=PROVIDER_ID, icon=f"{DATA_PATH}/logo.png", enable_setting=SETTING_STORE_ENABLE
)
| 5,510 | Python | 36.236486 | 127 | 0.580581 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the extension's python module we are testing with an absolute import path, as if we were an external user (another extension)
import omni.assetprovider.digitalbotlab
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = omni.assetprovider.digitalbotlab.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,700 | Python | 35.191489 | 142 | 0.687059 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/docs/README.md | # Python Extension Example [omni.assetprovider.digitalbotlab]
This is an example of a pure Python Kit extension. It is intended to be copied and to serve as a template for creating new extensions.
| 191 | Markdown | 37.399993 | 126 | 0.801047 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/docs/index.rst | omni.assetprovider.digitalbotlab
########################################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"omni.assetprovider.digitalbotlab"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 365 | reStructuredText | 16.428571 | 49 | 0.646575 |
DigitalBotLab/Robots/README.md | # Robots
Repository for all robots
| 34 | Markdown | 10.666663 | 24 | 0.794118 |
DigitalBotLab/Robots/robot-exts-control/README.md | # Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will make the Python experience better.
Look for "control" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable company.hello.world
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder with extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,031 | Markdown | 37.339622 | 258 | 0.75677 |
DigitalBotLab/Robots/robot-exts-control/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
DigitalBotLab/Robots/robot-exts-control/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
DigitalBotLab/Robots/robot-exts-control/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/extension.py | import omni.ext
import omni.ui as ui
import omni.timeline
import omni.kit.app
import carb
from typing import Optional, List
import numpy as np
from pxr import Gf
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
from .kinova.kinova import Kinova
from .kinova.coffee_controller import CoffeeMakerController
from .kinova.numpy_utils import euler_angles_to_quat, quat_mul
# UI
from .ui.style import julia_modeler_style
from .ui.custom_multifield_widget import CustomMultifieldWidget
from .ui.custom_bool_widget import CustomBoolWidget
class ControlExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[control] control startup")
self.ext_id = ext_id
# set up fps limit
carb.settings.get_settings().set_float("/app/runLoops/main/rateLimitFrequency", 30)
carb.settings.get_settings().set_float("/app/runLoops/present/rateLimitFrequency", 30)
carb.settings.get_settings().set_bool("/rtx/ecoMode/enabled", True)
# ui
self._window = ui.Window("Robot control", width=300, height=300)
self._window.frame.style = julia_modeler_style
with self._window.frame:
with ui.VStack():
# ui.Button("Set Robot", height = 20, clicked_fn=self.set_robot)
ui.Line(height = 2)
ui.Button("Register Physics Event", height = 50, clicked_fn=self.register_physics_event)
with ui.HStack(height = 20):
ui.Label("Robot Prim Path:", width = 200)
self.robot_path_widget = ui.StringField(width = 300)
self.robot_path_widget.model.set_value("/World/kinova_gen3_7_hand/kinova")
with ui.HStack(height = 20):
self.server_widget = CustomBoolWidget(label="Connect to Server", default_value=False)
ui.Spacer(height = 9)
ui.Label("End Effector", height = 20)
with ui.HStack(height = 20):
self.ee_pos_widget = CustomMultifieldWidget(
label="Transform",
default_vals=[0, 0, 0],
height = 20,
)
ui.Spacer(height = 9)
with ui.HStack(height = 20):
self.ee_ori_widget = CustomMultifieldWidget(
label="Orient (Euler)",
default_vals=[90, 0.0, 90],
height = 20,
)
ui.Spacer(height = 9)
ui.Button("Update EE Target", height = 20, clicked_fn=self.update_ee_target)
ui.Button("Open/Close Gripper", height = 20, clicked_fn=self.toggle_gripper)
ui.Spacer(height = 9)
ui.Line(height = 2)
with ui.HStack(height = 20):
self.joint_read_widget = CustomMultifieldWidget(
label="Joint Angle (read only):",
sublabels=["j1", "j2", "j3", "j4", "j5", "j6", "j7"],
default_vals=[0.0] * 7,
read_only= True
)
with ui.HStack(height = 20):
self.ee_pos_read_widget = CustomMultifieldWidget(
label="EE Position(read only):",
sublabels=["x", "y", "z"],
default_vals=[0, 0, 0],
read_only= True
)
with ui.HStack(height = 20):
self.ee_ori_quat_read_widget = CustomMultifieldWidget(
label="EE Quaternion(read only):",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
read_only= True
)
# with ui.HStack(height = 20):
# self.ee_ori_euler_read_widget = CustomMultifieldWidget(
# label="EE Euler Rot(read only):",
# sublabels=["x", "y", "z"],
# default_vals=[0, 0, 0],
# read_only= True
# )
# vision part
ui.Spacer(height = 9)
ui.Line(height = 2)
ui.Button("Test vision", height = 20, clicked_fn = self.test_vision)
ui.Button("Draw vision", height = 20, clicked_fn = self.draw_vision)
ui.Button("Draw vision 2", height = 20, clicked_fn = self.draw_vision2)
ui.Spacer(height = 9)
ui.Line(height = 2)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
ui.Button("Debug2", height = 20, clicked_fn = self.debug2)
ui.Button("yh Debug", height = 20, clicked_fn = self.yuanhong_debug)
ui.Spacer(height = 9)
ui.Line(height = 2)
# robot
self.robot = None
self.controller = None
self.event_t = 0.0
# stream
self._is_stopped = True
self._tensor_started = False
def on_shutdown(self):
print("[control] control shutdown")
########################## events #######################################################
def register_physics_event(self):
print("register_physics_event")
# timeline
stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event)
def _on_timeline_event(self, event):
if event.type == int(omni.timeline.TimelineEventType.PLAY):
self._physics_update_sub = omni.physx.get_physx_interface().subscribe_physics_step_events(self._on_physics_step)
self._is_stopped = False
elif event.type == int(omni.timeline.TimelineEventType.STOP):
self._physics_update_sub = None
self._timeline_sub = None
self._is_stopped = True
self._tensor_started = False
self.robot = None
self.controller = None
def _can_callback_physics_step(self) -> bool:
if self._is_stopped:
return False
if self._tensor_started:
return True
self._tensor_started = True
self.set_robot()
return True
def _on_physics_step(self, dt):
self.event_t += dt # update time
if not self._can_callback_physics_step():
return
if self.controller:
# print("_on_physics_step")
self.controller.forward()
if self.event_t >= 1.0:
# update joint info
self.update_robot_ui()
self.event_t = 0.0
############################################# Robot #######################################
def update_ee_target(self):
print("update_ee_target")
if self.controller:
self.controller.update_event("move")
current_pos, current_rot = self.robot.end_effector.get_world_pose()
pos = [self.ee_pos_widget.multifields[i].model.as_float for i in range(3)]
rot = [self.ee_ori_widget.multifields[i].model.as_float for i in range(3)]
pos = np.array(current_pos) + np.array(pos)
rot = euler_angles_to_quat(rot, degrees=True)
# current_rot = np.array([current_rot[1], current_rot[2], current_rot[3], current_rot[0]])
# rot = quat_mul(current_rot, rot)
rot = np.array([rot[3], rot[0], rot[1], rot[2]])
print("updating controller ee target:", pos, rot)
self.controller.update_ee_target(pos, rot)
def set_robot(self):
print("set_robot")
# set robot
prim_path = self.robot_path_widget.model.as_string
self.robot = Kinova(prim_path = prim_path, name = "kinova_robot")
self.robot.initialize()
print("kinova_info", self.robot.num_dof)
print("kinova_gripper", self.robot.gripper._gripper_joint_num)
# set controller
self.controller = CoffeeMakerController("task_controller", self.robot, connect_server=self.server_widget.value)
def toggle_gripper(self):
print("Toggle Gripper")
if self.controller:
event = "open" if self.controller.event == "close" else "close"
self.controller.update_event(event)
######################### ui #############################################################
def update_robot_ui(self):
"""
read robot joint angles and update ui
"""
assert self.robot, "robot is not initialized"
joint_angles = self.robot.get_joint_positions()
joint_angles = [np.rad2deg(joint_angles[i]) for i in range(7)]
self.joint_read_widget.update(joint_angles)
self.ee_pos_read_widget.update(self.robot.end_effector.get_world_pose()[0])
rot_quat = self.robot.end_effector.get_world_pose()[1]
self.ee_ori_quat_read_widget.update(rot_quat)
# rot_euler = quat_to_euler_angles(rot_quat, degrees=True)
# print("rot_euler:", rot_euler)
# self.ee_ori_euler_read_widget.update(rot_euler[0])
def debug(self):
print("debug")
# if self.robot:
# self.controller.apply_high_level_action("pick_up_capsule")
# self.controller.apply_high_level_action("move_capsule_to_coffee_machine")
if self.robot:
self.controller.apply_high_level_action("pick_up_box")
def debug2(self):
print("debug2")
if self.robot:
# self.controller.apply_high_level_action("pick_up_capsule")
# self.controller.apply_high_level_action("move_capsule_to_coffee_machine")
# self.controller.apply_high_level_action("pick_up_papercup")
# self.controller.apply_high_level_action("open_coffee_machine_handle")
self.controller.apply_high_level_action("close_coffee_machine_handle")
self.controller.apply_high_level_action("press_coffee_machine_button")
# from omni.isaac.core.prims import XFormPrim
# from .kinova.utils import get_transform_mat_from_pos_rot
# stage = omni.usd.get_context().get_stage()
# base_prim = XFormPrim("/World/capsule")
# base_world_pos, base_world_rot = base_prim.get_world_pose()
# base_mat = get_transform_mat_from_pos_rot(base_world_pos, base_world_rot)
def yuanhong_debug(self):
# target_mat = get_transform_mat_from_pos_rot([-0.083, 0.43895, 0], [0.5] * 4)
# rel_mat = target_mat * base_mat.GetInverse()
# print("base_mat:", base_mat)
# print("target_mat:", target_mat)
# print("rel_mat:", rel_mat.ExtractTranslation(), rel_mat.ExtractRotationQuat())
print("yuanhong_debug")
if self.robot:
self.controller.apply_high_level_action("pick_up_papercup")
self.controller.apply_high_level_action("move_papercup_to_coffee_machine")
#obtain_robot_state = self.controller.obtain_robot_state()
# print("obtain_robot_state:", obtain_robot_state)
# from pxr import UsdGeom, Usd
# stage = omni.usd.get_context().get_stage()
# cup_prim = stage.GetPrimAtPath("/World/Simple_Paper_Cup")
# xformable = UsdGeom.Xformable(cup_prim)
# mat0 = xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
# pos = mat0.ExtractTranslation()
# print("cup pos:", pos)
pass
########################## vision ########################################################
def test_vision(self):
print("test_vision")
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url=None, vision_folder="I:\\Temp")
# self.vision_helper.get_image_from_webcam()
self.vision_helper.obtain_camera_transform(camara_path="/World/Camera")
t = self.vision_helper.camera_mat.ExtractTranslation()
print("camera offset", t)
foc = 1000
world_d = self.vision_helper.get_world_direction_from_camera_point(0, 0, foc, foc)
world_d= world_d.GetNormalized()
print("world_d:", world_d)
self.vision_helper.draw_debug_line(t, world_d)
self.vision_helper.get_hit_position(t, world_d, target_prim_path="/World/Desk")
# from omni.physx import get_physx_scene_query_interface
# t = carb.Float3(t[0], t[1], t[2])
# d = carb.Float3(world_d[0], world_d[1], world_d[2])
# get_physx_scene_query_interface().raycast_all(t, d, 100.0, self.report_all_hits)
# def report_all_hits(self, hit):
# stage = omni.usd.get_context().get_stage()
# from pxr import UsdGeom
# usdGeom = UsdGeom.Mesh.Get(stage, hit.rigid_body)
# print("hit:", hit.rigid_body, usdGeom.GetPrim().GetPath(), hit.position, hit.normal, hit.distance, hit.face_index)
# return True
def draw_vision(self):
# print("draw_vision")
# from omni.ui import scene as sc
# from omni.ui import color as cl
# from omni.kit.viewport.utility import get_active_viewport_window
# self._viewport_window = get_active_viewport_window()
# if hasattr(self, "scene_view"):
# self.scene_view.scene.clear()
# if self._viewport_window:
# self._viewport_window.viewport_api.remove_scene_view(self.scene_view)
# self.scene_view = None
# with self._viewport_window.get_frame(0):
# self.scene_view = sc.SceneView()
# self.scene_view.scene.clear()
# points_b = [[12500.0, 0, 0.0], [0.0, 0, 12500.0], [-12500.0, 0, 0.0], [-0.0, 0, -12500.0], [12500.0, 0, -0.0]]
# with self.scene_view.scene:
# transform = sc.Transform()
# # move_ges = MoveGesture(transform)
# with transform:
# for pt in points_b:
# sc.Curve([pt, [0, 0, 0]], thicknesses=[1.0], colors=[cl.green], curve_type=sc.Curve.CurveType.LINEAR)
# self._viewport_window.viewport_api.add_scene_view(self.scene_view)
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url="http://127.0.0.1:7860/run/predict",
vision_folder="I:\\Temp",
camera_prim_path="/World/Camera",
vision_model="fastsam")
self.vision_helper.capture_image(folder_path="I:\\Temp\\VisionTest", image_name="test")
return
def draw_vision2(self):
# print("draw_vision2")
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url="http://127.0.0.1:7860/run/predict",
vision_folder="I:\\Temp",
camera_prim_path="/World/Camera",
vision_model="fastsam")
# self.vision_helper.capture_image(folder_path="I:\\Temp\\VisionTest", image_name="test")
# return
import cv2
import os
import numpy as np
from .vision.utils import find_bottom_point, find_left_point, get_projection, get_box_transform_from_point
img_path = None
print("os.listdir", os.listdir("I:\\Temp\\VisionTest"))
for item in os.listdir("I:\\Temp\\VisionTest"):
print("item:", item)
if item.endswith(".png") and item.startswith("test"):
img_path = os.path.join("I:\\Temp\\VisionTest", item)
break
assert img_path, "image not found"
print("img_path:", img_path)
image = cv2.imread(img_path)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_blue = np.array([90, 50, 50])
upper_blue = np.array([130, 255, 255])
mask = cv2.inRange(hsv_image, lower_blue, upper_blue)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = contours[0]
arclen = cv2.arcLength(contour, True)
# WARNING: 0.005 is a magic number
contour = cv2.approxPolyDP(contour, arclen*0.005, True)
cv2.drawContours(image, [contour], -1, (0, 255, 0), 2) # Green color, thickness 2
print("contour:", contour, len(contour))
# response_data = self.vision_helper.get_prediction_data("I:\\Temp\\0.jpg", "grey tea tower")
# print(response_data)
# response_data = {'data': ['[[[[736, 113]], [[608, 133]], [[591, 151]], [[590, 373]], [[620, 419]], [[646, 419]], [[741, 392]], [[790, 162]]]]'], 'is_generating': False, 'duration': 11.769976139068604, 'average_duration': 11.769976139068604}
# import json
# import numpy as np
# countour = json.loads(response_data["data"][0])
print("countour", contour)
points = np.array([p[0] for p in contour])
print("p0", points)
bottom_point = find_bottom_point(points)
left_point = find_left_point(points)
print("bottom_point", bottom_point)
image = cv2.circle(image, bottom_point, radius=10, color=(255, 0, 255), thickness=-1)
image = cv2.circle(image, left_point, radius=10, color=(255, 255, 0), thickness=-1)
cv2.imshow('Blue Contours', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#REFERENCE: Camera Calibration and 3D Reconstruction from Single Images Using Parallelepipeds
self.vision_helper.obtain_camera_transform(camara_path="/World/Camera")
camera_pos = self.vision_helper.camera_mat.ExtractTranslation()
print("camera offset", camera_pos)
foc = 910
bottom_d = self.vision_helper.get_world_direction_from_camera_point(bottom_point[0], 1080 - bottom_point[1], foc, foc)
bottom_d= bottom_d.GetNormalized()
print("bottom_d:", bottom_d)
left_d = self.vision_helper.get_world_direction_from_camera_point(left_point[0], 1080 - left_point[1], foc, foc)
left_d= left_d.GetNormalized()
print("left_d:", left_d)
self.vision_helper.draw_debug_line(camera_pos, left_d, length=10)
# self.vision_helper.get_hit_position(t, world_d, target_prim_path="/World/Desk")
box_transform, box_rotation = get_box_transform_from_point(camera_pos, bottom_d, left_d, affordance_z = -0.02)
print("box_transform:", box_transform)
print("box_rotation:", box_rotation)
stage = omni.usd.get_context().get_stage()
stage.DefinePrim("/World/box", "Xform")
mat = Gf.Matrix4d().SetScale(1) * \
Gf.Matrix4d().SetRotate(box_rotation) * \
Gf.Matrix4d().SetTranslate(Gf.Vec3d(box_transform[0], box_transform[1], box_transform[2]))
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/box",
new_transform_matrix=mat,
)
| 19,930 | Python | 41.862366 | 251 | 0.559508 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/rtc/test.py | import omni
import importlib
import carb.settings
class RTCTest():
def __init__(self):
manager = omni.kit.app.get_app().get_extension_manager()
self._webrtc_was_enabled = manager.is_extension_enabled("omni.services.streamclient.webrtc")
if not self._webrtc_was_enabled:
manager.set_extension_enabled_immediate("omni.services.streamclient.webrtc", True)
# self._webrtc_api = importlib.import_module("omni.physics.tensors")
def test_main(self):
from omni.services.client import AsyncClient
from omni.services.streamclient.webrtc.services.browser_frontend import example_page, redirect_url, router_prefix
frontend_port = carb.settings.get_settings().get_as_int("exts/omni.services.transport.server.http/port")
frontend_prefix = f"http://localhost:{frontend_port}{router_prefix}"
self._redirect_page_path = f"{frontend_prefix}{example_page}"
self._client_page_path = f"{frontend_prefix}{redirect_url}"
print("frontend_port", frontend_port)
print("frontend_prefix", frontend_prefix)
print("self._redirect_page_path", self._redirect_page_path)
print("self._client_page_path", self._client_page_path) | 1,230 | Python | 46.346152 | 121 | 0.692683 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/vision_helper.py | # send message to Kinova Server to control the real robot
try:
import cv2
except:
# omni.kit.pipapi extension is required
import omni.kit.pipapi
# It wraps `pip install` calls and reroutes package installation into user specified environment folder.
# That folder is added to sys.path.
# Note: This call is blocking and slow. It is meant to be used for debugging, development. For final product packages
# should be installed at build-time and packaged inside extensions.
omni.kit.pipapi.install(
package="opencv-python",
)
import cv2
from PIL import Image
import requests
import base64
import os
import omni.usd
import carb
from pxr import Gf, UsdGeom
import omni.timeline
import omni.graph.core as og
from omni.physx import get_physx_scene_query_interface
from omni.debugdraw import get_debug_draw_interface
CX = 1920/2 # principal point x
CY = 1080/2 # principal point y
class VisionHelper():
def __init__(self,
vision_url: str,
vision_folder:str,
camera_prim_path = "/OmniverseKit_Persp",
vision_model = "dino") -> None:
# vision
self.vision_url = vision_url
self.vision_folder = vision_folder
self.vision_model = vision_model
self.camera_prim_path = camera_prim_path
# stage
self.stage = omni.usd.get_context().get_stage()
def get_prediction_data(self, image_file: str, object_name: str):
"""
Get bounding box data from the Gradio server
"""
# Set the request payload
with open(image_file, "rb") as f:
encoded_string = base64.b64encode(f.read())
data_url = "data:image/png;base64," + encoded_string.decode("utf-8")
payload = {
"data": [
data_url, object_name
]
}
# Send the request to the Gradio server
response = requests.post(self.vision_url, json=payload)
# Get the response data as a Python object
response_data = response.json()
# Print the response data
# print(response_data)
return response_data
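    # Usage sketch (hypothetical endpoint and file; the layout of response_data["data"]
    # depends on the deployed Gradio model, so the indexing below is illustrative only):
    #   helper = VisionHelper(vision_url="http://localhost:7860/api/predict",
    #                         vision_folder="I:/Temp/VisionTest")
    #   response = helper.get_prediction_data("I:/Temp/VisionTest/0.jpg", "box")
    #   detections = response["data"]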
def get_image_from_webcam(self, image_name = "0.jpg"):
"""
Get image from webcam
"""
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame, 'RGB')
image.save(self.vision_folder + f"/{image_name}")
print("Image saved at path: " + self.vision_folder + f"/{image_name}")
cap.release()
def obtain_camera_transform(self, camara_path: str):
"""
Obtain camera transform
"""
camera_prim = omni.usd.get_context().get_stage().GetPrimAtPath(camara_path)
xformable = UsdGeom.Xformable(camera_prim)
self.camera_mat = xformable.ComputeLocalToWorldTransform(0)
def get_world_direction_from_camera_point(self, x, y, fx, fy):
"""
Get world direction from camera point
"""
# camera_point = Gf.Vec3d(x, y, 1)
# K = Gf.Matrix3d(fx, 0, 0, 0, fy, 0, CX, CY, 1)
# K_inverse = K.GetInverse()
Z = -1
R = self.camera_mat.ExtractRotationMatrix()
R_inverse = R.GetInverse()
# world_point = (camera_point * K_inverse - t) * R_inverse
D = Gf.Vec3d((CX - x) * Z / fx, (CY - y) * Z / fy, Z)
world_direction = R_inverse * D
return world_direction
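    # Back-projection sketch: with Z fixed at -1 (one unit in front of a -Z forward camera),
    # D = ((CX - x) * Z / fx, (CY - y) * Z / fy, Z) is the camera-space ray through pixel (x, y),
    # which is then transformed by the inverse of the camera rotation extracted from its
    # local-to-world matrix. Illustrative call (obtain_camera_transform() must be called first;
    # the focal lengths here are placeholders, not calibrated values):
    #   helper.obtain_camera_transform("/OmniverseKit_Persp")
    #   ray = helper.get_world_direction_from_camera_point(960, 540, fx=1400, fy=1400)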
def draw_debug_line(self, origin, direction, length = 1, node_path = "/World/PushGraph/make_array"):
"""
Draw debug line
"""
make_array_node = og.Controller.node(node_path)
if make_array_node.is_valid():
# print("draw debug line")
origin_attribute = make_array_node.get_attribute("inputs:input0")
target_attribute = make_array_node.get_attribute("inputs:input1")
size_attribute = make_array_node.get_attribute("inputs:arraySize")
# attr_value = og.Controller.get(attribute)
og.Controller.set(size_attribute, 2)
og.Controller.set(origin_attribute, [origin[0], origin[1], origin[2]])
og.Controller.set(target_attribute, [direction[0] * length + origin[0], direction[1] * length + origin[1], direction[2] * length + origin[2]])
# print("attr:", attr_value)
def get_hit_position(self, origin, direction, target_prim_path = "/World/Desk"):
"""
Get hit position
note: should be call while timeline is playing
"""
timeline = omni.timeline.get_timeline_interface()
assert timeline.is_playing(), "timeline is not playing"
        hit_position = None
        def report_all_hits(hit):
            # write back to the enclosing scope instead of shadowing with a local variable
            nonlocal hit_position
            usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
            print("hit:", hit.rigid_body, usdGeom.GetPrim().GetPath(), hit.position, hit.normal, hit.distance, hit.face_index)
            if usdGeom.GetPrim().GetPath().pathString == target_prim_path:
                hit_position = hit.position
            return True  # keep traversing so a later hit on the target prim is not missed
        t = carb.Float3(origin[0], origin[1], origin[2])
        d = carb.Float3(direction[0], direction[1], direction[2])
        # print("t:", t, "d:", d)
        get_physx_scene_query_interface().raycast_all(t, d, 100.0, report_all_hits)
        return hit_position
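    # Usage sketch (only meaningful while the timeline is playing, as asserted above;
    # the ray values and target prim path are illustrative):
    #   hit = helper.get_hit_position((0, 0, 1), (0, 0, -1), target_prim_path="/World/Desk")
    #   if hit is not None:
    #       print("desk hit at", hit)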
############################################# action #############################################
def capture_image(self, folder_path = "I:\\Temp\\VisionTest", image_name = "test"):
from omni.kit.capture.viewport import CaptureOptions, CaptureExtension
options = CaptureOptions()
options.file_name = image_name
options.file_type = ".png"
options.output_folder = str(folder_path)
options.camera = self.camera_prim_path
        if not os.path.exists(options.output_folder):
            os.makedirs(options.output_folder)
images = os.listdir(options.output_folder)
for item in images:
if item.endswith(options.file_type) and item.startswith(options.file_name):
os.remove(os.path.join(options.output_folder, item))
capture_instance = CaptureExtension().get_instance()
capture_instance.options = options
capture_instance.start() | 6,362 | Python | 36.650887 | 154 | 0.595253 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/utils.py | import numpy as np
import cv2
from pxr import Gf
BOX_SIZE = [0.071, 0.0965, 0.1198] # in cm
def find_bottom_point(points):
"""
Find the bottom point from a list of points
"""
bottom_point = points[0]
for point in points:
if point[1] > bottom_point[1]:
bottom_point = point
return bottom_point
def find_left_point(points):
"""
Find the left point from a list of points
"""
left_point = points[0]
for point in points:
if point[0] < left_point[0]:
left_point = point
return left_point
def get_projection(point, direction, z):
"""
Get projection
"""
t = (z - point[2]) / direction[2]
x = point[0] + direction[0] * t
y = point[1] + direction[1] * t
return np.array((x, y, z))
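# get_projection() intersects the ray p(t) = point + t * direction with the plane z = const,
# using t = (z - point_z) / direction_z, so direction_z must be non-zero.
# Quick check with illustrative values:
#   get_projection(np.array([0.0, 0.0, 1.0]), np.array([0.0, 0.0, -1.0]), 0.0)
#   # -> array([0., 0., 0.])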
def get_box_transform_from_point(camera_position, bottom_direction, left_direction, affordance_z = 0):
"""
Get box points
"""
bottom_point = get_projection(camera_position, bottom_direction, affordance_z)
left_point = get_projection(camera_position, left_direction, affordance_z)
distance = np.linalg.norm(bottom_point - left_point)
closest_value = min(BOX_SIZE,key=lambda x:abs(x-distance))
print("distance: ", distance, bottom_point, left_point, "\n close to: ", closest_value)
direction = left_point - bottom_point
direction = direction / np.linalg.norm(direction)
direction = Gf.Vec3d(direction[0], direction[1], direction[2])
print("direction: ", direction)
# determine the box rotation
if closest_value == BOX_SIZE[0]:
direction_r = np.array([direction[1], -direction[0], 0])
right_point = bottom_point + direction_r * BOX_SIZE[1]
center_point = (left_point + right_point) / 2
rotation = Gf.Rotation(Gf.Vec3d(0, -1, 0), direction)
elif closest_value == BOX_SIZE[1]:
direction_r = np.array([direction[1], -direction[0], 0])
right_point = bottom_point + direction_r * BOX_SIZE[0]
center_point = (left_point + right_point) / 2
rotation = Gf.Rotation(Gf.Vec3d(-1, 0, 0), direction)
else:
center_point = (left_point + bottom_point) / 2
from_direction = Gf.Vec3d([BOX_SIZE[1], -BOX_SIZE[1], 0]).GetNormalized()
rotation = Gf.Rotation(from_direction, direction)
return center_point, rotation.GetQuat()
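# Usage sketch (illustrative values; in practice camera_position and the two ray directions
# come from VisionHelper's back-projection of the detected bottom and left corner pixels):
#   center, quat = get_box_transform_from_point(
#       np.array([0.0, 0.0, 1.0]),      # camera position in world space
#       np.array([0.05, 0.0, -1.0]),    # ray towards the detected bottom corner
#       np.array([0.0, 0.05, -1.0]),    # ray towards the detected left corner
#       affordance_z=0.0)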
| 2,375 | Python | 32 | 102 | 0.620632 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/gpt/chatgpt_apiconnect.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import omni.usd
import carb
import os
import aiohttp
import asyncio
from pxr import Sdf
from .prompts import system_input, user_input, assistant_input
from .deep_search import query_items
from .item_generator import place_greyboxes, place_deepsearch_results
async def chatGPT_call(prompt: str):
# Load your API key from an environment variable or secret management service
settings = carb.settings.get_settings()
apikey = settings.get_as_string("/persistent/exts/omni.example.airoomgenerator/APIKey")
my_prompt = prompt.replace("\n", " ")
# Send a request API
try:
parameters = {
"model": "gpt-3.5-turbo",
"messages": [
{"role": "system", "content": system_input},
{"role": "user", "content": user_input},
{"role": "assistant", "content": assistant_input},
{"role": "user", "content": my_prompt}
]
}
chatgpt_url = "https://api.openai.com/v1/chat/completions"
headers = {"Authorization": "Bearer %s" % apikey}
# Create a completion using the chatGPT model
async with aiohttp.ClientSession() as session:
async with session.post(chatgpt_url, headers=headers, json=parameters) as r:
response = await r.json()
text = response["choices"][0]["message"]['content']
except Exception as e:
        carb.log_error(f"An error has occurred: {e}")
return None, str(e)
# Parse data that was given from API
try:
#convert string to object
data = json.loads(text)
except ValueError as e:
carb.log_error(f"Exception occurred: {e}")
return None, text
else:
# Get area_objects_list
object_list = data['area_objects_list']
return object_list, text
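# Usage sketch (must be awaited from Kit's asyncio loop, e.g. via asyncio.ensure_future, and
# assumes a valid OpenAI key is stored under the persistent APIKey setting read above):
#   objects, raw = await chatGPT_call(
#       "Office, 500x400, origin at (0.0,0.0,0.0), generate a list of office objects")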
| 2,548 | Python | 35.942028 | 98 | 0.648744 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/gpt/prompts.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
system_input='''You are an area generator expert. Given an area of a certain size, you can generate a list of items that are appropriate to that area, in the right place, and with a representative material.
You operate in a 3D Space. You work in a X,Y,Z coordinate system. X denotes width, Y denotes height, Z denotes depth. 0.0,0.0,0.0 is the default space origin.
You receive from the user the name of the area, the size of the area on X and Z axis in centimetres, the origin point of the area (which is at the center of the area).
You answer by only generating JSON files that contain the following information:
- area_name: name of the area
- X: coordinate of the area on X axis
- Y: coordinate of the area on Y axis
- Z: coordinate of the area on Z axis
- area_size_X: dimension in cm of the area on X axis
- area_size_Z: dimension in cm of the area on Z axis
- area_objects_list: list of all the objects in the area
For each object you need to store:
- object_name: name of the object
- X: coordinate of the object on X axis
- Y: coordinate of the object on Y axis
- Z: coordinate of the object on Z axis
- Length: dimension in cm of the object on X axis
- Width: dimension in cm of the object on Y axis
- Height: dimension in cm of the object on Z axis
- Material: a reasonable material of the object using an exact name from the following list: Plywood, Leather_Brown, Leather_Pumpkin, Leather_Black, Aluminum_Cast, Birch, Beadboard, Cardboard, Cloth_Black, Cloth_Gray, Concrete_Polished, Glazed_Glass, CorrugatedMetal, Cork, Linen_Beige, Linen_Blue, Linen_White, Mahogany, MDF, Oak, Plastic_ABS, Steel_Carbon, Steel_Stainless, Veneer_OU_Walnut, Veneer_UX_Walnut_Cherry, Veneer_Z5_Maple.
Each object name should include an appropriate adjective.
Keep in mind, objects should be disposed in the area to create the most meaningful layout possible, and they shouldn't overlap.
All objects must be within the bounds of the area size; Never place objects further than 1/2 the length or 1/2 the depth of the area from the origin.
Also keep in mind that the objects should be disposed all over the area in respect to the origin point of the area, and you can use negative values as well to display items correctly, since origin of the area is always at the center of the area.
Remember, you only generate JSON code, nothing else. It's very important.
'''
user_input="Warehouse, 1000x1000, origin at (0.0,0.0,0.0), generate a list of appropriate items in the correct places. Generate warehouse objects"
assistant_input='''{
"area_name": "Warehouse_Area",
"X": 0.0,
"Y": 0.0,
"Z": 0.0,
"area_size_X": 1000,
"area_size_Z": 1000,
"area_objects_list": [
{
"object_name": "Parts_Pallet_1",
"X": -150,
"Y": 0.0,
"Z": 250,
"Length": 100,
"Width": 100,
"Height": 10,
"Material": "Plywood"
},
{
"object_name": "Boxes_Pallet_2",
"X": -150,
"Y": 0.0,
"Z": 150,
"Length": 100,
"Width": 100,
"Height": 10,
"Material": "Plywood"
},
{
"object_name": "Industrial_Storage_Rack_1",
"X": -150,
"Y": 0.0,
"Z": 50,
"Length": 200,
"Width": 50,
"Height": 300,
"Material": "Steel_Carbon"
},
{
"object_name": "Empty_Pallet_3",
"X": -150,
"Y": 0.0,
"Z": -50,
"Length": 100,
"Width": 100,
"Height": 10,
"Material": "Plywood"
},
{
"object_name": "Yellow_Forklift_1",
"X": 50,
"Y": 0.0,
"Z": -50,
"Length": 200,
"Width": 100,
"Height": 250,
"Material": "Plastic_ABS"
},
{
"object_name": "Heavy_Duty_Forklift_2",
"X": 150,
"Y": 0.0,
"Z": -50,
"Length": 200,
"Width": 100,
"Height": 250,
"Material": "Steel_Stainless"
}
]
}'''
| 4,898 | Python | 37.880952 | 435 | 0.612903 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_radio_collection.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomRadioCollection"]
from typing import List, Optional
import omni.ui as ui
from .style import ATTR_LABEL_WIDTH
SPACING = 5
class CustomRadioCollection:
"""A custom collection of radio buttons. The group_name is on the first
line, and each label and radio button are on subsequent lines. This one
does not inherit from CustomBaseWidget because it doesn't have the same
Head label, and doesn't have a Revert button at the end.
"""
def __init__(self,
group_name: str,
labels: List[str],
model: ui.AbstractItemModel = None,
default_value: bool = True,
**kwargs):
self.__group_name = group_name
self.__labels = labels
self.__default_val = default_value
self.__images = []
self.__selection_model = ui.SimpleIntModel(default_value)
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.__images = []
self.__selection_model = None
self.__frame = None
@property
def model(self) -> Optional[ui.AbstractValueModel]:
"""The widget's model"""
if self.__selection_model:
return self.__selection_model
@model.setter
def model(self, value: int):
"""The widget's model"""
self.__selection_model.set(value)
def __getattr__(self, attr):
"""
Pretend it's self.__frame, so we have access to width/height and
callbacks.
"""
return getattr(self.__frame, attr)
def _on_value_changed(self, index: int = 0):
"""Set states of all radio buttons so only one is On."""
self.__selection_model.set_value(index)
for i, img in enumerate(self.__images):
img.checked = i == index
img.name = "radio_on" if img.checked else "radio_off"
def _build_fn(self):
"""Main meat of the widget. Draw the group_name label, label and
radio button for each row, and set up callbacks to keep them updated.
"""
with ui.VStack(spacing=SPACING):
ui.Spacer(height=2)
ui.Label(self.__group_name.upper(), name="radio_group_name",
width=ATTR_LABEL_WIDTH)
for i, label in enumerate(self.__labels):
with ui.HStack():
ui.Label(label, name="attribute_name",
width=ATTR_LABEL_WIDTH)
with ui.HStack():
with ui.VStack():
ui.Spacer(height=2)
self.__images.append(
ui.Image(
name=("radio_on" if self.__default_val == i else "radio_off"),
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
height=16, width=16, checked=self.__default_val
)
)
ui.Spacer()
ui.Spacer(height=2)
# Set up a mouse click callback for each radio button image
for i in range(len(self.__labels)):
self.__images[i].set_mouse_pressed_fn(
lambda x, y, b, m, i=i: self._on_value_changed(i))
| 3,781 | Python | 35.718446 | 98 | 0.556467 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["julia_modeler_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
ATTR_LABEL_WIDTH = 150
BLOCK_HEIGHT = 22
TAIL_WIDTH = 35
WIN_WIDTH = 400
WIN_HEIGHT = 930
# Pre-defined constants. It's possible to change them at runtime.
cl_window_bg_color = cl(0.2, 0.2, 0.2, 1.0)
cl_window_title_text = cl(.9, .9, .9, .9)
cl_collapsible_header_text = cl(.8, .8, .8, .8)
cl_collapsible_header_text_hover = cl(.95, .95, .95, 1.0)
cl_main_attr_label_text = cl(.65, .65, .65, 1.0)
cl_main_attr_label_text_hover = cl(.9, .9, .9, 1.0)
cl_multifield_label_text = cl(.65, .65, .65, 1.0)
cl_combobox_label_text = cl(.65, .65, .65, 1.0)
cl_field_bg = cl(0.18, 0.18, 0.18, 1.0)
cl_field_border = cl(1.0, 1.0, 1.0, 0.2)
cl_btn_border = cl(1.0, 1.0, 1.0, 0.4)
cl_slider_fill = cl(1.0, 1.0, 1.0, 0.3)
cl_revert_arrow_enabled = cl(.25, .5, .75, 1.0)
cl_revert_arrow_disabled = cl(.35, .35, .35, 1.0)
cl_transparent = cl(0, 0, 0, 0)
fl_main_label_attr_hspacing = 10
fl_attr_label_v_spacing = 3
fl_collapsable_group_spacing = 2
fl_outer_frame_padding = 15
fl_tail_icon_width = 15
fl_border_radius = 3
fl_border_width = 1
fl_window_title_font_size = 18
fl_field_text_font_size = 14
fl_main_label_font_size = 14
fl_multi_attr_label_font_size = 14
fl_radio_group_font_size = 14
fl_collapsable_header_font_size = 13
fl_range_text_size = 10
url_closed_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/closed.svg"
url_open_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/opened.svg"
url_revert_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/revert_arrow.svg"
url_checkbox_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_on.svg"
url_checkbox_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_off.svg"
url_radio_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_on.svg"
url_radio_btn_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_off.svg"
url_diag_bg_lines_texture = f"{EXTENSION_FOLDER_PATH}/icons/diagonal_texture_screenshot.png"
# D:\DBL\Robots\robot-exts-control\exts\control\icons\diagonal_texture_screenshot.png
print("url_revert_arrow_icon: ", EXTENSION_FOLDER_PATH, "-", url_revert_arrow_icon)
# The main style dict
julia_modeler_style = {
"Button::tool_button": {
"background_color": cl_field_bg,
"margin_height": 0,
"margin_width": 6,
"border_color": cl_btn_border,
"border_width": fl_border_width,
"font_size": fl_field_text_font_size,
},
"CollapsableFrame::group": {
"margin_height": fl_collapsable_group_spacing,
"background_color": cl_transparent,
},
# TODO: For some reason this ColorWidget style doesn't respond much, if at all (ie, border_radius, corner_flag)
"ColorWidget": {
"border_radius": fl_border_radius,
"border_color": cl(0.0, 0.0, 0.0, 0.0),
},
"Field": {
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"border_color": cl_field_border,
"border_width": fl_border_width,
},
"Field::attr_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": 2, # fl_field_text_font_size, # Hack to allow for a smaller field border until field padding works
},
"Field::attribute_color": {
"font_size": fl_field_text_font_size,
},
"Field::multi_attr_field": {
"padding": 4, # TODO: Hacky until we get padding fix
"font_size": fl_field_text_font_size,
},
"Field::path_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": fl_field_text_font_size,
},
"HeaderLine": {"color": cl(.5, .5, .5, .5)},
"Image::collapsable_opened": {
"color": cl_collapsible_header_text,
"image_url": url_open_arrow_icon,
},
"Image::collapsable_opened:hovered": {
"color": cl_collapsible_header_text_hover,
"image_url": url_open_arrow_icon,
},
"Image::collapsable_closed": {
"color": cl_collapsible_header_text,
"image_url": url_closed_arrow_icon,
},
"Image::collapsable_closed:hovered": {
"color": cl_collapsible_header_text_hover,
"image_url": url_closed_arrow_icon,
},
"Image::radio_on": {"image_url": url_radio_btn_on_icon},
"Image::radio_off": {"image_url": url_radio_btn_off_icon},
"Image::revert_arrow": {
"image_url": url_revert_arrow_icon,
"color": cl_revert_arrow_enabled,
},
"Image::revert_arrow:disabled": {"color": cl_revert_arrow_disabled},
"Image::checked": {"image_url": url_checkbox_on_icon},
"Image::unchecked": {"image_url": url_checkbox_off_icon},
"Image::slider_bg_texture": {
"image_url": url_diag_bg_lines_texture,
"border_radius": fl_border_radius,
"corner_flag": ui.CornerFlag.LEFT,
},
"Label::attribute_name": {
"alignment": ui.Alignment.RIGHT_TOP,
"margin_height": fl_attr_label_v_spacing,
"margin_width": fl_main_label_attr_hspacing,
"color": cl_main_attr_label_text,
"font_size": fl_main_label_font_size,
},
"Label::attribute_name:hovered": {"color": cl_main_attr_label_text_hover},
"Label::collapsable_name": {"font_size": fl_collapsable_header_font_size},
"Label::multi_attr_label": {
"color": cl_multifield_label_text,
"font_size": fl_multi_attr_label_font_size,
},
"Label::radio_group_name": {
"font_size": fl_radio_group_font_size,
"alignment": ui.Alignment.CENTER,
"color": cl_main_attr_label_text,
},
"Label::range_text": {
"font_size": fl_range_text_size,
},
"Label::window_title": {
"font_size": fl_window_title_font_size,
"color": cl_window_title_text,
},
"ScrollingFrame::window_bg": {
"background_color": cl_window_bg_color,
"padding": fl_outer_frame_padding,
"border_radius": 20 # Not obvious in a window, but more visible with only a frame
},
"Slider::attr_slider": {
"draw_mode": ui.SliderDrawMode.FILLED,
"padding": 0,
"color": cl_transparent,
# Meant to be transparent, but completely transparent shows opaque black instead.
"background_color": cl(0.28, 0.28, 0.28, 0.01),
"secondary_color": cl_slider_fill,
"border_radius": fl_border_radius,
"corner_flag": ui.CornerFlag.LEFT, # TODO: Not actually working yet OM-53727
},
# Combobox workarounds
"Rectangle::combobox": { # TODO: remove when ComboBox can have a border
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"border_color": cl_btn_border,
"border_width": fl_border_width,
},
"ComboBox::dropdown_menu": {
"color": cl_combobox_label_text, # label color
"padding_height": 1.25,
"margin": 2,
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"font_size": fl_field_text_font_size,
"secondary_color": cl_transparent, # button background color
},
"Rectangle::combobox_icon_cover": {"background_color": cl_field_bg}
}
| 7,702 | Python | 37.323383 | 121 | 0.636588 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_bool_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomBoolWidget"]
import omni.ui as ui
from .custom_base_widget import CustomBaseWidget
class CustomBoolWidget(CustomBaseWidget):
"""A custom checkbox or switch widget"""
def __init__(self,
model: ui.AbstractItemModel = None,
default_value: bool = True,
**kwargs):
self.__default_val = default_value
self.bool_image = None
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
@property
def value(self):
"""Return the current value of the widget."""
return self.bool_image.checked
def destroy(self):
        CustomBaseWidget.destroy(self)
self.bool_image = None
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.bool_image.checked = self.__default_val
self.bool_image.name = (
"checked" if self.bool_image.checked else "unchecked"
)
self.revert_img.enabled = False
def _on_value_changed(self):
"""Swap checkbox images and set revert_img to correct state."""
self.bool_image.checked = not self.bool_image.checked
self.bool_image.name = (
"checked" if self.bool_image.checked else "unchecked"
)
self.revert_img.enabled = self.__default_val != self.bool_image.checked
def _build_body(self):
"""Main meat of the widget. Draw the appropriate checkbox image, and
set up callback.
"""
with ui.HStack():
with ui.VStack():
# Just shift the image down slightly (2 px) so it's aligned the way
# all the other rows are.
ui.Spacer(height=2)
self.bool_image = ui.Image(
name="checked" if self.__default_val else "unchecked",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
height=16, width=16, checked=self.__default_val
)
# Let this spacer take up the rest of the Body space.
ui.Spacer()
self.bool_image.set_mouse_pressed_fn(
lambda x, y, b, m: self._on_value_changed())
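    # Usage sketch (inside some ui.Window / ui.Frame build function; the label text is illustrative):
    #   CustomBoolWidget(label="Enable Debug Draw", default_value=False)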
| 2,731 | Python | 35.918918 | 87 | 0.610033 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_multifield_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomMultifieldWidget"]
from typing import List, Optional
import omni.ui as ui
from .custom_base_widget import CustomBaseWidget
class CustomMultifieldWidget(CustomBaseWidget):
"""A custom multifield widget with a variable number of fields, and
customizable sublabels.
"""
def __init__(self,
model: ui.AbstractItemModel = None,
sublabels: Optional[List[str]] = None,
default_vals: Optional[List[float]] = None,
read_only: bool = False,
**kwargs):
self.__field_labels = sublabels or ["X", "Y", "Z"]
self.__default_vals = default_vals or [0.0] * len(self.__field_labels)
self.read_only = read_only
self.multifields = []
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.multifields = []
@property
def model(self, index: int = 0) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.multifields:
return self.multifields[index].model
@model.setter
def model(self, value: ui.AbstractItemModel, index: int = 0):
"""The widget's model"""
self.multifields[index].model = value
def _restore_default(self):
"""Restore the default values."""
if self.revert_img.enabled:
for i in range(len(self.multifields)):
model = self.multifields[i].model
model.as_float = self.__default_vals[i]
self.revert_img.enabled = False
def _on_value_changed(self, val_model: ui.SimpleFloatModel, index: int):
"""Set revert_img to correct state."""
val = val_model.as_float
self.revert_img.enabled = self.__default_vals[index] != val
def _build_body(self):
"""Main meat of the widget. Draw the multiple Fields with their
respective labels, and set up callbacks to keep them updated.
"""
with ui.HStack():
for i, (label, val) in enumerate(zip(self.__field_labels, self.__default_vals)):
with ui.HStack(spacing=3):
ui.Label(label, name="multi_attr_label", width=0)
model = ui.SimpleFloatModel(val)
# TODO: Hopefully fix height after Field padding bug is merged!
self.multifields.append(
ui.FloatField(model=model, name="multi_attr_field"))
if self.read_only:
self.multifields[i].enabled = False
if i < len(self.__default_vals) - 1:
# Only put space between fields and not after the last one
ui.Spacer(width=15)
for i, f in enumerate(self.multifields):
            # bind the current index as a default argument so each callback reports its own field
            f.model.add_value_changed_fn(lambda v, i=i: self._on_value_changed(v, i))
def update(self, multi_values: list):
"""Update the widget."""
for i, f in enumerate(self.multifields):
f.model.as_float = multi_values[i] | 3,584 | Python | 39.280898 | 92 | 0.607422 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_color_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomColorWidget"]
import re
from typing import List, Optional, Union
import omni.ui as ui
from .custom_base_widget import CustomBaseWidget
from .style import BLOCK_HEIGHT
COLOR_PICKER_WIDTH = ui.Percent(35)
FIELD_WIDTH = ui.Percent(65)
COLOR_WIDGET_NAME = "color_block"
SPACING = 4
class CustomColorWidget(CustomBaseWidget):
"""The compound widget for color input. The color picker widget model converts
its 3 RGB values into a comma-separated string, to display in the StringField.
And vice-versa.
"""
def __init__(self, *args, model=None, **kwargs):
self.__defaults: List[Union[float, int]] = [a for a in args if a is not None]
self.__strfield: Optional[ui.StringField] = None
self.__colorpicker: Optional[ui.ColorWidget] = None
self.__color_sub = None
self.__strfield_sub = None
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.__strfield = None
self.__colorpicker = None
self.__color_sub = None
self.__strfield_sub = None
@property
def model(self) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.__colorpicker:
return self.__colorpicker.model
@model.setter
def model(self, value: ui.AbstractItemModel):
"""The widget's model"""
self.__colorpicker.model = value
@staticmethod
def simplify_str(val):
s = str(round(val, 3))
s_clean = re.sub(r'0*$', '', s) # clean trailing 0's
s_clean = re.sub(r'[.]$', '', s_clean) # clean trailing .
s_clean = re.sub(r'^0', '', s_clean) # clean leading 0
return s_clean
def set_color_stringfield(self, item_model: ui.AbstractItemModel,
children: List[ui.AbstractItem]):
"""Take the colorpicker model that has 3 child RGB values,
convert them to a comma-separated string, and set the StringField value
to that string.
Args:
item_model: Colorpicker model
children: child Items of the colorpicker
"""
field_str = ", ".join([self.simplify_str(item_model.get_item_value_model(c).as_float)
for c in children])
self.__strfield.model.set_value(field_str)
if self.revert_img:
self._on_value_changed()
def set_color_widget(self, str_model: ui.SimpleStringModel,
children: List[ui.AbstractItem]):
"""Parse the new StringField value and set the ui.ColorWidget
component items to the new values.
Args:
str_model: SimpleStringModel for the StringField
children: Child Items of the ui.ColorWidget's model
"""
joined_str = str_model.get_value_as_string()
for model, comp_str in zip(children, joined_str.split(",")):
comp_str_clean = comp_str.strip()
try:
self.__colorpicker.model.get_item_value_model(model).as_float = float(comp_str_clean)
except ValueError:
# Usually happens in the middle of typing
pass
def _on_value_changed(self, *args):
"""Set revert_img to correct state."""
default_str = ", ".join([self.simplify_str(val) for val in self.__defaults])
cur_str = self.__strfield.model.as_string
self.revert_img.enabled = default_str != cur_str
def _restore_default(self):
"""Restore the default values."""
if self.revert_img.enabled:
field_str = ", ".join([self.simplify_str(val) for val in self.__defaults])
self.__strfield.model.set_value(field_str)
self.revert_img.enabled = False
def _build_body(self):
"""Main meat of the widget. Draw the colorpicker, stringfield, and
set up callbacks to keep them updated.
"""
with ui.HStack(spacing=SPACING):
# The construction of the widget depends on what the user provided,
# defaults or a model
if self.existing_model:
# the user provided a model
self.__colorpicker = ui.ColorWidget(
self.existing_model,
width=COLOR_PICKER_WIDTH,
height=BLOCK_HEIGHT,
name=COLOR_WIDGET_NAME
)
color_model = self.existing_model
else:
# the user provided a list of default values
self.__colorpicker = ui.ColorWidget(
*self.__defaults,
width=COLOR_PICKER_WIDTH,
height=BLOCK_HEIGHT,
name=COLOR_WIDGET_NAME
)
color_model = self.__colorpicker.model
self.__strfield = ui.StringField(width=FIELD_WIDTH, name="attribute_color")
self.__color_sub = self.__colorpicker.model.subscribe_item_changed_fn(
lambda m, _, children=color_model.get_item_children():
self.set_color_stringfield(m, children))
self.__strfield_sub = self.__strfield.model.subscribe_value_changed_fn(
lambda m, children=color_model.get_item_children():
self.set_color_widget(m, children))
# show data at the start
self.set_color_stringfield(self.__colorpicker.model,
children=color_model.get_item_children())
| 6,076 | Python | 39.245033 | 101 | 0.59842 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_combobox_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomComboboxWidget"]
from typing import List, Optional
import omni.ui as ui
from .custom_base_widget import CustomBaseWidget
from .style import BLOCK_HEIGHT
class CustomComboboxWidget(CustomBaseWidget):
"""A customized combobox widget"""
def __init__(self,
model: ui.AbstractItemModel = None,
options: List[str] = None,
default_value=0,
**kwargs):
self.__default_val = default_value
self.__options = options or ["1", "2", "3"]
self.__combobox_widget = None
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.__options = None
self.__combobox_widget = None
@property
def model(self) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.__combobox_widget:
return self.__combobox_widget.model
@model.setter
def model(self, value: ui.AbstractItemModel):
"""The widget's model"""
self.__combobox_widget.model = value
def _on_value_changed(self, *args):
"""Set revert_img to correct state."""
model = self.__combobox_widget.model
index = model.get_item_value_model().get_value_as_int()
self.revert_img.enabled = self.__default_val != index
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.__combobox_widget.model.get_item_value_model().set_value(
self.__default_val)
self.revert_img.enabled = False
def _build_body(self):
"""Main meat of the widget. Draw the Rectangle, Combobox, and
set up callbacks to keep them updated.
"""
with ui.HStack():
with ui.ZStack():
# TODO: Simplify when borders on ComboBoxes work in Kit!
# and remove style rule for "combobox" Rect
# Use the outline from the Rectangle for the Combobox
ui.Rectangle(name="combobox",
height=BLOCK_HEIGHT)
option_list = list(self.__options)
self.__combobox_widget = ui.ComboBox(
0, *option_list,
name="dropdown_menu",
# Abnormal height because this "transparent" combobox
# has to fit inside the Rectangle behind it
height=10
)
# Swap for different dropdown arrow image over current one
with ui.HStack():
ui.Spacer() # Keep it on the right side
with ui.VStack(width=0): # Need width=0 to keep right-aligned
ui.Spacer(height=5)
with ui.ZStack():
ui.Rectangle(width=15, height=15, name="combobox_icon_cover")
ui.Image(name="collapsable_closed", width=12, height=12)
ui.Spacer(width=2) # Right margin
ui.Spacer(width=ui.Percent(30))
self.__combobox_widget.model.add_item_changed_fn(self._on_value_changed)
| 3,724 | Python | 37.010204 | 89 | 0.585124 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_path_button.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomPathButtonWidget"]
from typing import Callable, Optional
import omni.ui as ui
from .style import ATTR_LABEL_WIDTH, BLOCK_HEIGHT
class CustomPathButtonWidget:
"""A compound widget for holding a path in a StringField, and a button
that can perform an action.
    TODO: Get text elision working in the path field, to start with "..."
"""
def __init__(self,
label: str,
path: str,
btn_label: str,
btn_callback: Callable):
self.__attr_label = label
self.__pathfield: ui.StringField = None
self.__path = path
self.__btn_label = btn_label
self.__btn = None
self.__callback = btn_callback
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.__pathfield = None
self.__btn = None
self.__callback = None
self.__frame = None
@property
def model(self) -> Optional[ui.AbstractItem]:
"""The widget's model"""
if self.__pathfield:
return self.__pathfield.model
@model.setter
def model(self, value: ui.AbstractItem):
"""The widget's model"""
self.__pathfield.model = value
def get_path(self):
return self.model.as_string
def _build_fn(self):
"""Draw all of the widget parts and set up callbacks."""
with ui.HStack():
ui.Label(
self.__attr_label,
name="attribute_name",
width=ATTR_LABEL_WIDTH
)
self.__pathfield = ui.StringField(
name="path_field",
height=BLOCK_HEIGHT,
width=ui.Fraction(2),
)
# TODO: Add clippingType=ELLIPSIS_LEFT for long paths
self.__pathfield.model.set_value(self.__path)
self.__btn = ui.Button(
self.__btn_label,
name="tool_button",
height=BLOCK_HEIGHT,
width=ui.Fraction(1),
clicked_fn=lambda path=self.get_path(): self.__callback(path),
)
| 2,599 | Python | 30.707317 | 78 | 0.576376 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_slider_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomSliderWidget"]
from typing import Optional
import omni.ui as ui
from omni.ui import color as cl
from omni.ui import constant as fl
from .custom_base_widget import CustomBaseWidget
NUM_FIELD_WIDTH = 50
SLIDER_WIDTH = ui.Percent(100)
FIELD_HEIGHT = 22 # TODO: Once Field padding is fixed, this should be 18
SPACING = 4
TEXTURE_NAME = "slider_bg_texture"
class CustomSliderWidget(CustomBaseWidget):
"""A compound widget for scalar slider input, which contains a
Slider and a Field with text input next to it.
"""
def __init__(self,
model: ui.AbstractItemModel = None,
num_type: str = "float",
min=0.0,
max=1.0,
default_val=0.0,
display_range: bool = False,
**kwargs):
self.__slider: Optional[ui.AbstractSlider] = None
self.__numberfield: Optional[ui.AbstractField] = None
self.__min = min
self.__max = max
self.__default_val = default_val
self.__num_type = num_type
self.__display_range = display_range
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.__slider = None
self.__numberfield = None
@property
def model(self) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.__slider:
return self.__slider.model
@model.setter
def model(self, value: ui.AbstractItemModel):
"""The widget's model"""
self.__slider.model = value
self.__numberfield.model = value
def _on_value_changed(self, *args):
"""Set revert_img to correct state."""
if self.__num_type == "float":
index = self.model.as_float
else:
index = self.model.as_int
self.revert_img.enabled = self.__default_val != index
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.model.set_value(self.__default_val)
self.revert_img.enabled = False
def _build_display_range(self):
"""Builds just the tiny text range under the slider."""
with ui.HStack():
ui.Label(str(self.__min), alignment=ui.Alignment.LEFT, name="range_text")
if self.__min < 0 and self.__max > 0:
# Add middle value (always 0), but it may or may not be centered,
# depending on the min/max values.
total_range = self.__max - self.__min
# subtract 25% to account for end number widths
left = 100 * abs(0 - self.__min) / total_range - 25
right = 100 * abs(self.__max - 0) / total_range - 25
ui.Spacer(width=ui.Percent(left))
ui.Label("0", alignment=ui.Alignment.CENTER, name="range_text")
ui.Spacer(width=ui.Percent(right))
else:
ui.Spacer()
ui.Label(str(self.__max), alignment=ui.Alignment.RIGHT, name="range_text")
ui.Spacer(height=.75)
def _build_body(self):
"""Main meat of the widget. Draw the Slider, display range text, Field,
and set up callbacks to keep them updated.
"""
with ui.HStack(spacing=0):
# the user provided a list of default values
with ui.VStack(spacing=3, width=ui.Fraction(3)):
with ui.ZStack():
# Put texture image here, with rounded corners, then make slider
# bg be fully transparent, and fg be gray and partially transparent
with ui.Frame(width=SLIDER_WIDTH, height=FIELD_HEIGHT,
horizontal_clipping=True):
# Spacing is negative because "tileable" texture wasn't
# perfectly tileable, so that adds some overlap to line up better.
with ui.HStack(spacing=-12):
for i in range(50): # tiling the texture
ui.Image(name=TEXTURE_NAME,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
width=50,)
slider_cls = (
ui.FloatSlider if self.__num_type == "float" else ui.IntSlider
)
self.__slider = slider_cls(
height=FIELD_HEIGHT,
min=self.__min, max=self.__max, name="attr_slider"
)
if self.__display_range:
self._build_display_range()
with ui.VStack(width=ui.Fraction(1)):
model = self.__slider.model
model.set_value(self.__default_val)
field_cls = (
ui.FloatField if self.__num_type == "float" else ui.IntField
)
# Note: This is a hack to allow for text to fill the Field space more, as there was a bug
# with Field padding. It is fixed, and will be available in the next release of Kit.
with ui.ZStack():
# height=FIELD_HEIGHT-1 to account for the border, so the field isn't
# slightly taller than the slider
ui.Rectangle(
style_type_name_override="Field",
name="attr_field",
height=FIELD_HEIGHT - 1
)
with ui.HStack(height=0):
ui.Spacer(width=2)
self.__numberfield = field_cls(
model,
height=0,
style={
"background_color": cl.transparent,
"border_color": cl.transparent,
"padding": 4,
"font_size": fl.field_text_font_size,
},
)
if self.__display_range:
ui.Spacer()
model.add_value_changed_fn(self._on_value_changed)
| 6,797 | Python | 40.451219 | 105 | 0.529351 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/custom_base_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomBaseWidget"]
from typing import Optional
import omni.ui as ui
from .style import ATTR_LABEL_WIDTH
class CustomBaseWidget:
"""The base widget for custom widgets that follow the pattern of Head (Label),
Body Widgets, Tail Widget"""
def __init__(self, *args, model=None, **kwargs):
        # "model" is captured by the keyword parameter above, so it never reaches kwargs
        self.existing_model: Optional[ui.AbstractItemModel] = model
self.revert_img = None
self.__attr_label: Optional[str] = kwargs.pop("label", "")
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.existing_model = None
self.revert_img = None
self.__attr_label = None
self.__frame = None
def __getattr__(self, attr):
"""Pretend it's self.__frame, so we have access to width/height and
callbacks.
"""
return getattr(self.__frame, attr)
def _build_head(self):
"""Build the left-most piece of the widget line (label in this case)"""
ui.Label(
self.__attr_label,
name="attribute_name",
width=ATTR_LABEL_WIDTH
)
def _build_body(self):
"""Build the custom part of the widget. Most custom widgets will
override this method, as it is where the meat of the custom widget is.
"""
ui.Spacer()
def _build_tail(self):
"""Build the right-most piece of the widget line. In this case,
we have a Revert Arrow button at the end of each widget line.
"""
with ui.HStack(width=0):
ui.Spacer(width=5)
with ui.VStack(height=0):
ui.Spacer(height=3)
self.revert_img = ui.Image(
name="revert_arrow",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
width=12,
height=13,
enabled=False,
)
ui.Spacer(width=5)
# call back for revert_img click, to restore the default value
self.revert_img.set_mouse_pressed_fn(
lambda x, y, b, m: self._restore_default())
def _build_fn(self):
"""Puts the 3 pieces together."""
with ui.HStack():
self._build_head()
self._build_body()
self._build_tail()
| 2,781 | Python | 32.518072 | 87 | 0.591514 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/numpy_utils.py | import numpy as np
import math
def quat_to_euler_angles(q, degrees: bool = False):
"""Convert quaternion to Euler XYZ angles.
Args:
q (np.ndarray): quaternion (w, x, y, z).
degrees (bool, optional): Whether output angles are in degrees. Defaults to False.
Returns:
np.ndarray: Euler XYZ angles.
"""
q = q.reshape(-1, 4)
w, x, y, z = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
roll = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
pitch = np.arcsin(2 * (w * y - z * x))
yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
if degrees:
roll = np.degrees(roll)
pitch = np.degrees(pitch)
yaw = np.degrees(yaw)
return np.stack([roll, pitch, yaw], axis=-1)
def euler_angles_to_quat(euler_angles: np.ndarray, degrees: bool = False) -> np.ndarray:
"""Convert Euler XYZ angles to quaternion.
Args:
euler_angles (np.ndarray): Euler XYZ angles.
degrees (bool, optional): Whether input angles are in degrees. Defaults to False.
Returns:
np.ndarray: quaternion (w, x, y, z).
"""
roll, pitch, yaw = euler_angles
if degrees:
roll = math.radians(roll)
pitch = math.radians(pitch)
yaw = math.radians(yaw)
cr = np.cos(roll / 2.0)
sr = np.sin(roll / 2.0)
cy = np.cos(yaw / 2.0)
sy = np.sin(yaw / 2.0)
cp = np.cos(pitch / 2.0)
sp = np.sin(pitch / 2.0)
w = (cr * cp * cy) + (sr * sp * sy)
x = (sr * cp * cy) - (cr * sp * sy)
y = (cr * sp * cy) + (sr * cp * sy)
z = (cr * cp * sy) - (sr * sp * cy)
return np.array([w, x, y, z])
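# Round-trip sketch (illustrative values; both helpers use the (w, x, y, z) ordering):
#   q = euler_angles_to_quat(np.array([30.0, 0.0, 0.0]), degrees=True)
#   quat_to_euler_angles(q, degrees=True)   # -> approximately [[30., 0., 0.]]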
def orientation_error(desired, current):
cc = quat_conjugate(current)
q_r = quat_mul(desired, cc)
return q_r[:, 0:3] * np.sign(q_r[:, 3])[:, None]
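# Note: orientation_error above and the quat_* helpers below index quaternion components as
# (x, y, z, w), whereas quat_to_euler_angles / euler_angles_to_quat use (w, x, y, z);
# callers must convert between the two orderings when mixing them.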
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = np.stack([x, y, z, w], axis=-1).reshape(shape)
return quat
def normalize(x, eps: float = 1e-9):
return x / np.clip(np.linalg.norm(x, axis=-1), a_min=eps, a_max=None)[:, None]
def quat_unit(a):
return normalize(a)
def quat_from_angle_axis(angle, axis):
theta = (angle / 2)[:, None]
xyz = normalize(axis) * np.sin(theta)
w = np.cos(theta)
return quat_unit(np.concatenate([xyz, w], axis=-1))
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0)[:, None]
b = np.cross(q_vec, v) * q_w[:, None] * 2.0
c = q_vec * np.sum(q_vec * v, axis=1).reshape(shape[0], -1) * 2.0
return a + b + c
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape)
def quat_axis(q, axis=0):
basis_vec = np.zeros((q.shape[0], 3))
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
| 3,329 | Python | 27.706896 | 90 | 0.50766 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova_gripper.py | from typing import List, Callable
import numpy as np
from omni.isaac.manipulators.grippers.gripper import Gripper
from omni.isaac.core.utils.types import ArticulationAction
import omni.kit.app
class KinovaGripper(Gripper):
def __init__(
self,
end_effector_prim_path: str,
joint_prim_names: List[str],
joint_opened_positions: np.ndarray,
joint_closed_positions: np.ndarray,
action_deltas: np.ndarray = None,
) -> None:
Gripper.__init__(self, end_effector_prim_path=end_effector_prim_path)
self._joint_prim_names = joint_prim_names
self._gripper_joint_num = 6
self._joint_dof_indicies = np.array([None] * self._gripper_joint_num)
self._joint_opened_positions = joint_opened_positions
self._joint_closed_positions = joint_closed_positions
self._get_joint_positions_func = None
self._set_joint_positions_func = None
self._action_deltas = action_deltas
self._articulation_num_dofs = None
self._close_ratio = 1.0
return
@property
def joint_opened_positions(self) -> np.ndarray:
"""
Returns:
            np.ndarray: joint positions of the gripper joints when opened.
"""
return self._joint_opened_positions
@property
def joint_closed_positions(self) -> np.ndarray:
"""
Returns:
            np.ndarray: joint positions of the gripper joints when closed.
"""
return self._joint_closed_positions
@property
def joint_dof_indicies(self) -> np.ndarray:
"""
Returns:
            np.ndarray: dof indices of the gripper joints in the articulation.
"""
return self._joint_dof_indicies
@property
def joint_prim_names(self) -> List[str]:
"""
Returns:
            List[str]: prim names of the gripper joints.
"""
return self._joint_prim_names
def initialize(
self,
articulation_apply_action_func: Callable,
get_joint_positions_func: Callable,
set_joint_positions_func: Callable,
dof_names: List,
physics_sim_view: omni.physics.tensors.SimulationView = None,
) -> None:
"""Create a physics simulation view if not passed and creates a rigid prim view using physX tensor api.
This needs to be called after each hard reset (i.e stop + play on the timeline) before interacting with any
of the functions of this class.
Args:
articulation_apply_action_func (Callable): apply_action function from the Articulation class.
get_joint_positions_func (Callable): get_joint_positions function from the Articulation class.
set_joint_positions_func (Callable): set_joint_positions function from the Articulation class.
dof_names (List): dof names from the Articulation class.
physics_sim_view (omni.physics.tensors.SimulationView, optional): current physics simulation view. Defaults to None
Raises:
Exception: _description_
"""
Gripper.initialize(self, physics_sim_view=physics_sim_view)
self._get_joint_positions_func = get_joint_positions_func
self._articulation_num_dofs = len(dof_names)
for index in range(len(dof_names)):
if dof_names[index] in self._joint_prim_names:
which_index = self._joint_prim_names.index(dof_names[index])
self._joint_dof_indicies[which_index] = index
# make sure that all gripper dof names were resolved
if None in self._joint_dof_indicies:
raise Exception("Not all gripper dof names were resolved to dof handles and dof indices.")
self._articulation_apply_action_func = articulation_apply_action_func
current_joint_positions = get_joint_positions_func()
if self._default_state is None:
self._default_state = np.array(
[0.0] * self._gripper_joint_num
)
self._set_joint_positions_func = set_joint_positions_func
return
def open(self) -> None:
"""Applies actions to the articulation that opens the gripper (ex: to release an object held).
"""
self._articulation_apply_action_func(self.forward(action="open"))
return
def close(self) -> None:
"""Applies actions to the articulation that closes the gripper (ex: to hold an object).
"""
self._articulation_apply_action_func(self.forward(action="close"))
return
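    # Usage sketch (hypothetical joint names and positions for a Robotiq-style 6-joint gripper;
    # the owning Articulation must call initialize() before open()/close() can be forwarded):
    #   gripper = KinovaGripper(
    #       end_effector_prim_path="/World/kinova/end_effector",
    #       joint_prim_names=["finger_joint", "right_outer_knuckle_joint",
    #                         "left_inner_knuckle_joint", "right_inner_knuckle_joint",
    #                         "left_inner_finger_joint", "right_inner_finger_joint"],
    #       joint_opened_positions=np.zeros(6),
    #       joint_closed_positions=np.array([0.8, -0.8, 0.8, -0.8, -0.8, 0.8]))
    #   gripper.close()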
def set_action_deltas(self, value: np.ndarray) -> None:
"""
Args:
            value (np.ndarray): deltas to apply to the gripper joint positions when opening or closing
                               the gripper (one entry per gripper joint). Defaults to None.
"""
self._action_deltas = value
return
def get_action_deltas(self) -> np.ndarray:
"""
Returns:
            np.ndarray: deltas that will be applied to the gripper joint positions when opening or closing
                       the gripper (one entry per gripper joint). Defaults to None.
"""
return self._action_deltas
def set_default_state(self, joint_positions: np.ndarray) -> None:
"""Sets the default state of the gripper
Args:
            joint_positions (np.ndarray): default joint positions of the gripper joints.
"""
self._default_state = joint_positions
return
def get_default_state(self) -> np.ndarray:
"""Gets the default state of the gripper
Returns:
            np.ndarray: default joint positions of the gripper joints.
"""
return self._default_state
def post_reset(self):
Gripper.post_reset(self)
self._set_joint_positions_func(
positions=self._default_state, joint_indices=list(self._joint_dof_indicies)
)
return
def set_joint_positions(self, positions: np.ndarray) -> None:
"""
Args:
            positions (np.ndarray): joint positions of the gripper joints.
"""
self._set_joint_positions_func(
positions=positions, joint_indices=list(self._joint_dof_indicies)
)
return
def get_joint_positions(self) -> np.ndarray:
"""
Returns:
            np.ndarray: joint positions of the gripper joints.
"""
return self._get_joint_positions_func(joint_indices=list(self._joint_dof_indicies))
def forward(self, action: str) -> ArticulationAction:
"""calculates the ArticulationAction for all of the articulation joints that corresponds to "open"
or "close" actions.
Args:
action (str): "open" or "close" as an abstract action.
Raises:
Exception: _description_
Returns:
ArticulationAction: articulation action to be passed to the articulation itself
(includes all joints of the articulation).
"""
if action == "open":
target_joint_positions = [None] * self._articulation_num_dofs
if self._action_deltas is None:
for i in range(self._gripper_joint_num):
target_joint_positions[self._joint_dof_indicies[i]] = self._joint_opened_positions[i]
else:
current_joint_positions = self._get_joint_positions_func()
for i in range(self._gripper_joint_num):
current_finger_position = current_joint_positions[self._joint_dof_indicies[i]]
next_position = self.regulate_joint_position(
current_finger_position + self._action_deltas[i],
self._joint_opened_positions[i],
self._joint_closed_positions[i]
)
target_joint_positions[self._joint_dof_indicies[i]] = (
next_position
)
elif action == "close":
target_joint_positions = [None] * self._articulation_num_dofs
if self._action_deltas is None:
for i in range(self._gripper_joint_num):
target_joint_positions[self._joint_dof_indicies[i]] = self._joint_closed_positions[i] * self._close_ratio
else:
current_joint_positions = self._get_joint_positions_func()
for i in range(self._gripper_joint_num):
current_finger_position = current_joint_positions[self._joint_dof_indicies[i]]
next_position = self.regulate_joint_position(
current_finger_position - self._action_deltas[i],
self._joint_opened_positions[i],
self._joint_closed_positions[i]
)
target_joint_positions[self._joint_dof_indicies[i]] = (
next_position
)
else:
            raise Exception("action {} is not defined for KinovaGripper".format(action))
# print("target_joint_positions", target_joint_positions)
return ArticulationAction(joint_positions=target_joint_positions)
def regulate_joint_position(self, joint_pos, open_pos, close_pos):
"""
Regulates the joint position to be within the range of the open and close positions.
"""
if open_pos > close_pos:
open_pos, close_pos = close_pos, open_pos
if joint_pos < open_pos:
joint_pos = open_pos
elif joint_pos > close_pos:
joint_pos = close_pos
return joint_pos
def apply_action(self, control_actions: ArticulationAction) -> None:
"""Applies actions to all the joints of an articulation that corresponds to the ArticulationAction of the finger joints only.
Args:
            control_actions (ArticulationAction): ArticulationAction for the gripper joints.
"""
joint_actions = ArticulationAction()
if control_actions.joint_positions is not None:
joint_actions.joint_positions = [None] * self._articulation_num_dofs
for i in range(self._gripper_joint_num):
joint_actions.joint_positions[self._joint_dof_indicies[i]] = control_actions.joint_positions[i]
# if control_actions.joint_velocities is not None:
# joint_actions.joint_velocities = [None] * self._articulation_num_dofs
# joint_actions.joint_velocities[self._joint_dof_indicies[0]] = control_actions.joint_velocities[0]
# joint_actions.joint_velocities[self._joint_dof_indicies[1]] = control_actions.joint_velocities[1]
# if control_actions.joint_efforts is not None:
# joint_actions.joint_efforts = [None] * self._articulation_num_dofs
# joint_actions.joint_efforts[self._joint_dof_indicies[0]] = control_actions.joint_efforts[0]
# joint_actions.joint_efforts[self._joint_dof_indicies[1]] = control_actions.joint_efforts[1]
self._articulation_apply_action_func(control_actions=joint_actions)
return
def set_close_ratio(self, ratio):
"""
Sets the ratio of the closed position of the gripper.
"""
self._close_ratio = ratio | 11,753 | Python | 42.372694 | 135 | 0.610568 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow_controller.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.motion_generation as mg
from omni.isaac.core.articulations import Articulation
from .utils import process_policy_config, EXTENSION_FOLDER_PATH
class RMPFlowController(mg.MotionPolicyController):
"""[summary]
Args:
name (str): [description]
robot_articulation (Articulation): [description]
physics_dt (float, optional): [description]. Defaults to 1.0/60.0.
"""
def __init__(self, name: str, robot_articulation: Articulation, physics_dt: float = 1.0 / 60.0) -> None:
# print("EXTENSION_FOLDER_PATH: ", EXTENSION_FOLDER_PATH)
self.rmp_flow_config = process_policy_config(EXTENSION_FOLDER_PATH + "/control/kinova/rmpflow/config7.json")
self.rmp_flow = mg.lula.motion_policies.RmpFlow(**self.rmp_flow_config)
self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmp_flow, physics_dt)
mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp)
self._default_position, self._default_orientation = (
self._articulation_motion_policy._robot_articulation.get_world_pose()
)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
return
def reset(self):
mg.MotionPolicyController.reset(self)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
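    # Illustrative usage sketch (names and target values assumed, not guaranteed by this file):
    # the controller is created after the robot articulation is initialized and queried once per
    # physics step, e.g.:
    #   controller = RMPFlowController(name="cspace_controller", robot_articulation=kinova)
    #   action = controller.forward(
    #       target_end_effector_position=np.array([0.4, 0.0, 0.4]),
    #       target_end_effector_orientation=np.array([0.5, 0.5, 0.5, 0.5]))
    #   kinova.apply_action(action)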
| 2,003 | Python | 43.533332 | 116 | 0.707439 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova_socket.py | # send message to Kinova Server to control the real robot
import socket
class KinovaClient():
def __init__(self, HOST = "localhost", PORT = 9999) -> None:
# SOCK_DGRAM is the socket type to use for UDP sockets
self.host = HOST
self.port = PORT
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# send a test message to the server
message = "Hello: server!"
self.sock.sendto(message.encode(), (self.host, self.port))
self.sock.settimeout(10)
# wait for a response from the server
data, addr = self.sock.recvfrom(1024)
print("Socket Server and Client initialized")
# # check if the response is correct
# if data.decode() == "Hello, client!":
# print("Connected to UDPServer")
# else:
# print("Failed to connect to UDPServer")
def send_message(self, command: str, message: str):
print("Sent: {}".format(message))
self.sock.sendto(bytes(command + ":" + message + "\n", "utf-8"), (self.host, self.port))
received = str(self.sock.recv(1024), "utf-8")
print("received: {}".format(received))
return received | 1,222 | Python | 36.060605 | 96 | 0.590835 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/utils.py | import os
import json
import numpy as np
from pathlib import Path
import omni.kit.app
import omni.usd
from pxr import UsdGeom, Gf, Usd, UsdPhysics
EXTENSION_FOLDER_PATH = str(Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
).resolve())
def fix_damping_and_stiffness(kinova_path = "/World/kinova_gen3_7_hand/kinova", stiffness = 1e3, damping = 1e6):
print("fixing damping and stiffness")
# stiffness_name = "drive:angular:physics:stiffness"
# damping_name = "drive:angular:physics:damping"
stage = omni.usd.get_context().get_stage()
joint_prim_paths = [
"/base_link/Actuator1",
"/shoulder_link/Actuator2",
"/half_arm_1_link/Actuator3",
"/half_arm_2_link/Actuator4",
"/forearm_link/Actuator5",
"/spherical_wrist_1_link/Actuator6",
"/spherical_wrist_2_link/Actuator7",
]
for joint_prim_path in joint_prim_paths:
joint_prim = stage.GetPrimAtPath(kinova_path + joint_prim_path)
joint_driver = UsdPhysics.DriveAPI.Get(joint_prim, "angular")
joint_driver.GetStiffnessAttr().Set(stiffness)
joint_driver.GetDampingAttr().Set(damping)
def process_policy_config(mg_config_file):
"""
Process the policy config file to get the absolute path of the assets"""
mp_config_dir = os.path.dirname(mg_config_file)
with open(mg_config_file) as config_file:
config = json.load(config_file)
rel_assets = config.get("relative_asset_paths", {})
for k, v in rel_assets.items():
config[k] = os.path.join(mp_config_dir, v)
del config["relative_asset_paths"]
return config
def regulate_degree(degree: float, min_value: float = 0, max_value: float = 360, indegree: bool = True):
"""
Regulate the degree to be in the range of [min_value, max_value]"""
if not indegree:
degree = np.rad2deg(degree)
if degree < min_value:
degree += 360
elif degree > max_value:
degree -= 360
return degree
def get_transform_mat_from_pos_rot(p, q):
"""
Get transform matrix from position and rotation
"""
trans = Gf.Transform()
rotation = Gf.Rotation(Gf.Quatd(float(q[0]), float(q[1]), float(q[2]), float(q[3])))
trans.SetRotation(rotation)
trans.SetTranslation(Gf.Vec3d(float(p[0]), float(p[1]), float(p[2])))
return trans.GetMatrix()
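# Illustrative usage sketch (offset values assumed, not guaranteed by this file): Gf matrices compose
# with the offset first and the base second, which is how coffee_controller.py turns an offset expressed
# in a prim's local frame into a world-space target pose:
#   base_mat = get_transform_mat_from_pos_rot(base_world_pos, base_world_rot)
#   offset_mat = get_transform_mat_from_pos_rot([0, -0.4, 0], [-0.5, 0.5, 0.5, 0.5])
#   target_mat = offset_mat * base_mat
#   target_pos = target_mat.ExtractTranslation()
#   target_rot = target_mat.ExtractRotationQuat()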
def get_prim_pickup_transform(stage, prim_path: str, offset: Gf.Vec3d):
"""
Get the pickup transform of the prim with offset"""
prim = stage.GetPrimAtPath(prim_path)
xformable = UsdGeom.Xformable(prim)
mat0 = xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
target_pos = mat0.ExtractTranslation()
xaxis = -offset / offset.GetLength()
yaxis = Gf.Cross(Gf.Vec3d(0, 0, 1), xaxis)
m = Gf.Matrix4d()
m.SetRow(0, Gf.Vec4d(xaxis[0], yaxis[0], 0, 0.0))
m.SetRow(1, Gf.Vec4d(xaxis[1], yaxis[1], 0, 0.0))
m.SetRow(2, Gf.Vec4d(xaxis[2], yaxis[2], 1, 0.0))
m.SetRow(3, Gf.Vec4d(0.0, 0.0, 0.0, 1.0))
eye_pos = target_pos + offset
m = m * Gf.Matrix4d().SetTranslate(eye_pos)
print("translation: ", eye_pos)
print("rotation: ", m.ExtractRotationQuat())
return eye_pos, m.ExtractRotationQuat()
def generate_slerp_action_sequence(ori_pos, ori_quat, rel_rot,
sub_steps = 5, sub_duration = 50,
slerp_last = True, slerp_offset = [0 ,0, 0]):
"""
Generate slerp action sequence from relative position and rotation
"""
slerp_action_sequence = []
ori_pos = Gf.Vec3d(ori_pos[0], ori_pos[1], ori_pos[2])
rel_quat = Gf.Quatd(float(rel_rot[0]), float(rel_rot[1]), float(rel_rot[2]), float(rel_rot[3])).GetNormalized()
ori_quat = Gf.Quatd(float(ori_quat[0]), float(ori_quat[1]), float(ori_quat[2]), float(ori_quat[3])).GetNormalized()
identity_quat = Gf.Quatd(1, 0, 0, 0)
for i in range(1, sub_steps):
t = (i + int(slerp_last)) / sub_steps
quat_rel = Gf.Slerp(t, identity_quat, rel_quat)
p = (quat_rel * Gf.Quatd(0, ori_pos + Gf.Vec3d(*slerp_offset) * i) * quat_rel.GetInverse()).GetImaginary()
q = quat_rel * ori_quat
slerp_action_sequence.append(
{
'action_type': 'move',
'duration': sub_duration,
'position': [p[0], p[1], p[2]],
'orientation': [q.GetReal(), q.GetImaginary()[0], q.GetImaginary()[1], q.GetImaginary()[2]]
},
)
return slerp_action_sequence | 4,652 | Python | 36.224 | 119 | 0.60877 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova.py | from typing import Optional, List
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
import carb
from pxr import UsdPhysics
from .kinova_gripper import KinovaGripper
class Kinova(Robot):
def __init__(
self,
prim_path: str = "/World/kinova",
name: str = "kinova_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
deltas: Optional[np.ndarray] = None,
) -> None:
# self.prim_path = prim_path
prim = get_prim_at_path(prim_path)
assert prim.IsValid(), "Please load Kinova into the environment first"
self._end_effector = None
self._gripper = None
self._end_effector_prim_name = end_effector_prim_name
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
self._end_effector_prim_path = prim_path + "/robotiq_85_base_link"
gripper_dof_names = [
"finger_joint", "right_outer_knuckle_joint",
"left_inner_knuckle_joint", "right_inner_knuckle_joint",
#"left_outer_finger_joint", "right_outer_finger_joint",
"left_inner_finger_joint", "right_inner_finger_joint",
]
gripper_open_position = np.zeros(6)
gripper_closed_position = np.array([0.8757, -0.8757, 0.8757, -0.8757, -0.8757, 0.8757])
deltas = None # -gripper_closed_position / 5.0
self._gripper = KinovaGripper(
end_effector_prim_path=self._end_effector_prim_path,
joint_prim_names=gripper_dof_names,
joint_opened_positions=gripper_open_position,
joint_closed_positions=gripper_closed_position,
action_deltas=deltas,
)
return
@property
def end_effector(self) -> RigidPrim:
"""[summary]
Returns:
RigidPrim: [description]
"""
return self._end_effector
@property
def gripper(self) -> KinovaGripper:
"""[summary]
Returns:
ParallelGripper: [description]
"""
return self._gripper
def initialize(self, physics_sim_view=None) -> None:
"""[summary]
"""
super().initialize(physics_sim_view)
self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector")
self._end_effector.initialize(physics_sim_view)
self._gripper.initialize(
physics_sim_view=physics_sim_view,
articulation_apply_action_func=self.apply_action,
get_joint_positions_func=self.get_joint_positions,
set_joint_positions_func=self.set_joint_positions,
dof_names=self.dof_names,
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
self._gripper.post_reset()
for i in range(self.gripper._gripper_joint_num):
self._articulation_controller.switch_dof_control_mode(
dof_index=self.gripper.joint_dof_indicies[i], mode="position"
)
return
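    # Illustrative usage sketch (stage and prim names assumed, not guaranteed by this file): the Kinova
    # USD is expected to already be referenced into the stage before wrapping it, e.g.:
    #   from omni.isaac.core import World
    #   world = World()
    #   kinova = world.scene.add(Kinova(prim_path="/World/kinova_gen3_7_hand/kinova", name="kinova_robot"))
    #   world.reset()   # triggers initialize() and post_reset() on the robot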
    @staticmethod
    def fix_damping_and_stiffness(prim_path = "/World/kinova_gen3_7_hand/kinova", stiffness = 1e3, damping = 1e6):
print("fixing damping and stiffness")
# stiffness_name = "drive:angular:physics:stiffness"
# damping_name = "drive:angular:physics:damping"
joint_prim_paths = [
"/base_link/Actuator1",
"/shoulder_link/Actuator2",
"/half_arm_1_link/Actuator3",
"/half_arm_2_link/Actuator4",
"/forearm_link/Actuator5",
"/spherical_wrist_1_link/Actuator6",
"/spherical_wrist_2_link/Actuator7",
]
for joint_prim_path in joint_prim_paths:
joint_prim = get_prim_at_path(prim_path + joint_prim_path)
joint_driver = UsdPhysics.DriveAPI.Get(joint_prim, "angular")
joint_driver.GetStiffnessAttr().Set(stiffness)
joint_driver.GetDampingAttr().Set(damping)
| 4,842 | Python | 36.835937 | 116 | 0.584882 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/coffee_config.py | # task config for making coffee
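# Structure of each entry below (inferred from how coffee_controller.py consumes it; not documented
# in the original file):
#   'base_prim': prim path whose world pose anchors every step's offset (None means the robot base)
#   'steps': ordered list of actions, each with
#       'action_type': 'move' (Cartesian target), 'close'/'open' (gripper), or 'slerp' (interpolated rotation)
#       'duration': number of physics frames allotted to the step
#       'position'/'orientation': offset pose relative to base_prim, orientation given as wxyz
#       'ratio' (close/open only); 'relative_rotation', 'sub_steps', 'slerp_last', 'slerp_offset' (slerp only)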
kinova_action_config = {
"go_home": {
'base_prim': None,
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0.45666, 0.0, 0.43371],
'orientation': [0.5, 0.5, 0.5, 0.5], # wxyz
}
]
},
"open_coffee_machine_handle": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0, -0.4, 0],
'orientation': [-0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.29, 0],
'orientation': [-0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 100,
'ratio': 0.8,
},
# {
# 'action_type': 'slerp',
# 'duration': 300,
# 'sub_steps': 10,
# 'position': [0, -0.28, 0],
# 'orientation': [-0.5, 0.5, 0.5, 0.5],
# 'relative_rotation': [0.7372773, -0.6755902, 0, 0],
# 'slerp_last': True,
# 'slerp_offset': [0, -0.001, 0]
# },
# {
# 'action_type': 'close', #open
# 'duration': 100,
# 'ratio': 0.05,
# },
{
'action_type': 'move',
'duration': 200,
'position': [0.3, -0.5, 0.3],
'orientation': [0, 0.7071, 0.7071, 0],
},
]
},
"close_coffee_machine_handle": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'close', #open
'duration': 30,
'ratio': 0.95,
},
{
'action_type': 'move',
'duration': 200,
'position': [0, 0, 0.27],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'slerp',
'duration': 140,
'sub_steps': 7,
'position': [0, 0, 0.27],
'orientation': [0, 0.7071, 0.7071, 0],
'relative_rotation': [0.7372773, 0.675590, 0, 0],
'slerp_last': False,
'slerp_offset': [0, 0, 0]
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.35],
'orientation': [-0.4545, 0.5416, 0.5416, 0.4545],
},
]
},
"press_coffee_machine_button": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'close', #open
'duration': 30,
'ratio': 1.1,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.09, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 30,
'position': [-0, -0.09, 0.18],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 50,
'position': [-0, -0.09, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 30,
'position': [-0, -0.3, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
]
},
"pick_up_capsule": {
'base_prim': '/World/k_cup',
'steps':[
{
'action_type': 'move',
'duration': 300,
'position': [-0.12, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.1],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.03],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 30,
'ratio': 0.6,
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
]
},
"pick_up_papercup": {
'base_prim': '/World/papercup',
'steps':[
{
'action_type': 'move',
'duration': 300,
'position': [-0.15, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.1],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.00],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 60,
'ratio': 0.4,
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
]
},
"move_capsule_to_coffee_machine": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.02],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 60,
'position': [0, -0.218, 0.02],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'close', #open
'duration': 60,
'ratio': 0.05,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.025],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.2, 0.4],
'orientation': [0, 0, 0.7071, 0.7071],
},
]
},
"move_papercup_to_coffee_machine": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0, -0.4, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.25, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'close', #open
'duration': 100,
'ratio': 0.05,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.4, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.4, 0.4],
'orientation': [0, 0, 0.7071, 0.7071],
},
]
},
##################################### blendid #########################################
"pick_up_box": {
'base_prim': '/World/tea_tower',
'steps':[
{
'action_type': 'close', #open
'duration': 50,
'ratio': 0.0,
},
{
'action_type': 'move',
'duration': 200,
'position': [-0.38, 0.0, 0.15],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.28, 0.0, 0.15],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
# {
# 'action_type': 'move',
# 'duration': 100,
# 'position': [-0.15, 0.0, 0.00],
# 'orientation': [0.5, 0.5, 0.5, 0.5],
# },
# {
# 'action_type': 'close',
# 'duration': 60,
# 'ratio': 0.4,
# },
# {
# 'action_type': 'move',
# 'duration': 100,
# 'position': [-0.15, 0.0, 0.3],
# 'orientation': [0.5, 0.5, 0.5, 0.5],
# },
]
},
} | 9,505 | Python | 29.565916 | 91 | 0.312151 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/coffee_controller.py | import omni.usd
from omni.isaac.core.controllers import BaseController
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.core.prims import XFormPrim
from .kinova import Kinova
from .rmpflow_controller import RMPFlowController
import numpy as np
from .numpy_utils import *
from .utils import regulate_degree, get_transform_mat_from_pos_rot, generate_slerp_action_sequence
import asyncio
from .kinova_socket import KinovaClient
from .coffee_config import kinova_action_config
class CoffeeMakerController(BaseController):
def __init__(self, name: str, kinova: Kinova, connect_server = False) -> None:
BaseController.__init__(self, name=name)
# env
self.stage = omni.usd.get_context().get_stage()
# event
self.event = "move" # action event
self.total_event_count = 0 # event time
self.event_elapsed = 0 # event elapsed time
self.event_pool = [] # event pool
self.robot = kinova
self.gripper = self.robot.gripper
self.cs_controller = RMPFlowController(name="cspace_controller", robot_articulation=self.robot)
# TODO:find height
self.ee_pos_target = np.array([0.0, 0.0, 1.0])
self.ee_ori_target = np.array([1.0, 0.0, 0.0, 0.0])
# connection
self.connect_server = connect_server
if connect_server:
self.client = KinovaClient()
self.sending_message = False
# add go home default action
# self.apply_high_level_action()
# self.sending_message = False
def add_event_to_pool(self, event: str, elapsed: int,
ee_pos: np.ndarray, ee_ori: np.ndarray, gripper_ratio: float = 1.0):
self.event_pool.append([event, elapsed, ee_pos, ee_ori, gripper_ratio])
def update_ee_target(self, pos, ori):
"""
Update End-Effector Target position and orientation
"""
self.ee_pos_target = pos
self.ee_ori_target = ori
def update_event(self, event: str):
"""
Update robot high-level event
"""
if event != self.event:
self.event = event
self.total_event_count = 0
################################## sync robot ##################################
def synchronize_robot(self):
"""
        Send the current joint positions and gripper ratio to the Server to drive the real robot
"""
if not self.sending_message:
# get joint positions and gripper degree
all_positions = self.robot.get_joint_positions()
gripper_degree = all_positions[7] / 0.8757
joint_positions = [regulate_degree(e, indegree=False) for e in all_positions[:7]]
joint_positions = joint_positions + [gripper_degree]
assert len(joint_positions) == 8, "Invalid number of joint positions"
# send message
message = " ".join([str(e) for e in joint_positions])
self.sending_message = True
self.client.send_message("Control", message)
self.sending_message = False
def obtain_robot_state(self):
"""
Get robot state from the Server
"""
if not self.sending_message:
self.sending_message = True
answer_message = self.client.send_message("GetJoints", "NA")
self.sending_message = False
return [float(e) for e in answer_message.split(" ")]
def apply_high_level_action(self, action_name: str = "go_home"):
"""
Apply high-level action to the robot
"""
action = kinova_action_config[action_name]
if action['base_prim'] is None:
base_world_pos, base_world_rot = self.robot.get_world_pose()
else:
base_prim = XFormPrim(action['base_prim'])
base_world_pos, base_world_rot = base_prim.get_world_pose()
base_mat = get_transform_mat_from_pos_rot(base_world_pos, base_world_rot)
print("base_mat", base_mat)
for action_step in action['steps']:
step_type = action_step['action_type']
duration = action_step['duration']
if step_type == "move":
offset_mat = get_transform_mat_from_pos_rot(action_step['position'], action_step['orientation'])
print("offset_mat", offset_mat)
target_mat = offset_mat * base_mat
print("target_mat", target_mat.ExtractTranslation(), target_mat.ExtractRotationQuat())
target_pos = target_mat.ExtractTranslation()
target_rot = target_mat.ExtractRotationQuat()
pos_array = np.array([target_pos[0], target_pos[1], target_pos[2]])
rot_array = np.array([target_rot.GetReal(), target_rot.GetImaginary()[0], target_rot.GetImaginary()[1], target_rot.GetImaginary()[2]])
self.add_event_to_pool(step_type, duration, pos_array, rot_array)
elif step_type in ["close", "open"]:
gripper_ratio = action_step['ratio']
self.add_event_to_pool(step_type, duration, None, None, gripper_ratio)
elif step_type == "slerp":
slerp_action_sequence = generate_slerp_action_sequence(
action_step['position'],
action_step['orientation'],
action_step['relative_rotation'],
sub_steps=action_step['sub_steps'],
sub_duration=action_step['duration'] // action_step['sub_steps'],
slerp_last=action_step['slerp_last'],
slerp_offset=action_step['slerp_offset']
)
print("action_sequence", slerp_action_sequence)
for sub_action in slerp_action_sequence:
offset_mat = get_transform_mat_from_pos_rot(sub_action['position'], sub_action['orientation'])
target_mat = offset_mat * base_mat
target_pos = target_mat.ExtractTranslation()
target_rot = target_mat.ExtractRotationQuat()
pos_array = np.array([target_pos[0], target_pos[1], target_pos[2]])
rot_array = np.array([target_rot.GetReal(), target_rot.GetImaginary()[0], target_rot.GetImaginary()[1], target_rot.GetImaginary()[2]])
self.add_event_to_pool(sub_action['action_type'], sub_action['duration'], pos_array, rot_array)
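    # Illustrative usage sketch (names assumed, not guaranteed by this file): a driver typically queues
    # a high-level action once and then steps the controller every physics frame:
    #   controller = CoffeeMakerController("coffee_controller", kinova, connect_server=False)
    #   controller.apply_high_level_action("pick_up_capsule")
    #   # inside the physics step callback:
    #   controller.forward()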
def forward(self):
"""
Main function to update the robot
"""
# update event
if len(self.event_pool) > 0:
if self.event_elapsed <= 0:
event, elapsed, ee_pos, ee_ori, gripper_ratio = self.event_pool.pop(0)
print("event, elapsed, ee_pos, ee_ori ", event, elapsed, ee_pos, ee_ori, gripper_ratio)
self.update_event(event)
if self.event == "move":
self.update_ee_target(ee_pos, ee_ori)
elif self.event == "close":
self.gripper.set_close_ratio(gripper_ratio)
if self.connect_server:
self.synchronize_robot()
self.event_elapsed = elapsed
else:
if self.connect_server:
if self.total_event_count > 200 and self.total_event_count % (60 * 3) == 0:
self.synchronize_robot()
# print("coffee control event", self.event, self.event_elapsed)
if self.event == "move":
actions = self.cs_controller.forward(
target_end_effector_position=self.ee_pos_target,
target_end_effector_orientation=self.ee_ori_target)
elif self.event == "close":
actions = self.gripper.forward(action="close")
elif self.event == "open":
actions = self.gripper.forward(action="open")
self.robot.apply_action(actions)
# from omni.isaac.core.utils.types import ArticulationAction
# joint_actions = ArticulationAction()
# joint_actions.joint_positions = [0, 15, 180, -130, 0, 55, 90] + [0.8] * 6
# for i in range(13):
# joint_actions.joint_positions[i] = np.deg2rad(joint_actions.joint_positions[i])
# print("joint_actions", joint_actions)
# self.robot.apply_action(joint_actions)
self.total_event_count += 1 # update event time
self.event_elapsed -= 1 # update event elapsed time
# synchronize
# if self.connect_server:
# if self.total_event_count % 60 == 0:
# self.synchronize_robot()
# return actions
| 8,724 | Python | 39.393518 | 154 | 0.568317 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/validator.py | # validate real and simulated robot
class Validator():
def __init__(self, robot, robot_client) -> None:
"""
Initialize the validator
"""
self.robot = robot
self.robot_client = robot_client
| 246 | Python | 16.642856 | 52 | 0.552846 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow/kinova_rmpflow_common.yaml | # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
joint_limit_buffers: [.01, .01, .01, .01, .01, .01]
# Note: metric_weight and metric_scalar mean the same thing. Set to zero to turn it off.
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 0.
cspace_trajectory_rmp: # Note: unused
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp: # Note: unused
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp: # Note: Less important
max_velocity: 4. # max_xd
velocity_damping_region: 1.5
damping_gain: 1000.0
metric_weight: 0.
target_rmp:
accel_p_gain: 100.
accel_d_gain: 400.
accel_norm_eps: .025
# TODO: meters
metric_alpha_length_scale: .001
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
# TODO: meters
proximity_metric_boost_length_scale: .0025
xi_estimator_gate_std_dev: 20000. # unused
accept_user_weights: false # Values >= .5 are true and < .5 are false
axis_target_rmp: # Note: Won't be used for end effector position control
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
# TODO: meters
proximity_metric_boost_length_scale: .01
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp: # Note import if no obstacles
damping_gain: 50.
# TODO: meters
damping_std_dev: .005
damping_robustness_eps: 1e-2
# TODO: meters
damping_velocity_gate_length_scale: .001
repulsion_gain: 800.
# TODO: meters
repulsion_std_dev: .001
# TODO: meters
metric_modulation_radius: .05
metric_scalar: 10000. # Real value should be this.
#metric_scalar: 0. # Turns off collision avoidance.
# TODO: meters
metric_exploder_std_dev: .0025
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 0.
canonical_resolve:
max_acceleration_norm: 5000. # TODO: try setting much larger
projection_tolerance: .01
verbose: false
body_cylinders:
- name: base_stem
pt1: [0,0,.333]
pt2: [0,0,0.]
radius: .05
body_collision_controllers:
- name: Base_Link
radius: .05
- name: Shoulder_Link
radius: .05
- name: Bicep_Link
radius: .05
- name: ForeArm_Link
radius: .05
- name: SphericalWrist1_Link
radius: .05
- name: SphericalWrist2_Link
radius: .05
| 3,556 | YAML | 29.930435 | 88 | 0.615017 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow/kinova_rmpflow_common7.yaml | # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
joint_limit_buffers: [.01, .01, .01, .01, .01, .01, .01]
# Note: metric_weight and metric_scalar mean the same thing. Set to zero to turn it off.
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 0.
cspace_trajectory_rmp: # Note: unused
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp: # Note: unused
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp: # Note: Less important
max_velocity: 4 # max_xd
velocity_damping_region: 1.5
damping_gain: 1000.0
metric_weight: 0.
target_rmp:
accel_p_gain: 100.
accel_d_gain: 400.
accel_norm_eps: .025
# TODO: meters
metric_alpha_length_scale: .001
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
# TODO: meters
proximity_metric_boost_length_scale: .0025
xi_estimator_gate_std_dev: 20000. # unused
accept_user_weights: false # Values >= .5 are true and < .5 are false
axis_target_rmp: # Note: Won't be used for end effector position control
accel_p_gain: 210.
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
# TODO: meters
proximity_metric_boost_length_scale: .01
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp: # Note import if no obstacles
damping_gain: 50.
# TODO: meters
damping_std_dev: .005
damping_robustness_eps: 1e-2
# TODO: meters
damping_velocity_gate_length_scale: .001
repulsion_gain: 800.
# TODO: meters
repulsion_std_dev: .001
# TODO: meters
metric_modulation_radius: .05
metric_scalar: 10000. # Real value should be this.
#metric_scalar: 0. # Turns off collision avoidance.
# TODO: meters
metric_exploder_std_dev: .0025
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 0.
canonical_resolve:
max_acceleration_norm: 5000. # TODO: try setting much larger
projection_tolerance: .01
verbose: false
body_cylinders:
- name: base_stem
pt1: [0,0,.333]
pt2: [0,0,0.]
radius: .05
body_collision_controllers:
- name: base_link
radius: .05
- name: shoulder_link
radius: .05
- name: half_arm_1_link
radius: .05
- name: half_arm_2_link
radius: .05
- name: forearm_link
radius: .05
- name: spherical_wrist_1_link
radius: .05
- name: spherical_wrist_2_link
radius: .05
| 3,615 | YAML | 29.905983 | 88 | 0.613001 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow/robot_descriptor.yaml | # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# The robot descriptor defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF, except when otherwise specified below under
# cspace_urdf_bridge
cspace:
- Actuator1
- Actuator2
- Actuator3
- Actuator4
- Actuator5
- Actuator6
root_link: Base_Link
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]
acceleration_limits: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
jerk_limits: [150.0, 150.0, 150.0, 150.0, 150.0, 150.0] | 1,026 | YAML | 31.093749 | 79 | 0.739766 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow/robot_descriptor7.yaml | # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# The robot descriptor defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF, except when otherwise specified below under
# cspace_urdf_bridge
cspace:
- Actuator1
- Actuator2
- Actuator3
- Actuator4
- Actuator5
- Actuator6
- Actuator7
root_link: base_link
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]
acceleration_limits: [1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0]
jerk_limits: [150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150] | 1,061 | YAML | 31.181817 | 79 | 0.734213 |
DigitalBotLab/Robots/robot-exts-control/exts/control/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "control"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import control".
[[python.module]]
name = "control"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,547 | TOML | 31.249999 | 118 | 0.743374 |
DigitalBotLab/Robots/robot-exts-control/exts/control/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
DigitalBotLab/Robots/robot-exts-control/exts/control/docs/README.md | # Python Extension Example [control]
This is an example of a pure Python Kit extension. It is intended to be copied and to serve as a template for creating new extensions.
| 166 | Markdown | 32.399994 | 126 | 0.783133 |
DigitalBotLab/Robots/robot-exts-control/exts/control/docs/index.rst | control
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"control"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 315 | reStructuredText | 14.047618 | 43 | 0.603175 |
DigitalBotLab/Robots/RobotServices/kinova_control.py | import sys
import os
import time
import threading
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.messages import Base_pb2, BaseCyclic_pb2, Common_pb2
# Maximum allowed waiting time during actions (in seconds)
TIMEOUT_DURATION = 20
# Create closure to set an event after an END or an ABORT
def check_for_end_or_abort(e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check
def angular_action_movement(base, joint_angles):
print("Starting angular action movement ...")
action = Base_pb2.Action()
action.name = "Example angular action movement"
action.application_data = ""
actuator_count = base.GetActuatorCount()
# move to specified location
for joint_id in range(actuator_count.count):
joint_angle = action.reach_joint_angles.joint_angles.joint_angles.add()
joint_angle.joint_identifier = joint_id
joint_angle.value = joint_angles[joint_id]
e = threading.Event()
notification_handle = base.OnNotificationActionTopic(
check_for_end_or_abort(e),
Base_pb2.NotificationOptions()
)
print("Executing action")
base.ExecuteAction(action)
print("Waiting for movement to finish ...")
finished = e.wait(TIMEOUT_DURATION)
base.Unsubscribe(notification_handle)
for joint_id in range(actuator_count.count):
joint_angle = action.reach_joint_angles.joint_angles.joint_angles
# print("joint_angle: ", joint_angle)
if finished:
print("Angular movement completed")
else:
print("Timeout on action notification wait")
return finished
def cartesian_action_movement(base, base_cyclic):
print("Starting Cartesian action movement ...")
action = Base_pb2.Action()
action.name = "Example Cartesian action movement"
action.application_data = ""
feedback = base_cyclic.RefreshFeedback()
cartesian_pose = action.reach_pose.target_pose
cartesian_pose.x = feedback.base.tool_pose_x # (meters)
cartesian_pose.y = feedback.base.tool_pose_y - 0.1 # (meters)
cartesian_pose.z = feedback.base.tool_pose_z - 0.2 # (meters)
cartesian_pose.theta_x = feedback.base.tool_pose_theta_x # (degrees)
cartesian_pose.theta_y = feedback.base.tool_pose_theta_y # (degrees)
cartesian_pose.theta_z = feedback.base.tool_pose_theta_z # (degrees)
e = threading.Event()
notification_handle = base.OnNotificationActionTopic(
check_for_end_or_abort(e),
Base_pb2.NotificationOptions()
)
print("Executing action")
base.ExecuteAction(action)
print("Waiting for movement to finish ...")
finished = e.wait(TIMEOUT_DURATION)
base.Unsubscribe(notification_handle)
if finished:
print("Cartesian movement completed")
else:
print("Timeout on action notification wait")
return finished
def GripperCommand(base, target_position):
# Create the GripperCommand we will send
gripper_command = Base_pb2.GripperCommand()
finger = gripper_command.gripper.finger.add()
# Close the gripper with position increments
gripper_command.mode = Base_pb2.GRIPPER_POSITION
finger.finger_identifier = 1
finger.value = target_position
# print("Going to position {:0.2f}...".format(finger.value))
base.SendGripperCommand(gripper_command)
return True
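# Illustrative usage sketch (joint values assumed, not guaranteed by this file): these helpers expect an
# already-connected BaseClient, set up the same way kinova_server.py does it:
#   with utilities.DeviceConnection.createTcpConnection(args) as router:
#       base = BaseClient(router)
#       angular_action_movement(base, [0, 15, 180, 230, 0, 55, 90])   # one angle (degrees) per actuator
#       GripperCommand(base, 0.5)                                      # 0.0 = open ... 1.0 = closed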
class GripperCommandExample:
def __init__(self, router, proportional_gain = 2.0):
self.proportional_gain = proportional_gain
self.router = router
# Create base client using TCP router
self.base = BaseClient(self.router)
def ExampleSendGripperCommands(self, target_position):
# Create the GripperCommand we will send
gripper_command = Base_pb2.GripperCommand()
finger = gripper_command.gripper.finger.add()
# Close the gripper with position increments
gripper_command.mode = Base_pb2.GRIPPER_POSITION
finger.finger_identifier = 1
finger.value = target_position
# print("Going to position {:0.2f}...".format(finger.value))
self.base.SendGripperCommand(gripper_command)
return True
# # Set speed to open gripper
# print ("Opening gripper using speed command...")
# gripper_command.mode = Base_pb2.GRIPPER_SPEED
# finger.value = 0.1
# self.base.SendGripperCommand(gripper_command)
# gripper_request = Base_pb2.GripperRequest()
# # Wait for reported position to be opened
# gripper_request.mode = Base_pb2.GRIPPER_POSITION
# while True:
# gripper_measure = self.base.GetMeasuredGripperMovement(gripper_request)
# if len (gripper_measure.finger):
# print("Current position is : {0}".format(gripper_measure.finger[0].value))
# if gripper_measure.finger[0].value < 0.01:
# break
# else: # Else, no finger present in answer, end loop
# break
# # Set speed to close gripper
# print ("Closing gripper using speed command...")
# gripper_command.mode = Base_pb2.GRIPPER_SPEED
# finger.value = -0.1
# self.base.SendGripperCommand(gripper_command)
# # Wait for reported speed to be 0
# gripper_request.mode = Base_pb2.GRIPPER_SPEED
# while True:
# gripper_measure = self.base.GetMeasuredGripperMovement(gripper_request)
# if len (gripper_measure.finger):
# print("Current speed is : {0}".format(gripper_measure.finger[0].value))
# if gripper_measure.finger[0].value == 0.0:
# break
# else: # Else, no finger present in answer, end loop
# break
class GripperFeedback:
def __init__(self, base, base_cyclic, proportional_gain = 2.0, force_min = 10, force_max = 30):
"""
GripperFeedback class constructor.
Inputs:
kortex_api.RouterClient router: TCP router
kortex_api.RouterClient router_real_time: Real-time UDP router
float proportional_gain: Proportional gain used in control loop (default value is 2.0)
Outputs:
None
Notes:
- Actuators and gripper initial position are retrieved to set initial positions
- Actuator and gripper cyclic command objects are created in constructor. Their
references are used to update position and speed.
"""
self.proportional_gain = proportional_gain
###########################################################################################
# UDP and TCP sessions are used in this example.
# TCP is used to perform the change of servoing mode
# UDP is used for cyclic commands.
#
# 2 sessions have to be created: 1 for TCP and 1 for UDP
###########################################################################################
# Create base client using TCP router
self.base = base
# Create base cyclic client using UDP router.
self.base_cyclic = base_cyclic
# Create base cyclic command object.
self.base_command = BaseCyclic_pb2.Command()
self.base_command.frame_id = 0
self.base_command.interconnect.command_id.identifier = 0
self.base_command.interconnect.gripper_command.command_id.identifier = 0
# Add motor command to interconnect's cyclic
self.motorcmd = self.base_command.interconnect.gripper_command.motor_cmd.add()
# Set gripper's initial position velocity and force
base_feedback = self.base_cyclic.RefreshFeedback()
self.motorcmd.position = base_feedback.interconnect.gripper_feedback.motor[0].position
self.motorcmd.velocity = 0
self.motorcmd.force = force_min
self.force_min = force_min
self.force_max = force_max
for actuator in base_feedback.actuators:
self.actuator_command = self.base_command.actuators.add()
self.actuator_command.position = actuator.position
self.actuator_command.velocity = 0.0
self.actuator_command.torque_joint = 0.0
self.actuator_command.command_id = 0
print("Position = ", actuator.position)
# Save servoing mode before changing it
self.previous_servoing_mode = self.base.GetServoingMode()
# Set base in low level servoing mode
servoing_mode_info = Base_pb2.ServoingModeInformation()
servoing_mode_info.servoing_mode = Base_pb2.LOW_LEVEL_SERVOING
self.base.SetServoingMode(servoing_mode_info)
def Cleanup(self):
"""
Restore arm's servoing mode to the one that
was effective before running the example.
Inputs:
None
Outputs:
None
Notes:
None
"""
# Restore servoing mode to the one that was in use before running the example
self.base.SetServoingMode(self.previous_servoing_mode)
def grip(self, target_position):
if target_position > 100.0:
target_position = 100.0
if target_position < 0.0:
target_position = 0.0
current_force = self.force_min
self.motorcmd.position = target_position
# self.motorcmd.force = self.force_max
return True
def Goto(self, target_position):
"""
Position gripper to a requested target position using a simple
proportional feedback loop which changes torque according to error
between target position and current gripper position
Inputs:
float target_position: position (0% - 100%) to send gripper to.
Outputs:
            Returns True if gripper was positioned successfully, returns False
otherwise.
Notes:
- This function blocks until position is reached.
- If target position exceeds 100.0, its value is changed to 100.0.
- If target position is below 0.0, its value is set to 0.0.
"""
if target_position > 100.0:
target_position = 100.0
if target_position < 0.0:
target_position = 0.0
while True:
try:
base_feedback = self.base_cyclic.Refresh(self.base_command)
# Calculate speed according to position error (target position VS current position)
position_error = target_position - base_feedback.interconnect.gripper_feedback.motor[0].position
print("target pos:", target_position)
# If positional error is small, stop gripper
if abs(position_error) < 1.5:
position_error = 0
self.motorcmd.velocity = 0
self.base_cyclic.Refresh(self.base_command)
return True
else:
self.motorcmd.velocity = self.proportional_gain * abs(position_error)
if self.motorcmd.velocity > 100.0:
self.motorcmd.velocity = 100.0
self.motorcmd.position = target_position
except Exception as e:
print(str(e))
return False
time.sleep(0.001)
return True
| 12,010 | Python | 36.652038 | 112 | 0.618068 |
DigitalBotLab/Robots/RobotServices/kinova_server.py | import socketserver
import utilities
import sys, os
from numpy import interp
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.messages import Base_pb2
from kinova_control import angular_action_movement, GripperFeedback, GripperCommand
# import files
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
# initialize connection argument
class KinovaUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def setup(self):
self.joint_target = 0.0
def handle(self):
# obtain message from Isaac Sim
data = self.request[0].strip()
socket = self.request[1]
print("recieving data from omniverse:", data)
command, message = data.split(b':')
if command.startswith(b'Hello'):
response = "Connect with isaac sim"
print("establish connection with isaac sim")
elif command.startswith(b'Control'):
joint_positions = self.process_data(message)
success = "succeed" if self.control_robot(joint_positions) else "failed"
response = f"The action {success}"
elif command.startswith(b'GetJoints'):
joint_angles = self.get_joint_status()
response = " ".join([str(e) for e in joint_angles])
socket.sendto(response.encode('utf-8'), self.client_address)
def process_data(self, data: str):
"""
Process data as Kinova command to control the real robot
data is comprised of 7(body) + 1(gripper) dimensions
"""
joint_positions = [float(e) for e in data.split()]
return joint_positions
def control_robot(self, joint_positions):
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
base = BaseClient(router)
# base_cyclic = BaseCyclicClient(router_real_time)
# gripper = GripperFeedback(base, base_cyclic)
success = True
success &= angular_action_movement(base, joint_positions[:7])
# gripper.Cleanup()
print("go to position", joint_positions[7])
joint_target = min(max(0, joint_positions[7]), 1)
# if joint_target != self.joint_target:
# self.joint_target = joint_target
success &= GripperCommand(base, joint_target)
# gripper.Cleanup()
# gripper_request = Base_pb2.GripperRequest()
# gripper_request.mode = Base_pb2.GRIPPER_POSITION
# gripper_measure = base.GetMeasuredGripperMovement(gripper_request)
# print("gripper position is at", gripper_measure)
return success
def get_joint_status(self):
# Create connection to the device and get the router
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
# Create required services
base = BaseClient(router)
joint_angles = base.GetMeasuredJointAngles().joint_angles
# print("Joint angles: ", len(joint_angles), joint_angles[0], joint_angles)
joint_angles = [e.value for e in joint_angles]
gripper_request = Base_pb2.GripperRequest()
gripper_request.mode = Base_pb2.GRIPPER_POSITION
gripper_measure = base.GetMeasuredGripperMovement(gripper_request)
# print("gripper position is at", gripper_measure)
print("joint_angles and gripper position", joint_angles, gripper_measure)
                # return the finger position as a float so the space-joined response stays parseable
                return joint_angles + [gripper_measure.finger[0].value]
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
args = utilities.parseConnectionArguments()
with socketserver.UDPServer((HOST, PORT), KinovaUDPHandler) as server:
server.serve_forever()
| 4,417 | Python | 41.07619 | 91 | 0.63233 |
DigitalBotLab/Robots/RobotServices/utilities.py | import argparse
from kortex_api.TCPTransport import TCPTransport
from kortex_api.UDPTransport import UDPTransport
from kortex_api.RouterClient import RouterClient, RouterClientSendOptions
from kortex_api.SessionManager import SessionManager
from kortex_api.autogen.messages import Session_pb2
def parseConnectionArguments(parser = argparse.ArgumentParser()):
parser.add_argument("--ip", type=str, help="IP address of destination", default="192.168.1.10")
parser.add_argument("-u", "--username", type=str, help="username to login", default="admin")
parser.add_argument("-p", "--password", type=str, help="password to login", default="admin")
return parser.parse_args()
class DeviceConnection:
TCP_PORT = 10000
UDP_PORT = 10001
@staticmethod
def createTcpConnection(args):
"""
returns RouterClient required to create services and send requests to device or sub-devices,
"""
return DeviceConnection(args.ip, port=DeviceConnection.TCP_PORT, credentials=(args.username, args.password))
@staticmethod
def createUdpConnection(args):
"""
returns RouterClient that allows to create services and send requests to a device or its sub-devices @ 1khz.
"""
return DeviceConnection(args.ip, port=DeviceConnection.UDP_PORT, credentials=(args.username, args.password))
def __init__(self, ipAddress, port=TCP_PORT, credentials = ("","")):
self.ipAddress = ipAddress
self.port = port
self.credentials = credentials
self.sessionManager = None
# Setup API
self.transport = TCPTransport() if port == DeviceConnection.TCP_PORT else UDPTransport()
self.router = RouterClient(self.transport, RouterClient.basicErrorCallback)
# Called when entering 'with' statement
def __enter__(self):
self.transport.connect(self.ipAddress, self.port)
if (self.credentials[0] != ""):
session_info = Session_pb2.CreateSessionInfo()
session_info.username = self.credentials[0]
session_info.password = self.credentials[1]
session_info.session_inactivity_timeout = 10000 # (milliseconds)
session_info.connection_inactivity_timeout = 2000 # (milliseconds)
self.sessionManager = SessionManager(self.router)
print("Logging as", self.credentials[0], "on device", self.ipAddress)
self.sessionManager.CreateSession(session_info)
return self.router
# Called when exiting 'with' statement
def __exit__(self, exc_type, exc_value, traceback):
if self.sessionManager != None:
router_options = RouterClientSendOptions()
router_options.timeout_ms = 1000
self.sessionManager.CloseSession(router_options)
self.transport.disconnect()
| 2,888 | Python | 36.51948 | 116 | 0.674169 |
DigitalBotLab/Robots/VisionServices/segment_anything/utils.py | import numpy as np
import matplotlib.pyplot as plt
def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30/255, 144/255, 255/255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image)
def show_points(coords, labels, ax, marker_size=375):
pos_points = coords[labels==1]
neg_points = coords[labels==0]
ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
def show_box(box, ax):
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) | 966 | Python | 42.954544 | 128 | 0.606625 |
DigitalBotLab/Robots/VisionServices/segment_anything/sam_app.py | import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
import os
import gradio as gr
import json
from segment_anything import sam_model_registry, SamPredictor
SEGMENT_ANYTHING_FOLDER = "C:\\Users\\zhaoy\\Downloads"#"I:/Research/semgent-anything"
MODEL_TYPE = "vit_b" #"vit_b"
SAM_CHECKPOINT = os.path.join(SEGMENT_ANYTHING_FOLDER, "sam_vit_b_01ec64.pth") # sam_vit_h_4b8939 # sam_vit_b_01ec64
device = "cuda"
sam = sam_model_registry[MODEL_TYPE](checkpoint=SAM_CHECKPOINT)
sam.to(device=device)
predictor = SamPredictor(sam)
def segment_with_points(
image,
input_point_x,
input_point_y,
shape = "cuboid",
input_label = np.array([1]),
shape_contour_count = 6,
debug_plot = True,
):
predictor.set_image(image)
input_points = np.array([[input_point_x, input_point_y]])
masks, scores, logits = predictor.predict(
point_coords=input_points,
point_labels=input_label,
multimask_output=True,
)
print("mask", masks.shape, "scores", scores.shape, "logits", logits.shape)
# only return the first mask
target_mask = masks[0].astype(np.uint8)
target_contours, _ = cv2.findContours(target_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
target_contour_count = len(target_contours)
for mask in masks:
# get contours
contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0 and len(contours[0]) < target_contour_count:
target_mask = mask
target_contours = contours
target_contour_count = len(contours)
if debug_plot:
cv2.drawContours(image, target_contours, -1, (255, 255, 255), 2)
cv2.imshow('target_contours', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
mask = target_mask
contour = max(target_contours, key=cv2.contourArea)
arclen = cv2.arcLength(contour, True)
if shape == "cuboid":
for ratio in [0.01, 0.02, 0.005, 0.01, 0.02, 0.05, 0.1]:
epsilon = ratio * arclen
approx = cv2.approxPolyDP(contour, epsilon, True)
if len(approx) == shape_contour_count:
break
else: # bounding box
x, y, w, h = cv2.boundingRect(contour)
approx = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
print("approx", approx, approx.shape)
if debug_plot:
temp = cv2.drawContours(image, [approx], -1, (255, 0, 0), 1)
temp = cv2.resize(temp, (960, 540))
cv2.imshow('Final Contours', temp)
cv2.waitKey(0)
cv2.destroyAllWindows()
return json.dumps(approx.tolist())
cond_img_e = gr.Image(label="Input image", type='numpy', image_mode = "RGB")
input_point_x = gr.Number(label="input h", value = 0)
input_point_y = gr.Number(label="input w", value = 0)
if __name__ == "__main__":
demo = gr.Interface(
segment_with_points,
inputs=[cond_img_e,
input_point_x,
input_point_y,
"text"
],
outputs="text",
title="Segment Anything",
)
demo.launch(share = False)
| 3,184 | Python | 29.625 | 116 | 0.608668 |
DigitalBotLab/Robots/VisionServices/dino/dino_app.py | import argparse
import numpy as np
import gradio as gr
import torch
# Grounding DINO
from GroundingDINO.groundingdino.models import build_model
from GroundingDINO.groundingdino.util.slconfig import SLConfig
from GroundingDINO.groundingdino.util.utils import clean_state_dict
from GroundingDINO.groundingdino.util.inference import predict
from GroundingDINO.groundingdino.datasets import transforms as T
# segment anything
# from segment_anything import build_sam, SamPredictor
# import cv2
import numpy as np
from huggingface_hub import hf_hub_download
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filenmae = "groundingdino_swinb_cogcoor.pth"
ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
def load_model_hf(repo_id, filename, ckpt_config_filename, device='cpu'):
cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)
args = SLConfig.fromfile(cache_config_file)
model = build_model(args)
args.device = device
    # download (or reuse the locally cached) checkpoint so torch.load gets a valid local path
    cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
    checkpoint = torch.load(cache_file, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
# cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
# checkpoint = torch.load(cache_file, map_location='cpu')
# log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
print("Model loaded from {} \n => {}".format(cache_file, log))
_ = model.eval()
return model
def transform_image(image_pil):
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image
def run_gdino(image, text_prompt, box_threshold, text_threshold):
w, h = image.size
print(image.size)
image_pil = image.convert("RGB")
image = transform_image(image_pil)
    groundingdino_model = load_model_hf(ckpt_repo_id, ckpt_filename, ckpt_config_filename)
boxes, scores, labels = predict(
model=groundingdino_model,
image=image,
caption=text_prompt,
box_threshold=box_threshold,
text_threshold=text_threshold
)
    def to_center(x):
        # convert a normalized (cx, cy, w, h) box to a pixel-space (x1, y1, x2, y2) box
        x *= np.array([w, h, w, h])
        a = x[2] / 2
        b = x[3] / 2
        return np.array([x[0]-a, x[1]-b, x[0]+a, x[1]+b])
if boxes.shape[0] == 0:
return ""
boxes = boxes.cpu().detach().numpy()
pixel_coord = np.apply_along_axis(to_center, 1, boxes)
scores = scores.cpu().detach().numpy()
print(list(pixel_coord), list(scores))
record = []
for box, score, label in zip(list(np.around(pixel_coord).astype("int")), list(scores), labels):
# print("box", box)
# print("score", score)
record.append(str(list(box)) + "_" + "{:.3f}".format(score) + "_" + str(label))
return str(record)
if __name__ == "__main__":
demo = gr.Interface(
run_gdino,
inputs=[gr.Image(source='upload', type="pil"), "text", gr.Slider(0, 1, value=0.3), gr.Slider(0, 1, value=0.25)],
outputs="text",
title="Grounded Dino",
examples=[
],
)
demo.launch(share = True)
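# --- Client-side parsing sketch (illustrative; not part of the original app) ---
# run_gdino returns a stringified list whose entries look like "[x1, y1, x2, y2]_score_phrase".
# A caller could recover the fields roughly like this:
#
#   import ast
#   for entry in ast.literal_eval(returned_text):
#       box_str, score_str, phrase = entry.split("_", 2)
#       box, score = ast.literal_eval(box_str), float(score_str)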
| 3,487 | Python | 28.811966 | 117 | 0.641812 |
DigitalBotLab/Robots/VisionServices/owl_vit/owl_app.py | import torch
import cv2
import gradio as gr
import numpy as np
from transformers import OwlViTProcessor, OwlViTForObjectDetection
# Use GPU if available
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32").to(device)
model.eval()
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
def query_image(img, text_queries, score_threshold):
    # split the comma-separated query string into individual text queries
    text_queries = text_queries.split(",")
target_sizes = torch.Tensor([img.shape[:2]])
inputs = processor(text=text_queries, images=img, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model(**inputs)
outputs.logits = outputs.logits.cpu()
outputs.pred_boxes = outputs.pred_boxes.cpu()
results = processor.post_process(outputs=outputs, target_sizes=target_sizes)
boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
# font = cv2.FONT_HERSHEY_SIMPLEX
# for box, score, label in zip(boxes, scores, labels):
# box = [int(i) for i in box.tolist()]
# if score >= score_threshold:
# img = cv2.rectangle(img, pt1 = (box[0], box[1]), pt2 = (box[2], box[3]), color = (255,0,0), thickness = 5)
# if box[3] + 25 > 768:
# y = box[3] - 10
# else:
# y = box[3] + 25
# img = cv2.putText(
# img, text_queries[label], (box[0], y), font, 1, (255,0,0), 2, cv2.LINE_AA
# )
records = []
for box, score, label in zip(boxes, scores, labels):
# print(box, score, label)
if score >= score_threshold:
records.append(str(box.long().tolist()) + "_" + "{:.3f}".format(score.item()) + "_" + str(label.item()))
return str(records)
description = """
Gradio demo for <a href="https://huggingface.co/docs/transformers/main/en/model_doc/owlvit">OWL-ViT</a>,
introduced in <a href="https://arxiv.org/abs/2205.06230">Simple Open-Vocabulary Object Detection
with Vision Transformers</a>.
\n\nYou can use OWL-ViT to query images with text descriptions of any object.
To use it, simply upload an image and enter comma separated text descriptions of objects you want to query the image for. You
can also use the score threshold slider to set a threshold to filter out low probability predictions.
\n\nOWL-ViT is trained on text templates,
hence you can get better predictions by querying the image with text templates used in training the original model: *"photo of a star-spangled banner"*,
*"image of a shoe"*. Refer to the <a href="https://arxiv.org/abs/2103.00020">CLIP</a> paper to see the full list of text templates used to augment the training data.
\n\n<a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb">Colab demo</a>
"""
demo = gr.Interface(
query_image,
inputs=[gr.Image(), "text", gr.Slider(0, 1, value=0.1)],
outputs="text",
title="Zero-Shot Object Detection with OWL-ViT",
description=description,
examples=[
],
)
demo.launch(share = True) | 3,249 | Python | 38.634146 | 165 | 0.659588 |
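# --- Client-side parsing sketch (illustrative; not part of the original app) ---
# query_image returns a stringified list whose entries look like "[x1, y1, x2, y2]_score_label",
# where label is an index into the comma-separated text_queries.
#
#   import ast
#   for entry in ast.literal_eval(returned_text):
#       box_str, score_str, label_str = entry.split("_")
#       box, score, label = ast.literal_eval(box_str), float(score_str), int(label_str)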
DigitalBotLab/Robots/VisionServices/owl_vit/README.md | ---
title: OWL-ViT Demo
emoji: 🔥
colorFrom: yellow
colorTo: yellow
sdk: gradio
sdk_version: 3.1.3
app_file: app.py
pinned: false
license: apache-2.0
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
| 251 | Markdown | 16.999999 | 96 | 0.752988 |
DigitalBotLab/Robots/VisionServices/fastsam/fastsam_app.py | from ultralytics import YOLO
import gradio as gr
import torch
from utils.my_tools import fast_process
from utils.tools import format_results, box_prompt, point_prompt, text_prompt
from PIL import ImageDraw
import numpy as np
import os
import json
FASTSAM_FOLDER = "I:/Research/FastSAM/"
# Load the pre-trained model
model = YOLO(os.path.join(FASTSAM_FOLDER,'weights/FastSAM.pt'))
device = torch.device(
"cuda"
if torch.cuda.is_available()
else "mps"
if torch.backends.mps.is_available()
else "cpu"
)
def segment_everything(
input,
text="",
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
withContours=True,
use_retina=True,
mask_random_color=True,
):
    input_size = int(input_size)  # make sure imgsz is an integer
# Thanks for the suggestion by hysts in HuggingFace.
w, h = input.size
scale = input_size / max(w, h)
new_w = int(w * scale)
new_h = int(h * scale)
input = input.resize((new_w, new_h))
results = model(input,
device=device,
retina_masks=True,
iou=iou_threshold,
conf=conf_threshold,
imgsz=input_size,)
if len(text) > 0:
results = format_results(results[0], 0)
annotations, _ = text_prompt(results, text, input, device=device,
clip_model_path=os.path.join(FASTSAM_FOLDER,'weights/CLIP_ViT_B_32.pt')
)
annotations = np.array([annotations])
else:
annotations = results[0].masks.data
contour_str = fast_process(annotations=annotations,
image=input,
device=device,
scale=(1024 // input_size),
better_quality=better_quality,
mask_random_color=mask_random_color,
bbox=None,
use_retina=use_retina,
)
return json.dumps(contour_str.tolist())
cond_img_e = gr.Image(label="Input", type='pil')
cond_img_p = gr.Image(label="Input with points", type='pil')
cond_img_t = gr.Image(label="Input with text", type='pil',
value = os.path.join(FASTSAM_FOLDER,"examples/0.jpg"))
segm_img_e = gr.Image(label="Segmented Image", interactive=False, type='pil')
segm_img_p = gr.Image(label="Segmented Image with points", interactive=False, type='pil')
segm_img_t = gr.Image(label="Segmented Image with text", interactive=False, type='pil')
global_points = []
global_point_label = []
if __name__ == "__main__":
demo = gr.Interface(
segment_everything,
inputs=[cond_img_t,
gr.Textbox(label="text prompt", value="grey tea tower"),
# 1024,
# 0.7,
# 0.25,
# False,
# True,
# True,
],
outputs="text",
title="FastSAM",
)
demo.launch(share = False)
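# --- Output decoding sketch (illustrative; not part of the original app) ---
# segment_everything returns the selected mask's simplified polygons as a JSON-encoded
# nested list; a client could turn them back into OpenCV contours like this:
#
#   import json
#   contours = [np.array(c, dtype=np.int32) for c in json.loads(result_text)]
#   # cv2.drawContours(image_bgr, contours, -1, (0, 255, 0), 2)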
| 3,071 | Python | 28.538461 | 107 | 0.554217 |
DigitalBotLab/Robots/VisionServices/fastsam/README.md | ---
title: FastSAM
emoji: 🐠
colorFrom: pink
colorTo: indigo
sdk: gradio
sdk_version: 3.35.2
app_file: app_gradio.py
pinned: false
license: apache-2.0
---
# Fast Segment Anything
Official PyTorch implementation of <a href="https://github.com/CASIA-IVA-Lab/FastSAM">Fast Segment Anything</a>.
The **Fast Segment Anything Model (FastSAM)** is a CNN-based Segment Anything Model trained on only 2% of the SA-1B dataset published by the SAM authors. FastSAM achieves performance comparable to
the SAM method at **50× higher run-time speed**.
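A minimal inference sketch (assuming the `FastSAM.pt` weights are available locally; the arguments mirror the demo app in this folder):
```python
from ultralytics import YOLO
# load the FastSAM weights with the ultralytics YOLO loader, as the demo app does
model = YOLO("weights/FastSAM.pt")
results = model("examples/0.jpg", retina_masks=True, imgsz=1024, conf=0.25, iou=0.7)
masks = results[0].masks.data  # one binary mask per detected object
```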
## License
The model is licensed under the [Apache 2.0 license](LICENSE).
## Acknowledgement
- [Segment Anything](https://segment-anything.com/) provides the SA-1B dataset and the base codes.
- [YOLOv8](https://github.com/ultralytics/ultralytics) provides codes and pre-trained models.
- [YOLACT](https://arxiv.org/abs/2112.10003) provides powerful instance segmentation method.
- [Grounded-Segment-Anything](https://huggingface.co/spaces/yizhangliu/Grounded-Segment-Anything) provides a useful web demo template.
## Citing FastSAM
If you find this project useful for your research, please consider citing the following BibTeX entry.
```
@misc{zhao2023fast,
title={Fast Segment Anything},
author={Xu Zhao and Wenchao Ding and Yongqi An and Yinglong Du and Tao Yu and Min Li and Ming Tang and Jinqiao Wang},
year={2023},
eprint={2306.12156},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
``` | 1,451 | Markdown | 30.565217 | 192 | 0.740868 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/tools.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
import os
import sys
import clip
def convert_box_xywh_to_xyxy(box):
x1 = box[0]
y1 = box[1]
x2 = box[0] + box[2]
y2 = box[1] + box[3]
return [x1, y1, x2, y2]
def segment_image(image, bbox):
image_array = np.array(image)
segmented_image_array = np.zeros_like(image_array)
x1, y1, x2, y2 = bbox
segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
segmented_image = Image.fromarray(segmented_image_array)
black_image = Image.new("RGB", image.size, (255, 255, 255))
# transparency_mask = np.zeros_like((), dtype=np.uint8)
transparency_mask = np.zeros(
(image_array.shape[0], image_array.shape[1]), dtype=np.uint8
)
transparency_mask[y1:y2, x1:x2] = 255
transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
black_image.paste(segmented_image, mask=transparency_mask_image)
return black_image
def format_results(result, filter=0):
annotations = []
n = len(result.masks.data)
for i in range(n):
annotation = {}
mask = result.masks.data[i] == 1.0
if torch.sum(mask) < filter:
continue
annotation["id"] = i
annotation["segmentation"] = mask.cpu().numpy()
annotation["bbox"] = result.boxes.data[i]
annotation["score"] = result.boxes.conf[i]
annotation["area"] = annotation["segmentation"].sum()
annotations.append(annotation)
return annotations
def filter_masks(annotations): # filter the overlap mask
annotations.sort(key=lambda x: x["area"], reverse=True)
to_remove = set()
for i in range(0, len(annotations)):
a = annotations[i]
for j in range(i + 1, len(annotations)):
b = annotations[j]
if i != j and j not in to_remove:
# check if
if b["area"] < a["area"]:
if (a["segmentation"] & b["segmentation"]).sum() / b[
"segmentation"
].sum() > 0.8:
to_remove.add(j)
return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove
def get_bbox_from_mask(mask):
mask = mask.astype(np.uint8)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
x1, y1, w, h = cv2.boundingRect(contours[0])
x2, y2 = x1 + w, y1 + h
if len(contours) > 1:
for b in contours:
x_t, y_t, w_t, h_t = cv2.boundingRect(b)
            # merge multiple bounding boxes into one
x1 = min(x1, x_t)
y1 = min(y1, y_t)
x2 = max(x2, x_t + w_t)
y2 = max(y2, y_t + h_t)
h = y2 - y1
w = x2 - x1
return [x1, y1, x2, y2]
def fast_process(
annotations, args, mask_random_color, bbox=None, points=None, edges=False
):
if isinstance(annotations[0], dict):
annotations = [annotation["segmentation"] for annotation in annotations]
result_name = os.path.basename(args.img_path)
image = cv2.imread(args.img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
original_h = image.shape[0]
original_w = image.shape[1]
if sys.platform == "darwin":
plt.switch_backend("TkAgg")
plt.figure(figsize=(original_w/100, original_h/100))
# Add subplot with no margin.
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(image)
if args.better_quality == True:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(
mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
)
annotations[i] = cv2.morphologyEx(
mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
)
if args.device == "cpu":
annotations = np.array(annotations)
fast_show_mask(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
points=points,
point_label=args.point_label,
retinamask=args.retina,
target_height=original_h,
target_width=original_w,
)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
fast_show_mask_gpu(
annotations,
plt.gca(),
random_color=args.randomcolor,
bbox=bbox,
points=points,
point_label=args.point_label,
retinamask=args.retina,
target_height=original_h,
target_width=original_w,
)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
if args.withContours == True:
contour_all = []
temp = np.zeros((original_h, original_w, 1))
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask["segmentation"]
annotation = mask.astype(np.uint8)
if args.retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, hierarchy = cv2.findContours(
annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
for contour in contours:
contour_all.append(contour)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
plt.imshow(contour_mask)
save_path = args.output
if not os.path.exists(save_path):
os.makedirs(save_path)
plt.axis("off")
fig = plt.gcf()
plt.draw()
try:
buf = fig.canvas.tostring_rgb()
except AttributeError:
fig.canvas.draw()
buf = fig.canvas.tostring_rgb()
cols, rows = fig.canvas.get_width_height()
    img_array = np.frombuffer(buf, dtype=np.uint8).reshape(rows, cols, 3)
cv2.imwrite(os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
# CPU post process
def fast_show_mask(
annotation,
ax,
random_color=False,
bbox=None,
points=None,
point_label=None,
retinamask=True,
target_height=960,
target_width=960,
):
    mask_sum = annotation.shape[0]
    height = annotation.shape[1]
    weight = annotation.shape[2]
    # sort the annotations by area
    areas = np.sum(annotation, axis=(1, 2))
    sorted_indices = np.argsort(areas)
    annotation = annotation[sorted_indices]
    index = (annotation != 0).argmax(axis=0)
    if random_color == True:
        color = np.random.random((mask_sum, 1, 1, 3))
    else:
        color = np.ones((mask_sum, 1, 1, 3)) * np.array(
            [30 / 255, 144 / 255, 255 / 255]
        )
    transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
visual = np.concatenate([color, transparency], axis=-1)
mask_image = np.expand_dims(annotation, -1) * visual
show = np.zeros((height, weight, 4))
h_indices, w_indices = np.meshgrid(
np.arange(height), np.arange(weight), indexing="ij"
)
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update `show` with vectorized indexing
show[h_indices, w_indices, :] = mask_image[indices]
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
# draw point
if points is not None:
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
s=20,
c="y",
)
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
s=20,
c="m",
)
if retinamask == False:
show = cv2.resize(
show, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
ax.imshow(show)
def fast_show_mask_gpu(
annotation,
ax,
random_color=False,
bbox=None,
points=None,
point_label=None,
retinamask=True,
target_height=960,
target_width=960,
):
    mask_sum = annotation.shape[0]
    height = annotation.shape[1]
    weight = annotation.shape[2]
    areas = torch.sum(annotation, dim=(1, 2))
    sorted_indices = torch.argsort(areas, descending=False)
    annotation = annotation[sorted_indices]
    # find the index of the first non-zero mask at each pixel position
    index = (annotation != 0).to(torch.long).argmax(dim=0)
    if random_color == True:
        color = torch.rand((mask_sum, 1, 1, 3)).to(annotation.device)
    else:
        color = torch.ones((mask_sum, 1, 1, 3)).to(annotation.device) * torch.tensor(
            [30 / 255, 144 / 255, 255 / 255]
        ).to(annotation.device)
    transparency = torch.ones((mask_sum, 1, 1, 1)).to(annotation.device) * 0.6
    visual = torch.cat([color, transparency], dim=-1)
    mask_image = torch.unsqueeze(annotation, -1) * visual
    # gather by index: for each pixel, pick the mask selected above and assemble mask_image into a single overlay
show = torch.zeros((height, weight, 4)).to(annotation.device)
h_indices, w_indices = torch.meshgrid(
torch.arange(height), torch.arange(weight), indexing="ij"
)
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update `show` with vectorized indexing
show[h_indices, w_indices, :] = mask_image[indices]
show_cpu = show.cpu().numpy()
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
# draw point
if points is not None:
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
s=20,
c="y",
)
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
s=20,
c="m",
)
if retinamask == False:
show_cpu = cv2.resize(
show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
ax.imshow(show_cpu)
# clip
@torch.no_grad()
def retriev(
model, preprocess, elements, search_text: str, device
) -> int:
preprocessed_images = [preprocess(image).to(device) for image in elements]
tokenized_text = clip.tokenize([search_text]).to(device)
stacked_images = torch.stack(preprocessed_images)
image_features = model.encode_image(stacked_images)
text_features = model.encode_text(tokenized_text)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
probs = 100.0 * image_features @ text_features.T
return probs[:, 0].softmax(dim=0)
def crop_image(annotations, image_like):
if isinstance(image_like, str):
image = Image.open(image_like)
else:
image = image_like
ori_w, ori_h = image.size
mask_h, mask_w = annotations[0]["segmentation"].shape
if ori_w != mask_w or ori_h != mask_h:
image = image.resize((mask_w, mask_h))
cropped_boxes = []
cropped_images = []
not_crop = []
filter_id = []
# annotations, _ = filter_masks(annotations)
# filter_id = list(_)
for _, mask in enumerate(annotations):
if np.sum(mask["segmentation"]) <= 100:
filter_id.append(_)
continue
        bbox = get_bbox_from_mask(mask["segmentation"]) # bounding box of the mask
        cropped_boxes.append(segment_image(image, bbox)) # save the cropped image
        # cropped_boxes.append(segment_image(image,mask["segmentation"]))
        cropped_images.append(bbox) # save the bounding box of the cropped image
return cropped_boxes, cropped_images, not_crop, filter_id, annotations
def box_prompt(masks, bbox, target_height, target_width):
h = masks.shape[1]
w = masks.shape[2]
if h != target_height or w != target_width:
bbox = [
int(bbox[0] * w / target_width),
int(bbox[1] * h / target_height),
int(bbox[2] * w / target_width),
int(bbox[3] * h / target_height),
]
bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0
bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0
bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w
bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h
# IoUs = torch.zeros(len(masks), dtype=torch.float32)
bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
orig_masks_area = torch.sum(masks, dim=(1, 2))
union = bbox_area + orig_masks_area - masks_area
IoUs = masks_area / union
max_iou_index = torch.argmax(IoUs)
return masks[max_iou_index].cpu().numpy(), max_iou_index
def point_prompt(masks, points, point_label, target_height, target_width): # numpy-based processing
h = masks[0]["segmentation"].shape[0]
w = masks[0]["segmentation"].shape[1]
if h != target_height or w != target_width:
points = [
[int(point[0] * w / target_width), int(point[1] * h / target_height)]
for point in points
]
onemask = np.zeros((h, w))
masks = sorted(masks, key=lambda x: x['area'], reverse=True)
for i, annotation in enumerate(masks):
if type(annotation) == dict:
mask = annotation['segmentation']
else:
mask = annotation
for i, point in enumerate(points):
if mask[point[1], point[0]] == 1 and point_label[i] == 1:
onemask[mask] = 1
if mask[point[1], point[0]] == 1 and point_label[i] == 0:
onemask[mask] = 0
onemask = onemask >= 1
return onemask, 0
def text_prompt(annotations, text, img_path, device, clip_model_path = "./weights/CLIP_ViT_B_32.pt"):
cropped_boxes, cropped_images, not_crop, filter_id, annotations_ = crop_image(
annotations, img_path
)
clip_model, preprocess = clip.load(clip_model_path, device=device)
scores = retriev(
clip_model, preprocess, cropped_boxes, text, device=device
)
max_idx = scores.argsort()
max_idx = max_idx[-1]
max_idx += sum(np.array(filter_id) <= int(max_idx))
return annotations_[max_idx]["segmentation"], max_idx
| 15,038 | Python | 33.974419 | 102 | 0.579864 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/my_tools.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
def fast_process(
annotations,
image,
device,
scale,
better_quality=False,
mask_random_color=True,
bbox=None,
use_retina=True,
# withContours=True, # must true
):
if isinstance(annotations[0], dict):
annotations = [annotation['segmentation'] for annotation in annotations]
original_h = image.height
original_w = image.width
if better_quality:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
if device == 'cpu':
annotations = np.array(annotations)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
contour_all = []
temp = np.zeros((original_h, original_w, 1))
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask['segmentation']
annotation = mask.astype(np.uint8)
if use_retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
arclen = cv2.arcLength(contour, True)
# WARNING: 0.005 is a magic number
approx = cv2.approxPolyDP(contour, arclen*0.005, True)
print("approx!!", approx.shape)
contour_all.append(approx)
print("contour_all!!!", contour_all)
return np.array(contour_all)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
image = image.convert('RGBA')
overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_contour, (0, 0), overlay_contour)
return image
| 2,438 | Python | 31.959459 | 111 | 0.608696 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/tools_gradio.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
def fast_process(
annotations,
image,
device,
scale,
better_quality=False,
mask_random_color=True,
bbox=None,
use_retina=True,
withContours=True,
):
if isinstance(annotations[0], dict):
annotations = [annotation['segmentation'] for annotation in annotations]
original_h = image.height
original_w = image.width
if better_quality:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
if device == 'cpu':
annotations = np.array(annotations)
inner_mask = fast_show_mask(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
retinamask=use_retina,
target_height=original_h,
target_width=original_w,
)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
inner_mask = fast_show_mask_gpu(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
retinamask=use_retina,
target_height=original_h,
target_width=original_w,
)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
if withContours:
contour_all = []
temp = np.zeros((original_h, original_w, 1))
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask['segmentation']
annotation = mask.astype(np.uint8)
if use_retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
arclen = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, arclen*0.005, True)
contour_all.append(approx)
print("contour_all", contour_all)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
image = image.convert('RGBA')
overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_inner, (0, 0), overlay_inner)
if withContours:
overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_contour, (0, 0), overlay_contour)
return image
# CPU post process
def fast_show_mask(
annotation,
ax,
random_color=False,
bbox=None,
retinamask=True,
target_height=960,
target_width=960,
):
mask_sum = annotation.shape[0]
height = annotation.shape[1]
weight = annotation.shape[2]
    # sort the annotations by area
    areas = np.sum(annotation, axis=(1, 2))
    sorted_indices = np.argsort(areas)
annotation = annotation[sorted_indices]
index = (annotation != 0).argmax(axis=0)
if random_color == True:
color = np.random.random((mask_sum, 1, 1, 3))
else:
color = np.ones((mask_sum, 1, 1, 3)) * np.array([30 / 255, 144 / 255, 255 / 255])
transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
visual = np.concatenate([color, transparency], axis=-1)
mask_image = np.expand_dims(annotation, -1) * visual
mask = np.zeros((height, weight, 4))
h_indices, w_indices = np.meshgrid(np.arange(height), np.arange(weight), indexing='ij')
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
mask[h_indices, w_indices, :] = mask_image[indices]
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1))
if retinamask == False:
mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)
return mask
def fast_show_mask_gpu(
annotation,
ax,
random_color=False,
bbox=None,
retinamask=True,
target_height=960,
target_width=960,
):
device = annotation.device
mask_sum = annotation.shape[0]
height = annotation.shape[1]
weight = annotation.shape[2]
areas = torch.sum(annotation, dim=(1, 2))
sorted_indices = torch.argsort(areas, descending=False)
annotation = annotation[sorted_indices]
    # find the index of the first non-zero mask at each pixel position
index = (annotation != 0).to(torch.long).argmax(dim=0)
if random_color == True:
color = torch.rand((mask_sum, 1, 1, 3)).to(device)
else:
color = torch.ones((mask_sum, 1, 1, 3)).to(device) * torch.tensor(
[30 / 255, 144 / 255, 255 / 255]
).to(device)
transparency = torch.ones((mask_sum, 1, 1, 1)).to(device) * 0.6
visual = torch.cat([color, transparency], dim=-1)
mask_image = torch.unsqueeze(annotation, -1) * visual
    # gather by index: for each pixel, pick the mask selected above and assemble mask_image into a single overlay
    mask = torch.zeros((height, weight, 4)).to(device)
    h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight), indexing="ij")
    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update `mask` with vectorized indexing
    mask[h_indices, w_indices, :] = mask_image[indices]
mask_cpu = mask.cpu().numpy()
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
if retinamask == False:
mask_cpu = cv2.resize(
mask_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
return mask_cpu
| 6,221 | Python | 33.566666 | 111 | 0.596046 |
DigitalBotLab/InsideroboConnector/README.md | # <img src="Images/logo.png" alt="Logo" width="50" height="50"> Digital Bot Lab: InsideroboConnector


# Overview: Bridging the Gap Between Digital Robots and Omniverse

The Digital Bot Lab's Insiderobo Connector is a cutting-edge solution designed to seamlessly connect our extensive digital robot collection with the powerful NVIDIA Omniverse platform. With our connector, users can effortlessly import digital robots in .usd format, enabling them to leverage the full potential of Omniverse applications.
## 1. Get Started
Experience the future of robotics with the Digital Bot Lab's Insiderobo Connector, where the connection between digital robots and Omniverse becomes effortless and transformative.

### 1.1 Install Omniverse USD Composer
This project currently targets `Omniverse USD Composer`. Please follow the instructions to install it first:
[USD Composer Overview](https://docs.omniverse.nvidia.com/composer/latest/index.html#:~:text=NVIDIA%20Omniverse%E2%84%A2%20USD%20Composer,is%20based%20on%20Pixar's%20USD.)
### 1.2 Import the extension
To install the extension to Omniverse USD Composer:
First, clone the repository:
```bash
git clone https://github.com/DigitalBotLab/InsideroboConnector
```
Now open `Omniverse USD Composer`, go to `Menu Bar` -> `Windows` -> `Extensions` -> `+` ->
and add `<path_to_this_repository>/AssetProvider/dbl-exts-asset/exts` to the extension search paths.
## 2. Format: USD
Our digital robots are meticulously crafted and well-configured in .usd format, complete with physics, rigid bodies, and joints. This ensures a realistic and immersive experience when interacting with the robots within Omniverse.
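As a rough sketch of how such an asset can be referenced onto a stage with the USD Python API (the robot file path below is a placeholder, not an actual asset from the collection):
```python
from pxr import Usd
# create a stage and reference a downloaded Digital Bot Lab robot asset onto a prim
stage = Usd.Stage.CreateNew("scene.usda")
robot_prim = stage.DefinePrim("/World/Robot", "Xform")
robot_prim.GetReferences().AddReference("path/to/robot.usd")
stage.Save()
```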
## 3. ROS <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="Ros" width="70" height="70">
The Insiderobo Connector is built upon the foundation of the Robot Operating System (ROS), an open-source framework that empowers researchers and developers to easily build and reuse code across various robotics applications. This integration allows for enhanced collaboration, accelerated development, and seamless integration of digital robots into the Omniverse ecosystem.
## 4. License
Our project adheres to the Robot Operating System (ROS) framework, which enables us to develop and integrate robotic systems efficiently. We are proud to announce that our project is released under the BSD 3.0 license. This license ensures that our software is open-source, allowing users to freely use, modify, and distribute it while maintaining the necessary attribution and disclaimer requirements. By embracing ROS and the BSD 3.0 license, we aim to foster collaboration and innovation within the robotics community.
| 2,893 | Markdown | 52.592592 | 521 | 0.790529 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/PACKAGE-LICENSES/omni.kit.browser.asset_provider.sketchfab-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/PACKAGE-LICENSES/dependencies/pybind11-LICENSE.md | Copyright (c) 2016 Wenzel Jakob <[email protected]>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Please also refer to the file CONTRIBUTING.md, which clarifies licensing of
external contributions to this project including patches, pull requests, etc.
| 1,676 | Markdown | 54.899998 | 79 | 0.809666 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/PACKAGE-LICENSES/dependencies/fmt-LICENSE.md | Copyright (c) 2012 - 2016, Victor Zverovich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,310 | Markdown | 53.624998 | 79 | 0.809924 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/config/extension.toml | [package]
authors = ["NVIDIA"]
category = "services"
changelog = "docs/CHANGELOG.md"
description = "Digital Bot Lab Asset Provider"
icon = "data/digitalbotlab.png"
keywords = ["asset", "provider", "sketchfab"]
preview_image = "data/preview.png"
readme = "docs/README.md"
repository = ""
title = "Asset Provider for Digital Bot Lab"
version = "1.0.10"
[dependencies]
"omni.services.browser.asset" = {}
[[python.module]]
name = "omni.asset_provider.sketchfab"
[settings]
exts."omni.asset_provider.digitalbotlab".enable = true
exts."omni.asset_provider.digitalbotlab".providerId = "Digital_Bot_Lab"
exts."omni.asset_provider.digitalbotlab".keepOriginalPageSize = true
exts."omni.asset_provider.digitalbotlab".maxCountPerPage = 24
exts."omni.asset_provider.digitalbotlab".minThumbnailSize = 256
exts."omni.asset_provider.digitalbotlab".searchUrl = "https://api.sketchfab.com/v3/search"
exts."omni.asset_provider.digitalbotlab".modelsUrl = "https://api.sketchfab.com/v3/models"
exts."omni.asset_provider.digitalbotlab".authorizeUrl = "https://sketchfab.com/oauth2/authorize/"
exts."omni.asset_provider.digitalbotlab".accessTokenUrl = "https://sketchfab.com/oauth2/token/"
exts."omni.asset_provider.digitalbotlab".clientId = "eQcrihd32CeYmF9evsYEGXZr8vynHA82DW7SzJw2"
exts."omni.asset_provider.digitalbotlab".clientSecret = ""
[[test]]
dependencies = ["omni.services.client", "omni.client"]
| 1,389 | TOML | 38.714285 | 97 | 0.768179 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/config/extension.gen.toml |
[package]
archivePath = "http://d4i3qtqj3r0z5.cloudfront.net/omni.asset_provider.digitalbotlab-1.0.10.zip"
repository = "https://gitlab-master.nvidia.com/omniverse/kit-extensions/kit-browsers"
[package.publish]
date = 1669118155
kitVersion = "104.0+release.95826.4b36ab32.tc"
buildNumber = "101.1.0+master.1103.15b266ed.tc"
repoName = "kit-browsers"
[package.authors]
0 = "Yizhou Zhao <[email protected]>"
| 472 | TOML | 32.785712 | 100 | 0.677966 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/constants.py | SETTING_ROOT = "/exts/omni.asset_provider.digitalbotlab/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
| 105 | Python | 34.333322 | 57 | 0.761905 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/extension.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
import carb.settings
import omni.ui as ui
from omni.services.browser.asset import get_instance as get_asset_services
from .sketchfab import SketchFabAssetProvider
from .constants import SETTING_STORE_ENABLE
import asyncio
import aiohttp
class DigitalBotLabAssetProviderExtension(omni.ext.IExt):
""" Sketchfab Asset Provider extension.
"""
def on_startup(self, ext_id):
self._asset_provider = SketchFabAssetProvider()
self._asset_service = get_asset_services()
self._asset_service.register_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, True)
self._window = ui.Window("DBL Debug", width=300, height=300)
with self._window.frame:
with ui.VStack():
#ui.Label("Prim Path:", width = 100)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
ui.Button("Debug", height = 20, clicked_fn = self.debug_token)
def on_shutdown(self):
self._asset_service.unregister_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, False)
self._asset_provider = None
self._asset_service = None
def debug(self):
async def authenticate():
params = {"email": "[email protected]", "password": "97654321abc"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/auth/signin", json=params) as response:
self._auth_params = await response.json()
print("auth_params", self._auth_params)
self.token = self._auth_params["token"]
asyncio.ensure_future(authenticate())
def debug_token(self):
async def verify_token():
params = {"token": self.token, "asset": "test"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/omniverse/download", json=params) as response:
response = await response.json()
print("response", response)
asyncio.ensure_future(verify_token()) | 2,660 | Python | 41.238095 | 113 | 0.644737 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .extension import *
| 451 | Python | 49.222217 | 76 | 0.809313 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/sketchfab.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""SketchFab asset store implementation."""
from typing import Dict, List, Optional, Union, Tuple, Callable
import carb
import carb.settings
import os
import asyncio
import omni.client
from omni.services.browser.asset import BaseAssetStore, AssetModel, SearchCriteria, ProviderModel
from .constants import SETTING_ROOT, SETTING_STORE_ENABLE
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data")
class SketchFabAssetProvider(BaseAssetStore):
"""
SketchFab asset provider implementation.
For documentation on the search API, see the online interactive API at:
https://docs.sketchfab.com/data-api/v3/index.html#!/search/get_v3_search_type_models
.. note:
        SketchFab does not return search results if no search query has been provided. In other words, navigating through
the pre-defined categories will not display any results from SketchFab, as no search terms have been submitted in
that context.
"""
def __init__(self) -> None:
"""
Constructor.
Returns:
None
"""
print("DigitalBotLabAssetProvider.__init__")
settings = carb.settings.get_settings()
self._provider_id = settings.get_as_string(SETTING_ROOT + "providerId")
super().__init__(store_id=self._provider_id)
self._keep_page_size = settings.get_as_bool(SETTING_ROOT + "keepOriginalPageSize")
self._max_count_per_page = settings.get_as_int(SETTING_ROOT + "maxCountPerPage")
self._min_thumbnail_size = settings.get_as_int(SETTING_ROOT + "minThumbnailSize")
self._search_url = settings.get_as_string(SETTING_ROOT + "searchUrl")
self._models_url = settings.get_as_string(SETTING_ROOT + "modelsUrl")
self._authorize_url = settings.get_as_string(SETTING_ROOT + "authorizeUrl")
self._access_token_url = settings.get_as_string(SETTING_ROOT + "accessTokenUrl")
self._client_id = settings.get_as_string(SETTING_ROOT + "clientId")
self._client_secret = settings.get_as_string(SETTING_ROOT + "clientSecret")
self._auth_params = None
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(
name=self._store_id, icon=f"{DATA_PATH}/logo.png", enable_setting=SETTING_STORE_ENABLE
)
def authorized(self) -> bool:
if self._auth_params:
return self._auth_params.get("access_token", None)
return False
async def authenticate(self, username: str, password: str):
params = {"grant_type": "password", "client_id": self._client_id, "username": username, "password": password}
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.post(self._access_token_url, params=params) as response:
self._auth_params = await response.json()
def get_access_token(self) -> str:
if self._auth_params:
return self._auth_params.get("access_token", None)
return None
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
assets: List[AssetModel] = []
if self._keep_page_size:
required_count = (
search_criteria.page.size
if search_criteria.page.size < self._max_count_per_page
else self._max_count_per_page
)
else:
required_count = search_criteria.page.size
params = {
"type": "models",
"q": "",
"downloadable": "true",
"cursor": (search_criteria.page.number - 1) * required_count,
"sort_by": "-likeCount"
}
if search_criteria.keywords:
params["q"] = " ".join(search_criteria.keywords)
if search_criteria.filter.categories:
category = self._pick_category(categories=search_criteria.filter.categories)
if category:
if params["q"] == "":
params["q"] = category.lower()
else:
params["q"] += f" {category.lower()}"
# if search_criteria.sort and len(search_criteria.sort) >= 2:
# sort_field, sort_order = search_criteria.sort
# # Add other properties if SketchFab supports more sorting options in the future.
# if sort_field in ["published_at"]:
# params["sort_by"] = sort_field
# if sort_order.lower() == "desc":
# params["sort_by"] = f"-likeCount"
# The SketchFab API limits the number of search results per page to at most 24
to_continue = True
while required_count > 0:
params["count"] = min(self._max_count_per_page, required_count)
(page_assets, to_continue) = await self._search_one_page(params)
if page_assets:
params["cursor"] += params["count"]
required_count -= params["count"]
assets.extend(page_assets)
if not to_continue:
break
else:
break
return (assets, to_continue)
async def _search_one_page(self, params: Dict) -> Tuple[List[AssetModel], bool]:
"""
Search one page. Max 24 assets.
Args:
params (Dict): Search parameters.
Returns:
List[AssetModel]: List of searched assets.
bool: True means more results to be searched. False means end of search.
"""
to_continue = True
items = []
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(self._search_url, params=params) as response:
results = await response.json()
cursors = results.get("cursors", {})
                # If no more results
to_continue = cursors["next"] is not None
items = results.get("results", [])
assets: List[AssetModel] = []
for item in items:
item_categories = [x.get("name", "") for x in item.get("categories", [])]
item_tags = [x.get("name", "") for x in item.get("tags", [])]
item_thumbnails = [x for x in item.get("thumbnails", {}).get("images", [])]
item_thumbnail = self._pick_most_appropriate_thumbnail(item_thumbnails)
if item.get("isDownloadable"):
download_url = f"{self._models_url}/{item.get('uid')}/download"
else:
download_url = ""
if item_thumbnail is not None:
assets.append(
AssetModel(
identifier=item.get("uid"),
name=item.get("name"),
version="",
published_at=item.get("publishedAt"),
categories=item_categories,
tags=item_tags,
vendor=self._provider_id,
download_url=download_url,
product_url=item.get("viewerUrl", ""),
# price=0.0, # SketchFab does not display price for assets when using the search API.
thumbnail=item_thumbnail,
)
)
return (assets, to_continue)
def _sanitize_categories(self, categories: List[str]) -> List[str]:
"""
Sanitize the given list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories to sanitize.
Returns:
List[str]: Sanitized category names from the given list of categories.
"""
sanitized_categories: List[str] = []
for category in categories:
if category.startswith("/"):
category = category[1:]
category_keywords = category.split("/")
sanitized_categories.extend(category_keywords)
return sanitized_categories
def _pick_category(self, categories: List[str]) -> Optional[str]:
"""
Pick the most appropriate category from the list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories from which to pick the most appropriate
category for a search.
Returns:
Optional[str]: The most appropriate category from the given list of ``SearchCriteria`` categories, or
``None`` if no category could be identified.
"""
sanitized_categories = self._sanitize_categories(categories=categories)
if sanitized_categories:
return sanitized_categories[-1]
return None
def _pick_most_appropriate_thumbnail(self, thumbnails: List[Dict[str, Union[str, int]]]) -> Optional[str]:
"""
        Pick the most appropriate thumbnail URL from the list of provided image metadata about the asset.
Args:
thumbnails (): List of image metadata about the asset.
Returns:
Optional[str]: The URL of the image thumbnail to use for the asset, or ``None`` if no suitable thumbnail was
found.
"""
high_res_thumbnails: List[Dict[str, Union[str, int]]] = []
low_res_thumbnails: List[Dict[str, Union[str, int]]] = []
# Sort the thumbnails in 2 buckets (whether higher resolution than desired, or lower than desired):
for thumbnail in thumbnails:
thumbnail_width: Optional[int] = thumbnail.get("width")
thumbnail_height: Optional[int] = thumbnail.get("height")
if thumbnail_width is not None and thumbnail_height is not None:
if thumbnail_width >= self._min_thumbnail_size and thumbnail_height >= self._min_thumbnail_size:
high_res_thumbnails.append(thumbnail)
else:
low_res_thumbnails.append(thumbnail)
# Pick the most appropriate thumbnail within the list of high-res candidates:
if high_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = high_res_thumbnails[0]
for thumbnail in high_res_thumbnails:
if thumbnail.get("width") < candidate_thumbnail.get("width") and thumbnail.get(
"height"
) < candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
# Pick the largest thumbnail within the list of low-res candidates:
if low_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = low_res_thumbnails[0]
for thumbnail in low_res_thumbnails:
if thumbnail.get("width") > candidate_thumbnail.get("width") and thumbnail.get(
"height"
) > candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
return None
async def _download(self, asset: AssetModel, dest_url: str, on_progress_fn: Callable[[float], None] = None) -> Dict:
""" Downloads an asset from the asset store.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `download` function that will wrap this function in a timeout.
"""
ret_value = {"url": None}
if not (asset and asset.download_url):
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
return ret_value
import aiohttp
async with aiohttp.ClientSession() as session:
headers = {"Authorization": "Bearer %s" % self.get_access_token()}
async with session.get(asset.download_url, headers=headers) as response:
results = await response.json()
# Parse downloaded response; see https://sketchfab.com/developers/download-api/downloading-models
if "usdz" in results:
download_url = results["usdz"].get("url")
else:
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
carb.log_error(f"[{asset.name}] Invalid download url: {asset.download_url}!")
carb.log_info(f"addtional result: {results}")
return ret_value
content = bytearray()
# Download content from the given url
downloaded = 0
async with session.get(download_url) as response:
size = int(response.headers.get("content-length", 0))
if size > 0:
async for chunk in response.content.iter_chunked(1024 * 512):
content.extend(chunk)
downloaded += len(chunk)
if on_progress_fn:
on_progress_fn(float(downloaded) / size)
else:
if on_progress_fn:
on_progress_fn(0)
content = await response.read()
if on_progress_fn:
on_progress_fn(1)
if response.ok:
# Write to destination
filename = os.path.basename(download_url.split("?")[0])
dest_url = f"{dest_url}/{filename}"
(result, list_entry) = await omni.client.stat_async(dest_url)
if result == omni.client.Result.OK:
# If dest file already exists, use asset identifier in filename to different
dest_url = dest_url[:-5] + "_" + str(asset.identifier) + ".usdz"
ret_value["status"] = await omni.client.write_file_async(dest_url, content)
ret_value["url"] = dest_url
else:
carb.log_error(f"[{asset.name}] access denied: {download_url}")
ret_value["status"] = omni.client.Result.ERROR_ACCESS_DENIED
return ret_value
def destroy(self):
self._auth_params = None
| 14,630 | Python | 41.906158 | 120 | 0.582707 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/insiderobo_store.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""SketchFab asset store implementation."""
from typing import Dict, List, Optional, Union, Tuple, Callable
import carb
import carb.settings
import os
import asyncio
import omni.client
from omni.services.browser.asset import BaseAssetStore, AssetModel, SearchCriteria, ProviderModel
from .constants import SETTING_ROOT, SETTING_STORE_ENABLE
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data")
class DigitalBotLabAssetProvider(BaseAssetStore):
"""
    Digital Bot Lab asset provider implementation (adapted from the SketchFab provider).
For documentation on the search API, see the online interactive API at:
https://docs.sketchfab.com/data-api/v3/index.html#!/search/get_v3_search_type_models
.. note:
        SketchFab does not return search results if no search query has been provided. In other words, navigating through
the pre-defined categories will not display any results from SketchFab, as no search terms have been submitted in
that context.
"""
def __init__(self) -> None:
"""
Constructor.
Returns:
None
"""
print("DigitalBotLabAssetProvider.__init__")
settings = carb.settings.get_settings()
self._provider_id = settings.get_as_string(SETTING_ROOT + "providerId")
super().__init__(store_id=self._provider_id)
self._keep_page_size = settings.get_as_bool(SETTING_ROOT + "keepOriginalPageSize")
self._max_count_per_page = settings.get_as_int(SETTING_ROOT + "maxCountPerPage")
self._min_thumbnail_size = settings.get_as_int(SETTING_ROOT + "minThumbnailSize")
self._search_url = settings.get_as_string(SETTING_ROOT + "searchUrl")
self._models_url = settings.get_as_string(SETTING_ROOT + "modelsUrl")
self._authorize_url = settings.get_as_string(SETTING_ROOT + "authorizeUrl")
self._access_token_url = settings.get_as_string(SETTING_ROOT + "accessTokenUrl")
self._client_id = settings.get_as_string(SETTING_ROOT + "clientId")
self._client_secret = settings.get_as_string(SETTING_ROOT + "clientSecret")
self._auth_params = None
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(
name=self._store_id, icon=f"{DATA_PATH}/digitalbotlab.png", enable_setting=SETTING_STORE_ENABLE
)
def authorized(self) -> bool:
if self._auth_params:
return self._auth_params.get("access_token", None)
return False
async def authenticate(self, username: str, password: str):
params = {"grant_type": "password", "client_id": self._client_id, "username": username, "password": password}
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.post(self._access_token_url, params=params) as response:
self._auth_params = await response.json()
def get_access_token(self) -> str:
if self._auth_params:
return self._auth_params.get("access_token", None)
return None
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
assets: List[AssetModel] = []
if self._keep_page_size:
required_count = (
search_criteria.page.size
if search_criteria.page.size < self._max_count_per_page
else self._max_count_per_page
)
else:
required_count = search_criteria.page.size
params = {
"type": "models",
"q": "",
"downloadable": "true",
"cursor": (search_criteria.page.number - 1) * required_count,
"sort_by": "-likeCount"
}
if search_criteria.keywords:
params["q"] = " ".join(search_criteria.keywords)
if search_criteria.filter.categories:
category = self._pick_category(categories=search_criteria.filter.categories)
if category:
if params["q"] == "":
params["q"] = category.lower()
else:
params["q"] += f" {category.lower()}"
# if search_criteria.sort and len(search_criteria.sort) >= 2:
# sort_field, sort_order = search_criteria.sort
# # Add other properties if SketchFab supports more sorting options in the future.
# if sort_field in ["published_at"]:
# params["sort_by"] = sort_field
# if sort_order.lower() == "desc":
# params["sort_by"] = f"-likeCount"
# The SketchFab API limits the number of search results per page to at most 24
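        # Page through the results with a cursor, requesting at most `self._max_count_per_page` assets per call,
        # until the requested number of assets has been collected or the API reports no further pages.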
to_continue = True
while required_count > 0:
params["count"] = min(self._max_count_per_page, required_count)
(page_assets, to_continue) = await self._search_one_page(params)
if page_assets:
params["cursor"] += params["count"]
required_count -= params["count"]
assets.extend(page_assets)
if not to_continue:
break
else:
break
return (assets, to_continue)
async def _search_one_page(self, params: Dict) -> Tuple[List[AssetModel], bool]:
"""
Search one page. Max 24 assets.
Args:
params (Dict): Search parameters.
Returns:
List[AssetModel]: List of searched assets.
            bool: True if more results remain to be fetched; False if the end of the search has been reached.
"""
to_continue = True
items = []
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(self._search_url, params=params) as response:
results = await response.json()
cursors = results.get("cursors", {})
                # If there is no "next" cursor, there are no more results to fetch
                to_continue = cursors.get("next") is not None
items = results.get("results", [])
assets: List[AssetModel] = []
for item in items:
item_categories = [x.get("name", "") for x in item.get("categories", [])]
item_tags = [x.get("name", "") for x in item.get("tags", [])]
item_thumbnails = [x for x in item.get("thumbnails", {}).get("images", [])]
item_thumbnail = self._pick_most_appropriate_thumbnail(item_thumbnails)
if item.get("isDownloadable"):
download_url = f"{self._models_url}/{item.get('uid')}/download"
else:
download_url = ""
if item_thumbnail is not None:
assets.append(
AssetModel(
identifier=item.get("uid"),
name=item.get("name"),
version="",
published_at=item.get("publishedAt"),
categories=item_categories,
tags=item_tags,
vendor=self._provider_id,
download_url=download_url,
product_url=item.get("viewerUrl", ""),
# price=0.0, # SketchFab does not display price for assets when using the search API.
thumbnail=item_thumbnail,
)
)
return (assets, to_continue)
def _sanitize_categories(self, categories: List[str]) -> List[str]:
"""
Sanitize the given list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories to sanitize.
Returns:
List[str]: Sanitized category names from the given list of categories.
"""
sanitized_categories: List[str] = []
for category in categories:
if category.startswith("/"):
category = category[1:]
category_keywords = category.split("/")
sanitized_categories.extend(category_keywords)
return sanitized_categories
def _pick_category(self, categories: List[str]) -> Optional[str]:
"""
Pick the most appropriate category from the list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories from which to pick the most appropriate
category for a search.
Returns:
Optional[str]: The most appropriate category from the given list of ``SearchCriteria`` categories, or
``None`` if no category could be identified.
"""
sanitized_categories = self._sanitize_categories(categories=categories)
if sanitized_categories:
return sanitized_categories[-1]
return None
def _pick_most_appropriate_thumbnail(self, thumbnails: List[Dict[str, Union[str, int]]]) -> Optional[str]:
"""
        Pick the most appropriate thumbnail URL from the list of provided image metadata about the asset.
        Args:
            thumbnails (List[Dict[str, Union[str, int]]]): List of image metadata about the asset.
Returns:
Optional[str]: The URL of the image thumbnail to use for the asset, or ``None`` if no suitable thumbnail was
found.
"""
high_res_thumbnails: List[Dict[str, Union[str, int]]] = []
low_res_thumbnails: List[Dict[str, Union[str, int]]] = []
# Sort the thumbnails in 2 buckets (whether higher resolution than desired, or lower than desired):
for thumbnail in thumbnails:
thumbnail_width: Optional[int] = thumbnail.get("width")
thumbnail_height: Optional[int] = thumbnail.get("height")
if thumbnail_width is not None and thumbnail_height is not None:
if thumbnail_width >= self._min_thumbnail_size and thumbnail_height >= self._min_thumbnail_size:
high_res_thumbnails.append(thumbnail)
else:
low_res_thumbnails.append(thumbnail)
        # Pick the most appropriate thumbnail within the list of high-res candidates (the smallest one that still meets the minimum size):
if high_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = high_res_thumbnails[0]
for thumbnail in high_res_thumbnails:
if thumbnail.get("width") < candidate_thumbnail.get("width") and thumbnail.get(
"height"
) < candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
# Pick the largest thumbnail within the list of low-res candidates:
if low_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = low_res_thumbnails[0]
for thumbnail in low_res_thumbnails:
if thumbnail.get("width") > candidate_thumbnail.get("width") and thumbnail.get(
"height"
) > candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
return None
async def _download(self, asset: AssetModel, dest_url: str, on_progress_fn: Callable[[float], None] = None) -> Dict:
""" Downloads an asset from the asset store.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `download` function that will wrap this function in a timeout.
"""
ret_value = {"url": None}
if not (asset and asset.download_url):
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
return ret_value
import aiohttp
async with aiohttp.ClientSession() as session:
headers = {"Authorization": "Bearer %s" % self.get_access_token()}
async with session.get(asset.download_url, headers=headers) as response:
results = await response.json()
# Parse downloaded response; see https://sketchfab.com/developers/download-api/downloading-models
if "usdz" in results:
download_url = results["usdz"].get("url")
else:
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
carb.log_error(f"[{asset.name}] Invalid download url: {asset.download_url}!")
carb.log_info(f"addtional result: {results}")
return ret_value
content = bytearray()
# Download content from the given url
downloaded = 0
async with session.get(download_url) as response:
size = int(response.headers.get("content-length", 0))
if size > 0:
async for chunk in response.content.iter_chunked(1024 * 512):
content.extend(chunk)
downloaded += len(chunk)
if on_progress_fn:
on_progress_fn(float(downloaded) / size)
else:
if on_progress_fn:
on_progress_fn(0)
content = await response.read()
if on_progress_fn:
on_progress_fn(1)
if response.ok:
# Write to destination
filename = os.path.basename(download_url.split("?")[0])
dest_url = f"{dest_url}/{filename}"
(result, list_entry) = await omni.client.stat_async(dest_url)
if result == omni.client.Result.OK:
                    # If the dest file already exists, use the asset identifier in the filename to differentiate it
dest_url = dest_url[:-5] + "_" + str(asset.identifier) + ".usdz"
ret_value["status"] = await omni.client.write_file_async(dest_url, content)
ret_value["url"] = dest_url
else:
carb.log_error(f"[{asset.name}] access denied: {download_url}")
ret_value["status"] = omni.client.Result.ERROR_ACCESS_DENIED
return ret_value
def destroy(self):
self._auth_params = None
| 14,643 | Python | 41.944281 | 120 | 0.583077 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/tests/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited
from .test_sketchfab import *
| 456 | Python | 44.699996 | 76 | 0.809211 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/tests/test_sketchfab.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from typing import Any
import omni.kit.test
import aiohttp
from unittest.mock import patch
from typing import Dict
from omni.services.browser.asset import SearchCriteria, AssetModel
from ..sketchfab import SketchFabAssetProvider
class MockHeader:
def __init__(self):
pass
def get(self, attr: str, default: Any):
return default
class MockResponse:
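    # Minimal stand-in for an aiohttp response: supports async context management plus the `json()`,
    # `read()`, `ok`, and `headers` members used by the code under test.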
def __init__(self, json: Dict = {}, data: str = ""):
self._json = json
self._data = data
self.headers = MockHeader()
@property
def ok(self):
return True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
async def json(self) -> Dict:
return self._json
async def read(self) -> str:
return self._data
class SketchfabTestCase(omni.kit.test.AsyncTestCaseFailOnLogError):
VALID_USERNAME = "username"
VALID_PASSWORD = "password"
VALID_ACCESS_TOKEN = "access_token"
DOWNLOADED_CONTENT = "abc def"
def _mock_aiohttp_post_impl(self, url: str, params: Dict = None):
import carb.settings
from ..constants import SETTING_ROOT
settings = carb.settings.get_settings()
if url == settings.get_as_string(SETTING_ROOT + "accessTokenUrl"):
# Auth endpoint
if params["username"] == self.VALID_USERNAME and params["password"] == self.VALID_PASSWORD:
return MockResponse(json={"access_token": self.VALID_ACCESS_TOKEN})
else:
return MockResponse(json={"error": "invalid_grant", "error_description": "Invalid credentials given."})
return MockResponse(json={})
def _mock_aiohttp_get_impl(self, url: str, headers: Dict = None):
if headers is not None:
self.assertTrue(self.VALID_ACCESS_TOKEN in headers["Authorization"])
if url.endswith("download"):
return MockResponse(json={"usdz": {"url": url.split("?")[0]}})
else:
return MockResponse(data=self.DOWNLOADED_CONTENT)
async def _mock_write_file_impl(self, url: str, buffer):
return omni.client.Result.OK
    # NOTE: this test is disabled by default (the "notest_" prefix keeps it from being discovered) to avoid reaching out to SketchFab continuously during our tests.
async def notest_search_no_criteria(self):
"""Test listing first page assets."""
store = SketchFabAssetProvider()
RESULTS_COUNT = 50
(result, *_) = await store.search(search_criteria=SearchCriteria(), search_timeout=60)
self.assertEqual(len(result), RESULTS_COUNT)
async def test_authentication_succeeds(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = self.VALID_PASSWORD
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
self.assertTrue(under_test.authorized())
async def test_authentication_fails(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = "invalid_password"
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
self.assertFalse(under_test.authorized())
async def test_download_succeeds(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = self.VALID_PASSWORD
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
with patch.object(aiohttp.ClientSession, "get", side_effect=self._mock_aiohttp_get_impl):
with patch("omni.client.write_file_async", side_effect=self._mock_write_file_impl) as mock_write_file:
asset = AssetModel(
identifier="1c54053d-49dd-4e18-ba46-abbe49a905b0",
name="car-suv-1",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/suv"],
tags=["vehicle", "cars", "suv"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-suv-1.usdz?download",
product_url="https://acme.org/products/purchase/car-suv-1",
price=10.99,
thumbnail="https://images.com/thumbnails/256x256/car-suv-1.png",
)
dest_url = "C:/Users/user/Downloads"
results = await under_test.download(asset, dest_url)
expected_filename = os.path.basename(asset.download_url.split("?")[0])
expected_url = f"{dest_url}/{expected_filename}"
mock_write_file.assert_called_once_with(expected_url, self.DOWNLOADED_CONTENT)
self.assertEqual(results["status"], omni.client.Result.OK)
self.assertEqual(results["url"], expected_url)
| 5,692 | Python | 38.811189 | 119 | 0.636683 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.0.1] - 2023-09-13
### Changed
- Initialize extension
| 312 | Markdown | 30.299997 | 168 | 0.730769 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/README.md | # Insiderobo Asset Provider [omni.asset_provider.digitalbotlab] | 63 | Markdown | 62.999937 | 63 | 0.84127 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/index.rst | Base Job Facility
#################
This extension offers a facility providing base job features.
:platform: windows-x86_64, linux-x86_64
:members:
:private-members:
:special-members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 304 | reStructuredText | 20.785713 | 60 | 0.661184 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/insiderobo/license/extension.py | import omni.ext
import omni.ui as ui
import omni.usd
from pxr import Sdf
import os
# import omni.kit.window.file
from .params import LICENSE2PATH
# Functions and vars are available to other extensions as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[insiderobo.license] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class InsideroboLicenseExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[insiderobo.license] insiderobo license startup")
self._count = 0
self._window = ui.Window("Insiderobo License", width=300, height=300)
with self._window.frame:
with ui.VStack():
with ui.HStack(height = 20):
ui.Label("Prim Path:", width = 100)
self.prim_path_ui = ui.StringField()
with ui.HStack(height = 20):
ui.Label("License Name:", width = 100)
self.license_name_ui = ui.StringField()
self.license_name_ui.model.set_value("kinova")
with ui.HStack(height = 20):
ui.Label("License Path:", width = 100)
# self.license_path_ui = ui.StringField()
# self.license_name = self.license_name_ui.model.get_value_as_string()
# self.license_path_ui.model.set_value(LICENSE2PATH[self.license_name])
ui.Button("Add License to Prim", height = 20, clicked_fn=self.add_license)
def add_license(self):
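        # Read the selected license text from disk and store it on the target prim as a custom string
        # attribute named "<license_name>_license", then save the stage.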
print("adding license")
stage = omni.usd.get_context().get_stage()
prim_path = self.prim_path_ui.model.get_value_as_string()
# if the prim path is empty, use the default prim path
if prim_path == "":
prim_path = stage.GetDefaultPrim().GetPath().pathString
self.license_name = self.license_name_ui.model.get_value_as_string()
license_path = LICENSE2PATH[self.license_name] #self.license_path_ui.model.get_value_as_string()
prim = stage.GetPrimAtPath(prim_path)
# load the license file into string
license_file = open(license_path, "r")
license_text = license_file.read()
print("license text: ", license_text)
attribute_name = f"{self.license_name}_license"
if not prim.HasAttribute(attribute_name):
# create a new attribute on the prim
prim.CreateAttribute(attribute_name, Sdf.ValueTypeNames.String, False).Set(license_text)
# save the stage
omni.usd.get_context().get_stage().Save()
license_file.close()
def debug(self):
print("[insiderobo.license] insiderobo license debug: ")
def on_shutdown(self):
print("[insiderobo.license] insiderobo license shutdown")
| 3,364 | Python | 38.588235 | 119 | 0.615933 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/insiderobo/license/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/insiderobo/license/params.py | # parameters
import os
# Get the absolute path of the current script file
script_path = os.path.abspath(__file__)
# Get the directory of the script file
script_directory = os.path.dirname(script_path)
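# Map each supported license name to its bundled license text file (expects a "licences" folder next to this script).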
LICENSE2PATH = {
"kinova": os.path.join(script_directory, "licences", "KINOVA_LICENSE.txt"),
"ufactory": os.path.join(script_directory, "licences", "UFACTORY_LICENSE.txt"),
"digitalbotlab": os.path.join(script_directory, "licences", "DIGITALBOTLAB_EXTENDED_LICENSE.txt"),
"universal_robot": os.path.join(script_directory, "licences", "UR_LICENESE_INFO.txt"),
"franka": os.path.join(script_directory, "licences", "FRANKA_LICENSE.txt"),
} | 660 | Python | 40.312497 | 102 | 0.719697 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "insiderobo license"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import insiderobo.license".
[[python.module]]
name = "insiderobo.license"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,580 | TOML | 31.937499 | 118 | 0.746835 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/docs/README.md | # Python Extension Example [insiderobo.license]
This is an example of a pure Python Kit extension. It is intended to be copied and to serve as a template for creating new extensions.
| 177 | Markdown | 34.599993 | 126 | 0.79096 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/docs/index.rst | insiderobo.license
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"insiderobo.license"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 337 | reStructuredText | 15.095237 | 43 | 0.623145 |
DigitalBotLab/3DRotationCalculator/README.md | # 3DRotationCalculator | 22 | Markdown | 21.999978 | 22 | 0.909091 |
DigitalBotLab/3DRotationCalculator/Extension/README.md | # Extension Project Template
This project was automatically generated.
- `app` - A folder link to the location of your *Omniverse Kit* based app.
- `exts` - A folder where you can add new extensions. It was automatically added to the extension search path (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will make the Python experience better.
Look for the "rotaiton.calculator" extension in the Extension Manager and enable it. Try applying changes to any Python files; they will hot-reload and you can observe the results immediately.
Alternatively, you can launch your app from the console with this folder added to the search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable company.hello.world
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script to do this is included.
Run:
```
> link_app.bat
```
If successful, you should see an `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder containing the extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developer manual.
To add a link to your *Omniverse Kit* based app, go to: Extension Manager -> Gear Icon -> Extension Search Path
| 2,043 | Markdown | 37.566037 | 258 | 0.757709 |
DigitalBotLab/3DRotationCalculator/Extension/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
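    # Query the local Omniverse Launcher HTTP endpoint for installed components and return a mapping of
    # app slug -> (name, install root) for the latest installed version of each app.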
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |