edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/deployment.py

class DeploymentInfo:
def __init__(self, version=None, has_deployment=False):
self.version = version
self.has_deployment = has_deployment
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/classifier.py

import asyncio
import subprocess
import uuid
from enum import Enum, auto
import os
import tempfile
import shutil
import numpy as np
import glob
from omni.kit.widget.viewport.capture import ByteCapture
import omni.isaac.core.utils.viewports as vp
import ctypes
from PIL import Image, ImageDraw, ImageFont
from concurrent.futures import ThreadPoolExecutor
import yaml
from .utils import get_models_directory, is_node_installed
class ClassifierError(Enum):
SUCCESS = auto()
NODEJS_NOT_INSTALLED = auto()
FAILED_TO_RETRIEVE_PROJECT_ID = auto()
MODEL_DEPLOYMENT_NOT_AVAILABLE = auto()
FAILED_TO_DOWNLOAD_MODEL = auto()
FAILED_TO_PROCESS_VIEWPORT = auto()
FAILED_TO_PROCESS_CLASSIFY_RESULT = auto()
class Classifier:
def __init__(
self, rest_client, project_id, impulse_image_height, impulse_image_width, log_fn
):
self.rest_client = rest_client
self.project_id = project_id
self.model_ready = False
self.model_path = os.path.expanduser(f"{get_models_directory()}/model.zip")
self.featuresTmpFile = None
self.log_fn = log_fn
self.impulse_image_height = impulse_image_height
self.impulse_image_width = impulse_image_width
self.image = None
self.original_width = None
self.original_height = None
self.new_width = None
self.new_height = None
self.output_image_path = None
async def __check_and_update_model(self):
if not is_node_installed():
self.log_fn("Error: NodeJS not installed")
return ClassifierError.NODEJS_NOT_INSTALLED
deployment_info = await self.rest_client.get_deployment_info(self.project_id)
if not deployment_info:
self.log_fn("Error: Failed to get deployment info")
return ClassifierError.MODEL_DEPLOYMENT_NOT_AVAILABLE
current_version = deployment_info.version
model_dir_name = f"ei-model-{self.project_id}-{current_version}"
model_dir = os.path.join(os.path.dirname(self.model_path), model_dir_name)
# Check if the model directory exists and its version
if os.path.exists(model_dir):
self.log_fn("Latest model version already downloaded.")
self.model_ready = True
return ClassifierError.SUCCESS
# If the model directory for the current version does not exist, delete old versions and download the new one
self.__delete_old_models(os.path.dirname(self.model_path), model_dir_name)
self.log_fn("Downloading model...")
model_content = await self.rest_client.download_model(self.project_id)
if model_content is not None:
self.__save_model(model_content, model_dir)
self.model_ready = True
self.log_fn("Model is ready for classification.")
return ClassifierError.SUCCESS
else:
self.log_fn("Error: Failed to download the model")
return ClassifierError.FAILED_TO_DOWNLOAD_MODEL
def __delete_old_models(self, parent_dir, exclude_dir_name):
for dirname in os.listdir(parent_dir):
if dirname.startswith("ei-model-") and dirname != exclude_dir_name:
dirpath = os.path.join(parent_dir, dirname)
shutil.rmtree(dirpath)
self.log_fn(f"Deleted old model directory: {dirpath}")
def __save_model(self, model_content, model_dir):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
self.log_fn(f"'{model_dir}' directory created")
model_zip_path = os.path.join(model_dir, "model.zip")
with open(model_zip_path, "wb") as model_file:
model_file.write(model_content)
self.log_fn(f"Model zip saved to {model_zip_path}")
# Extract the zip file
shutil.unpack_archive(model_zip_path, model_dir)
os.remove(model_zip_path)
self.log_fn(f"Model extracted to {model_dir}")
def __resize_image_and_extract_features(
self, image, target_width, target_height, channel_count
):
# Resize the image
self.image = image.resize(
(target_width, target_height), Image.Resampling.LANCZOS
)
# Convert the image to the required color space
if channel_count == 3: # RGB
self.image = self.image.convert("RGB")
img_array = np.array(self.image)
# Extract RGB features as hexadecimal values
features = [
"0x{:02x}{:02x}{:02x}".format(*pixel)
for pixel in img_array.reshape(-1, 3)
]
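            # e.g. an RGB pixel (255, 128, 0) becomes the string "0xff8000"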
elif channel_count == 1: # Grayscale
self.image = self.image.convert("L")
img_array = np.array(self.image)
# Repeat the grayscale values to mimic the RGB structure
features = [
"0x{:02x}{:02x}{:02x}".format(pixel, pixel, pixel)
for pixel in img_array.flatten()
]
        else:
            raise ValueError(f"Unsupported channel count: {channel_count}")
        return {
"features": features,
"originalWidth": image.width,
"originalHeight": image.height,
"newWidth": target_width,
"newHeight": target_height,
}
async def __capture_and_process_image(self):
def on_capture_completed(buffer, buffer_size, width, height, format):
try:
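                # 4 bytes per pixel: the viewport capture buffer is RGBA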
image_size = width * height * 4
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.POINTER(
ctypes.c_byte * image_size
)
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [
ctypes.py_object,
ctypes.c_char_p,
]
content = ctypes.pythonapi.PyCapsule_GetPointer(buffer, None)
pointer = ctypes.cast(
content, ctypes.POINTER(ctypes.c_byte * image_size)
)
np_arr = np.ctypeslib.as_array(pointer.contents)
image = Image.frombytes("RGBA", (width, height), np_arr.tobytes())
self.image = image
# Directly use the image for resizing and feature extraction
target_width = self.impulse_image_width
target_height = self.impulse_image_height
channel_count = 3 # 3 for RGB, 1 for grayscale
resized_info = self.__resize_image_and_extract_features(
image, target_width, target_height, channel_count
)
features = resized_info["features"]
features_str = ",".join(features)
self.original_width = resized_info["originalWidth"]
self.original_height = resized_info["originalHeight"]
self.new_width = resized_info["newWidth"]
self.new_height = resized_info["newHeight"]
try:
with tempfile.NamedTemporaryFile(
delete=False, suffix=".txt", mode="w+t"
) as tmp_file:
tmp_file.write(features_str)
self.featuresTmpFile = tmp_file.name
self.log_fn(f"Features saved to {tmp_file.name}")
except Exception as e:
self.log_fn(f"Error: Failed to save features to file: {e}")
except Exception as e:
self.log_fn(f"Error: Failed to process and save image: {e}")
viewport_window_id = vp.get_id_from_index(0)
viewport_window = vp.get_window_from_id(viewport_window_id)
viewport_api = viewport_window.viewport_api
capture_delegate = ByteCapture(on_capture_completed)
capture = viewport_api.schedule_capture(capture_delegate)
await capture.wait_for_result()
async def __run_subprocess(self, command, cwd):
"""Run the given subprocess command in a thread pool and capture its output."""
loop = asyncio.get_running_loop()
def subprocess_run():
# Execute the command and capture output
return subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=cwd,
)
with ThreadPoolExecutor() as pool:
# Run the blocking operation in the executor
result = await loop.run_in_executor(pool, subprocess_run)
return result
    # TODO The logic to normalize bounding boxes is not right, so we simply display
    # the boxes on the resized image directly
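    # Worked example of the arithmetic as coded (illustrative numbers): a
    # 640x480 capture resized to 320x320 gives orig_factor = 640/480 >
    # new_factor = 1, so factor = 320/640 = 0.5 and offset_y =
    # (320 - 480 * 0.5) / 2 = 40; a box at resized y = 60 would map back to
    # (60 - 40) / 0.5 = 40 on the original image.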
def __normalize_bounding_boxes(self, bounding_boxes):
orig_factor = self.original_width / self.original_height
new_factor = self.new_width / self.new_height
if orig_factor > new_factor:
# Boxed in with bands top/bottom
factor = self.new_width / self.original_width
offset_x = 0
offset_y = (self.new_height - (self.original_height * factor)) / 2
elif orig_factor < new_factor:
# Boxed in with bands left/right
factor = self.new_height / self.original_height
offset_x = (self.new_width - (self.original_width * factor)) / 2
offset_y = 0
else:
# Image was already at the right aspect ratio
factor = self.new_width / self.original_width
offset_x = 0
offset_y = 0
# Adjust bounding boxes
for bb in bounding_boxes:
bb["x"] = round((bb["x"] - offset_x) / factor)
bb["width"] = round(bb["width"] / factor)
bb["y"] = round((bb["y"] - offset_y) / factor)
bb["height"] = round(bb["height"] / factor)
return bounding_boxes
def __draw_bounding_boxes_and_save(self, bounding_boxes):
draw = ImageDraw.Draw(self.image)
try:
font = ImageFont.truetype("arial.ttf", 10)
except IOError:
font = ImageFont.load_default()
print("Custom font not found. Using default font.")
# Loop through the bounding boxes and draw them along with labels and confidence values
for box in bounding_boxes:
x, y, width, height = box["x"], box["y"], box["width"], box["height"]
label = box["label"]
confidence = box["value"]
# Draw the bounding box rectangle
draw.rectangle(((x, y), (x + width, y + height)), outline="red", width=2)
# Prepare the label text with confidence
label_text = f"{label} ({confidence:.2f})"
# Calculate the text position to appear above the bounding box
text_width, text_height = draw.textsize(label_text, font=font)
text_x = x + 5 # A small offset from the left edge of the bounding box
text_y = y - text_height # Above the bounding box with a small offset
# Ensure the text is not drawn outside the image
if text_y < 0:
text_y = (
y + height + 5
) # Below the bounding box if there is no space above
# Draw the text background
draw.rectangle(
((text_x - 2, text_y), (text_x + text_width + 2, text_y + text_height)),
fill="red",
)
# Draw the label text with confidence
draw.text((text_x, text_y), label_text, fill="white", font=font)
# Save the image
random_file_name = f"captured_with_bboxes_{uuid.uuid4()}.png"
self.output_image_path = os.path.join(tempfile.gettempdir(), random_file_name)
self.image.save(self.output_image_path)
self.log_fn(
f"Image with bounding boxes and labels saved at {self.output_image_path}"
)
async def classify(self):
self.log_fn("Checking and updating model...")
result = await self.__check_and_update_model()
if result != ClassifierError.SUCCESS:
self.log_fn(f"Failed to update model: {result.name}")
return result, None # Return None or an empty list for the bounding boxes
self.log_fn("Capturing and processing image...")
await self.__capture_and_process_image()
try:
model_dirs = glob.glob(
os.path.join(os.path.dirname(self.model_path), "ei-model-*")
)
if model_dirs:
latest_model_dir = max(model_dirs, key=os.path.getctime)
self.log_fn(f"Using latest model directory: {latest_model_dir}")
if self.featuresTmpFile:
script_dir = os.path.join(latest_model_dir, "node")
self.log_fn(f"Running inference on {self.featuresTmpFile}")
command = ["node", "run-impulse.js", self.featuresTmpFile]
# Run subprocess and capture its output
process_result = await self.__run_subprocess(command, script_dir)
if process_result.returncode == 0:
self.log_fn(f"{process_result.stdout}")
# Attempt to find the start of the JSON content
try:
json_start_index = process_result.stdout.find("{")
if json_start_index == -1:
self.log_fn(
"Error: No JSON content found in subprocess output."
)
return (
ClassifierError.FAILED_TO_PROCESS_CLASSIFY_RESULT,
None,
)
json_content = process_result.stdout[json_start_index:]
                            # Use YAML loader to parse the JSON content. We cannot use json directly
                            # because the output of run-impulse.js is not well-formed JSON
                            # (i.e. property names lack double quotes)
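                            # Example of such output (hypothetical values; keys
                            # unquoted, so json.loads would reject it):
                            #   { results: [ { label: cup, value: 0.87,
                            #     x: 12, y: 30, width: 48, height: 40 } ] }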
output_dict = yaml.load(
json_content, Loader=yaml.SafeLoader
)
if "results" in output_dict:
output_dict["bounding_boxes"] = output_dict.pop(
"results"
)
self.__draw_bounding_boxes_and_save(
output_dict["bounding_boxes"]
)
return (
ClassifierError.SUCCESS,
self.output_image_path,
)
else:
self.log_fn(
"Error: classifier output does not contain 'results' key."
)
return (
ClassifierError.FAILED_TO_PROCESS_CLASSIFY_RESULT,
None,
)
except yaml.YAMLError as e:
self.log_fn(
f"Error parsing classification results with YAML: {e}"
)
return (
ClassifierError.FAILED_TO_PROCESS_CLASSIFY_RESULT,
None,
)
else:
self.log_fn(f"Classification failed: {process_result.stderr}")
return ClassifierError.FAILED_TO_PROCESS_CLASSIFY_RESULT, None
else:
self.log_fn("No model directory found.")
return ClassifierError.FAILED_TO_DOWNLOAD_MODEL, None
except subprocess.CalledProcessError as e:
self.log_fn(f"Error executing model classification: {e.stderr}")
return ClassifierError.FAILED_TO_DOWNLOAD_MODEL, None
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/extension.py

# SPDX-License-Identifier: Apache-2.0
import omni.ext
import omni.ui as ui
from omni.kit.window.file_importer import get_file_importer
import asyncio
from .config import Config
from .uploader import upload_data
from .classifier import Classifier
from .state import State
from .client import EdgeImpulseRestClient
class EdgeImpulseExtension(omni.ext.IExt):
config = Config()
classifier = None
def on_startup(self, ext_id):
print("[edgeimpulse.dataingestion] Edge Impulse Extension startup")
self.config.print_config_info()
# Load the last known state from the config
saved_state_name = self.config.get_state()
try:
self.state = State[saved_state_name]
except KeyError:
self.state = State.NO_PROJECT_CONNECTED
self.reset_to_initial_state()
self._window = ui.Window("Edge Impulse", width=300, height=300)
with self._window.frame:
with ui.VStack():
self.no_project_content_area = ui.VStack(
spacing=15, height=0, visible=False
)
self.project_connected_content_area = ui.VStack(
spacing=15, height=0, visible=False
)
self.setup_ui_project_connected()
self.setup_ui_no_project_connected()
self.transition_to_state(self.state)
def reset_to_initial_state(self):
self.project_id = None
self.api_key = None
self.project_name = None
self.impulse = None
self.upload_logs_text = ""
self.uploading = False
self.classify_logs_text = ""
self.classifying = False
self.training_samples = 0
self.testing_samples = 0
self.anomaly_samples = 0
self.impulse_info = None
self.deployment_info = None
def transition_to_state(self, new_state):
"""
Transition the extension to a new state.
:param new_state: The new state to transition to.
"""
# Store the new state in memory
self.state = new_state
# Save the new state in the configuration
self.config.set_state(self.state)
# Call the corresponding UI setup method based on the new state
if self.state == State.PROJECT_CONNECTED:
self.project_id = self.config.get("project_id")
self.project_name = self.config.get("project_name")
self.api_key = self.config.get("project_api_key")
self.rest_client = EdgeImpulseRestClient(self.api_key)
self.project_info_label.text = (
f"Connected to project {self.project_id} ({self.project_name})"
)
self.update_ui_visibility()
def update_ui_visibility(self):
"""Update UI visibility based on the current state."""
if hasattr(self, "no_project_content_area") and hasattr(
self, "project_connected_content_area"
):
self.no_project_content_area.visible = (
self.state == State.NO_PROJECT_CONNECTED
)
self.project_connected_content_area.visible = (
self.state == State.PROJECT_CONNECTED
)
def setup_ui_no_project_connected(self):
with self.no_project_content_area:
# Title and welcome message
ui.Label(
"Welcome to Edge Impulse for NVIDIA Omniverse",
height=20,
word_wrap=True,
)
ui.Label(
"1. Create a free Edge Impulse account: https://studio.edgeimpulse.com/",
height=20,
word_wrap=True,
)
# API Key input section
with ui.VStack(height=20, spacing=10):
ui.Label(
"2. Connect to your Edge Impulse project by setting your API Key",
width=300,
)
with ui.HStack():
ui.Spacer(width=3)
ei_api_key = ui.StringField(name="ei_api_key", height=20)
ui.Spacer(width=3)
ui.Spacer(width=30)
with ui.HStack(height=20):
ui.Spacer(width=30)
# Connect button
connect_button = ui.Button("Connect")
connect_button.set_clicked_fn(
lambda: asyncio.ensure_future(
self.validate_and_connect_project(
ei_api_key.model.get_value_as_string()
)
)
)
ui.Spacer(width=30)
self.error_message_label = ui.Label(
"", height=20, word_wrap=True, visible=False
)
def setup_ui_project_connected(self):
with self.project_connected_content_area:
# Project information
self.project_info_label = ui.Label(
f"Connected to project {self.project_id} ({self.project_name})",
height=20,
word_wrap=True,
)
# Disconnect button
with ui.HStack(height=20):
ui.Spacer(width=30)
disconnect_button = ui.Button("Disconnect")
disconnect_button.set_clicked_fn(lambda: self.disconnect())
ui.Spacer(width=30)
# Data Upload Section
self.setup_data_upload_ui()
# Classification Section
self.setup_classification_ui()
def hide_error_message(self):
if self.error_message_label:
self.error_message_label.text = ""
self.error_message_label.visible = False
def display_error_message(self, message):
if self.error_message_label:
self.error_message_label.text = message
self.error_message_label.visible = True
async def validate_and_connect_project(self, api_key):
self.hide_error_message()
self.rest_client = EdgeImpulseRestClient(api_key)
project_info = await self.rest_client.get_project_info()
if project_info:
print(f"Connected to project: {project_info}")
self.config.set("project_id", project_info["id"])
self.config.set("project_name", project_info["name"])
self.config.set("project_api_key", api_key)
self.transition_to_state(State.PROJECT_CONNECTED)
else:
# Display an error message in the current UI
self.display_error_message(
"Failed to connect to the project. Please check your API key."
)
def disconnect(self):
print("Disconnecting")
self.reset_to_initial_state()
self.config.set("project_id", None)
self.config.set("project_name", None)
self.config.set("project_api_key", None)
self.classify_button.visible = False
self.ready_for_classification.visible = False
self.data_collapsable_frame.collapsed = True
self.classification_collapsable_frame.collapsed = True
self.transition_to_state(State.NO_PROJECT_CONNECTED)
### Data ingestion
def setup_data_upload_ui(self):
self.data_collapsable_frame = ui.CollapsableFrame(
"Data Upload", collapsed=True, height=0
)
self.data_collapsable_frame.set_collapsed_changed_fn(
lambda c: asyncio.ensure_future(self.on_data_upload_collapsed_changed(c))
)
with self.data_collapsable_frame:
with ui.VStack(spacing=10, height=0):
with ui.VStack(height=0, spacing=5):
self.training_samples_label = ui.Label(
f"Training samples: {self.training_samples}"
)
self.testing_samples_label = ui.Label(
f"Test samples: {self.testing_samples}"
)
self.anomaly_samples_label = ui.Label(
f"Anomaly samples: {self.anomaly_samples}"
)
with ui.HStack(height=20):
ui.Spacer(width=3)
ui.Label("Data Path", width=70)
ui.Spacer(width=8)
data_path = self.config.get("data_path", "No folder selected")
self.data_path_display = ui.Label(data_path, width=250)
ui.Spacer(width=10)
ui.Button("Select Folder", clicked_fn=self.select_folder, width=150)
ui.Spacer(width=3)
with ui.HStack(height=20):
ui.Spacer(width=3)
ui.Label("Dataset", width=70)
ui.Spacer(width=8)
dataset_types = ["training", "testing", "anomaly"]
self.dataset_type_dropdown = ui.ComboBox(0, *dataset_types)
self.dataset_type_subscription = (
self.dataset_type_dropdown.model.subscribe_item_changed_fn(
self.on_dataset_type_changed
)
)
initial_dataset_type = self.config.get("dataset_type", "training")
if initial_dataset_type in dataset_types:
for i, dtype in enumerate(dataset_types):
if dtype == initial_dataset_type:
self.dataset_type_dropdown.model.get_item_value_model().as_int = (
i
)
break
ui.Spacer(width=3)
with ui.HStack(height=20):
self.upload_button = ui.Button(
"Upload to Edge Impulse", clicked_fn=lambda: self.start_upload()
)
# Scrolling frame for upload logs
self.upload_logs_frame = ui.ScrollingFrame(height=100, visible=False)
with self.upload_logs_frame:
self.upload_logs_label = ui.Label("", word_wrap=True)
with ui.HStack(height=20):
self.clear_upload_logs_button = ui.Button(
"Clear Logs", clicked_fn=self.clear_upload_logs, visible=False
)
async def on_data_upload_collapsed_changed(self, collapsed):
if not collapsed:
await self.get_samples_count()
def select_folder(self):
def import_handler(filename: str, dirname: str, selections: list = []):
if dirname:
self.data_path_display.text = dirname
EdgeImpulseExtension.config.set("data_path", dirname)
else:
print("No folder selected")
file_importer = get_file_importer()
file_importer.show_window(
title="Select Data Folder",
show_only_folders=True,
import_handler=import_handler,
import_button_label="Select",
)
def on_dataset_type_changed(
self, item_model: ui.AbstractItemModel, item: ui.AbstractItem
):
value_model = item_model.get_item_value_model(item)
current_index = value_model.as_int
dataset_type = ["training", "testing", "anomaly"][current_index]
self.config.set("dataset_type", dataset_type)
def get_dataset_type(self):
selected_index = self.dataset_type_dropdown.model.get_value_as_int()
dataset_types = ["training", "testing", "anomaly"]
return dataset_types[selected_index]
def add_upload_logs_entry(self, message):
self.upload_logs_text += message + "\n"
self.upload_logs_label.text = self.upload_logs_text
self.update_clear_upload_logs_button_visibility()
def clear_upload_logs(self):
self.upload_logs_text = ""
self.upload_logs_label.text = self.upload_logs_text
self.update_clear_upload_logs_button_visibility()
def update_clear_upload_logs_button_visibility(self):
self.clear_upload_logs_button.visible = bool(self.upload_logs_text)
self.upload_logs_frame.visible = self.uploading
def start_upload(self):
if not self.uploading: # Prevent multiple uploads at the same time
self.uploading = True
self.upload_button.text = "Uploading..."
self.upload_logs_frame.visible = True
async def upload():
await upload_data(
self.config.get("project_api_key"),
self.config.get("data_path"),
self.config.get("dataset_type"),
self.add_upload_logs_entry,
lambda: asyncio.ensure_future(self.get_samples_count()),
self.on_upload_complete,
)
asyncio.ensure_future(upload())
def on_upload_complete(self):
self.uploading = False
self.upload_button.text = "Upload to Edge Impulse"
async def get_samples_count(self):
self.training_samples = await self.rest_client.get_samples_count(
self.project_id, "training"
)
self.testing_samples = await self.rest_client.get_samples_count(
self.project_id, "testing"
)
self.anomaly_samples = await self.rest_client.get_samples_count(
self.project_id, "anomaly"
)
print(
f"Samples count: Training ({self.training_samples}) - Testing ({self.testing_samples}) - Anomaly ({self.anomaly_samples})"
)
self.training_samples_label.text = f"Training samples: {self.training_samples}"
self.testing_samples_label.text = f"Test samples: {self.testing_samples}"
self.anomaly_samples_label.text = f"Anomaly samples: {self.anomaly_samples}"
### Classification
def setup_classification_ui(self):
self.classification_collapsable_frame = ui.CollapsableFrame(
"Classification", collapsed=True, height=0
)
self.classification_collapsable_frame.set_collapsed_changed_fn(
lambda c: asyncio.ensure_future(self.on_classification_collapsed_changed(c))
)
with self.classification_collapsable_frame:
with ui.VStack(spacing=10, height=0):
self.impulse_status_label = ui.Label(
"Fetching your Impulse design...", height=20, visible=False
)
self.deployment_status_label = ui.Label(
"Fetching latest model deployment...", height=20, visible=False
)
self.ready_for_classification = ui.Label(
"Your model is ready! You can now run inference on the current scene",
height=20,
visible=False,
)
ui.Spacer(height=20)
with ui.HStack(height=20):
self.classify_button = ui.Button(
"Classify current scene frame",
clicked_fn=lambda: asyncio.ensure_future(self.start_classify()),
visible=False,
)
# Scrolling frame for classify logs
self.classify_logs_frame = ui.ScrollingFrame(height=100, visible=False)
with self.classify_logs_frame:
self.classify_logs_label = ui.Label("", word_wrap=True)
with ui.HStack(height=20):
self.clear_classify_logs_button = ui.Button(
"Clear Logs", clicked_fn=self.clear_classify_logs, visible=False
)
self.classification_output_section = ui.CollapsableFrame(
"Ouput", collapsed=True, visible=False, height=0
)
with self.classification_output_section:
self.image_display = ui.Image(
"",
width=400,
height=300,
)
self.image_display.visible = False
async def on_classification_collapsed_changed(self, collapsed):
if not collapsed:
if not self.impulse_info:
self.impulse_status_label.visible = True
self.impulse_status_label.text = "Fetching your Impulse design..."
self.impulse_info = await self.rest_client.get_impulse(self.project_id)
if not self.impulse_info:
self.impulse_status_label.text = f"""Your Impulse is not ready yet.\n
Go to https://studio.edgeimpulse.com/studio/{self.project_id}/create-impulse to configure and train your model"""
return
if self.impulse_info.input_type != "image":
self.impulse_info = None
self.impulse_status_label.text = "Invalid Impulse input block type. Only 'image' type is supported"
return
self.impulse_status_label.text = "Impulse is ready"
self.impulse_status_label.visible = False
if not self.deployment_info or not self.deployment_info.has_deployment:
self.deployment_status_label.visible = True
self.deployment_status_label.text = (
"Fetching your latest model deployment..."
)
self.deployment_info = await self.rest_client.get_deployment_info(
self.project_id
)
if not self.deployment_info.has_deployment:
self.deployment_status_label.text = f"""Your model WebAssembly deployment is not ready yet.\n
Go to https://studio.edgeimpulse.com/studio/{self.project_id}/deployment to build a WebAssembly deployment"""
return
self.deployment_status_label.text = "Model deployment ready"
self.deployment_status_label.visible = False
if self.impulse_info and self.deployment_info:
self.classify_button.visible = True
self.ready_for_classification.visible = True
else:
self.classify_button.visible = False
self.ready_for_classification.visible = False
def add_classify_logs_entry(self, message):
self.classify_logs_text += message + "\n"
self.classify_logs_label.text = self.classify_logs_text
self.update_clear_classify_logs_button_visibility()
def clear_classify_logs(self):
self.classify_logs_text = ""
self.classify_logs_label.text = self.classify_logs_text
self.update_clear_classify_logs_button_visibility()
def update_clear_classify_logs_button_visibility(self):
self.clear_classify_logs_button.visible = bool(self.classify_logs_text)
self.classify_logs_frame.visible = self.classifying
async def get_impulse(self):
self.impulse = await self.rest_client.get_impulse(self.project_id)
async def start_classify(self):
if not self.classifier:
if not self.impulse:
await self.get_impulse()
if not self.impulse:
self.add_classify_logs_entry("Error: impulse is not ready yet")
return
self.classifier = Classifier(
self.rest_client,
self.project_id,
self.impulse.image_height,
self.impulse.image_width,
self.add_classify_logs_entry,
)
async def classify():
try:
self.classifying = True
self.classify_button.text = "Classifying..."
self.clear_classify_logs()
self.classification_output_section.visible = False
                result_code, image_path = await self.classifier.classify()
                if not image_path:
                    self.add_classify_logs_entry(
                        f"Error: classification failed ({result_code.name})"
                    )
                    return
                corrected_path = image_path.replace("\\", "/")
                self.image_display.source_url = corrected_path
self.image_display.width = ui.Length(self.impulse_info.image_width)
self.image_display.height = ui.Length(self.impulse_info.image_height)
self.image_display.visible = True
self.classification_output_section.visible = True
self.classification_output_section.collapsed = False
finally:
self.classifying = False
self.classify_button.text = "Classify"
asyncio.ensure_future(classify())
def on_shutdown(self):
print("[edgeimpulse.dataingestion] Edge Impulse Extension shutdown")
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/utils.py

import omni.kit.app
import carb.settings
import carb.tokens
import os
import subprocess
def get_extension_name() -> str:
"""
Return the name of the Extension where the module is defined.
Args:
None
Returns:
str: The name of the Extension where the module is defined.
"""
extension_manager = omni.kit.app.get_app().get_extension_manager()
extension_id = extension_manager.get_extension_id_by_module(__name__)
extension_name = extension_id.split("-")[0]
return extension_name
def get_models_directory() -> str:
extension_name = get_extension_name()
models_directory_name = carb.settings.get_settings().get_as_string(
f"exts/{extension_name}/models_directory"
)
temp_kit_directory = carb.tokens.get_tokens_interface().resolve("${omni_data}")
models_directory = os.path.join(temp_kit_directory, models_directory_name)
return models_directory
def is_node_installed():
try:
subprocess.run(
["node", "--version"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return True
except (subprocess.CalledProcessError, FileNotFoundError):
return False
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/uploader.py

# uploader.py
import asyncio
import requests
import os
async def upload_data(
api_key,
data_folder,
dataset,
log_callback,
on_sample_upload_success,
on_upload_complete,
):
dataset_types = ["training", "testing", "anomaly"]
if dataset not in dataset_types:
log_callback(
"Error: Dataset type invalid (must be training, testing, or anomaly)."
)
return
url = "https://ingestion.edgeimpulse.com/api/" + dataset + "/files"
try:
for file in os.listdir(data_folder):
file_path = os.path.join(data_folder, file)
label = os.path.basename(file_path).split(".")[0]
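            # e.g. "cat.01.png" yields the label "cat"; the filename prefix is
            # sent as the x-label header below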
if os.path.isfile(file_path):
await asyncio.sleep(1)
try:
with open(file_path, "rb") as file_data:
res = requests.post(
url=url,
headers={
"x-label": label,
"x-api-key": api_key,
"x-disallow-duplicates": "1",
},
files={
"data": (
os.path.basename(file_path),
file_data,
"image/png",
)
},
)
if res.status_code == 200:
log_callback(f"Success: {file_path} uploaded successfully.")
on_sample_upload_success()
else:
log_callback(
f"Error: {file_path} failed to upload. Status Code {res.status_code}: {res.text}"
)
except Exception as e:
log_callback(
f"Error: Failed to process {file_path}. Exception: {str(e)}"
)
except FileNotFoundError:
log_callback("Error: Data Path invalid.")
log_callback("Done")
on_upload_complete()
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/impulse.py

class Impulse:
def __init__(self, input_type, image_width=None, image_height=None):
self.input_type = input_type
self.image_width = image_width
self.image_height = image_height
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/edgeimpulse/dataingestion/client.py

import httpx
from .impulse import Impulse
from .deployment import DeploymentInfo
class EdgeImpulseRestClient:
def __init__(self, project_api_key):
self.base_url = "https://studio.edgeimpulse.com/v1/api/"
self.headers = {"x-api-key": project_api_key}
async def get_project_info(self):
"""Asynchronously retrieves the project info."""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}projects", headers=self.headers
)
if response.status_code == 200 and response.json()["success"]:
project = response.json()["projects"][0]
return {"id": project["id"], "name": project["name"]}
else:
return None
async def get_deployment_info(self, project_id):
"""Asynchronously retrieves deployment information, including version."""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}{project_id}/deployment?type=wasm&engine=tflite",
headers=self.headers,
)
if response.status_code == 200 and response.json().get("success"):
# Returns the deployment info if available
version = response.json().get("version")
has_deployment = response.json().get("hasDeployment")
return DeploymentInfo(
version=version,
has_deployment=has_deployment,
)
else:
# Returns None if the request failed or no deployment info was found
return None
async def download_model(self, project_id):
"""Asynchronously downloads the model."""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}{project_id}/deployment/download?type=wasm&engine=tflite",
headers=self.headers,
)
if response.status_code == 200:
return response.content
else:
return None
async def get_impulse(self, project_id):
"""Asynchronously fetches the impulse details and returns an Impulse object or None"""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}{project_id}/impulse",
headers=self.headers,
)
if response.status_code == 200:
data = response.json()
if "impulse" in data and data["impulse"].get("inputBlocks"):
first_input_block = data["impulse"]["inputBlocks"][0]
return Impulse(
input_type=first_input_block.get("type"),
image_width=first_input_block.get("imageWidth"),
image_height=first_input_block.get("imageHeight"),
)
else:
return None
else:
return None
async def get_samples_count(self, project_id, category="training"):
"""Asynchronously fetches the number of samples ingested for a specific category"""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}{project_id}/raw-data/count?category={category}",
headers=self.headers,
)
if response.status_code == 200:
data = response.json()
if "count" in data:
return data["count"]
else:
return 0
else:
return 0
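
# Minimal usage sketch for this client (illustrative only; the API key and the
# returned values below are hypothetical):
#
#     client = EdgeImpulseRestClient("ei_0123abc")
#     project = await client.get_project_info()  # e.g. {"id": 1, "name": "demo"}
#     if project:
#         count = await client.get_samples_count(project["id"], "training")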
edgeimpulse/edge-impulse-omniverse-ext/exts/edgeimpulse.dataingestion/docs/README.md

# Edge Impulse Data Ingestion
NVIDIA Omniverse extension to upload data directly into your Edge Impulse project.
md84419/coderage-ext-spacemouse/README.md

# CodeRage's coderage.io.spacemouse extension for Omniverse
# Getting Started
## Install Omniverse and some Apps
1. Install *Omniverse Launcher*: [download](https://www.nvidia.com/en-us/omniverse/download)
2. Install and launch one of the *Omniverse* apps in the Launcher. For instance: *Code*.
## Option 1: Clone this repo and add this extension to your *Omniverse App*
0. Clone the repo
- `git clone https://github.com/md84419/coderage-ext-spacemouse.git`

  or

- `git clone [email protected]:md84419/coderage-ext-spacemouse.git`
1. In the *Omniverse App* open extension manager: *Window* → *Extensions*.
2. In the *Extension Manager Window* open a settings page, with a small gear button in the top left bar.
3. In the settings page there is a list of *Extension Search Paths*. Add the `exts` subfolder for this extension there as another search path, e.g.: `C:\projects\coderage-ext-spacemouse\exts`

4. Now you can find `coderage.io.spacemouse` extension in the top left search bar. Select and enable it.
### A few tips
* Now that the `exts` folder has been added to the search path, you can add new extensions to this folder and they will be automatically found by the *App*.
* Look at the *Console* window for warnings and errors. It also has a small button to open the current log file.
* All the same commands work on Linux. Replace `.bat` with `.sh` and `\` with `/`.
* `Extension name` is a folder name in the `exts` folder, in this example: `coderage.io.spacemouse`.
* The most important feature an extension has is a config file: `extension.toml`, take a peek.
## Option 2: Add this extension to your *Omniverse App* without cloning the github repo
Alternatively, a direct link to a git repository can be added to *Omniverse Kit* extension search paths. Instead of the 'C:\' path above, use this path in the Extension manager (```Extension Manager -> Gear Icon -> Extension Search Path```):
`git://github.com/md84419/coderage-ext-spacemouse.git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder containing the extension(s). More information can be found in ["Git URL as Extension Search Paths"](https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/extensions_advanced.html#git-url-paths) section of the [Omniverse developers manual](https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/index.html).
## Option 3: Linking with an *Omniverse* app
For a better experience when developing this extension, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
1. Remove the search path added in the previous section.
2. Open this cloned repo using Visual Studio Code: `code C:\projects\coderage-ext-spacemouse`. It will suggest installing a few extensions to improve the Python experience.
3. In the terminal (CTRL + \`), run:
```bash
> link_app.bat
```
If successful you should see the `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```bash
> link_app.bat --app create
```
You can also just pass a path to use when creating the link:
```bash
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
4. Run this app with `exts` folder added as an extensions search path and new extension enabled:
```bash
> app\omni.code.bat --ext-folder exts --enable coderage.io.spacemouse
```
- `--ext-folder [path]` - adds a new folder to the search path
- `--enable [extension]` - enables an extension on startup.
Use `-h` for help:
```bash
> app\omni.code.bat -h
```
5. After the *App* has started you should see:
* the extension search paths in *Extensions* window as in the previous section.
* the extension is enabled in the list of extensions.
6. If you look inside `omni.code.bat` or any other *Omniverse App*, they all run *Omniverse Kit* (`kit.exe`). *Omniverse Kit* is the Omniverse application runtime that powers *Apps* built out of extensions.
Think of it as analogous to `python.exe`. It is a small runtime that enables all the basics, like settings, Python, and logging, and searches for extensions. **Everything else is an extension.** You can run only this new extension without running any big *App* like *Code*:
```bash
> app\kit\kit.exe --ext-folder exts --enable coderage.io.spacemouse
```
It starts much faster and will only have extensions enabled that are required for this new extension (look at the `[dependencies]` section of `extension.toml`). You can enable more extensions: try adding `--enable omni.kit.window.extensions` to have the extensions window enabled (yes, extension window is an extension too!):
```bash
> app\kit\kit.exe --ext-folder exts --enable coderage.io.spacemouse --enable omni.kit.window.extensions
```
You should see a menu in the top left. From here you can enable more extensions from the UI.
### A few tips
* In the *Extensions* window, press the *Burger* menu button near the search bar and select *Show Extension Graph*. It will show how the current *App* comes to be: all extensions and dependencies.
* Learn more: [Extensions system documentation](http://omniverse-docs.s3-website-us-east-1.amazonaws.com/kit-sdk/104.0/docs/guide/extensions.html)
# Running Tests
To run tests we run a new process where only the tested extension (and its dependencies) is enabled, as in the example above, plus the testing system (`omni.kit.test` extension). There are two ways to run extension tests:
1. Run: `app\kit\test_ext.bat coderage.io.spacemouse --ext-folder exts`
That will run a test process with all tests and then exit. For development mode, pass `--dev`: that will open the test selection window. As everywhere, hot-reload also works in this mode; give it a try by changing some code!
2. Alternatively, in *Extension Manager* (*Window → Extensions*) find your extension, click on the *TESTS* tab, click *Run Test*
For more information about testing refer to: [testing doc](http://omniverse-docs.s3-website-us-east-1.amazonaws.com/kit-sdk/104.0/docs/guide/ext_testing.html).
# Sharing This Extension to Github
To make the extension available to other users, use [Github Releases](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository).
1. Make sure the repo has [omniverse-kit-extension](https://github.com/topics/omniverse-kit-extension) topic set for auto discovery.
2. For each new release increment extension version (in `extension.toml`) and update the changelog (in `docs/CHANGELOG.md`). [Semantic versioning](https://semver.org/) must be used to express the severity of API changes.
- See time index 4:17 in [this video from Mati](https://www.youtube.com/watch?v=lEQ2VmzXMgQ) for more information.
# Known limitations
1. Integration is achieved by simulating (injecting) keyboard and mouse events into the application.
This is a significant limitation: the usual workflow of operating the spacemouse with the
non-dominant hand and the regular mouse with the dominant hand does not work, because only one
input device can be used at a time. Hopefully a better API for injecting spacemouse commands
will become available in a future release.
2. The spacenavigator dependency is currently Windows-only, so this extension currently works only on Windows. There is a fork at https://github.com/JakubAndrysek/PySpaceMouse which we should investigate.
To reduce dependency friction, we may wish to stick with spacenavigator on Windows and use
pyspacemouse only on Linux and Mac OS X.
# Contributing
The source code for this repository is provided as-is. We only accept outside contributions from individuals who have signed an Individual Contributor License Agreement.
md84419/coderage-ext-spacemouse/exts/coderage.io.spacemouse/coderage/io/spacemouse/extension.py

from datetime import datetime, timedelta
import math
import platform
import carb
import omni.ext
import omni.kit.viewport.window
import omni.kit.app
import omni.kit.commands
import omni.kit.pipapi
import omni.kit.viewport
import omni.ui.scene
import omni.ui as ui
import omni.usd
# import omni.kit.viewport.utility as vp_util
from omni.kit.viewport.utility import get_active_viewport
from omni.kit.viewport.utility.camera_state import ViewportCameraState as VpCamera
from pxr import Usd, UsdGeom, Gf, Sdf
if platform.system() == 'Windows':
omni.kit.pipapi.install("pywinusb")
# import pywinusb
import spacenavigator
UPDATE_TIME_MILLIS = 10
DEFAULT_STEP = 50 # Ideally we'd get this from Camera Speed @TODO
DEFAULT_ROTATION = 1
DEBUG = True
# Spacemouse supports six degrees of freedom. By default, these are mapped to the ViewPort camera as so:
# * x: tracking (trucking) left and right
# * y: dollying forwards and backwards (move the *camera's* Omniverse-z axis)
# * z: pedestal lift/lower
# * roll: rolling the camera body on its z axis (the line-of-sight axis)
# * pitch: tilting the camera up and down (rotating camera body on its horizontal axis)
# * yaw: panning left/right (rotating camera body on its vertical axis)
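# Illustrative example (hypothetical values): a state with x=0.5 and all other
# axes zero trucks the camera right by half a movement step, while pitch=-1.0
# tilts it upward; see update_translate() and update_rotate() below.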
class CoderageIoSpacemouseExtension(omni.ext.IExt):
def on_startup(self, ext_id):
self._count = 0
self.__previous_time = None
self.__previous_state = None
viewport = get_active_viewport()
self.__camera_path = str(viewport.camera_path)
self._MovementValue = 1.0
self._RotationValue = 1.0
self._MovementScalar = DEFAULT_STEP
self._RotationScalar = DEFAULT_ROTATION
print("[coderage.io.spacemouse] coderage io spacemouse startup")
# Angles
def RollLeft_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": self._RotationValue, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def PanRight_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": self._RotationValue, "buttons": [0,0]}
)
self.update_state(state)
def TiltDown_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": self._RotationValue, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def RollRight_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": -self._RotationValue, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def PanLeft_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": -self._RotationValue, "buttons": [0,0]}
)
self.update_state(state)
def TiltUp_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": -self._RotationValue, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
# Movements
def Up_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": self._MovementValue, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def Forward_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": self._MovementValue, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def Down_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": -self._MovementValue, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def Left_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": -self._MovementValue, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def Back_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": -self._MovementValue, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def Right_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": self._MovementValue, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def XAxisUp_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": self._MovementValue, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
def YAxisUp_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": self._MovementValue, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
def ZAxisUp_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": self._MovementValue, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
def XAxisDown_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": -self._MovementValue, "y": 0.0, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
def YAxisDown_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": -self._MovementValue, "z": 0.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
def ZAxisDown_Click():
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 0.0, "y": 0.0, "z": -self._MovementValue, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state, True)
self._window = ui.Window("Spacemouse debug", width=300, height=300)
with self._window.frame:
with ui.VStack():
# add an IntSlider for translate Strength
ui.Label("Camera Rotation Amount")
self._rotationSlider = ui.IntSlider(min = 1, max = 90, step=5)
self._rotationSlider.model.set_value(self._RotationScalar)
# self._rotationValue = 5
self._rotationSlider.model.add_value_changed_fn(self._onrotation_value_changed)
with ui.HStack():
rollLeftButton = ui.Button("Roll L", clicked_fn=RollLeft_Click)
panLeftButton = ui.Button("Pan L", clicked_fn=PanLeft_Click)
tiltDownButton = ui.Button("Tilt -", clicked_fn=TiltDown_Click)
with ui.HStack():
rollRightButton = ui.Button("Roll R", clicked_fn=RollRight_Click)
panRightButton = ui.Button("Pan R", clicked_fn=PanRight_Click)
tiltUpButton = ui.Button("Tilt +", clicked_fn=TiltUp_Click)
# add an IntSlider for translate Strength
ui.Label("Camera Movement Amount")
self._movementSlider = ui.IntSlider(min = 10, max = 1000, step=10)
self._movementSlider.model.set_value(self._MovementScalar)
# self._MovementValue = 100
self._movementSlider.model.add_value_changed_fn(self._on_movement_changed)
with ui.HStack():
upButton = ui.Button("Up", clicked_fn=Up_Click)
forwardButton = ui.Button("Forward", clicked_fn=Forward_Click)
downButton = ui.Button("Down", clicked_fn=Down_Click)
with ui.HStack():
leftButton = ui.Button("Left", clicked_fn=Left_Click)
backButton = ui.Button("Back", clicked_fn=Back_Click)
rightButton = ui.Button("Right", clicked_fn=Right_Click)
with ui.HStack():
xAxisButtonUp = ui.Button("X +", clicked_fn=XAxisUp_Click)
yAxisButtonUp = ui.Button("Y +", clicked_fn=YAxisUp_Click)
zAxisButtonUp = ui.Button("Z +", clicked_fn=ZAxisUp_Click)
with ui.HStack():
xAxisButtonDown = ui.Button("X -", clicked_fn=XAxisDown_Click)
yAxisButtonDown = ui.Button("Y -", clicked_fn=YAxisDown_Click)
zAxisButtonDown = ui.Button("Z -", clicked_fn=ZAxisDown_Click)
# with ui.VStack():
self._label_status_line_1 = ui.Label("")
self._label_status_line_2 = ui.Label("")
self._label_buttons = ui.Label("")
self._label_connected = ui.Label("")
self._label_debug = ui.Label("")
# with ui.HStack():
# ui.Button("Move", clicked_fn=self.on_click)
# Note1: It is possible to have multiple 3D mice connected.
# See: https://github.com/johnhw/pyspacenavigator/blob/master/spacenavigator.py
self._nav1 = spacenavigator.open(callback=self.on_spacemouse,button_callback=self.on_spacemouse_buttons, DeviceNumber=0)
self._nav2 = spacenavigator.open(callback=self.on_spacemouse,button_callback=self.on_spacemouse_buttons, DeviceNumber=1)
        if (self._nav1 and self._nav1.connected) or (self._nav2 and self._nav2.connected):
            self._label_connected.text = "Connected"
        elif self._nav1 or self._nav2:
            self._label_connected.text = "Not Connected"
        else:
            self._label_connected.text = "No spacemouse detected"
def on_click(self):
current_time = datetime.now()
if self.__previous_time:
if current_time - self.__previous_time < timedelta(milliseconds=UPDATE_TIME_MILLIS):
return
self.__previous_time = current_time
state: spacenavigator.SpaceNavigator = spacenavigator.SpaceNavigator(
**{"t": 255.0, "x": 30.0, "y": 30.0, "z": 30.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0, "buttons": [0,0]}
)
self.update_state(state)
def on_spacemouse(self, state: spacenavigator.SpaceNavigator):
if self.__previous_state == state:
return
self.__previous_state = state
current_time = datetime.now()
if self.__previous_time:
if current_time - self.__previous_time < timedelta(milliseconds=UPDATE_TIME_MILLIS):
return
self.__previous_time = current_time
self.update_state(state)
def on_spacemouse_buttons(self, state: spacenavigator.SpaceNavigator, buttons: spacenavigator.ButtonState):
current_time = datetime.now()
if self.__previous_time:
if current_time - self.__previous_time < timedelta(milliseconds=UPDATE_TIME_MILLIS):
return
self.__previous_time = current_time
self.update_state(state)
def get_projection_matrix(self, fov, aspect_ratio, z_near, z_far) -> omni.ui.scene.Matrix44:
"""
Calculate the camera projection matrix.
Args:
fov (float): Field of View (in radians)
aspect_ratio (float): Image aspect ratio (Width / Height)
z_near (float): distance to near clipping plane
z_far (float): distance to far clipping plane
Returns:
(UsdGeom.Matrix4d): Flattened `(4, 4)` view projection matrix
"""
a = -1.0 / math.tan(fov / 2)
b = -a * aspect_ratio
c = z_far / (z_far - z_near)
d = z_near * z_far / (z_far - z_near)
return omni.ui.scene.Matrix44(
a, 0.0, 0.0, 0.0,
0.0, b, 0.0, 0.0,
0.0, 0.0, c, 1.0,
0.0, 0.0, d, 0.0
)
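
    # e.g. self.get_projection_matrix(math.radians(60.0), 16 / 9, 0.1, 1000.0)
    # builds the projection for a 60-degree FOV, 16:9 viewport (illustrative values)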
def gfmatrix_to_matrix44(self, matrix: Gf.Matrix4d) -> omni.ui.scene.Matrix44:
"""
A helper method to convert Gf.Matrix4d to omni.ui.scene.Matrix44
Args:
matrix (Gf.Matrix): Input matrix
Returns:
UsdGeom.Matrix4d: Output matrix
"""
# convert the matrix by hand
# USING LIST COMPREHENSION IS VERY SLOW (e.g. return [item for sublist
# in matrix for item in sublist]), which takes around 10ms.
matrix44 = omni.ui.scene.Matrix44(
matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],
matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],
matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],
matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]
)
return matrix44
def gfmatrix_to_array(self, matrix: Gf.Matrix4d) -> list:
"""
A helper method to convert Gf.Matrix4d to omni.ui.scene.Matrix44
Args:
matrix (Gf.Matrix): Input matrix
Returns:
UsdGeom.Matrix4d: Output matrix
"""
# flatten the matrix by hand
# USING LIST COMPREHENSION IS VERY SLOW (e.g. return [item for sublist
# in matrix for item in sublist]), which takes around 10ms.
return (
matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],
matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],
matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],
matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]
)
def decompose_matrix(self, mat: Gf.Matrix4d):
reversed_ident_matrix = reversed(Gf.Matrix3d())
translate: Gf.Vec3d = mat.ExtractTranslation()
scale: Gf.Vec3d = Gf.Vec3d(*(v.GetLength() for v in mat.ExtractRotationMatrix()))
mat.Orthonormalize()
rotate: Gf.Vec3d = Gf.Vec3d(*reversed(mat.ExtractRotation().Decompose(*reversed_ident_matrix)))
return translate, rotate, scale
def update_translate(self, state, cam_state, update_world_position=False):
## On spacemouse, by default x is left(-)/right(+), y is forward(+)/backwards(-), z is up(+)/down(-)
## In Omniverse, -z is always camera forwards
cam_state = VpCamera()
# Get the current position and target
cam_pos = cam_state.position_world
cam_target = cam_state.target_world
# Create the vector transform - set to state * d
transform = Gf.Vec3d(
round(state.x * self._MovementScalar, 1),
round(state.z * self._MovementScalar, 1),
round(-state.y * self._MovementScalar, 1)
)
if not update_world_position:
# compute world transform from local
world_translation = cam_state.usd_camera.ComputeLocalToWorldTransform(Usd.TimeCode.Default()).Transform(transform)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(self.__camera_path+'.xformOp:translate'),
value=world_translation,
prev=cam_pos)
else:
world_translation = transform
cam_pos = cam_pos + world_translation
cam_target = cam_target + world_translation
# Update the world
cam_state.set_position_world(cam_pos, False)
cam_state.set_target_world(cam_target, False)
return transform
def update_rotate(self, state, cam_state, world=False):
# Get the local transformation - I think we should be using ComputeLocalToWorldTransform rather than GetLocalTransformation & decompose_matrix
yawsign = 1
local_transformation: Gf.Matrix4d = cam_state.usd_camera.GetLocalTransformation()
# translation: Gf.Vec3d = local_transformation.ExtractTranslation()
# rotation: Gf.Rotation = local_transformation.ExtractRotation()
decomposed_transform = self.decompose_matrix(local_transformation)
rotationX = round(decomposed_transform[1][0], 1)
rotationY = round(decomposed_transform[1][1], 1)
rotationZ = round(decomposed_transform[1][2], 1)
# Attempt to hack around issue with going beyond yaw (pan) -90 or +90
# if( yawsign == 1 and rotationX == -180.0 ):
# yawsign = -1
# elif( yawsign == 1 and rotationX == 180.0 ):
# yawsign = -1
prev_rotation = Gf.Vec3f(rotationX, rotationY, rotationZ)
new_rotationX = round(rotationX - state.pitch * self._RotationScalar, 1)
new_rotationY = round(rotationY - state.yaw * self._RotationScalar * yawsign, 1)
new_rotationZ = round(rotationZ + state.roll * self._RotationScalar, 1)
alt_local_rotation = Gf.Vec3d(new_rotationX, new_rotationY, new_rotationZ)
if DEBUG:
new_rotation = Gf.Rotation(Gf.Vec3d(1, 0, 0), new_rotationX) * \
Gf.Rotation(Gf.Vec3d(0, 1, 0), new_rotationY) * \
Gf.Rotation(Gf.Vec3d(0, 0, -1), new_rotationZ)
rotation_transform = Gf.Matrix4d().SetRotate(new_rotation)
reversed_ident_mtx = reversed(Gf.Matrix3d())
rotation_transform.Orthonormalize()
local_rotation = Gf.Vec3d(*reversed(rotation_transform.ExtractRotation().Decompose(*reversed_ident_mtx)))
self._label_debug.text = f"{new_rotationX:.03f} | {new_rotationY:.03f} | {new_rotationZ:.03f} | {yawsign}"
self._label_debug.text = self._label_debug.text + '\n' + f"{local_rotation[0]:.03f} | {local_rotation[1]:.03f} | {local_rotation[2]:.03f}"
world_rotation = alt_local_rotation
# Update the world
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(self.__camera_path+'.xformOp:rotateXYZ'),
value=world_rotation,
prev=prev_rotation)
def update_state(self, state: spacenavigator.SpaceNavigator, world=False):
status_line_1 = f"{state.x:.03f}, {state.y:.03f}, {state.z:.03f}"
status_line_2 = f"roll: {state.roll:.03f}, tilt: {state.pitch:.03f}, pan: {state.yaw:.03f}"
# Note1: The number of buttons varies with the type of 3DConnexion product we have
# Note2: The mappings of buttons is user-configurable so not guaranteed order - we have to account for this
buttons = f"buttons: {state.buttons}"
self._label_status_line_1.text = status_line_1
self._label_status_line_2.text = status_line_2
self._label_buttons.text = buttons
self._label_connected.text = f"{state.t}"
if (
state.x != 0
or state.y != 0
or state.z != 0
or state.roll != 0
            or state.pitch != 0
or state.yaw != 0
):
## On spacemouse, by default x is left(-)/right(+), y is forward(+)/backwards(-), z is up(+)/down(-)
## In Omniverse, -z is always camera forwards
cam_state = VpCamera()
# Update position
self.update_translate(state, cam_state, world)
# Now calculate the rotation
self.update_rotate(state, cam_state, world)
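    # A sketch (not part of the original driver) of how the user-configurable
    # button mapping from Note2 above could be handled: button count and order
    # vary per device, so resolve indices through a mapping instead of
    # hard-coding them. `button_actions` and its entries are hypothetical.
    def _handle_buttons(self, state: spacenavigator.SpaceNavigator):
        button_actions = {0: "fit_view", 1: "toggle_rotation"}  # hypothetical names
        for index, pressed in enumerate(state.buttons):
            if pressed and index in button_actions:
                print(f"button {index} -> {button_actions[index]}")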
def _on_movement_changed(self, model: ui.SimpleIntModel):
self._MovementScalar = model.get_value_as_int()
self._label_debug.text = "Camera movement value = " + str(self._MovementScalar)
    def _onrotation_value_changed(self, model: ui.SimpleIntModel):
        # Assumed fix: drive the rotation scalar used by update_rotate;
        # the original stored an otherwise-unused _RotationValue
        self._RotationScalar = model.get_value_as_int()
        self._label_debug.text = "Camera rotation value = " + str(self._RotationScalar)
def on_shutdown(self):
if self._nav1 is not None:
self._nav1.close()
self._nav1.callback = None
self._nav1.button_callback = None
if self._nav2 is not None:
self._nav2.close()
self._nav2.callback = None
self._nav2.button_callback = None
self._nav1 = None
self._nav2 = None
self.__previous_time = None
if self._label_status_line_1 is not None:
self._label_status_line_1.text = ""
if self._label_status_line_2 is not None:
self._label_status_line_2.text = ""
if self._label_buttons is not None:
self._label_buttons.text = ""
if self._label_connected is not None:
self._label_connected.text = "Not connected"
self._window = None
self._active_viewport_window = None
self._ext_id = None
print("[coderage.io.spacemouse] coderage io spacemouse shutdown")
| 22,034 | Python | 44.526859 | 154 | 0.564083 |
md84419/coderage-ext-spacemouse/exts/coderage.io.spacemouse/config/extension.toml | [package]
version = "0.0.1"
authors = ["CodeRage"]
title = "Spacemouse Driver"
description="Integration of 3DConnexion's spacemouse with Omniverse."
category = "other"
keywords = ["kit", "io", "spacemouse", "3dconnexion", "spacenavigator"]
readme = "docs/README.md"
repository = "https://github.com/md84419/coderage-ext-spacemouse.git"
icon = "data/icon.png"
preview_image = "data/preview.png"
changelog="docs/CHANGELOG.md"
feature=true
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
#"omni.kit.window.viewport" = {}
"omni.kit.viewport.window" = {}
[python.pipapi]
requirements = [
# "pywinusb", # Required by spacenavigator when on Windows. See extension.py.
# The next line is equivalent to .\app\kit\python\python.exe -m pip install "spacenavigator@git+https://github.com/md84419/pyspacenavigator#egg=spacenavigator.py-0.2.3"
"spacenavigator@git+https://github.com/md84419/pyspacenavigator#egg=spacenavigator.py-0.2.3==0.2.3"
]
modules = [
"spacenavigator"
]
use_online_index = true
# Main python module this extension provides, it will be publicly available as "import coderage.io.spacemouse".
[[python.module]]
name = "coderage.io.spacemouse"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,349 | TOML | 30.395348 | 172 | 0.722016 |
md84419/coderage-ext-spacemouse/exts/coderage.io.spacemouse/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.0.3] - 2024-04-07
- x, y, z partially working. Better debug panel
## [0.0.2] - 2023-12-19
- Jen's version
## [0.0.1] - 2023-05-13
- Initial commit
| 251 | Markdown | 16.999999 | 80 | 0.63745 |
md84419/coderage-ext-spacemouse/exts/coderage.io.spacemouse/docs/README.md | # CodeRage's coderage.io.spacemouse extension for Omniverse
Integration of 3DConnexion's spacemouse with Omniverse
The SpaceMouse provides 6 degrees of freedom navigation in virtual space.
Some users choose to drive the SpaceMouse with the left hand and the normal mouse with the right hand. Alternatively, you can simply switch between the two
input devices with your right hand.
- [Video](https://youtu.be/1xoEKTCjVE8)
- [Link to 3D connexion website](https://3dconnexion.com/uk/spacemouse)
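## Quick test outside Omniverse
A minimal sketch (assuming the pyspacenavigator fork listed in `extension.toml` is installed) to check that the device is detected before enabling the extension:
```python
import time
import spacenavigator

nav = spacenavigator.open()  # open the first detected 3Dconnexion device
if nav:
    for _ in range(10):
        state = spacenavigator.read()  # x, y, z, roll, pitch, yaw, buttons
        print(state.x, state.y, state.z)
        time.sleep(0.5)
```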
| 500 | Markdown | 40.749997 | 158 | 0.804 |
md84419/kit-app-title-menu-logo-experiment/README.md | # Omniverse kit-app-title-menu-logo-experiment [robotica.example.app.logo]
A scratch space for exploring and experimenting with an app logo that spans the title and menu bars.

# Getting Started
## Requirements
- NVIDIA Omniverse Launcher
- An Omniverse App (Create, Code, etc)
- Kit 104.1 or later
- Tested with Create 2022.3.3
```
> .\link_app.bat
> .\runbuild.bat
```
# Background
## User Story
As a marketing exec, I want to brand our company apps so that they are visually distinctive and easily
identifiable, delivering 5 key benefits:
- Brand recognition - establish and reinforce brand identity, as a visual representation of our company
and its values
- Professionalism and credibility, emphasising that we pay attention to detail and instilling confidence
in users, allowing them to trust and engage with the app
- Differentiation and memorability, building a connection with our target audience
## The solution
Omniverse currently provides limited ability to control the [chrome](https://www.nngroup.com/articles/browser-and-gui-chrome/)
for applications. This experiment provides
a way to work around those limitations to create a more distinctive app design. This should be considered a
temporary workaround until Omniverse provides a better solution: we're taking advantage of undocumented features
about the way that Omniverse currently works, which results in fragile code which is likely to break in future
Omniverse releases.
This experiment is designed for and only works when running on Microsoft Windows.
This is example code. It is not ready for production use and has not been optimised. It is unlikely to scale well.
# Feature tracking
Two feature requests have been logged related to this ticket.
- [OVC-2561 - Positioning of title in main window](https://nvidia-omniverse.atlassian.net/servicedesk/customer/portal/4/OVC-2561)
- [Forum discussion 254770 - UI Apps - Finer-grained control of main window chrome](https://forums.developer.nvidia.com/t/ui-apps-finer-grained-control-of-main-window-chrome/254770)
# Contributing
The source code for this repository is provided as-is. We only accept outside contributions from individuals who have
signed an Individual Contributor License Agreement.
| 2,313 | Markdown | 44.372548 | 181 | 0.792045 |
md84419/kit-app-title-menu-logo-experiment/source/extensions/robotica.example.app.logo/robotica/example/app/logo/logo_menu.py | # Copyright (c), ROBOTICA MACHINE LEARNING LIMITED, 2022.
# License: Apache 2.0
from pathlib import Path
import omni.client
from omni.kit.menu.utils import MenuItemDescription, MenuAlignment
import omni.ext
import omni.kit.app
import omni.ui as ui
import omni.kit.menu.utils
from typing import Union
import omni.kit.window.modifier.titlebar
DATA_PATH = Path(__file__).parent.parent.parent.parent.parent
class RoboticaLogoDelegate(ui.MenuDelegate):
def destroy(self):
pass
def build_item(self, item: ui.MenuHelper):
with ui.HStack(width=0):
ui.Spacer(width=0)
with ui.HStack(content_clipping=1, width=0):
with ui.Placer(offset_x=-36, offset_y=-1):
with ui.Frame(width=80, horizontal_clipping=True):
ui.Image(
f"{DATA_PATH}/data/icon.png",
width=90,
height=32,
alignment=ui.Alignment.BOTTOM,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP
)
ui.Spacer(width=6)
def get_menu_alignment(self):
return MenuAlignment.DEFAULT
def update_menu_item(self, menu_item: Union[ui.Menu, ui.MenuItem], menu_refresh: bool):
if isinstance(menu_item, ui.MenuItem):
menu_item.visible = False
class LogoMenu:
"""
Place the bottom half of the Robotica logo as the first item in the menu, so it lines up with the top half of
the Robotica logo from the title bar.
"""
def __init__(self):
self._live_menu_name = "Robotica logo Widget"
self._menu_list = [MenuItemDescription(name="placeholder", show_fn=lambda: False)]
def register_menu_widgets(self):
self._cache_state_delegate = RoboticaLogoDelegate()
omni.kit.menu.utils.add_menu_items(self._menu_list, name=self._live_menu_name, menu_index=-98, delegate=self._cache_state_delegate)
self._cache_state_delegate.build_item(self._live_menu_name)
def unregister_menu_widgets(self):
omni.kit.menu.utils.remove_menu_items(self._menu_list, self._live_menu_name)
if self._cache_state_delegate:
self._cache_state_delegate.destroy()
self._cache_state_delegate = None
self._menu_list = None
| 2,350 | Python | 34.621212 | 139 | 0.624255 |
md84419/kit-app-title-menu-logo-experiment/source/extensions/robotica.example.app.logo/robotica/example/app/logo/setup.py | # Copyright (c), ROBOTICA MACHINE LEARNING LIMITED, 2022.
# License: Apache 2.0
import asyncio
from pathlib import Path
import carb.imgui as _imgui
import carb.settings
import carb.tokens
import omni.ext
import omni.kit.ui as ui
import omni.kit.menu.utils
from omni.kit.menu.utils import MenuLayout
from omni.kit.quicklayout import QuickLayout
from omni.kit.window.title import get_main_window_title
from .logo_menu import LogoMenu
async def _load_layout(layout_file: str):
"""this private methods just help loading layout, you can use it in the Layout Menu"""
await omni.kit.app.get_app().next_update_async()
QuickLayout.load_file(layout_file)
# This extension is mostly loading the Layout updating menu
class SetupExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
# get the settings
self._settings = carb.settings.get_settings()
self._await_layout = asyncio.ensure_future(self._delayed_layout())
# setup the menu and their layout
self._setup_menu()
# setup the Application Title
window_title = get_main_window_title()
window_title.set_app_version(self._settings.get("/app/titleVersion"))
        # setup some imgui style overrides
imgui = _imgui.acquire_imgui()
imgui.push_style_color(_imgui.StyleColor.ScrollbarGrab, carb.Float4(0.4, 0.4, 0.4, 1))
imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabHovered, carb.Float4(0.6, 0.6, 0.6, 1))
imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabActive, carb.Float4(0.8, 0.8, 0.8, 1))
imgui.push_style_var_float(_imgui.StyleVar.DockSplitterSize, 1)
self._is_darker_mode = False
self._toggle_darker_mode()
self._logo_menu = LogoMenu()
self._logo_menu.register_menu_widgets()
def _toggle_darker_mode(self):
"""Update Imgui to be on its darker Mode state vs the default Create/View mode"""
self._is_darker_mode = not self._is_darker_mode
if self._is_darker_mode:
black = carb.Float4(0.0, 0.0, 0.0, 1)
dark0 = carb.Float4(0.058, 0.058, 0.058, 1) # title bar colour
dark1 = carb.Float4(0.09, 0.094, 0.102, 1)
# dark2 = carb.Float4(0.122, 0.129, 0.149, 1) # carb.Float4(0.129, 0.129, 0.149, 1)
# mid1 = carb.Float4(0.157, 0.157, 0.157, 1) # carb.Float4(0.157, 0.157, 0.18, 1)
mid2 = carb.Float4(0.22, 0.22, 0.22, 1) # colour of the bottom info bar
blue = carb.Float4(0.058, 0.058, 1, 1)
menu_bar = dark0
title_bar = dark0
background = dark0
popup_bg = black
tab = dark0
tab_unfocussed = dark0
frame = dark1
window_bg = mid2
        else:
            menu_bar = carb.Float4(0.27, 0.27, 0.27, 1)
            title_bar = carb.Float4(0.12, 0.12, 0.12, 1)
            popup_bg = carb.Float4(0.22, 0.22, 0.22, 1)
            tab = carb.Float4(0.192, 0.192, 0.192, 1)
            background = menu_bar
            tab_unfocussed = carb.Float4(0.27 / 1.5, 0.27 / 1.5, 0.27 / 1.5, 1)
            frame = title_bar
            # 'blue' and 'window_bg' are referenced below in both modes; without
            # them this branch raises NameError. Values here are assumed defaults.
            blue = carb.Float4(0.058, 0.058, 1, 1)
            window_bg = popup_bg
imgui = _imgui.acquire_imgui()
imgui.push_style_color(_imgui.StyleColor.MenuBarBg, menu_bar)
imgui.push_style_color(_imgui.StyleColor.TitleBg, title_bar)
imgui.push_style_color(_imgui.StyleColor.TitleBgActive, title_bar)
imgui.push_style_color(_imgui.StyleColor.PopupBg, popup_bg)
imgui.push_style_color(_imgui.StyleColor.FrameBg, frame)
imgui.push_style_color(_imgui.StyleColor.NavHighlight, blue)
imgui.push_style_color(_imgui.StyleColor.NavWindowingDimBg, blue)
imgui.push_style_color(_imgui.StyleColor.WindowBg, window_bg)
imgui.push_style_color(_imgui.StyleColor.Border, window_bg)
imgui.push_style_color(_imgui.StyleColor.ChildBg, background)
imgui.push_style_color(_imgui.StyleColor.Tab, tab)
imgui.push_style_color(_imgui.StyleColor.TabActive, tab)
imgui.push_style_color(_imgui.StyleColor.TabUnfocusedActive, tab_unfocussed)
imgui.push_style_color(_imgui.StyleColor.TabUnfocused, tab_unfocussed)
imgui.push_style_color(_imgui.StyleColor.TabHovered, tab)
async def _delayed_layout(self):
        # a few frames of delay to allow automatic layout of windows that want their own positions
        for _ in range(4):
await omni.kit.app.get_app().next_update_async()
settings = carb.settings.get_settings()
# setup the Layout for your app
layouts_path = carb.tokens.get_tokens_interface().resolve("${robotica.example.app.logo}/layouts")
layout_file = Path(layouts_path).joinpath(f"{settings.get('/app/layout/name')}.json")
asyncio.ensure_future(_load_layout(f"{layout_file}"))
# using imgui directly to adjust some color and Variable
imgui = _imgui.acquire_imgui()
# DockSplitterSize is the variable that drive the size of the Dock Split connection
imgui.push_style_var_float(_imgui.StyleVar.DockSplitterSize, 2)
editor_menu = ui.get_editor_menu()
editor_menu.set_priority("File", -96)
def _setup_menu(self):
editor_menu = ui.get_editor_menu()
# you can have some file Menu
self._file_open = editor_menu.add_item("File/Open", self._open_file)
# some Menu Item
self._help_menu = editor_menu.add_item("Help/Show", self._show_help)
# from omni.kit.menu.utils import MenuLayout
# self._menu_layout = [
# MenuLayout.Menu("Window", [
# MenuLayout.Item("MyWindow"),
# ]),
# ]
# omni.kit.menu.utils.add_layout(self._menu_layout)
def _show_help(self, menu, toggled):
print("Help is Coming")
def _open_file(self, menu, toggled):
print("Open the File you want")
def on_shutdown(self):
editor_menu = ui.get_editor_menu()
self._file_open = editor_menu.remove_item("File/Open")
self._help_menu = editor_menu.remove_item("Help/Show")
self._file_open = None
self._help_menu = None
self._logo_menu.unregister_menu_widgets()
self._logo_menu = None
self._settings = None
self._await_layout = None
| 6,454 | Python | 39.597484 | 119 | 0.63387 |
md84419/kit-app-title-menu-logo-experiment/source/extensions/robotica.example.app.logo/config/extension.toml | [package]
version = "1.0.0"
authors = ["Robotica"]
title = "robotica example app logo"
description="An experimental app logo that spans the title and menu bars."
category = "Example"
keywords = ["kit", "example"]
readme = "docs/README.md"
repository = ""
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
# icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.quicklayout" = {}
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "robotica.example.app.logo"
| 604 | TOML | 27.809522 | 105 | 0.726821 |
md84419/usd-experiements/README.md | # usd-experiements
A scratch space for USD experiments and testcases
| 69 | Markdown | 16.499996 | 48 | 0.811594 |
md84419/usd-experiements/exts/robotica.usd.experiments/robotica/usd/experiments/extension.py | import os
from pathlib import Path
import omni.ext
import omni.ui as ui
from pxr import UsdShade, Tf, Sdf, Usd
CURRENT_PATH = Path(__file__).parent
PROJECT_ROOT = CURRENT_PATH.parent.parent.parent
DATA_DIR = os.path.join(PROJECT_ROOT, 'data')
TEST_USDA = os.path.join(DATA_DIR, 'usd-experiements.usda')
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[robotica.usd.experiments] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class RoboticaUsdExperimentsExtension(omni.ext.IExt):
def __init__(self):
self._prim_path = None
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[robotica.usd.experiments] robotica usd experiments startup")
self._count = 0
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
label = ui.Label("")
def on_click():
self._count += 1
label.text = f"count: {self._count}"
method1()
method2()
def method1():
# The following is based on code by Michal S in NVIDIA Omniverse Discord:
# https://discord.com/channels/827959428476174346/989589513909203014/1108002804695572480
reference_path = TEST_USDA
name = os.path.splitext(os.path.basename(reference_path))[0]
stage = omni.usd.get_context().get_stage()
if self._prim_path is None:
if stage.HasDefaultPrim():
self._prim_path = omni.usd.get_stage_next_free_path(
stage, stage.GetDefaultPrim().GetPath().pathString + "/" + Tf.MakeValidIdentifier(name), False
)
else:
self._prim_path = omni.usd.get_stage_next_free_path(stage, "/" + Tf.MakeValidIdentifier(name), False)
omni.kit.commands.execute("CreateReference", usd_context=omni.usd.get_context(), path_to=self._prim_path, asset_path=reference_path, instanceable=False)
scale = 2.5
#some of my attempts
if scale is not None and scale != 1:
scale = float(scale)
UsdShade.Material(stage.GetPrimAtPath(self._prim_path)).CreateInput("texture_scale", Sdf.ValueTypeNames.Float2).Set((scale, scale))
for prim in Usd.PrimRange(stage.GetPrimAtPath(self._prim_path)):
if prim.IsA(UsdShade.Shader):
prim = UsdShade.Shader(prim)
inp = prim.GetInput("scale")
if inp:
inp.Set(tuple(scale * value for value in inp.Get()))
else:
prim.CreateInput("scale", Sdf.ValueTypeNames.Float2).Set((scale, scale))
return self._prim_path
def method2():
'''
Read from the 'global' section of the usda:
```
#usda 1.0
(
customLayerData = {
...
}
endTimeCode = 100
metersPerUnit = 0.01
startTimeCode = 0
timeCodesPerSecond = 24
upAxis = "Y"
)
```
'''
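            # A sketch (assumption): print the stage-level metadata described
            # in the docstring above using standard Usd.Stage accessors
            stage = omni.usd.get_context().get_stage()
            print("customLayerData:", stage.GetRootLayer().customLayerData)
            print("startTimeCode:", stage.GetStartTimeCode())
            print("endTimeCode:", stage.GetEndTimeCode())
            print("timeCodesPerSecond:", stage.GetTimeCodesPerSecond())
            print("metersPerUnit:", stage.GetMetadata("metersPerUnit"))
            print("upAxis:", stage.GetMetadata("upAxis"))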
return self._prim_path
def on_reset():
self._count = 0
label.text = "empty"
on_reset()
with ui.HStack():
ui.Button("Experiment", clicked_fn=on_click)
def on_shutdown(self):
print("[robotica.usd.experiments] robotica usd experiments shutdown")
self._window = None
| 4,659 | Python | 40.238938 | 176 | 0.509551 |
DigitalBotLab/App/README.md | # App
Main app for Digital Bot Lab (public).
# Start
```bat
.\_build\windows-x86_64\release\dbl.app.bat
cd _build\windows-x86_64\release
kit\kit.exe apps/dbl.app.kit --ext-folder D:\\dbl\\App\\kit-app\\source\\extensions --enable omni.simready.explorer
``` | 259 | Markdown | 20.666665 | 115 | 0.718147 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.26"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Jeffrey Qiu <[email protected]>", "John Kosnik <[email protected]>", "Attila Szabo <[email protected]"]
# The title and description fields are primarly for displaying extension info in UI
title = "Browser for SimReady Assets"
description="A browser for SimReady Assets"
# URL of the extension source repository.
repository="https://gitlab-master.nvidia.com/omniverse/kit-extensions/kit-browsers"
# Keywords for the extension
keywords = ['browser', 'asset', 'simready', "explorer"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
category = "Rendering"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.svg"
feature = true
[dependencies]
"omni.kit.browser.folder.core" = {}
"omni.kit.commands" = {}
"omni.kit.menu.utils" = {}
"omni.usd" = {}
"omni.kit.viewport.utility" = {}
"omni.kit.window.property" = {}
"omni.kit.viewport.window" = {}
# Main python module this extension provides, it will be publicly available as "import omni.simready.explorer".
[[python.module]]
name = "omni.simready.explorer"
[settings.exts."omni.simready.explorer"]
# The list of asset root folders to be browsed.
folders = ["https://omniverse-content-staging.s3.us-west-2.amazonaws.com/Assets/simready_content"]
default_physics = "RigidBody"
[[trigger]]
menu.name = "Window/Browsers/SimReady Explorer"
menu.window = "SimReady Explorer"
[[test]]
dependencies = [
"omni.kit.renderer.core",
"omni.kit.renderer.capture",
]
args = [
"--no-window",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/app/menu/legacy_mode=false",
]
[documentation]
pages = [
"docs/Overview.md",
"docs/CHANGELOG.md",
]
| 1,993 | TOML | 28.323529 | 115 | 0.717511 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/config/extension.gen.toml |
[package]
archivePath = "http://d4i3qtqj3r0z5.cloudfront.net/omni.simready.explorer-1.0.26.zip"
[package.publish]
kitVersion = "105.0.0+release.109326.bd6b456a.tc"
date = 1687298400
buildNumber = "101.1.0+2023.1.1527.751de03b.tc"
repoName = "kit-browsers"
| 298 | TOML | 28.899997 | 89 | 0.654362 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/empty_property_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import List, Optional
import omni.ui as ui
from .browser_model import AssetDetailItem
from .browser_property_delegate import BrowserPropertyDelegate
class EmptyPropertyDelegate(BrowserPropertyDelegate):
"""
A delegate to show when no asset selected.
"""
def accepted(self, asset: Optional[AssetDetailItem]) -> bool:
"""BrowserPropertyDelegate method override"""
return asset is None
def show(self, asset: Optional[AssetDetailItem], frame: ui.Frame) -> None:
"""BrowserPropertyDelegate method override"""
if hasattr(self, "_container"):
self._container.visible = True
else:
with frame:
self._container = ui.VStack()
with self._container:
ui.Label("Please Select a SimReady Asset!", alignment=ui.Alignment.CENTER)
def accepted_multiple(self, detail_items: List[AssetDetailItem]) -> bool:
"""BrowserPropertyDelegate method override"""
return False
def show_multiple(self, detail_items: List[AssetDetailItem], frame: ui.Frame) -> None:
"""BrowserPropertyDelegate method override"""
pass
| 1,606 | Python | 36.372092 | 94 | 0.701121 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/prop_property_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
from typing import List, Optional
import omni.kit.app
import omni.ui as ui
from omni.kit.browser.core import TreeBrowserWidget
from .asset import AssetType
from .browser_model import AssetDetailItem
from .browser_property_delegate import BrowserPropertyDelegate
from .property_widgets import PropAssetTagsWidget
class PropAssetPropertyDelegate(BrowserPropertyDelegate):
"""
A delegate to show properties of assets of type Prop.
"""
def accepted(self, item: Optional[AssetDetailItem]) -> bool:
"""BrowserPropertyDelegate method override"""
return item and item.asset.asset_type == AssetType.PROP
def show(self, item: Optional[AssetDetailItem], frame: ui.Frame) -> None:
"""BrowserPropertyDelegate method override"""
if item is None:
return
asset = item.asset
self._item = item
if hasattr(self, "_container"):
self._container.visible = True
self._name_label.text = asset.name
self._qcode_value.text = asset.qcode
self._dimensions_value.text = asset.extent_as_str
if hasattr(self, "h_buttons"):
self._h_hstack.clear()
with self._h_hstack:
self._build_hierarchy_links()
self._thumbnail_img.source_url = asset.thumbnail
self._physx_combobox.model = item.physics_model
self._tags_field.tags = asset.tags
async def __delay_show_physics():
# Delay to show badge to make display correct
if item.physics_model:
await omni.kit.app.get_app().next_update_async()
self._badge_container.visible = item.physics_model.current_index.as_int != 0
self._sub_physics = None
self._sub_physics = item.physics_model.current_index.subscribe_value_changed_fn(
self.__on_item_physics_changed
)
asyncio.ensure_future(__delay_show_physics())
else:
with frame:
self._container = ui.VStack(height=0, spacing=5)
with self._container:
self._build_thumbnail(item)
with ui.HStack():
ui.Spacer()
self._name_label = ui.Label(asset.name, height=0, style_type_name_override="Asset.Title")
ui.Spacer()
with ui.CollapsableFrame("Behaviors"):
self._physx_combobox = self._build_combobox("PhysicsVariant", item.physics_model)
with ui.CollapsableFrame("Asset info"):
self.collapse_info = self._build_info_widget()
with ui.CollapsableFrame("Tags", height=ui.Percent(100)):
self._tags_field = self._build_tags()
ui.Spacer()
def accepted_multiple(self, detail_items: List[AssetDetailItem]) -> bool:
"""BrowserPropertyDelegate method override"""
return False
def show_multiple(self, detail_items: List[AssetDetailItem], frame: ui.Frame) -> None:
"""BrowserPropertyDelegate method override"""
pass
def _build_info_widget(self):
"""Build the Asset information section"""
with ui.VStack(spacing=10):
with ui.HStack():
self._qcode_label = ui.Label("QCode:", style_type_name_override="Asset.Label", width=100)
self._qcode_value = ui.Label(
self._item.asset.qcode, alignment=ui.Alignment.LEFT_BOTTOM, style_type_name_override="Asset.Value"
)
with ui.HStack():
self._dimensions_label = ui.Label("Dimensions:(m)", style_type_name_override="Asset.Label", width=100)
self._dimensions_value = ui.Label(
self._item.asset.extent_as_str,
alignment=ui.Alignment.LEFT_BOTTOM,
style_type_name_override="Asset.Value",
)
self._h_hstack = ui.HStack(alignment=ui.Alignment.LEFT_BOTTOM)
with self._h_hstack:
self._hierachy_label = ui.Label("Hierarchy:", style_type_name_override="Asset.Label", width=100)
self._build_hierarchy_links()
ui.Spacer()
def _build_tags(self):
"""Add the tag link section. must pass explorer instance"""
from .extension import get_instance
browser_widget: Optional[TreeBrowserWidget] = get_instance().browser_widget
tag_widget = PropAssetTagsWidget(self._item.asset.tags, browser_widget)
return tag_widget
def _build_thumbnail(self, item: AssetDetailItem):
"""Builds thumbnail frame and resizes"""
self._thumbnail_frame = ui.Frame(height=0)
self._thumbnail_frame.set_computed_content_size_changed_fn(self._on_thumbnail_frame_size_changed)
with self._thumbnail_frame:
with ui.HStack():
ui.Spacer(width=ui.Percent(25))
self._thumbnail_container = ui.ZStack(height=ui.Fraction(1))
with self._thumbnail_container:
self._thumbnail_img = ui.Image(
item.asset.thumbnail,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
alignment=ui.Alignment.CENTER_TOP,
)
# Badge to indicate item physics ON, need to change visibility if physics status changed
self._badge_container = ui.HStack(
alignment=ui.Alignment.LEFT_BOTTOM, visible=item.physics != "None"
)
with self._badge_container:
ui.Spacer(width=ui.Percent(80))
with ui.VStack():
ui.Spacer(height=2)
ui.ImageWithProvider(
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT,
alignment=ui.Alignment.LEFT,
style_type_name_override="GridView.Item.Badge.Image",
)
if item.physics_model:
self._sub_physics = item.physics_model.current_index.subscribe_value_changed_fn(
self.__on_item_physics_changed
)
ui.Spacer(width=ui.Percent(25))
def __on_item_physics_changed(self, model: ui.SimpleIntModel) -> None:
self._badge_container.visible = model.as_int != 0
def _build_hierarchy_links(self):
"""build h links"""
from .extension import get_instance
browser_widget = get_instance().browser_widget
def on_click(value):
"""set the browse search field with this tag
Args:
value (List): tag search value
"""
search_field = browser_widget.search_field
search_field.search_words = value
self.h_buttons = []
        for i, h in enumerate(self._item.asset.hierarchy):
            text_v = "{}".format(h)
            new_button = ui.Button(
                text_v,
                alignment=ui.Alignment.LEFT_BOTTOM,
                spacing=0,
                width=0,
                style_type_name_override="Asset.ButtonLinks",
            )
            h_list = self._item.asset.hierarchy[0 : (i + 1)]
            new_button.set_clicked_fn(lambda value=h_list: on_click(value))
            # Keep a reference so clear_buttons() can hide these later
            self.h_buttons.append(new_button)
            if i < (len(self._item.asset.hierarchy) - 1):
                ui.Label(" > ", width=0)
def clear_buttons(self, buttons):
for b in buttons:
b.visible = False
self.h_buttons = []
def _on_thumbnail_frame_size_changed(self):
# Dynamic change thumbnail size to be half of frame width
async def __change_thumbnail_size_async():
await omni.kit.app.get_app().next_update_async()
image_size = self._thumbnail_frame.computed_width / 2
self._thumbnail_img.height = ui.Pixel(image_size)
asyncio.ensure_future(__change_thumbnail_size_async())
| 8,681 | Python | 41.145631 | 118 | 0.570211 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/property_widgets.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.client
import omni.ui as ui
class PropAssetTagsWidget:
"""Class for building and maintaining tags chips"""
def __init__(self, tags, browser_widget):
"""tag chips
Args:
tags (list): list of tags
browser_widget(get_instance().browser_widget): instance of base browser widget
"""
self.browser_widget = browser_widget
self._tags = tags
self._tag_chips = []
self._tag_buttons = {}
self.tag_stack = ui.Frame(spacing=5)
self._frame_size = 50
def setFrame():
self._frame_size = int(self.tag_stack.computed_width / 8)
self.clear_tags()
with self.tag_stack:
self.build_tag_chips()
self.tag_stack.set_computed_content_size_changed_fn(setFrame)
with self.tag_stack:
self.build_tag_chips()
@property
def tags(self):
"""Property gor tag values
Returns:
List: tag values
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Property Setter for adding tag also rebuilds tags buttons
Args:
tags (List): List tag values
"""
# Remove existing tags in frame
self.clear_tags()
# Set Tags Property
self._tags = tags
# Rebuild new tags
with self.tag_stack:
self.build_tag_chips()
def clear_tags(self):
"""clears all tag button in the frame"""
self.tag_stack.clear()
    def append_tag(self, tag):
        """Add a tag value to the existing tag list, skipping duplicates"""
        if tag not in self.tags:
            self._tags.append(tag)
    def remove_chip(self, chips):
        """Hide the given chip widgets"""
        for c in chips:
            c.visible = False
def build_tag_chips(self):
"""Build the tag chip ui and added link function to Browser Search Bar"""
def on_click(value):
"""set the browse search field with this tag
Args:
value (str): tag search value
"""
search_field = self.browser_widget.search_field
search_field.search_words = [value]
row_list = []
row_count = 0
char_count = 0
list_of_rows = []
# each index is a list of tags that will fit in the row based on the size of the frame.
for t in self.tags:
new_chars = char_count + len(t)
if new_chars < self._frame_size:
row_list.append(t)
char_count = new_chars
else:
list_of_rows.append(row_list)
row_list = []
row_list.append(t)
char_count = len(t)
list_of_rows.append(row_list)
# build the buttons
with ui.VGrid(padding=5, row_height=30):
for row in list_of_rows:
with ui.HStack(spacing=5):
for r in row:
                        new_button = ui.Button(r, height=0, width=0)
                        new_button.set_clicked_fn(lambda value=r: on_click(value))
| 3,654 | Python | 29.974576 | 95 | 0.564039 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/combobox_model.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import weakref
from typing import Any, List, Optional
import omni.ui as ui
class ComboBoxItem(ui.AbstractItem):
def __init__(self, text: str, value: Any) -> None:
super().__init__()
self.model = ui.SimpleStringModel(text)
self.value = value
class ComboBoxModel(ui.AbstractItemModel):
"""
The model used for combobox
Args:
texts (List[str]): Texts displayed in combobox
kwargs:
values (Optional[List[Any]]): Values for combobox list. Default None to use text as value
current_value (Any): Current value displayed in combobox. Default None to use first one.
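    Example (a usage sketch; the list values are assumptions):
        model = ComboBoxModel(["None", "RigidBody"], current_value="RigidBody")
        combo = ui.ComboBox(model)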
"""
def __init__(self, texts: List[str], values: Optional[List[Any]] = None, current_value: Any = None):
super().__init__()
# List items
self._items = []
for index, text in enumerate(texts):
value = values[index] if values else text
self._items.append(ComboBoxItem(text, value))
# Current value
current = self._get_current_index_by_value(current_value)
self.current_index = ui.SimpleIntModel(current)
self._sub = self.current_index.subscribe_value_changed_fn(
lambda _, this=weakref.proxy(self): this.on_current_changed()
)
def destroy(self):
self._sub = None
self.current_index = None
self._items = []
@property
def current_value(self) -> str:
items = self.get_item_children(None)
return items[self.current_index.as_int].value
@current_value.setter
def current_value(self, value: str) -> None:
index = self._get_current_index_by_value(value)
self.current_index.set_value(index)
def get_item_children(self, item) -> List[ComboBoxItem]:
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self.current_index
if isinstance(item, ComboBoxItem):
return item.model
else:
return ui.SimpleStringModel("Unknown")
def on_current_changed(self):
current_index = self.current_index.as_int
items = self.get_item_children(None)
self._on_current_item_changed(items[current_index])
self._item_changed(None)
def _get_current_index_by_value(self, value: Any, default: int = 0) -> int:
if value is None:
current = default
else:
items = self.get_item_children(None)
            if isinstance(value, float):
                current = next((i for i, item in enumerate(items) if abs(item.value - value) < 0.0001), default)
            else:
                if isinstance(value, list):
                    value = tuple(value)

                def compare_values(a, b):
                    # Normalize lists to tuples so list-valued items compare correctly
                    if isinstance(a, list):
                        a = tuple(a)
                    return a == b

                current = next((i for i, item in enumerate(items) if compare_values(item.value, value)), default)
return current # noqa: R504
def _on_current_item_changed(self, item: ComboBoxItem) -> None:
pass
| 3,537 | Python | 33.349514 | 112 | 0.615776 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/style.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from pathlib import Path
from omni.ui import color as cl
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("data/icons")
SIMREADY_DRAG_PREFIX = "SimReady::"
# Use same context menu style with content browser
cl.context_menu_background = cl.shade(cl("#343432"))
cl.context_menu_separator = cl.shade(0x449E9E9E)
cl.context_menu_text = cl.shade(cl("#9E9E9E"))
cl.simready_background = cl.shade(cl("#23211F"))
CONTEXT_MENU_STYLE = {
"Menu": {"background_color": cl.context_menu_background_color, "color": cl.context_menu_text, "border_radius": 2},
"Menu.Item": {"background_color": 0x0, "margin": 0},
"Separator": {"background_color": 0x0, "color": cl.context_menu_separator},
}
UI_STYLES = {
"ToolBar.Button": {"background_color": 0x0, "padding": 3, "margin": 0},
"ToolBar.Button:selected": {"background_color": cl.simready_background},
"Splitter": {"background_color": 0x0, "margin_width": 0},
"Splitter:hovered": {"background_color": 0xFFB0703B},
"Splitter:pressed": {"background_color": 0xFFB0703B},
"Property.Path": {"background_color": cl.simready_background},
"Property.Path::mixed": {"color": 0xFFCC9E61},
"Property.Frame": {"padding": 0},
"GridView.Item.Badge.Image": {
"image_url": f"{ICON_PATH}/physx.png",
},
"GridView.Item.Badge.Background": {"background_color": 0xFFC2C2C2},
}
PROPERTY_STYLES = {
"Asset.Title": {"font_size": 18, "color": 0xFF9E9E9E, "font-weight": 900},
"Asset.Label": {"font_size": 14, "font_weight": 1200, "text-align": "left"},
"Asset.Value": {"font_size": 14, "font_weight": 1, "text-align": "right"},
"Asset.ButtonLinks": {"background_color": "transparent", "padding": 0},
"Asset.ButtonLinks:hovered": {"background_color": 0xFF9E9E9E, "border_color": cl("#0078D7")},
"Asset.ButtonLinks:pressed": {
"background_color": cl("#CCE4F7"),
"border_color": cl("#005499"),
"border_width": 1.0,
},
}
| 2,414 | Python | 42.124999 | 118 | 0.680613 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_api.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Dict, List, Optional, Tuple
import carb
import omni.ext
import omni.kit.app
import omni.kit.commands
import omni.kit.undo
import omni.usd
from pxr import Gf, Sdf, Usd, UsdGeom
from .actions import _add_asset_to_stage_helper, configure_prim
from .asset import SimreadyAsset
from .browser_model import AssetDetailItem
from .extension import SimReadyBrowserExtension, get_instance
def get_selected_xformable_prim_paths(usd_context: omni.usd.UsdContext, stage: Usd.Stage) -> List[Sdf.Path]:
"""Get the list of selected Xformable prim paths in the stage.
Args:
usd_context (omni.usd.UsdContext): The USD context to get the selection from.
stage (Usd.Stage): The stage to get the selection from.
Returns:
The list of prim paths of the selected Xformable prims in the stage,
or an empty list if there's nothing selected.
"""
if usd_context is None or stage is None:
return []
selection = usd_context.get_selection()
return [
path
for path in selection.get_selected_prim_paths()
if stage.GetPrimAtPath(path) and stage.GetPrimAtPath(path).IsA(UsdGeom.Xformable)
]
def get_average_position_of_prims(prims: List[Usd.Prim]) -> Gf.Vec3d:
"""Get the average position of a list of prims.
Args:
prims (List[Usd.Prim]): The list of prims to get the average position of.
Returns:
The average 3D position of the prims, or the origin if no prims provided.
"""
position = Gf.Vec3d(0)
for prim in prims:
if prim.IsA(UsdGeom.Xformable):
position += omni.usd.get_world_transform_matrix(prim).ExtractTranslation()
if len(prims) > 0:
position /= len(prims)
return position
async def find_assets(search_words: Optional[List[str]] = None) -> List[SimreadyAsset]:
"""Search assets in the current asset library
Filter the current asset library by a list of search words.
Search words are not case sensitive, and are matched partially against asset names as tags.
If no search words are provided, all assets are returned.
Example: Analyzing the results of searching with ["residential", "chair", "wood"] will reveal
that the found assets have "residential" and "chair" in their tags, and "wood" in their names.
Args:
search_words (Optional[List[str]]): List of search words to filter assets on.
Returns:
List[SimreadyAsset]: List of SimReady assets that match the search words.
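    Example (a usage sketch, assuming an async context):
    .. code-block:: python
        assets = await find_assets(search_words=["residential", "chair", "wood"])
        for asset in assets:
            print(asset.name, asset.tags)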
"""
ext: SimReadyBrowserExtension = get_instance()
collections = ext.browser_model.get_item_children(None)
# Tree mode, only has a default collection
# First get categories to force traverse assets
categories = ext.browser_model.get_item_children(collections[0])
while True:
for root_folder in ext.browser_model._root_folders:
# Make sure all assets from all folders are loaded
if not root_folder.prepared:
await omni.kit.app.get_app().next_update_async()
break
else:
# Reload categories after traverse done
categories = ext.browser_model.get_item_children(collections[0])
# First category "ALL" has all assets
all_asset_items: List[AssetDetailItem] = ext.browser_model.get_item_children(categories[0])
if search_words:
filtered_assets: List[SimreadyAsset] = []
for asset_item in all_asset_items:
if asset_item.filter(search_words):
filtered_assets.append(asset_item.asset)
return filtered_assets
else:
all_assets: List[SimreadyAsset] = [item.asset for item in all_asset_items]
return all_assets
def add_asset_to_stage(
url: str,
parent_path: Sdf.Path = Sdf.Path.emptyPath,
position: Gf.Vec3d = Gf.Vec3d(0, 0, 0),
variants: Optional[Dict[str, str]] = None,
payload: bool = False,
instanceable: bool = False,
) -> Tuple[bool, Sdf.Path]:
"""Adds an asset to the current stage.
Args:
url (str): Url of asset to add.
parent_path (Sdf.Path): Path of parent prim to add asset to. If empty path, add to default prim or pseudo root.
position (Gf.Vec3d): Position to add asset at.
        variants (Optional[Dict[str, str]]): Variants to set on added asset. Dictionary of variant set name and value.
        payload (bool): If True, add asset as payload, otherwise as reference.
        instanceable (bool): If True, mark the added prim as instanceable.
Returns:
        Tuple[bool, Sdf.Path]: Tuple of success, and path to added prim.
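    Example (a usage sketch; the asset URL is hypothetical):
    .. code-block:: python
        ok, prim_path = add_asset_to_stage(
            "omniverse://server/Assets/chair.usd",
            position=Gf.Vec3d(0, 0, 100),
            variants={"PhysicsVariant": "RigidBody"},
        )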
.. note::
The actions of this function are undoable. If you want to add an asset without undo, use the following:
.. code-block:: python
with omni.kit.undo.disabled():
add_asset_to_stage(...)
"""
if not url:
carb.log_error("Failed to add asset since url not defined!")
return False, ""
usd_context: omni.usd.UsdContext = omni.usd.get_context()
stage: Usd.Stage = usd_context.get_stage() if usd_context else None
if stage is None:
carb.log_error(f"No valid stage found; cannot add {url}.")
return False, ""
# Creation and translation of the prim is done in a single undo group
with omni.kit.undo.group():
# Add the asset to the stage
(added_prim_path, _, _) = _add_asset_to_stage_helper(
usd_context, stage, url, prim_path=parent_path, payload=payload, instanceable=instanceable
)
# Translate the added prim to the specified position
omni.kit.commands.execute("TransformPrimSRTCommand", path=added_prim_path, new_translation=position)
# Set the variants on the added prim
if added_prim_path and variants:
configure_prim(stage, added_prim_path, variants)
return True, Sdf.Path(added_prim_path)
def add_asset_to_stage_using_prims(
usd_context: omni.usd.UsdContext,
stage: Usd.Stage,
url: str,
variants: Optional[Dict[str, str]] = None,
replace_prims: bool = False,
prim_paths: List[Sdf.Path] = [],
) -> Tuple[bool, Sdf.Path]:
"""Add an asset to a stage using a list of prims.
The asset will be added to the average position of the provided prims, or the origin if no prims supplied.
The asset will be added as a reference or payload based on whether the first provided prim has authored references or payloads.
If no prims specified, the setting at "/persistent/app/stage/dragDropImport" is used.
If the new asset is to replace the prims, the asset's parent will be the common ancestor of all prims.
If no prims specified, the default prim or pseudo root will be used as the parent prim of the added asset.
Args:
usd_context (omni.usd.UsdContext): UsdContext to add asset to.
stage (Usd.Stage): Stage to add asset to.
url (str): Url of asset to add.
variants (Optional[Dict[str, str]]): Variants to set on the added prim.
replace_prims (bool): If True, replace the selection with the new asset.
prim_paths (List[Sdf.Path]): List of prims to use for adding the asset.
Returns:
Tuple of success and added prim path.
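    Example (a usage sketch; replaces the current selection with a hypothetical asset):
    .. code-block:: python
        import omni.usd
        ctx = omni.usd.get_context()
        stage = ctx.get_stage()
        ok, prim_path = add_asset_to_stage_using_prims(
            ctx,
            stage,
            "omniverse://server/Assets/chair.usd",
            replace_prims=True,
            prim_paths=get_selected_xformable_prim_paths(ctx, stage),
        )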
.. note::
The actions of this function are undoable. If you want to add an asset without undo, use the following:
.. code-block:: python
with omni.kit.undo.disabled():
add_asset_to_stage_using_prims(...)
"""
if not url:
carb.log_error("Failed to add asset since url not defined!")
return False, ""
if usd_context is None or stage is None:
carb.log_error(f"No valid stage found; cannot add {url}.")
return False, ""
prims = [
stage.GetPrimAtPath(path)
for path in prim_paths
if stage.GetPrimAtPath(path) and stage.GetPrimAtPath(path).IsA(UsdGeom.Xformable)
]
    # If replacing prims, all prims must be deletable/replaceable
if replace_prims:
for prim in prims:
if not prim.IsValid() or (
prim.GetMetadata("no_delete") is not None and prim.GetMetadata("no_delete") is True
):
carb.log_error("Failed to add asset since cannot replace specified prims!")
return False, ""
# Get average position of prims
position: Gf.Vec3d = get_average_position_of_prims(prims)
# If prims specified, use the first prim's payload/reference status
create_option = ""
if prims and len(prims) > 0:
for prim in prims:
if prim.HasAuthoredReferences():
create_option = "reference"
break
if prim.HasAuthoredPayloads():
create_option = "payload"
break
# Determine parent path for new prim
if replace_prims and prims and len(prims) > 0:
parent_path: Sdf.Path = Sdf.Path.emptyPath
for prim in prims:
if parent_path == Sdf.Path.emptyPath:
parent_path: Sdf.Path = prim.GetParent().GetPath()
else:
parent_path: Sdf.Path = parent_path.GetCommonPrefix(prim.GetParent().GetPath())
elif stage.HasDefaultPrim():
parent_path: Sdf.Path = stage.GetDefaultPrim().GetPath()
else:
parent_path: Sdf.Path = Sdf.Path.absoluteRootPath
# Deletion, creation and translation of prims is done in a single undo group
with omni.kit.undo.group():
# If replacing prims, delete them before adding new ones to prevent potential name collision
if replace_prims and prim_paths and len(prim_paths) > 0:
omni.kit.commands.execute("DeletePrimsCommand", paths=prim_paths)
# Add asset to stage
(added_prim_path, _, _) = _add_asset_to_stage_helper(
usd_context, stage, url, prim_path=parent_path, payload=create_option, instanceable=""
)
# Translate the added prim to the specified position
omni.kit.commands.execute("TransformPrimSRTCommand", path=added_prim_path, new_translation=position)
# Set the variants on the added prim
if added_prim_path and variants:
configure_prim(stage, added_prim_path, variants)
return True, added_prim_path
| 10,652 | Python | 39.2 | 131 | 0.659501 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_toolbar.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Dict, List, Optional, Union
from omni import ui
from .style import ICON_PATH
class BrowserToolBarButtonDesc:
"""
Represent a button in browser toolbar
Args:
image_url (Optional[str]): Image url of button. None means spacer.
        clicked_fn (callable): Function called when button clicked. Default None. Function signature:
            void clicked_fn()
        tooltips (Optional[str]): Button tooltips. Default None.
"""
def __init__(self, image_url: Optional[str], clicked_fn: callable = None, tooltips: Optional[str] = None):
self.image_url = image_url
self.clicked_fn = clicked_fn
self.tooltips = tooltips
class BrowserToolBarBase:
"""
Represent a base tool bar for browser.
Args:
descs (List[BrowserToolBarButtonDesc]): Default buttons to show on tool bar.
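    Example (a usage sketch; the icon file is an assumption):
        descs = [
            BrowserToolBarButtonDesc(f"{ICON_PATH}/property_dark.svg", clicked_fn=lambda: print("clicked")),
            BrowserToolBarButtonDesc(None),  # a None image_url produces a spacer
        ]
        toolbar = BrowserToolBarBase(descs)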
"""
def __init__(self, descs: List[BrowserToolBarButtonDesc]):
self._buttons: Dict[BrowserToolBarButtonDesc, ui.Button] = {}
self._button_descs: List[BrowserToolBarButtonDesc] = []
self._button_descs.extend(descs)
self.widget = ui.HStack(height=0, spacing=4)
self._spacer_visible = True
self._spacers: List[ui.Spacer] = []
self._build_buttons()
@property
def visible(self) -> bool:
"""
Toolbar visibility.
"""
return self.widget.visible
@visible.setter
def visible(self, value) -> None:
self.widget.visible = value
@property
def computed_height(self):
return self.widget.computed_height
@property
def spacer_visible(self) -> bool:
"""Visibility of spacers in toolbar"""
return self._spacer_visible
@spacer_visible.setter
def spacer_visible(self, visible) -> None:
if visible != self._spacer_visible:
self._spacer_visible = visible
for spacer in self._spacers:
spacer.visible = visible
@property
def width(self):
return self.widget.computed_width if self.widget else 0
@property
def position_x(self):
return self.widget.screen_position_x if self.widget else 0
def destroy(self) -> None:
for desc in self._buttons:
self._buttons[desc] = None
self.widget = None
def append_buttons(self, button_descs: Union[BrowserToolBarButtonDesc, List[BrowserToolBarButtonDesc]]) -> None:
"""
Append buttons to toolbar.
Args:
button_descs (Union[BrowserToolBarButtonDesc, \
List[BrowserToolBarButtonDesc]]): Desc of buttons to be appended.
"""
if isinstance(button_descs, list):
self._button_descs.extend(button_descs)
else:
self._button_descs.append(button_descs)
self._build_buttons()
def get_button(self, desc: BrowserToolBarButtonDesc) -> Optional[ui.Button]:
"""
Get toolbar button by desc. Return None if not found.
Args:
desc (BrowserToolBarButtonDesc): Button description.
"""
return self._buttons[desc] if desc in self._buttons else None
def _build_buttons(self):
self.widget.clear()
self._buttons.clear()
self._spacers.clear()
with self.widget:
for desc in self._button_descs:
if desc.image_url:
with ui.VStack(width=26):
ui.Spacer()
self._buttons[desc] = ui.Button(
image_url=desc.image_url,
image_width=20,
image_height=20,
width=26,
height=26,
clicked_fn=desc.clicked_fn,
style_type_name_override="ToolBar.Button",
tooltip=desc.tooltips if desc.tooltips else "",
)
ui.Spacer()
else:
spacer = ui.Spacer()
self._spacers.append(spacer)
class BrowserPropertyToolBar(BrowserToolBarBase):
"""
Represent a tool bar with a button to display a Property widget (window).
Args:
        on_toggle_property_fn (callable): Function called when show/hide property button clicked. Function signature:
void on_toggle_property_fn()
"""
def __init__(self, on_toggle_property_fn: callable):
self._on_toggle_property_fn = on_toggle_property_fn
self._property_button_desc = BrowserToolBarButtonDesc(
f"{ICON_PATH}/property_dark.svg",
clicked_fn=self._on_toggle_property_fn,
tooltips="Show/Hide property widget",
)
super().__init__([BrowserToolBarButtonDesc(""), self._property_button_desc])
def destroy(self):
super().destroy()
@property
def btn_property(self) -> ui.Button:
return self.get_button(self._property_button_desc)
| 5,439 | Python | 33.43038 | 116 | 0.600294 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/extension.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import List, Optional
import carb.settings
import omni.ext
import omni.kit.menu.utils
import omni.ui as ui
from omni.kit.browser.core import TreeBrowserWidget
from .actions import deregister_actions, register_actions
from .browser_model import SimReadyBrowserModel
from .empty_property_delegate import EmptyPropertyDelegate
from .multi_property_delegate import MultiPropertyDelegate
from .prop_property_delegate import PropAssetPropertyDelegate
from .window import SIMREADY_EXPLORER_NAME, SimReadyBrowserWindow
BROWSER_MENU_ROOT = "Window"
SETTINGS_ROOT = "/exts/omni.simready.explorer/"
SETTINGS_VISIBLE = SETTINGS_ROOT + "visible_after_startup"
_extension_instance = None
class SimReadyBrowserExtension(omni.ext.IExt):
@property
def window(self) -> Optional[SimReadyBrowserWindow]:
return self._window
@property
def browser_widget(self) -> Optional[TreeBrowserWidget]:
return self._window._widget
@property
def browser_model(self) -> Optional[SimReadyBrowserModel]:
return self._window._browser_model
def on_startup(self, ext_id):
try:
self.__ext_id = omni.ext.get_extension_name(ext_id)
except AttributeError:
def get_extension_name(ext_id: str) -> str:
"""Convert 'omni.foo-tag-1.2.3' to 'omni.foo-tag'"""
a, b, *_ = ext_id.split("-") + [""]
if b and not b[0:1].isdigit():
return f"{a}-{b}"
return a
self.__ext_id = get_extension_name(ext_id)
register_actions(self.__ext_id, self)
self._window = None
ui.Workspace.set_show_window_fn(
SIMREADY_EXPLORER_NAME,
self._show_window, # pylint: disable=unnecessary-lambda
)
self._register_menuitem()
        visible = carb.settings.get_settings().get_as_bool(SETTINGS_VISIBLE)
if visible:
self._show_window(True)
# Instantiate the property delegates so they get registered with class BrowserPropertyDelegate
self.__empty_property_delegate = EmptyPropertyDelegate()
self.__propasset_property_delegate = PropAssetPropertyDelegate()
self.__multi_property_delegate = MultiPropertyDelegate()
global _extension_instance
_extension_instance = self
def on_shutdown(self):
omni.kit.menu.utils.remove_menu_items(self._menu_entry, name=BROWSER_MENU_ROOT)
if self._window is not None:
self._window.destroy()
self._window = None
self.__empty_property_delegate = None
self.__propasset_property_delegate = None
deregister_actions(self.__ext_id)
global _extension_instance
_extension_instance = None
def _show_window(self, visible) -> None:
if visible:
if self._window is None:
self._window = SimReadyBrowserWindow(visible=True)
self._window.set_visibility_changed_fn(self._on_visibility_changed)
else:
self._window.visible = True
else:
self._window.visible = False
def _toggle_window(self):
self._show_window(not self._is_visible())
def _register_menuitem(self):
self._menu_entry = [
omni.kit.menu.utils.MenuItemDescription(
name="Browsers",
sub_menu=[
omni.kit.menu.utils.MenuItemDescription(
name=SIMREADY_EXPLORER_NAME,
ticked=True,
ticked_fn=self._is_visible,
onclick_action=(self.__ext_id, "toggle_window"),
)
],
)
]
omni.kit.menu.utils.add_menu_items(self._menu_entry, BROWSER_MENU_ROOT)
def _is_visible(self):
return self._window.visible if self._window else False
def _on_visibility_changed(self, visible):
omni.kit.menu.utils.refresh_menu_items(BROWSER_MENU_ROOT)
def get_instance() -> Optional[SimReadyBrowserExtension]:
return _extension_instance
| 4,537 | Python | 34.453125 | 102 | 0.63985 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""SimReady Explorer API
This module contains the API for the SimReady Explorer extension.
"""
__all__ = [
"AssetType",
"AssetFactory",
"SimreadyAsset",
"PropAsset",
"find_assets",
"add_asset_to_stage",
"add_asset_to_stage_using_prims",
"get_average_position_of_prims",
"get_selected_xformable_prim_paths",
]
from .asset import AssetFactory, AssetType, PropAsset, SimreadyAsset # noqa: F401, symbol is reexported
from .browser_api import ( # noqa: F401, F403, symbol is reexported
add_asset_to_stage,
add_asset_to_stage_using_prims,
find_assets,
get_average_position_of_prims,
get_selected_xformable_prim_paths,
)
from .browser_model import SimReadyBrowserModel # noqa: F401, symbol is reexported
from .extension import SimReadyBrowserExtension, get_instance # noqa: F401, symbol is reexported
| 1,284 | Python | 34.694443 | 104 | 0.744548 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_folder.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import json
from typing import Dict, List, Optional
import carb
import carb.tokens
import omni.client
from omni.kit.browser.folder.core import FileSystemFolder
from .asset import AssetFactory, SimreadyAsset
ASSET_LIST_FILE = "asset_info.json"
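# Illustrative shape of asset_info.json (a list of asset entries; the keys
# shown are the ones read by SimreadyAsset.__init__ and PropAsset.is_asset_data,
# the real schema may contain more fields; the folder's url is injected as
# "Root Path" at traverse time):
# [
#     {
#         "Asset Type": "prop",
#         "Simple Name": "Armchair",
#         "Relative Path": "/Armchair/Armchair.usd",
#         "Thumbnail Path": ".thumbs/Armchair.png",
#         "Labels": {"Hierarchy": "furniture/seat/chair/armchair", "QCode": "Q11285759"},
#         "Tags": ["furniture", "seat"],
#         "Extent": [50.0, 50.0, 90.0],
#         "Behaviors": [{"PhysicsVariant": {"Prim Path": "/Armchair", "Values": ["RigidBody"]}}]
#     }
# ]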
class AssetFolder(FileSystemFolder):
"""
    Represents a folder that is traversed to find SimReady assets.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._asset_by_tags: Optional[Dict[str, List[SimreadyAsset]]] = None
self._asset_by_labels: Optional[Dict[str, List[SimreadyAsset]]] = None
@property
def asset_by_tags(self) -> Dict[str, List[SimreadyAsset]]:
"""
        All assets in this folder and its sub-folders, grouped by tag.
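        Example shape (illustrative): {"chair": [<PropAsset>, ...], "wooden": [<PropAsset>, ...]}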
"""
if self._asset_by_tags is None:
self._asset_by_tags = {}
for asset in self.files:
for tag in asset.tags:
if tag not in self._asset_by_tags:
self._asset_by_tags[tag] = []
self._asset_by_tags[tag].append(asset)
for sub_folder in self.sub_folders:
asset_by_tags = sub_folder.asset_by_tags
for tag in asset_by_tags:
if tag not in self._asset_by_tags:
self._asset_by_tags[tag] = []
for asset in asset_by_tags[tag]:
if asset not in self._asset_by_tags[tag]:
self._asset_by_tags[tag].append(asset)
return self._asset_by_tags
@property
def asset_by_labels(self) -> Dict[str, List[SimreadyAsset]]:
"""
        All assets in this folder, grouped by label.
"""
if self._asset_by_labels is None:
self._asset_by_labels = {}
for asset in self.files:
for label in asset.labels:
if label not in self._asset_by_labels:
self._asset_by_labels[label] = []
self._asset_by_labels[label].append(asset)
return self._asset_by_labels
async def _traverse_folder_async(self, url: str, recurse: bool = True):
        # A json file lists all assets in the folder.
        # Read that file to collect the assets instead of traversing the folder.
self._asset_by_tags = None
asset_json = await self._load_json()
if asset_json:
for asset_data in asset_json:
asset_data["Root Path"] = self.url
asset: SimreadyAsset = AssetFactory.create_asset(asset_data)
if asset:
self.files.append(asset)
else:
carb.log_warn(f"Couldn't create asset for url {url}.")
self._on_traverse_async_done()
async def _load_json(self) -> Optional[List[Dict]]:
# Load json file to get assets list
json_file = self.url + "/" + ASSET_LIST_FILE
result, _ = await omni.client.stat_async(json_file)
if result != omni.client.Result.OK:
carb.log_error(f"Cannot find {json_file}, error: {result}")
return None
try:
result, _, content = await omni.client.read_file_async(json_file)
if result != omni.client.Result.OK:
carb.log_error(f"Cannot read {json_file}, error: {result}")
return None
return json.loads(memoryview(content).tobytes().decode("utf-8"))
except FileNotFoundError:
carb.log_info(f"Failed to open {json_file}!")
except PermissionError:
carb.log_error(f"Cannot read {json_file}: permission denied!")
except Exception as exc:
carb.log_error(f"Unknown failure to read {json_file}: {exc}")
return None
| 4,284 | Python | 38.311926 | 82 | 0.59127 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/asset.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import abc
import functools
import os
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Type
import carb
from omni.kit.browser.folder.core import BrowserFile
class AssetType(Enum):
"""SimReady asset types.
An asset type derived from SimreadyAsset is expected to be of one of these types.
The AssetType values are used when registering a new asset type with the AssetFactory,
using the AssetFactory.register() decorator. The asset class should also set its _type
attribute to the same AssetType value it registered itself with.
Example:
.. code-block:: python
@AssetFactory.register(AssetType.CHARACTER)
class CharacterAsset(SimreadyAsset):
def __init__(self):
self._type = AssetType.CHARACTER
.. note::
        The SimReady Explorer API does not yet fully support asset classes created by 3rd party developers.
"""
PROP = 1
"""A non-deformable static object."""
VEHICLE = 2
"""A vehicle with wheels and doors."""
CHARACTER = 3
"""A skinned bipedal character with a rig and animations."""
SCENE = 4
"""A scene with a number of static objects and a ground plane"""
SIGN = 5
"""Road sign"""
ROADMARK = 6
"""Marks on the road used to direct traffic"""
GENERIC = 7
"""Generic asset type"""
UNKNOWN = 8
"""Non-categorized asset"""
class SimreadyAsset(BrowserFile, abc.ABC):
"""Base class for SimReady assets.
SimreadyAsset is an abstract type that implements the common behavior of all SimReady assets.
Derived classes can further refine and customize the semantics of a Simready asset.
A SimReady asset is a collection of sublayers and other data files usually grouped in a folder.
The SimReady Specification <todo: add link> defines the structure of a SimReady asset in detail.
At a high level, a SimReady asset is comprised of:
* Main file : {asset_name}.usd
* Sublayers : {asset_name}_{description}.usd
* Materials: all mdl or usd material data
* Textures: all image data pertaining to materials
* Thumbnails: thumbnail images of the asset
SimReady assets expose a number of behaviors such as physics, appearance, etc.
Assets can also have behaviors that are specific to their type. For example, vehicles
may have the number of wheels and doors as part of their behavior.
SimReady assets can be tagged. Tags are space delimited groups of words used in
filtering assets in an asset library.
SimReady assets can be labeled. Hierarchical labels can come from the Wiki Data database (https://www.wikidata.org)
and are backed by a QCode. The SimReady Explorer displays the labels in the `Category Tree Window`.
Labels, just like tags can be multi-word, and can be used to search for assets.
.. note::
        The SimReady Explorer API does not yet fully support asset classes created by 3rd party developers.
"""
def __init__(self, raw_asset_data: Dict):
"""Initialize a SimReady asset.
SimReadyAsset instances are created by the AssetFactory. Developers don't need to instantiate
this and derived classes directly.
Args:
raw_asset_data (Dict): Dictionary with data used to initialize the SimreadyAsset instance.
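        Illustrative example (a minimal dictionary; real entries come from a
        folder's asset_info.json, with "Root Path" injected at traverse time):
        .. code-block:: python
            raw = {
                "Asset Type": "prop",
                "Simple Name": "Armchair",
                "Root Path": "omniverse://server/NVIDIA/Assets",
                "Relative Path": "/Armchair/Armchair.usd",
                "Labels": {"Hierarchy": "furniture/seat/chair/armchair",
                           "QCode": "Q11285759"},
                "Tags": ["furniture", "seat"],
            }
            asset = AssetFactory.create_asset(raw)  # dispatches to PropAsset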
"""
self._type = AssetType.UNKNOWN
self._name = raw_asset_data.get("Simple Name", "")
rel_url = raw_asset_data.get("Relative Path")
root_url = raw_asset_data.get("Root Path")
if rel_url.startswith("/") or root_url.endswith("/"):
url = root_url + rel_url
else:
url = root_url + "/" + rel_url
path = os.path.dirname(url)
thumbnail_path = raw_asset_data.get("Thumbnail Path", None)
thumbnail = f"{path}/{thumbnail_path}" if thumbnail_path else None
label_dict: Dict[str, Any] = raw_asset_data.get("Labels", {})
self._labels: List[str] = [label_dict.get("Hierarchy", "").strip()]
self._hierarchy: List[str] = label_dict.get("Hierarchy", "").split("/")
self._tags: List[str] = [tag.strip() for tag in raw_asset_data.get("Tags", [])]
self._extent: List[float] = raw_asset_data.get("Extent", [])
self._qcode: str = label_dict.get("QCode", "").strip()
self._behaviors: List[Dict] = raw_asset_data.get("Behaviors", [])
for behavior in self._behaviors:
if "Value" in behavior:
behavior["Value"] = behavior["Value"].strip()
super().__init__(url, thumbnail=thumbnail)
@classmethod
@abc.abstractmethod
def is_asset_data(cls, raw_asset_data: Dict) -> bool:
"""Returns true if the provided raw asset data is determined to represent this asset type.
To be implemented by derived classes.
"""
return False
@property
def name(self) -> str:
"""The user readable name of this asset."""
return self._name
@property
def asset_type(self) -> AssetType:
"""The type of this asset.
Must be one of the values in the AssetType enum.
"""
return self._type
@property
def main_url(self) -> str:
"""The full path to the main file representing the asset.
This file represents the top-level USD composition arc for a SimReady asset.
It may take other USD layers and combine them together into one USD file.
The path is set by the AssetFactory when creating the asset based on the provided raw asset data.
"""
return self.url
@property
def thumbnail_url(self) -> Optional[str]:
"""The full path to the asset's thumbnail image file.
The thumbnail path is set by the AssetFactory when creating the asset based on the provided raw asset data.
"""
return self.thumbnail
@property
def tags(self) -> List[str]:
"""The list of tags of this asset.
Each tag can be a multi-word space delimited string.
Examples: "car", "three wheeled car", etc
"""
return self._tags
@property
def labels(self) -> List[str]:
"""The labels of this asset as a list.
Labels are hierarchical and can be used to group assets in a tree structure.
The label hierarchy is determined by a Wiki Data QCode (see https://www.wikidata.org)
Examples: ['furniture/seat/chair/armchair'], QCode: Q11285759
"""
return self._labels
@property
    def labels_as_str(self) -> str:
"""The labels of this asset as a comma delimited string.
Example: "furniture/seat/chair/armchair"
"""
return ",".join(self._labels)
@property
def tags_as_str(self) -> str:
"""List of comma delimited tag words.
Note that tags, labels and the QCode are all part of the list of tags
in order to make searching more effective.
Example: "residential,furniture,seat,chair,armchair,Q11285759"
"""
return ",".join(self.tags)
@property
def extent_as_str(self) -> str:
"""The extent of this asset in 3D space.
Example: "20x10.5x0.25"
"""
return "x".join([str(x) for x in self._extent])
@property
def qcode(self) -> str:
"""The Wiki Data QCode of this asset.
Every asset is identified by a QCode.
The QCode is used to retrieve the labels from Wiki Data,
which in effect, act as a classification of the asset.
Example: "Q11285759"
"""
return self._qcode
@property
    def hierarchy(self) -> List[str]:
        """The Wiki Data label hierarchy of this asset.
        This is the "labels" hierarchy split into its individual parts.
Examples: ['furniture', 'seat', 'chair', 'armchair']
"""
return self._hierarchy
@property
    def hierarchy_as_str(self) -> str:
"""The Wiki Data label hierarchy of this asset formatted as a '>' delimited string
Example: "furniture > seat > chair > armchair"
"""
        return " > ".join(self.hierarchy)
@property
def physics_variant(self) -> Optional[Dict]:
"""The physics variant set for this asset.
This dictionary has the following structure:
{'Prim Path': <prim_path>, 'Values': ['None', 'RigidBody']}
where <prim_path> is a string representing the path to the prim that contains the physics variant set.
The name of the Physics variant set is always "PhysicsVariant".
"""
for behavior in self._behaviors:
if "PhysicsVariant" in behavior:
return behavior["PhysicsVariant"]
return None
@property
def behaviors(self) -> Dict[str, List[str]]:
"""The behaviors of this asset.
The behaviors of this asset is represented as a dictionary of
variant set names as keys and lists of variant names as values.
Example: {'PhysicsVariant': ['None', 'RigidBody']}
"""
behaviors: Dict[str, List[str]] = {}
for behavior in self._behaviors:
for variant_set, variant_data in behavior.items():
if variant_set not in behaviors:
behaviors[variant_set] = variant_data["Values"]
return behaviors
def __repr__(self) -> str:
return f"[SimReady Asset]{self.url}"
class AssetFactory:
"""A factory of SimReady asset objects.
Allows to register asset classes and create instances of them.
.. note::
Currently only one asset class can be registered per asset type.
The last registered asset class of a given type will be used.
Asset classes created and registered by 3rd party developers are not fully supported yet.
"""
    registry: Dict[AssetType, Type[SimreadyAsset]] = {}
"""The registry of asset types. Maps asset types to asset classes."""
@classmethod
def register(cls, asset_type: AssetType) -> Callable:
"""Decorator. Registers a new asset class as asset_type with the AssetFactory.
Example:
            Register MyAsset as AssetType.PROP with the AssetFactory
.. code-block:: python
@AssetFactory.register(AssetType.PROP)
                class MyAsset(SimreadyAsset):
pass
"""
@functools.wraps(cls)
def inner_wrapper(wrapped_asset_class: Type[SimreadyAsset]) -> Type[SimreadyAsset]:
"""
Register the wrapped asset class with the asset factory.
Return the asset class unaltered.
"""
if asset_type in cls.registry:
carb.log_warn("f{asset_type} already registered; will be replaced by {wrapped_asset_class}")
cls.registry[asset_type] = wrapped_asset_class
return wrapped_asset_class
return inner_wrapper
@classmethod
def create_asset(cls, raw_asset_data: Dict) -> Optional[SimreadyAsset]:
"""Creates an asset based on a dictionary of data with content specific to the asset type."""
for asset_class in cls.registry.values():
if asset_class.is_asset_data(raw_asset_data):
return asset_class(raw_asset_data)
return None
@classmethod
def num_asset_types(cls) -> int:
"""Returns the number of registered asset types"""
return len(cls.registry)
@classmethod
def dump_asset_types(cls):
"""Prints the list of registered asset types"""
for asset_type, asset_class in cls.registry.items():
print(f"asset_type: {asset_type}, asset_class: {asset_class}")
@AssetFactory.register(AssetType.PROP)
class PropAsset(SimreadyAsset):
"""A SimReady prop asset.
Props are physically based static objects that do not deform and don't have kinematic behavior.
Examples include chairs, tools, equipment, containers, etc.
"""
def __init__(self, raw_asset_data: Dict):
super().__init__(raw_asset_data)
self._type = AssetType.PROP
@classmethod
def is_asset_data(cls, raw_asset_data: Dict) -> bool:
"""Returns true if the raw asset data is determined to represent a PROP type asset."""
asset_type: str = raw_asset_data.get("Asset Type", AssetType.UNKNOWN.name)
return asset_type.upper() == AssetType.PROP.name
| 12,798 | Python | 35.155367 | 119 | 0.64346 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/context_menu.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
from typing import List, Set
import carb
import omni.kit.ui as kitui
import omni.ui as ui
import omni.usd
from carb import settings
from pxr import Sdf, Usd
from .browser_model import AssetDetailItem
from .combobox_model import ComboBoxItem
from .style import CONTEXT_MENU_STYLE
class ContextMenu(ui.Menu):
"""
Context menu for simready asset browser.
"""
def __init__(self):
super().__init__("SimReady Asset browser context menu", style=CONTEXT_MENU_STYLE)
self._settings = settings.get_settings()
def show_item(self, item: AssetDetailItem) -> None:
self.clear()
from .extension import get_instance
selection: List[AssetDetailItem] = get_instance().browser_widget.selection
if not selection and not item:
return
assets: List[AssetDetailItem] = selection if selection else [item]
# Gather all physics variant values from all the selected assets
possible_physics_values = set()
for asset in assets:
physics_items = asset.physics_model.get_item_children(None) if asset.physics_model else []
possible_physics_values.update(item.value for item in physics_items)
# Compute common physics variant value all selected assets share
common_physics_value = assets[0].physics
for asset in assets:
if asset.physics != common_physics_value:
common_physics_value = None
break
with self:
# Behavior menu (only physics for now)
# Always create new menu items to match asset variant settings
if len(possible_physics_values) > 1:
with ui.MenuItemCollection(f"{kitui.get_custom_glyph_code('${glyphs}/none.svg')} Physics"):
for physics_value in possible_physics_values:
value = physics_value
ui.MenuItem(
value or "None",
checked=(value == common_physics_value),
checkable=True,
triggered_fn=(lambda assets=assets, v=value: self.__set_item_physics(assets, v)),
)
# Some right-click commands only make sense when only one asset is selected
if len(assets) == 1:
ui.Separator()
has_collect = False
try:
# pylint: disable=redefined-outer-name
import omni.kit.tool.collect # noqa: F401
has_collect = True
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/upload.svg')} Collect",
triggered_fn=(lambda asset=assets[0]: self.__collect(asset)),
)
except ImportError:
carb.log_warn("Please enable omni.kit.tool.collect first to collect.")
if has_collect:
ui.Separator()
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/show.svg')} Open",
triggered_fn=(lambda asset=assets[0]: self.__open_stage(asset, load_all=True)),
)
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/show.svg')} Open with Payloads Disabled",
triggered_fn=(lambda asset=assets[0]: self.__open_stage(asset, load_all=False)),
)
ui.Separator()
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/plus.svg')} Add at Current Selection",
triggered_fn=(lambda assets=assets: self.__add_using_selection(assets, False)),
)
if len(assets) == 1:
# If enabled for multi-selection, only the first asset would be placed at the position
# where the selected prims were located, as these prims need to be deleted before any
# new prims are added. Therefore, disabling this command for multi-selection.
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/plus.svg')} Replace Current Selection",
triggered_fn=(lambda assets=assets: self.__add_using_selection(assets, True)),
)
ui.Separator()
ui.MenuItem(
f"{kitui.get_custom_glyph_code('${glyphs}/share.svg')} Copy URL Link",
triggered_fn=(lambda assets=assets: self.__copy_url_link(assets)),
)
self.show()
def __set_item_physics(self, assets: List[AssetDetailItem], physics: str) -> None:
for asset in assets:
asset.physics = physics
def __copy_url_link(self, assets: List[AssetDetailItem]) -> None:
try:
import omni.kit.clipboard
omni.kit.clipboard.copy("\n".join(asset.url for asset in assets))
except ImportError:
carb.log_warn("Warning: Could not import 'omni.kit.clipboard'.")
async def __open_stage_async(self, url: str, load_all: bool):
try:
import omni.kit.window.file
if load_all:
loadset = omni.usd.UsdContextInitialLoadSet.LOAD_ALL
else:
loadset = omni.usd.UsdContextInitialLoadSet.LOAD_NONE
omni.kit.window.file.open_stage(url, open_loadset=loadset)
except ImportError:
carb.log_warn("Warning: Could not import 'omni.kit.window.file'.")
except Exception as e:
carb.log_error(str(e))
else:
carb.log_info(f"Opened '{url}'.\n")
def __open_stage(self, item: AssetDetailItem, load_all: bool = True) -> None:
"""
Open a stage in the current context.
Args:
item (AssetDetailItem): The asset to open.
load_all (bool): Whether to load all payloads.
"""
asyncio.ensure_future(self.__open_stage_async(item.url, load_all))
def __add_using_selection(self, assets: List[AssetDetailItem], replace_selection: bool) -> None:
from .browser_api import add_asset_to_stage_using_prims, get_selected_xformable_prim_paths
usd_context: omni.usd.UsdContext = omni.usd.get_context()
stage: Usd.Stage = usd_context.get_stage() if usd_context else None
prim_paths: List[Sdf.Path] = get_selected_xformable_prim_paths(usd_context, stage)
for asset in assets:
add_asset_to_stage_using_prims(
usd_context,
stage,
asset.url,
variants={
"PhysicsVariant": asset.physics if asset.physics != "None" else ""
}, # Hardcoded to physics variant for now
replace_prims=replace_selection,
prim_paths=prim_paths,
)
def __collect(self, asset: AssetDetailItem) -> None:
try:
# pylint: disable=redefined-outer-name
import omni.kit.tool.collect
collect_instance = omni.kit.tool.collect.get_instance()
collect_instance.collect(asset.url)
collect_instance = None
except ImportError:
carb.log_warn("Failed to import collect module (omni.kit.tool.collect). Please enable it first.")
except AttributeError:
carb.log_warn("Require omni.kit.tool.collect v2.0.5 or later!")
| 7,892 | Python | 42.368132 | 109 | 0.58515 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_with_property_widget.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
import math
from typing import List, Optional
import carb
import omni.kit.app
import omni.ui as ui
from omni.kit.browser.folder.core import FileDetailItem, TreeFolderBrowserWidget
from .browser_property_view import BrowserPropertyView
from .browser_toolbar import BrowserPropertyToolBar
from .style import UI_STYLES
# Layout is dynamically changed when the window size changes: switch to V mode when small, otherwise to H mode
class Layout:
# V mode, max window width. If window width is bigger, switch to H mode
V_MAX_WINDOW_WIDTH = 400
# V mode, min view height, at least show one row
V_MIN_VIEW_HEIGHT = 156
    # V mode, min property view height
V_MIN_PROPERTY_HEIGHT = 20
# V mode, default toolkits height
V_DEFAULT_TOOLKITS_HEIGHT = 280
    # V mode, search bar width
V_SEARCH_BAR_WIDTH = ui.Pixel(400)
# H mode, default toolkits width
H_DEFAULT_TOOLKITS_WIDTH = 450
# H mode, min view width, at least show one column
H_MIN_VIEW_WIDTH = 200
# H mode, min property view width
H_MIN_PROPERTY_WIDTH = 75
class BrowserWithPropertyWidget(TreeFolderBrowserWidget):
"""
    The Browser View (V in the MDV architecture).
Responsible for creating the frames for Category view, Search bar, Browser area, Property view.
Takes care of updating the layout when user resizes windows.
"""
def __init__(self, *args, **kwargs):
self._frame_width: Optional[float] = None
self._frame_height: Optional[float] = None
        # Default to horizontal layout since docked at the bottom along with the Content window
self._layout_vertical = False
self._last_toolkits_width = 0
self._last_toolkits_height = 0
super().__init__(*args, **kwargs)
self._switch_layout(self._layout_vertical)
def build_widgets(self) -> None:
"""
Build widgets with property view.
"""
self._frame = ui.Frame()
with self._frame:
with ui.VStack(spacing=4, style=UI_STYLES):
self._build_search_toolbar()
self._build_browser_widgets()
        # Need to change the layout and children sizes when the widget size changes
self._frame.set_computed_content_size_changed_fn(self._on_size_changed)
def _build_search_toolbar(self):
with ui.ZStack(height=0):
self._build_search_bar()
self._h_toolbar_container = ui.HStack(height=0)
with self._h_toolbar_container:
ui.Spacer()
self._toolbar = BrowserPropertyToolBar(self._toggle_property_view)
def _build_browser_widgets(self):
self._browser_container = ui.Stack(ui.Direction.TOP_TO_BOTTOM, spacing=4)
with self._browser_container:
self._browser_view_container = ui.ZStack(height=0)
with self._browser_view_container:
self._build_browser_widget()
# Draggable splitter for H/V layout mode
self._v_splitter = ui.Placer(offset_y=0, draggable=True, drag_axis=ui.Axis.Y)
with self._v_splitter:
ui.Rectangle(height=4, style_type_name_override="Splitter")
self._h_splitter = ui.Placer(offset_x=0, draggable=True, drag_axis=ui.Axis.X)
with self._h_splitter:
ui.Rectangle(width=4, style_type_name_override="Splitter")
# Toolkits
self._toolkits_container = ui.VStack(spacing=4)
with self._toolkits_container:
# Toolbar with property button
self._v_toolbar_frame = ui.Frame(height=0)
self._v_toolbar_frame.add_child(self._toolbar.widget)
# Property View
self._property_view = BrowserPropertyView()
self._v_splitter.set_offset_y_changed_fn(self._splitter_offset_y_changed)
self._h_splitter.set_offset_x_changed_fn(self._splitter_offset_x_changed)
self._toolbar.btn_property.selected = self._property_view.visible
self._browser_widget._detail_view.set_selection_changed_fn(self._on_detail_selection_changed)
        # Show property view by default
async def __show_property_view_async():
await omni.kit.app.get_app().next_update_async()
self._toggle_property_view()
asyncio.ensure_future(__show_property_view_async())
def _splitter_offset_y_changed(self, offset_y: ui.Length) -> None:
if self._property_view.visible:
if offset_y.value < Layout.V_MIN_VIEW_HEIGHT:
self._v_splitter.offset_y = Layout.V_MIN_VIEW_HEIGHT
return
available_property_height = (
self._browser_container.computed_height - offset_y - self._toolbar.computed_height - 12
)
if available_property_height < Layout.V_MIN_PROPERTY_HEIGHT:
self._last_toolkits_height = 0
self._toggle_property_view()
if self._property_view.visible:
self._last_toolkits_height = self._browser_container.computed_height - offset_y
def _splitter_offset_x_changed(self, offset_x: ui.Length) -> None:
if self._property_view.visible:
if offset_x.value < Layout.H_MIN_VIEW_WIDTH:
self._h_splitter.offset_x = Layout.H_MIN_VIEW_WIDTH
return
available_property_width = self._browser_container.computed_width - offset_x - 8
if available_property_width < Layout.H_MIN_PROPERTY_WIDTH:
self._toggle_property_view()
self._last_toolkits_width = 0
if self._property_view.visible:
self._last_toolkits_width = self._browser_container.computed_width - offset_x
def _switch_layout(self, vertical: bool) -> None:
# toolbar visibility
self._toolbar.spacer_visible = vertical
self._toolbar.widget.width = ui.Fraction(1) if vertical else ui.Pixel(0)
self._v_toolbar_frame.visible = vertical
self._h_toolbar_container.visible = not vertical
# searchbar
self.__update_search_bar_width(vertical)
# browser view and splitters
self._browser_container.direction = ui.Direction.TOP_TO_BOTTOM if vertical else ui.Direction.LEFT_TO_RIGHT
self._v_splitter.visible = vertical
self._h_splitter.visible = not vertical
if vertical:
self._browser_view_container.width = ui.Fraction(1)
self._toolkits_container.width = ui.Fraction(1)
else:
self._browser_view_container.height = ui.Fraction(1)
self._toolkits_container.height = ui.Fraction(1)
self._layout_vertical = vertical
# Hide property if not enough space
if self._property_view.visible and not self._has_space_for_property():
self._property_view.visible = False
self._toolbar.btn_property.selected = False
# Update widgets position
asyncio.ensure_future(self._update_layout_async())
def _on_size_changed(self) -> None:
# Widget size changed callback
if self._frame_width is None or self._frame_height is None:
self._frame_width = self._frame.computed_content_width
self._frame_height = self._frame.computed_content_height
else:
if not math.isclose(self._frame_width, self._frame.computed_content_width):
async def __delay_update_width():
await omni.kit.app.get_app().next_update_async()
self._on_width_changed(self._frame.computed_content_width)
asyncio.ensure_future(__delay_update_width())
self._frame_width = self._frame.computed_content_width
if not math.isclose(self._frame_height, self._frame.computed_content_height):
async def __delay_update_height():
await omni.kit.app.get_app().next_update_async()
self._on_height_changed(self._frame.computed_content_height)
asyncio.ensure_future(__delay_update_height())
self._frame_height = self._frame.computed_content_height
def _on_width_changed(self, width) -> None:
# Window width changed, adjust widgets layout
vertical_layout = width < Layout.V_MAX_WINDOW_WIDTH
if vertical_layout != self._layout_vertical:
self._switch_layout(vertical_layout)
if not self._layout_vertical and self._property_view.visible and self._last_toolkits_width != 0:
self._h_splitter.offset_x = self._browser_container.computed_width - self._last_toolkits_width
self.__update_search_bar_width(self._layout_vertical)
def _on_height_changed(self, height) -> None:
if self._layout_vertical and self._property_view.visible and self._last_toolkits_height != 0:
self._v_splitter.offset_y = self._browser_container.computed_height - self._last_toolkits_height
def _toggle_property_view(self) -> None:
if not self._property_view.visible and not self._has_space_for_property():
carb.log_warn("Not enough space to show property!")
return
self._property_view.visible = not self._property_view.visible
self._toolbar.btn_property.selected = self._property_view.visible
asyncio.ensure_future(self._update_layout_async())
def __update_search_bar_width(self, vertical: bool):
# Update width of search bar
if vertical:
self._search_bar.width = ui.Fraction(1)
else:
def __set_search_bar_width():
# Adjust search bar width to match the model panel bar
self._search_bar.width = ui.Pixel(
self._toolbar.position_x - self._search_bar._frame.screen_position_x - 10
)
if self._toolbar.width > 0:
__set_search_bar_width()
else:
self._search_bar.width = Layout.V_SEARCH_BAR_WIDTH
async def __delay_update():
while self._toolbar.width == 0:
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
__set_search_bar_width()
asyncio.ensure_future(__delay_update())
def _has_space_for_property(self) -> bool:
if self._layout_vertical:
if self._browser_container.computed_height > 0:
available_property_height = (
self._browser_container.computed_height
- Layout.V_MIN_VIEW_HEIGHT
- self._toolbar.computed_height
- 12
)
if available_property_height < Layout.V_MIN_PROPERTY_HEIGHT:
carb.log_warn("Not enough space to show property!")
return False
else:
available_property_width = self._browser_container.computed_width - Layout.H_MIN_VIEW_WIDTH - 12
if available_property_width < Layout.H_MIN_PROPERTY_WIDTH:
return False
return True
async def _update_layout_async(self) -> None:
if not self._property_view.visible:
# property hidden
if self._layout_vertical:
# toolkits height fix, browser view height max
await omni.kit.app.get_app().next_update_async()
self._v_splitter.visible = False
self._browser_view_container.height = ui.Fraction(1)
self._toolkits_container.height = ui.Pixel(0)
else:
# toolkits width fix, browser view width max
await omni.kit.app.get_app().next_update_async()
self._h_splitter.visible = False
self._browser_view_container.width = ui.Fraction(1)
self._toolkits_container.width = ui.Pixel(0)
else:
# show property view
if self._layout_vertical:
# details view height changed by splitter, toolkits max
self._toolkits_container.height = ui.Fraction(1)
self._browser_view_container.height = ui.Pixel(0)
self._v_splitter.visible = True
await omni.kit.app.get_app().next_update_async()
if self._last_toolkits_height == 0:
offset_y = max(
self._browser_container.computed_height - Layout.V_DEFAULT_TOOLKITS_HEIGHT,
Layout.V_MIN_VIEW_HEIGHT,
)
self._last_toolkits_height = self._browser_container.computed_height - offset_y
self._v_splitter.offset_y = offset_y
else:
self._v_splitter.offset_y = self._browser_container.computed_height - self._last_toolkits_height
else:
                # details view width changed by splitter, toolkits max
self._toolkits_container.width = ui.Fraction(1)
self._browser_view_container.width = ui.Pixel(0)
self._h_splitter.visible = True
await omni.kit.app.get_app().next_update_async()
if self._last_toolkits_width == 0:
offset_x = max(
self._browser_container.computed_width - Layout.H_DEFAULT_TOOLKITS_WIDTH,
Layout.H_MIN_VIEW_WIDTH,
)
self._last_toolkits_width = self._browser_container.computed_width - offset_x
self._h_splitter.offset_x = offset_x
else:
self._h_splitter.offset_x = self._browser_container.computed_width - self._last_toolkits_width
def _on_detail_selection_changed(self, selections: List[FileDetailItem]) -> None:
self.selection = selections
if len(selections) > 1:
self._property_view.show_detail_multi_item(selections)
else:
self._property_view.show_detail_item(selections[0] if selections else None)
| 14,533 | Python | 43.042424 | 116 | 0.606207 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/asset_property_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import abc
from typing import Optional
import omni.ui as ui
from .asset import SimreadyAsset
from .browser_model import AssetDetailItem
from .browser_property_delegate import BrowserPropertyDelegate
class AssetPropertyDelegate(BrowserPropertyDelegate):
"""Base class for all asset property delegates."""
@abc.abstractmethod
def asset_accepted(self, asset: Optional[SimreadyAsset]) -> bool:
return False
@abc.abstractmethod
def show_asset(self, asset: Optional[SimreadyAsset], frame: ui.Frame) -> None:
pass
def accepted(self, detail_item: Optional[AssetDetailItem]) -> bool:
"""
Check if detail item could be shown by this delegate.
Args:
detail_item (AssetDetailItem): Detail item to be shown.
"""
return self.asset_accepted(detail_item.asset if detail_item else None)
def show(self, detail_item: Optional[AssetDetailItem], frame: ui.Frame) -> None:
"""
Show detail item with this delegate.
Args:
detail_item (AssetDetailItem): Detail item to be shown.
frame (ui.Frame): Parent frame to put widgets.
"""
self.show_asset(detail_item.asset if detail_item else None, frame)
| 1,667 | Python | 35.260869 | 84 | 0.711458 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_property_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
__all__ = ["BrowserPropertyDelegate"]
import abc
import weakref
from typing import List, Optional
import omni.ui as ui
from omni.kit.window.property.templates import HORIZONTAL_SPACING, LABEL_HEIGHT, LABEL_WIDTH
from .browser_model import AssetDetailItem
class BrowserPropertyDelegate(abc.ABC):
"""Base class for item property delegates and registry of these same delegate instances.
Whenever an instance of this class is created, it is automatically registered.
The BrowserPropertyView will show the Property widget, and will display into it
all the registered delegates that accept the current item.
This class is not meant to be instantiated directly, but rather to be subclassed.
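    A minimal subclass sketch (illustrative; the class name and widget choices
    are placeholders, only the four abstract methods must be overridden):
    .. code-block:: python
        class HypotheticalDelegate(BrowserPropertyDelegate):
            def accepted(self, detail_item):
                return detail_item is not None
            def show(self, detail_item, frame):
                with frame:
                    self._build_label(detail_item.name)
            def accepted_multiple(self, detail_items):
                return False
            def show_multiple(self, detail_items, frame):
                pass
        # Keep a reference; the registry only holds weak references
        delegate = HypotheticalDelegate()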
"""
    # Use a list of weak references so the registration order stays deterministic.
__g_registered = []
@classmethod
def get_instances(cls) -> List["BrowserPropertyDelegate"]:
remove = []
for wref in BrowserPropertyDelegate.__g_registered:
obj = wref()
if obj:
yield obj
else:
remove.append(wref)
for wref in remove:
BrowserPropertyDelegate.__g_registered.remove(wref)
def __init__(self):
self.__g_registered.append(weakref.ref(self, lambda r: BrowserPropertyDelegate.__g_registered.remove(r)))
def __del__(self):
self.destroy()
def destroy(self):
for wref in BrowserPropertyDelegate.__g_registered:
if wref() == self:
BrowserPropertyDelegate.__g_registered.remove(wref)
break
@abc.abstractmethod
def accepted(self, detail_item: Optional[AssetDetailItem]) -> bool:
"""
Check if detail item could be shown by this delegate.
Args:
detail_item (AssetDetailItem): Detail item to be shown.
"""
return False
@abc.abstractmethod
def show(self, detail_item: Optional[AssetDetailItem], frame: ui.Frame) -> None:
"""
Show detail item with this delegate.
Args:
detail_item (AssetDetailItem): Detail item to be shown.
frame (ui.Frame): Parent frame to put widgets.
"""
pass
@abc.abstractmethod
def accepted_multiple(self, detail_items: List[AssetDetailItem]) -> bool:
"""
Check if details of multiple items could be shown by this delegate.
Args:
detail_items (List[AssetDetailItem]): Detail items to be shown.
"""
return False
@abc.abstractmethod
def show_multiple(self, detail_items: List[AssetDetailItem], frame: ui.Frame) -> None:
"""
Show details of multiple items with this delegate.
The delegate may choose to show common properties of all items.
Args:
detail_items (List[AssetDetailItem]): Detail items to be shown.
frame (ui.Frame): Parent frame to put widgets.
"""
pass
def _build_label(
self, text: str, width: Optional[ui.Length] = LABEL_WIDTH, alignment=ui.Alignment.LEFT_CENTER
) -> ui.Label:
return ui.Label(
text,
name="label",
word_wrap=True,
width=width if width is not None else LABEL_WIDTH,
height=LABEL_HEIGHT,
alignment=alignment,
)
def _build_combobox(self, text: str, *args, **kwargs) -> ui.ComboBox:
with ui.HStack(spacing=HORIZONTAL_SPACING, padding=10):
ui.Label(text, style_type_name_override="Asset.Label", width=100)
return ui.ComboBox(*args, name="choices", **kwargs)
def _build_checkbox(self, model: Optional[ui.AbstractValueModel] = None) -> ui.CheckBox:
# Copy from UsdPropertiesWidgetBuilder._bool_builder
with ui.VStack(width=10):
ui.Spacer()
widget_kwargs = {"width": 10, "height": 0, "name": "greenCheck", "model": model}
with ui.ZStack():
with ui.Placer(offset_x=0, offset_y=-2):
checkbox = ui.CheckBox(**widget_kwargs)
with ui.Placer(offset_x=1, offset_y=-1):
ui.Rectangle(height=8, width=8, name="mixed_overlay", alignment=ui.Alignment.CENTER, visible=False)
ui.Spacer()
return checkbox # noqa: R504
def _build_string_field(self, model: Optional[ui.SimpleStringModel] = None, text: str = "") -> ui.StringField:
if text:
with ui.HStack(spacing=HORIZONTAL_SPACING):
self._build_label(text)
field = ui.StringField(model, name="models")
return field
else:
return ui.StringField(model, name="models")
def _build_float_field(self, model: Optional[ui.SimpleFloatModel] = None) -> ui.FloatField:
return ui.FloatField(model, name="models")
def _build_float_drag(self, model: Optional[ui.AbstractValueModel] = None, name="models") -> ui.FloatDrag:
drag = ui.FloatDrag(model, name=name, min=model.min, max=model.max)
drag.step = max(0.1, (model.max - model.min) / 1000.0)
return drag
def _build_int_drag(self, model: Optional[ui.AbstractValueModel] = None, name="models") -> ui.IntDrag:
kwargs = {}
if model.min is not None:
kwargs["min"] = model.min
if model.max is not None:
kwargs["max"] = model.max
drag = ui.IntDrag(model, name="models", **kwargs)
if model.min is not None and model.max is not None:
drag.step = max(0.1, (model.max - model.min) / 1000.0)
return drag
def _build_float_slider(self, model: Optional[ui.AbstractValueModel] = None, name="models") -> ui.FloatSlider:
slider = ui.FloatSlider(model, name=name, min=model.min, max=model.max)
slider.step = max(0.1, (model.max - model.min) / 1000.0)
return slider
| 6,332 | Python | 38.830188 | 119 | 0.630449 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/actions.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import json
import os
from typing import Dict, List, Optional, Tuple
import carb
import carb.settings
import omni.client
import omni.kit.commands
import omni.usd
from pxr import Gf, Sdf, Tf, Usd, UsdGeom
from .style import SIMREADY_DRAG_PREFIX
def normalize_sdf_path(sdf_layer_path: str):
return sdf_layer_path.replace("\\", "/")
def make_relative_to_layer(stage: Usd.Stage, url: str) -> str:
# XXX: PyBind omni::usd::UsdUtils::makePathRelativeToLayer
stage_layer = stage.GetEditTarget().GetLayer()
if not stage_layer.anonymous and not Sdf.Layer.IsAnonymousLayerIdentifier(url):
stage_layer_path = stage_layer.realPath
if normalize_sdf_path(stage_layer_path) == url:
carb.log_warn(f"Cannot reference {url} onto itself")
return url
relative_url = omni.client.make_relative_url(stage_layer_path, url)
if relative_url:
# omniverse path can have '\'
return relative_url.replace("\\", "/")
return url
def make_prim_path(stage: Usd.Stage, url: str, prim_path: Sdf.Path = None, prim_name: str = None):
"""Make a new/unique prim path for the given url"""
if prim_path is None or prim_path.isEmpty:
if stage.HasDefaultPrim():
prim_path = stage.GetDefaultPrim().GetPath()
else:
prim_path = Sdf.Path.absoluteRootPath
if prim_name is None:
prim_name = Tf.MakeValidIdentifier(os.path.basename(os.path.splitext(url)[0]))
return Sdf.Path(omni.usd.get_stage_next_free_path(stage, prim_path.AppendChild(prim_name).pathString, False))
def _add_asset_to_stage_helper(
usd_context: omni.usd.UsdContext,
stage: Usd.Stage,
url: str,
prim_path: Sdf.Path = None,
payload: str = "",
instanceable: str = "",
) -> Tuple[Sdf.Path, Usd.EditContext, str]:
"""Add a Usd.Prim to an exiting Usd.Stage, pointing to the url
Args:
usd_context (omni.usd.UsdContext): Usd context to add asset to.
stage (Usd.Stage): Usd stage to add asset to.
url (str): Url to asset to add.
Kwargs:
prim_path (Sdf.Path): Parent prim path to add asset to.
payload (str): "payload" or "reference". If empty, use the setting at "/persistent/app/stage/dragDropImport".
instanceable (str): "instanceable" or "noninstanceable". If empty (default), use the setting at "/persistent/app/stage/instanceableOnCreatingReference".
"""
    # Get a relative URL if possible
relative_url = make_relative_to_layer(stage, url)
    # When in auto authoring mode, don't create it in the current edit target,
    # as it will be cleared each time it is moved to the default edit layer.
edit_context = None
layers = usd_context.get_layers()
if layers and layers.get_layer_edit_mode() == omni.usd.LayerEditMode.AUTO_AUTHORING:
default_identifier = layers.get_default_edit_layer_identifier()
edit_layer = Sdf.Layer.Find(default_identifier)
if edit_layer is None:
edit_layer = stage.GetRootLayer()
edit_context = Usd.EditContext(stage, edit_layer)
new_prim_path = make_prim_path(stage, url, prim_path=prim_path)
if not instanceable:
as_instanceable = carb.settings.get_settings().get("/persistent/app/stage/instanceableOnCreatingReference")
else:
as_instanceable = instanceable == "instanceable"
# Determine if we should create a payload or reference
if not payload:
create_as_payload = carb.settings.get_settings().get("/persistent/app/stage/dragDropImport") == "payload"
else:
create_as_payload = payload == "payload"
# Add asset to stage
cmd_name = "CreatePayloadCommand" if create_as_payload else "CreateReferenceCommand"
omni.kit.commands.execute(
cmd_name, usd_context=usd_context, path_to=new_prim_path, asset_path=url, instanceable=as_instanceable
)
return (new_prim_path, edit_context, relative_url)
def set_prim_variants(stage: Usd.Stage, prim_path: Sdf.Path, variants: Dict[str, str]):
"""
Set the variants on the provided prim.
"""
prim = stage.GetPrimAtPath(prim_path)
if prim:
vsets = prim.GetVariantSets()
for name, value in variants.items():
carb.log_info(f"Try to set variant for {prim_path}: {name} -> {value}")
vset = vsets.GetVariantSet(name)
if vset:
vset.SetVariantSelection(value)
def find_inst_prim(start_prim: Usd.Prim) -> Optional[Usd.Prim]:
"""Find the prim from the given prim path wose name ends in '_inst'."""
if not start_prim:
return None
if start_prim.GetName().endswith("_inst"):
return start_prim
for child in start_prim.GetChildren():
inst_prim = find_inst_prim(child)
if inst_prim:
return inst_prim
return None
def is_physics_variant_enabled(variants: Dict[str, str]) -> bool:
"""Check if the physics variant is found and enabled (not 'None').
Args:
variants (Dict[str, str]): Dictionary of variant name and value.
"""
if variants and "PhysicsVariant" in variants and variants["PhysicsVariant"] != "":
return True
return False
def configure_prim(stage: Usd.Stage, prim_path: Sdf.Path, variants: Dict[str, str]) -> None:
"""Configure the variants of the given prim."""
if not stage or not prim_path or prim_path.emptyPath or not variants:
return
# Set the variants on the prim at prim_path
set_prim_variants(stage, prim_path, variants)
def _add_asset_with_variant_to_stage(
raw_data: dict, context: str = "", prim_path: Optional[Sdf.Path] = None
) -> Tuple[bool, str]:
"""
Add simready asset to stage from raw data.
Args:
        raw_data (dict): Json data describing the asset to add. See more in SimReadyBrowserModel.get_drag_mime_data()
Kwargs:
        context (str): Name of usd context. Default ""
prim_path (Optional[Sdf.Path]): Parent prim path to add asset. Default None means default prim.
Returns:
Tuple of success and added prim path.
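    Illustrative raw_data (only the keys read by this function; the values are
    examples):
    .. code-block:: python
        raw_data = {
            "url": "omniverse://server/Assets/Chair/Chair.usd",
            "payload": "",  # "payload" or "reference"; "" uses the user setting
            "instanceable": "",  # "instanceable" or "noninstanceable"; "" uses the user setting
            "variants": {"PhysicsVariant": "RigidBody"},
        }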
"""
usd_context = omni.usd.get_context(context)
if not usd_context:
carb.log_error("Failed to drop asset since usd context not found!")
return False, ""
stage = usd_context.get_stage()
if not stage:
carb.log_error("Failed to drop asset since usd stage not found!")
return False, ""
url = raw_data.get("url", "")
if not url:
carb.log_error("Failed to drop asset since url not defined!")
return False, ""
payload = raw_data.get("payload", "")
instanceable = raw_data.get("instanceable", "")
# Add the asset to stage
(dropped_path, _, _) = _add_asset_to_stage_helper(
usd_context, stage, url, prim_path=prim_path, payload=payload, instanceable=instanceable
)
# Set variants on the added asset
if dropped_path:
variants = raw_data.get("variants", {})
configure_prim(stage, dropped_path, variants)
return True, dropped_path
def add_single_asset_from_drag(drag_mime_data: str, context: str = "", prim_path: Optional[Sdf.Path] = None) -> bool:
"""
Add simready asset to stage from drag mime data.
Args:
        drag_mime_data (str): Mime data describing the asset to add. See more in SimReadyBrowserModel.get_drag_mime_data()
Kwargs:
        context (str): Name of usd context. Default ""
prim_path (Optional[Sdf.Path]): Parent prim path to add asset. Default None means default prim.
"""
if drag_mime_data.startswith(SIMREADY_DRAG_PREFIX):
drag_mime_data = drag_mime_data[len(SIMREADY_DRAG_PREFIX) :]
raw_data = json.loads(drag_mime_data)
res, _ = _add_asset_with_variant_to_stage(raw_data, context=context, prim_path=prim_path)
return res
def add_asset_from_drag(drag_mime_data: str, context: str = "", prim_path: Optional[Sdf.Path] = None) -> bool:
    mime_datas = drag_mime_data.split("\n")
    res = True
    for data in mime_datas:
        res = add_single_asset_from_drag(data, context=context, prim_path=prim_path) and res
    return res
def register_actions(extension_id: str, extension_instance):
"""
Register actions.
Args:
        extension_id (str): Extension ID which the actions belong to
"""
try:
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
actions_tag = "SimReady"
action_registry.register_action(
extension_id,
"toggle_window",
lambda: extension_instance._toggle_window(),
display_name="SimReady Explorer show/hide window",
description="SimReady Explorer show/hide window",
tag=actions_tag,
)
action_registry.register_action(
extension_id,
"add_asset",
lambda raw_data, context="", path_to=None: _add_asset_with_variant_to_stage(raw_data, context, path_to),
display_name="SimReady->Add Asset",
description="Add SimReady asset to stage",
tag=actions_tag,
)
action_registry.register_action(
extension_id,
"add_asset_from_drag",
lambda drag_mime_data, context="", path_to=None: add_asset_from_drag(drag_mime_data, context, path_to),
display_name="SimReady->Add Asset From Drag",
description="Add SimReady asset to stage from dragging mime data",
tag=actions_tag,
)
except ImportError:
pass
def deregister_actions(extension_id: str):
"""
Deregister actions.
Args:
        extension_id (str): Extension ID which the actions belong to
"""
try:
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.deregister_all_actions_for_extension(extension_id)
except ImportError:
pass
| 10,429 | Python | 37.205128 | 160 | 0.655959 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Optional
import omni.ui as ui
from omni.kit.browser.folder.core import FolderDetailDelegate
from .browser_model import AssetDetailItem, SimReadyBrowserModel
from .context_menu import ContextMenu
from .style import ICON_PATH
from .viewport_drop_delegate import SimReadyDragDropObject
class SimReadyDetailDelegate(FolderDetailDelegate):
"""
Delegate to show SimReady asset item in detail view
(where the list of thumbnails is shown)
Args:
model (SimReadyBrowserModel): Asset browser model
"""
def __init__(self, model: SimReadyBrowserModel):
super().__init__(model=model)
# Context menu for detail item
self._context_menu: Optional[ContextMenu] = None
# Drop helper to handle dropping in viewport (only works for VP2)
self._drop_helper = SimReadyDragDropObject()
def destroy(self):
self._drop_helper = None
if self._context_menu:
self._context_menu.destroy()
self._context_menu = None
super().destroy()
def get_thumbnail(self, item: AssetDetailItem) -> str:
"""Set default thumbnail if thumbnail is None"""
return item.thumbnail or f"{ICON_PATH}/usd_stage_256.png"
def get_tooltip(self, item: AssetDetailItem) -> Optional[str]:
"""No tooltip for detail item as it interferes with the right-click menu"""
return None
def on_drag(self, item: AssetDetailItem) -> str:
"""Could be dragged to viewport window"""
thumbnail = self.get_thumbnail(item)
icon_size = 128
with ui.VStack(width=icon_size):
if thumbnail:
ui.Spacer(height=2)
with ui.HStack():
ui.Spacer()
ui.ImageWithProvider(thumbnail, width=icon_size, height=icon_size)
ui.Spacer()
ui.Label(
item.name,
word_wrap=False,
elided_text=True,
skip_draw_when_clipped=True,
alignment=ui.Alignment.TOP,
style_type_name_override="GridView.Item",
)
return self._model.get_drag_mime_data(item)
def on_multiple_drag(self, item: AssetDetailItem) -> str:
"""Could be dragged to viewport window"""
thumbnail = self.get_thumbnail(item)
icon_size = 32
with ui.HStack(height=icon_size):
if thumbnail:
ui.ImageWithProvider(thumbnail, width=icon_size, height=icon_size)
ui.Spacer(width=8)
ui.Label(item.name, style_type_name_override="GridView.Item")
return self._model.get_drag_mime_data(item)
def on_right_click(self, item: AssetDetailItem) -> None:
"""Show context menu"""
if self._context_menu is None:
self._context_menu = ContextMenu()
self._context_menu.show_item(item)
def build_widget(
self, model: ui.AbstractItemModel, item: AssetDetailItem, index: int = 0, level: int = 0, expand: bool = False
):
super().build_widget(model, item, index=index, level=level, expand=expand)
tooltip = self.get_tooltip(item)
if tooltip is not None:
self._cached_thumbnail_widgets[item].set_tooltip(tooltip)
def build_thumbnail(self, item: AssetDetailItem) -> Optional[ui.Image]:
"""
Display thumbnail per detail item
Args:
item (DetailItem): detail item to display
"""
with ui.ZStack():
thumbnail_image = ui.Image(
self.get_thumbnail(item), fill_policy=ui.FillPolicy.STRETCH, style_type_name_override="GridView.Image"
)
            # Badge to indicate the item's physics is ON; its visibility must change when the physics status changes
badge_container = ui.HStack(alignment=ui.Alignment.LEFT_BOTTOM, visible=item.physics != "None")
with badge_container:
ui.Spacer(width=ui.Percent(80))
with ui.VStack():
ui.Spacer(height=2)
ui.ImageWithProvider(
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT,
alignment=ui.Alignment.LEFT,
style_type_name_override="GridView.Item.Badge.Image",
)
badge_container.visible = item.physics != "None"
if item.physics_model:
item.physics_model.current_index.add_value_changed_fn(
lambda m, w=badge_container: self.__on_item_physics_changed(m, w)
)
return thumbnail_image # noqa: R504
def __on_item_physics_changed(self, model: ui.SimpleIntModel, badge_widget: ui.Widget) -> None:
badge_widget.visible = model.as_int != 0
| 5,234 | Python | 38.360902 | 118 | 0.618647 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_model.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import json
from itertools import chain
from typing import Dict, List, Optional
import carb.settings
from omni.kit.browser.core import CategoryItem, DetailItem
from omni.kit.browser.folder.core import (
FileDetailItem,
FileSystemFolder,
FolderCategoryItem,
FolderCollectionItem,
TreeFolderBrowserModel,
)
from .actions import add_asset_from_drag
from .asset import SimreadyAsset
from .browser_folder import AssetFolder
from .combobox_model import ComboBoxModel
from .style import SIMREADY_DRAG_PREFIX
SETTINGS_DEFAULT_PHYSICS = "/exts/omni.simready.explorer/default_physics"
class AssetDetailItem(DetailItem):
"""
Represents a single asset detail item.
Args:
name (str): asset name.
url (str): asset url.
asset (SimreadyAsset): asset object linked to this item.
thumbnail (Optional[str]): thumbnail url of file. Default is None.
"""
@classmethod
def get_default_physics_value(cls) -> str:
"""Default physics variant value for all assets.
This is used to initialize the physics model."""
return str(carb.settings.get_settings().get_as_string(SETTINGS_DEFAULT_PHYSICS) or "RigidBody")
def __init__(self, asset: SimreadyAsset):
super().__init__(asset.name, asset.url, asset.thumbnail)
self.asset = asset
# Physics model
physics_values: List[str] = []
if self.asset.physics_variant:
physics_values = self.asset.physics_variant.get("Values", [])
default_value = AssetDetailItem.get_default_physics_value()
if physics_values:
# "None" is not in the physics variantset of the asset, so we need to add it "manually"
physics_values.insert(0, "None")
if AssetDetailItem.get_default_physics_value() not in physics_values:
default_value = physics_values[0]
self.physics_model = ComboBoxModel(physics_values, current_value=default_value)
else:
self.physics_model = None
@property
def physics(self) -> str:
return self.physics_model.current_value if self.physics_model else "None"
@physics.setter
def physics(self, value: str) -> None:
if self.physics_model:
self.physics_model.current_value = value
def filter(self, filter_words: Optional[List[str]]) -> bool: # noqa: A003
        # Filter the asset not only by name but also by its tags
if filter_words is None:
return True
else:
for word in filter_words:
# Each search word should be in name or tag
if word.lower() not in self.name.lower():
# No partial match in asset name, check tags
for tag in self.asset.tags:
if word.lower() in tag.lower():
break # Found a partial match in asset tag
else:
return False # No partial match in any of the tags, reject asset
else:
return True
class SimReadyBrowserModel(TreeFolderBrowserModel):
"""
    Represents the browser model for SimReady assets.
    It shows files (assets) by tags instead of folders.
"""
def __init__(self, *args, **kwargs):
super().__init__(
show_summary_folder=True,
*args,
**kwargs,
)
        # Folders for labels, used to show as category items
self.__label_folders: Dict[str, FileSystemFolder] = {}
def create_folder_object(self, *args, **kwargs):
return AssetFolder(*args, **kwargs)
def get_category_items(self, item: FolderCollectionItem) -> List[FolderCategoryItem]:
# Traverse root folder if necessary
for root_folder in self._root_folders:
# Not in cache
if not root_folder.prepared:
                # Not traversed yet
self.start_traverse(root_folder)
# Create category item from labels
category_items: List[FolderCategoryItem] = []
root_folders = [folder for folder in self.__label_folders.values() if folder.name.find("/") < 0]
summary_count = 0
for folder in root_folders:
if folder not in self._folder_cache:
self._folder_cache[folder] = self._create_folder_category_item(folder)
category_items.append(self._folder_cache[folder])
summary_count += self._folder_cache[folder].count
self.sort_items(category_items)
if self._show_summary_folder:
category_items.insert(0, CategoryItem(self.SUMMARY_FOLDER_NAME, summary_count))
return category_items
def get_detail_items(self, item: CategoryItem) -> List[FileDetailItem]:
"""Override to get list of detail items"""
if item.name == self.SUMMARY_FOLDER_NAME:
return self._get_summary_detail_items()
else:
# List files in item folder
detail_items = self._get_folder_detail_items(item.folder)
for child in item.children:
detail_items += self.get_detail_items(child)
self.sort_items(detail_items)
return detail_items
def get_drag_mime_data(self, item: AssetDetailItem = None) -> str:
if item:
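            # Encode the asset URL and chosen variants as JSON, prefixed so the drop delegate can recognize the payload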
drag_data = {"url": item.url, "variants": {}}
if item.physics:
drag_data["variants"]["PhysicsVariant"] = item.physics if item.physics != "None" else ""
return SIMREADY_DRAG_PREFIX + json.dumps(drag_data)
return super().get_drag_mime_data(item)
def execute(self, item: DetailItem) -> None:
drag_mime_data = self.get_drag_mime_data(item)
add_asset_from_drag(drag_mime_data)
def _on_folder_traversed(self, folder: AssetFolder, loading_completed=True, updated: bool = True) -> None:
"""
        Called after a folder has been traversed:
- Update tags and view
- Save data to cache
"""
carb.log_info(f"Traverse completed: {folder.url}, {loading_completed}")
if updated and folder in self._root_folders:
# Update tags
self._update_labels(folder)
# Refresh view
self.folder_changed(None)
def _update_labels(self, folder: AssetFolder) -> None:
for label, assets in folder.asset_by_labels.items():
if not label:
continue
new_folder = False
if label not in self.__label_folders:
self.__label_folders[label] = AssetFolder(label, label)
new_folder = True
self.__label_folders[label].files.extend(assets)
# Clean cache to re-generate category items
self._folder_cache[self.__label_folders[label]] = None
if new_folder:
# Create parent label folders
while True:
pos = label.rfind("/")
if pos > 0:
parent_label = label[:pos]
if parent_label not in self.__label_folders:
self.__label_folders[parent_label] = AssetFolder(parent_label, parent_label)
folder = self.__label_folders[label]
if folder not in self.__label_folders[parent_label].sub_folders:
self.__label_folders[parent_label].sub_folders.append(folder)
self.sort_items(self.__label_folders[parent_label].sub_folders)
label = parent_label
else:
break
def _get_summary_detail_items(self) -> List[FileDetailItem]:
# List all assets from label folder
root_folders = [folder for folder in self.__label_folders.values() if folder.name.find("/") < 0]
return list(chain.from_iterable([self.get_detail_items(self._folder_cache[f]) for f in root_folders]))
def create_detail_item(self, asset: SimreadyAsset) -> FileDetailItem:
"""
        Create detail item(s) from an asset.
        An asset may yield multiple detail items.
        Args:
            asset (SimreadyAsset): Asset object to create detail item(s) from
"""
return AssetDetailItem(asset)
| 8,686 | Python | 38.666666 | 110 | 0.605918 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_widget.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
from typing import List, Optional
import omni.kit.app
from omni.kit.browser.core import BrowserSearchBar
from omni.kit.browser.folder.core import FolderCategoryItem
from omni.kit.widget.searchfield import SearchField
from .browser_with_property_widget import BrowserWithPropertyWidget
class SimReadyBrowserWidget(BrowserWithPropertyWidget):
def __init__(self, *args, **kwargs):
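        # When True, the next category selection change keeps the current search words instead of resetting them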
self.__keep_search_words = False
super().__init__(*args, multiple_drag=True, **kwargs)
@property
def search_field(self) -> SearchField:
return self._search_bar._search_field
@property
def category_all(self) -> FolderCategoryItem:
return self._browser_model.get_item_children(self._browser_model._root_collection_item)[0]
def _build_search_bar(self):
# Hide gear button in search bar
self._search_bar = BrowserSearchBar(options_menu=None, style=self._extra_ui_style)
def _build_browser_widget(self):
super()._build_browser_widget()
        # Hack the filter function to remove category labels before filtering
self._browser_widget.filter_details = self._filter_details
def _on_category_selection_changed(self, category_item: FolderCategoryItem) -> None:
# Set search words with labels from selected category
if category_item is None:
return
if self.__keep_search_words:
            # The selected category changed together with the search words
self.__keep_search_words = False
async def __delay_refresh_details_async():
await omni.kit.app.get_app().next_update_async()
self.__refresh_detail_view(self.search_field.search_words)
asyncio.ensure_future(__delay_refresh_details_async())
else:
            # The user clicked the category treeview to select a category
self.search_field.search_words = self._get_search_words_from_category(category_item)
self.search_field.suggestions = self._get_tags_from_category(category_item)
def _filter_details(self, filter_words: Optional[List[str]]):
"""
Filter detail items.
Args:
filter_words: A string list to filter detail items. None means filtering nothing.
"""
async def __select_category_all_async() -> None:
await omni.kit.app.get_app().next_update_async()
self.__keep_search_words = True
self.category_selection = [self.category_all]
if filter_words:
# Check if all labels of current selected category in search words
# If yes, keep searching in selected category
# If no, change selected category to ALL then search
search_labels = set([w.lower() for w in filter_words])
selected_category = self.category_selection[0] if self.category_selection else None
if selected_category and selected_category != self.category_all:
selected_labels = set(l.lower() for l in self._get_search_words_from_category(selected_category))
if not selected_labels or not selected_labels.issubset(search_labels):
asyncio.ensure_future(__select_category_all_async())
return
for category_item in self.category_selection:
words = self._get_search_words_from_category(category_item)
filter_words = [w for w in filter_words if w not in words]
else:
# No search words, always select ALL
selected_category = self.category_selection[0] if self.category_selection else None
if selected_category != self.category_all:
asyncio.ensure_future(__select_category_all_async())
return
self.__refresh_detail_view(filter_words)
def __refresh_detail_view(self, filter_words: Optional[List[str]]):
if self._browser_widget._detail_view is not None:
detail_view = self._browser_widget._detail_view
detail_view.filter(filter_words)
# Refresh suggestion words in search bar from results
async def __refresh_suggestions_async():
await omni.kit.app.get_app().next_update_async()
tags = []
count = 0
for item in detail_view._delegates:
if detail_view._delegates[item].visible:
tags.extend(list(item.asset.tags))
count += 1
tags = list(set(tags))
tags.sort()
                self.search_field.suggestions = tags
                # TODO: Workaround for an issue in omni.kit.widget.searchfield; update the suggestion window directly.
                if self.search_field._suggest_window:
                    self.search_field._suggest_window.suggestions = tags
asyncio.ensure_future(__refresh_suggestions_async())
def _get_search_words_from_category(self, category_item: FolderCategoryItem) -> List[str]:
# Get preset search words from category
if category_item.name == self._browser_model.SUMMARY_FOLDER_NAME:
return []
else:
# Use folder name (label) as preset search words
return category_item.folder.name.split("/")
def _get_tags_from_category(self, category_item: FolderCategoryItem) -> List[str]:
if category_item.name == self._browser_model.SUMMARY_FOLDER_NAME:
tags = []
for item in self._browser_model.get_category_items(self._browser_model._root_collection_item):
if item.name != self._browser_model.SUMMARY_FOLDER_NAME:
tags.extend(list(item.folder.asset_by_tags.keys()))
tags = list(set(tags))
else:
tags = list(category_item.folder.asset_by_tags.keys())
tags.sort()
return tags
| 6,350 | Python | 43.725352 | 113 | 0.632598 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/viewport_drop_delegate.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import json
from typing import Tuple
import omni.kit.commands
import omni.usd
from omni.kit.viewport.window.dragdrop.usd_file_drop_delegate import UsdFileDropDelegate
from pxr import Usd
from .actions import configure_prim
from .style import SIMREADY_DRAG_PREFIX
class SimReadyDragDropObject(UsdFileDropDelegate):
"""
    SimReady drop delegate based on UsdFileDropDelegate from omni.kit.viewport.window, extended to set variants on drop.
"""
def __init__(self):
self._usd_prim_droped = None
super().__init__()
def drop_should_disable_preview(self):
return True
def get_raw_data(self, drop_data: dict) -> dict:
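        # Strip the SimReady drag prefix and parse the JSON payload produced by the browser model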
return json.loads(drop_data.get("mime_data")[len(SIMREADY_DRAG_PREFIX) :])
def get_url(self, drop_data: dict):
return self.get_raw_data(drop_data)["url"]
def add_reference_to_stage(self, usd_context, stage: Usd.Stage, url: str) -> Tuple[str, Usd.EditContext, str]:
(self._usd_prim_droped, edit_context, relative_url) = super().add_reference_to_stage(usd_context, stage, url)
return (self._usd_prim_droped, edit_context, relative_url)
def accepted(self, drop_data: dict):
mime_data = drop_data["mime_data"]
self._usd_prim_droped = None
return mime_data.startswith(SIMREADY_DRAG_PREFIX)
def dropped(self, drop_data: dict):
super().dropped(drop_data)
        # Additionally, set variants if required
usd_prim_droped = drop_data.get("usd_prim_droped") or self._usd_prim_droped
if usd_prim_droped is not None:
raw_drop_data = self.get_raw_data(drop_data)
variants = raw_drop_data.get("variants", {})
if variants:
context_name = drop_data.get("usd_context_name", "")
stage = omni.usd.get_context(context_name).get_stage()
if stage:
configure_prim(stage, usd_prim_droped, variants)
return
| 2,369 | Python | 37.225806 | 117 | 0.678345 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/window.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ui as ui
from .browser_delegate import SimReadyDetailDelegate
from .browser_model import SimReadyBrowserModel
from .browser_widget import SimReadyBrowserWidget
SETTING_COLLECTION_ROOTS = "/exts/omni.simready.explorer/folders"
SIMREADY_EXPLORER_NAME = "SimReady Explorer"
class SimReadyBrowserWindow(ui.Window):
"""
    Represents a window that shows SimReady Assets
"""
def __init__(self, visible=True):
super().__init__(SIMREADY_EXPLORER_NAME, visible=visible)
self.frame.set_build_fn(self._build_ui)
self._browser_model = SimReadyBrowserModel(setting_folders=SETTING_COLLECTION_ROOTS)
self._delegate = None
self._widget = None
# Dock it to the same space where Content is docked.
self.deferred_dock_in("Content")
def destroy(self):
if self._widget:
self._widget.destroy()
if self._delegate:
self._delegate.destroy()
super().destroy()
def _build_ui(self):
self._delegate = SimReadyDetailDelegate(self._browser_model)
with self.frame:
with ui.VStack(spacing=15):
self._widget = SimReadyBrowserWidget(
self._browser_model,
detail_delegate=self._delegate,
)
| 1,726 | Python | 32.211538 | 92 | 0.680765 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/browser_property_view.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Dict, List, Optional
from omni import ui
from omni.kit.browser.folder.core import FileDetailItem
from omni.kit.window.property.style import get_style
from .browser_property_delegate import BrowserPropertyDelegate
from .style import PROPERTY_STYLES
class BrowserPropertyView:
"""
View to show properties of an item from the browser.
This view represents a container (frame) into which the delegates
registered with class BrowserPropertyDelegate will be added.
"""
def __init__(self):
self.__delegate_frames: Dict[BrowserPropertyDelegate, ui.Frame] = {}
self._build_ui()
def destroy(self):
pass
def _build_ui(self):
self._property_container = ui.VStack(style=get_style())
with self._property_container:
with ui.ScrollingFrame(
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
):
self.__delegate_container = ui.ZStack()
@property
def visible(self) -> bool:
return self._property_container.visible
@visible.setter
def visible(self, value) -> None:
self._property_container.visible = value
def show_detail_item(self, detail_item: Optional[FileDetailItem]):
"""Various aspects of an item's properties can be shown by different delegates.
The delegates that are registered with class BrowserPropertyDelegate will be
asked to show the properties of the item. The delegates that accept the item
will be shown, and the others will be hidden."""
delegates = BrowserPropertyDelegate.get_instances()
for delegate in delegates:
if delegate.accepted(detail_item):
# Show delegate
if delegate not in self.__delegate_frames:
with self.__delegate_container:
self.__delegate_frames[delegate] = ui.Frame(style=PROPERTY_STYLES)
self.__delegate_frames[delegate].visible = True
delegate.show(detail_item, self.__delegate_frames[delegate])
elif delegate in self.__delegate_frames:
# Hide delegate
self.__delegate_frames[delegate].visible = False
def show_detail_multi_item(self, detail_item_list: List[FileDetailItem]):
"""Properties of a list of items can be shown in various ways.
The delegates that are registered with class BrowserPropertyDelegate will be
asked to show the properties of the items. The delegates that accept the items
will be shown, and the others will be hidden."""
delegates = BrowserPropertyDelegate.get_instances()
for delegate in delegates:
if delegate.accepted_multiple(detail_item_list):
# Show delegate
if delegate not in self.__delegate_frames:
with self.__delegate_container:
self.__delegate_frames[delegate] = ui.Frame(style=PROPERTY_STYLES)
self.__delegate_frames[delegate].visible = True
delegate.show_multiple(detail_item_list, self.__delegate_frames[delegate])
elif delegate in self.__delegate_frames:
# Hide delegate
self.__delegate_frames[delegate].visible = False
| 3,832 | Python | 43.057471 | 90 | 0.666232 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/omni/simready/explorer/tests/browser_api_tests.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import pathlib
from typing import Dict, List, Optional
import carb.settings
import omni.kit.test
import omni.kit.undo
import omni.ui as ui
import omni.usd
from omni.simready.explorer import (
AssetFactory,
AssetType,
PropAsset,
SimreadyAsset,
SimReadyBrowserExtension,
SimReadyBrowserModel,
add_asset_to_stage,
add_asset_to_stage_using_prims,
find_assets,
get_average_position_of_prims,
get_instance,
)
from omni.simready.explorer.actions import find_inst_prim, is_physics_variant_enabled
from pxr import Gf, Sdf, Usd
TEST_DATA_DIR = pathlib.Path(__file__).parent.joinpath("data")
TEST_ASSETS_FILE = TEST_DATA_DIR.joinpath("asset_info.json")
# Declare and register new asset types
@AssetFactory.register(AssetType.GENERIC)
class GenericTestAsset(SimreadyAsset):
def __init__(self, raw_asset_data: Dict):
super().__init__(raw_asset_data)
self._type = AssetType.GENERIC
@classmethod
def is_asset_data(cls, raw_asset_data: Dict) -> bool:
asset_type: str = raw_asset_data.get("Asset Type", AssetType.UNKNOWN.name)
return asset_type.upper() == AssetType.GENERIC.name
@AssetFactory.register(AssetType.VEHICLE)
class VehicleTestAsset(SimreadyAsset):
def __init__(self, raw_asset_data: Dict):
super().__init__(raw_asset_data)
self._type = AssetType.VEHICLE
@classmethod
def is_asset_data(cls, raw_asset_data: Dict) -> bool:
asset_type: str = raw_asset_data.get("Asset Type", AssetType.UNKNOWN.name)
return asset_type.upper() == AssetType.VEHICLE.name
class BrowserApiTest(omni.kit.test.AsyncTestCase):
async def setUp(self) -> None:
super().setUp()
# Create a browser model with the test data directory as the only folder
# class FolderBrowserModel insists on prefixing the setting_folders with "persistent"
settings_asset_folder_roots = "/persistent/exts/omni.simready.explorer.tests/folders"
carb.settings.get_settings().set(settings_asset_folder_roots, [str(TEST_DATA_DIR)])
ui.Workspace.show_window("SimReady Explorer")
await omni.kit.app.get_app().next_update_async()
self._browser_model: SimReadyBrowserModel = SimReadyBrowserModel(setting_folders=settings_asset_folder_roots)
self._browser_model.folder_changed(None)
self._explorer: Optional[SimReadyBrowserExtension] = get_instance()
if self._explorer:
# Set the browser model on the SimReadyBrowserExtension instance
# so the SimReadyBrowserExtension.find_assets() API uses it.
self._original_browser_model = self._explorer.browser_model
self._explorer.window._browser_model = self._browser_model
else:
self._original_browser_model = None
def tearDown(self) -> None:
super().tearDown()
if self._original_browser_model and self._explorer:
# Restore original browser model
self._explorer.window._browser_model = self._original_browser_model
self._explorer = None
self._browser_model.destroy()
################################
# Tests for the find_assets() API
################################
async def test_filter_all_assets(self):
"""Test getting all assets through the SimReadyBrowserExtension.find_assets() API"""
self.assertIsNotNone(self._explorer)
assets = await find_assets()
self.assertEqual(len(assets), 6)
simready_assets = await find_assets(search_words=["SimReady"])
self.assertEqual(len(assets), len(simready_assets))
generic_asset_count = 0
vehicle_asset_count = 0
prop_asset_count = 0
# Verify the type and number of assets created
for a in assets:
self.assertIsNotNone(a)
if isinstance(a, GenericTestAsset):
generic_asset_count += 1
elif isinstance(a, VehicleTestAsset):
vehicle_asset_count += 1
elif isinstance(a, PropAsset):
# PropAsset is a type defined in the SimReady Explorer extension;
# the test data contains PropAssets, so we can verify that they work
prop_asset_count += 1
else:
self.fail("Unknown asset type")
self.assertEqual(prop_asset_count, 3)
self.assertEqual(vehicle_asset_count, 2)
self.assertEqual(generic_asset_count, 1)
async def test_filter_tool_assets(self):
"""Test looking for tools"""
assets = await find_assets(search_words=["tool"])
self.assertEqual(len(assets), 2)
async def test_filter_vehicles_assets(self):
"""Test looking for vehicles"""
assets = await find_assets(search_words=["truck"])
self.assertEqual(len(assets), 2)
for a in assets:
self.assertIsInstance(a, VehicleTestAsset)
# Test looking for vehicles differently
assets = await find_assets(search_words=["truck", "vehicle"])
self.assertEqual(len(assets), 2)
for a in assets:
self.assertIsInstance(a, VehicleTestAsset)
async def test_filter_partial_asset_tag_match(self):
"""Tests that search words are matched on partial asset tags"""
assets = await find_assets(search_words=["ti-tool"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], GenericTestAsset)
# Test looking for vehicles differently
assets = await find_assets(search_words=["motor", "vehicle"])
self.assertEqual(len(assets), 2)
for a in assets:
self.assertIsInstance(a, VehicleTestAsset)
async def test_filter_partial_asset_name_match(self):
"""Test that search words are matched on partial asset names"""
assets = await find_assets(search_words=["tool1"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], GenericTestAsset)
async def test_filter_case_insensitive_match(self):
"""Test that search words are case insensitive"""
assets = await find_assets(search_words=["ToOl1"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], GenericTestAsset)
assets = await find_assets(search_words=["PORT Unit"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], PropAsset)
async def test_filter_no_assets(self):
"""Test search that should return no assets"""
assets = await find_assets(search_words=["equipment", "vehicle"])
self.assertEqual(len(assets), 0)
# Test a filter that contains tags not in the database
assets = await find_assets(search_words=["this_tag_is_not_used", "neither_is_this_one"])
self.assertEqual(len(assets), 0)
#################################################
# Tests for accessing behavior list of assets API
#################################################
async def test_asset_behavior(self):
"""Test getting the behaviors of an asset"""
assets = await find_assets(search_words=["multi-tool"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], GenericTestAsset)
# omni.kit.debug.python.wait_for_client()
self.assertEqual(len(assets[0].behaviors), 2)
self.assertTrue("Features" in assets[0].behaviors)
self.assertEqual(assets[0].behaviors["Features"], ["Five", "Seven", "Nine"])
self.assertTrue("Colors" in assets[0].behaviors)
self.assertEqual(assets[0].behaviors["Colors"], ["Black", "Red", "Blue"])
###########################################
# Tests for the def add_asset_to_stage() API
###########################################
def check_variant(self, prim: Usd.Prim, variantset_name: str, variant_name: str) -> None:
"""Helper function to check the variant value of a prim"""
self.assertIsNotNone(prim)
vsets: Usd.VariantSet = prim.GetVariantSets()
vset: Usd.VariantSet = vsets.GetVariantSet(variantset_name)
self.assertIsNotNone(vset)
self.assertEqual(vset.GetVariantSelection(), variant_name)
def check_instanceable(self, prim: Usd.Prim, variants: Dict[str, str]) -> None:
"""Helper function to check the instanceable value of the 'inst' prim"""
self.assertIsNotNone(prim)
inst_prim: Usd.Prim = find_inst_prim(prim)
self.assertIsNotNone(inst_prim)
self.assertTrue(inst_prim.IsInstanceable())
def check_added_asset(self, stage: Usd.Stage, asset_prim_path: str, parent_prim: Usd.Prim):
"""Helper function to check that an asset was added to a stage"""
self.assertIsNotNone(stage)
self.assertIsNotNone(parent_prim)
self.assertTrue(asset_prim_path)
prim: Usd.Prim = stage.GetPrimAtPath(asset_prim_path)
self.assertIsNotNone(prim)
self.assertEqual(prim.GetParent(), parent_prim)
async def test_add_asset_to_stage(self):
"""Test adding an asset to a stage and setting its variant value"""
assets = await find_assets(search_words=["box_a01"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], PropAsset)
# No undo needed in a unit test
omni.kit.undo.begin_disabled()
# Create a test stage with a default prim called "World"
usd_context: omni.usd.UsdContext = omni.usd.get_context()
usd_context.new_stage()
stage: Usd.Stage = usd_context.get_stage()
self.assertIsNotNone(stage)
omni.kit.commands.execute("CreatePrim", prim_type="XFom", prim_path="/World", create_default_xform=True)
world: Usd.Prim = stage.GetPrimAtPath("/World")
stage.SetDefaultPrim(world)
res, cube = omni.kit.commands.execute("CreatePrim", prim_type="Cube", prim_path="/World/Cube")
cube: Usd.Prim = stage.GetPrimAtPath("/World/Cube")
self.assertIsNotNone(cube)
# Create the variant to set on the asset when adding it to the stage
variantset_name = "PhysicsVariant"
self.assertTrue(variantset_name in assets[0].behaviors)
variants: Dict[str, str] = {variantset_name: assets[0].behaviors[variantset_name][0]}
# Add the asset to the stage, under a specific prim
res, asset_prim_path = add_asset_to_stage(assets[0].url, parent_path=Sdf.Path("/World/Cube"), variants=variants)
self.assertTrue(res)
# Verify that the asset was added to the stage as expected
self.check_added_asset(stage, asset_prim_path, cube)
# Check that the asset has the correct variant set
self.check_variant(
stage.GetPrimAtPath(asset_prim_path), variantset_name, assets[0].behaviors[variantset_name][0]
)
# Check that the asset has the correct instanceable attribute set
self.check_instanceable(stage.GetPrimAtPath(asset_prim_path), variants)
usd_context.close_stage()
omni.kit.undo.end_disabled()
async def test_add_asset_to_stage_using_prims(self):
"""Test adding an asset to a stage using some existent prims, and setting its variant value"""
assets = await find_assets(search_words=["box_a01"])
self.assertEqual(len(assets), 1)
self.assertIsInstance(assets[0], PropAsset)
# No undo needed in a unit test
omni.kit.undo.begin_disabled()
# Create a test stage with a default prim called "World" and some cubes
usd_context: omni.usd.UsdContext = omni.usd.get_context()
usd_context.new_stage()
stage: Usd.Stage = usd_context.get_stage()
self.assertIsNotNone(stage)
omni.kit.commands.execute("CreatePrim", prim_type="XFom", prim_path="/World", create_default_xform=True)
world: Usd.Prim = stage.GetPrimAtPath("/World")
stage.SetDefaultPrim(world)
prim_paths: List[Sdf.Path] = []
for i in range(3):
prim_path: str = f"/World/Cube{i}"
prim_paths.append(Sdf.Path(prim_path))
omni.kit.commands.execute("CreatePrim", prim_type="Cube", prim_path=prim_path)
omni.kit.commands.execute("TransformPrimSRTCommand", path=prim_path, new_translation=Gf.Vec3d(0, 0, i * 20))
# Create the variant to set on the asset when adding it to the stage
variantset_name = "PhysicsVariant"
self.assertTrue(variantset_name in assets[0].behaviors)
variants: Dict[str, str] = {variantset_name: assets[0].behaviors[variantset_name][0]}
# Add the asset to the stage, using the prims created above, without replacing them
res, asset_prim_path = add_asset_to_stage_using_prims(
usd_context, stage, assets[0].url, variants=variants, replace_prims=False, prim_paths=prim_paths
)
self.assertTrue(res)
# Verify that the asset was added to the stage as expected
self.check_added_asset(stage, asset_prim_path, world)
# Check that the asset has the correct variant set
self.check_variant(
stage.GetPrimAtPath(asset_prim_path), variantset_name, assets[0].behaviors[variantset_name][0]
)
# Check that the asset has the correct instanceable attribute set
self.check_instanceable(stage.GetPrimAtPath(asset_prim_path), variants)
# Verify that the cubes were not replaced
for i in range(3):
prim_path: str = f"/World/Cube{i}"
prim: Usd.Prim = stage.GetPrimAtPath(prim_path)
self.assertIsNotNone(prim)
self.assertTrue(prim.IsValid())
self.assertEqual(prim.GetTypeName(), "Cube")
self.assertEqual(prim.GetParent(), world)
self.assertEqual(prim.GetAttribute("xformOp:translate").Get(), Gf.Vec3d(0, 0, i * 20))
# Verify that the newly added asset is at the correct position
position: Gf.Vec3d = get_average_position_of_prims([stage.GetPrimAtPath(path) for path in prim_paths])
self.assertEqual(stage.GetPrimAtPath(asset_prim_path).GetAttribute("xformOp:translate").Get(), position)
# Add the asset to the stage, using the prims created above, replacing them
res, asset_prim_path = add_asset_to_stage_using_prims(
usd_context, stage, assets[0].url, variants=variants, replace_prims=True, prim_paths=prim_paths
)
# Verify that the asset was added to the stage as expected
self.assertTrue(res)
self.check_added_asset(stage, asset_prim_path, world)
# Check that the asset has the correct variant set
self.check_variant(
stage.GetPrimAtPath(asset_prim_path), variantset_name, assets[0].behaviors[variantset_name][0]
)
# Check that the asset has the correct instanceable attribute set
self.check_instanceable(stage.GetPrimAtPath(asset_prim_path), variants)
# Verify that the newly added asset's position is the average of the replaced cubes
self.assertEqual(stage.GetPrimAtPath(asset_prim_path).GetAttribute("xformOp:translate").Get(), position)
# Verify that the cubes are not in the stage
for i in range(3):
self.assertFalse(stage.GetPrimAtPath(f"/World/Cube{i}").IsValid())
usd_context.close_stage()
omni.kit.undo.end_disabled()
| 15,779 | Python | 44.872093 | 120 | 0.651879 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/samples/simready_explorer_api_sample.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# The following line is used to mark the start of the code that is to be pulled into the documentation
# example-begin simready_explorer_api_sample
import asyncio
from typing import List, Tuple
import omni.kit.app
import omni.simready.explorer as sre
import omni.usd
from pxr import Gf, Sdf, Usd
async def main():
# 1. Find all residential wooden chair assets.
    # We use multiple search terms; some will match tags, others asset names
assets = await sre.find_assets(search_words=["residential", "chair", "wood"])
print(f"Found {len(assets)} chairs")
# 2. Prepare to configure the assets
# All SimReady Assets have a Physics behavior, which is implemented as a
# variantset named PhysicsVariant. To enable rigid body physics on an asset,
# this variantset needs to be set to "RigidBody".
variants = {"PhysicsVariant": "RigidBody"}
# 3. Add all assets found in step (1) to the current stage as a payload
added_prim_paths: List[Sdf.Path] = []
for i, asset in enumerate(assets):
pos = -200 + 200 * i
res, prim_path = sre.add_asset_to_stage(
asset.main_url, position=Gf.Vec3d(pos, 0, -pos), variants=variants, payload=True
)
if res:
print(f"Added '{prim_path}' from '{asset.main_url}'")
# 4. Find an ottoman
assets = await sre.find_assets(search_words=["ottoman"])
print(f"Found {len(assets)} ottomans")
# 5. Replace the first chair with an ottoman
if assets and added_prim_paths:
usd_context: omni.usd.UsdContext = omni.usd.get_context()
stage: Usd.Stage = usd_context.get_stage() if usd_context else None
await omni.kit.app.get_app().next_update_async()
res, prim_path = sre.add_asset_to_stage_using_prims(
usd_context,
stage,
assets[0].main_url,
variants=variants,
replace_prims=True,
prim_paths=[added_prim_paths[0]],
)
if res:
print(f"Replaced assets '{added_prim_paths[0]}' with '{prim_path}' from '{assets[0].main_url}'")
asyncio.ensure_future(main())
# example-end simready_explorer_api_sample
| 2,607 | Python | 39.123076 | 108 | 0.678174 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/docs/CHANGELOG.md | Changelog
=========
This document records all notable changes to ``omni.simready.explorer`` extension.
This project adheres to `Semantic Versioning <https://semver.org/>`_.
## [1.0.26] - 2023-06-20
- OM-95538: Don't turn off the instanceable attribute on assets with PhysicsVariant set to RigidBody when drag-and-dropping them
into the stage
## [1.0.25] - 2023-06-02
- OM-96886: Update screenshot used in extension registry
## [1.0.24] - 2023-06-01
- OM-96886: Update badge on SimReady asset thumbnails
## [1.0.23] - 2023-05-30
- OM-95406: Set default physics behavior of all assets to "RigidBody"
## [1.0.22] - 2023-05-17
- OM-94398: Removed superfluous print statement from class PropAsset constructor
- OM-94290: Normalized asset paths to use forward slashes ('/') as directory separators
## [1.0.21] - 2023-04-25
- OM-90516: Exposed Collect command in asset right-click menu
## [1.0.20] - 2023-04-24
- OM-83563: Created developer docs and tweaked APIs
## [1.0.19] - 2023-04-20
- Get dropped prim path when preview disabled
## [1.0.18] - 2023-04-14
- OM-90136: Disable preview when drag and drop into viewport
## [1.0.17] - 2023-04-13
- OM-90440: Update UI style of property widget to be same as other property widgets
## [1.0.16] - 2023-03-24
- OM-87776: Assets are browsed on AWS S3 staging
- List of tags in asset details window resizes to fit the window
## [1.0.15] - 2023-03-24
- OM-87028: "Replace Current Selection" doesn't apply to multiple asset selection
- Fix leak of SimReadyDetailDelegate instance
## [1.0.14] - 2023-03-22
- OM-87028: Right-click doesn't select assets, and right-click menu works with multiple assets selected
## [1.0.13] - 2023-03-21
- OM-87026: Simple message displayed in property window when multiple assets selected
- OM-87032: Disabled tooltips since they were interfering with the right click menu
## [1.0.12] - 2023-03-18
- OM-85649: support of lazy load workflow in Create
## [1.0.11] - 2023-03-17
- OM-86101: Multiple drag items into viewport or stage window
## [1.0.10] - 2023-03-15
- OM-86315: Turn off instanceable attribute on _inst prim when adding an asset with Physics on, otherwise turn instanceable attribute on.
- Fixed issue with physics badge update on thumbnails, where the badge would be visible in the asset detail view even when physics was off (None)
## [1.0.9] - 2023-03-13
- OM-86077: Added Open, Open with Payloads Disabled, Add at Selection, Copy URL Link right-click menu commands
- OM-85906: Renamed menu and tab to "SimReady Explorer"
- OM-86315: Set PhysicsVariant to None by default
## [1.0.8] - 2023-03-08
- OM-84838: Added API for listing an asset's behaviors, and adding an asset to the current stage
## [1.0.7] - 2023-03-07
- OM-83352: Updated asset property panel
## [1.0.6] - 2023-03-02
- OM-84829: Fixed searching against tags with spaces (multi-word tags)
## [1.0.5] - 2023-02-28
- OM-84243: Always search all assets
- OM-84245: Update tag list in search bar on search results
## [1.0.4] - 2023-02-16
- OM-66968: API for filtering assets
- Updated asset database; now includes 539 assets
## [1.0.3] - 2023-02-15
- OM-52823: The explorer looks for the asset database (asset_info.json) on omniverse://simready.ov.nvidia.com/Projects/
- Updated asset database; now includes 487 assets
## [1.0.2] - 2023-02-13
- Assets can be dropped onto the stage from the asset browser (requires omni.kit.widget.stage v2.7.24 which has not been released yet)
- Physics is enabled by default
- New Physx thumbnail badge is displayed when physics is enabled
- Updated asset database; now includes 317 assets (304 are simready)
- OM-81970: Open property panel by default
- OM-81973: Center widgets in property panel
- OM-81977: SimReady Explorer is docked when it starts
  This required not showing the window after startup. Show it from the menu "Window/Browsers/Sim Ready".
- OM-81962: Remove gear button from search bar
omni.simready.explorer-1.0.2 requires the following extensions:
- omni.kit.widget.stage-2.7.24 (not available as of 2023-02-13)
- omni.kit.browser.core-2.2.2
- omni.kit.browser.folder.core-1.7.5
- omni.kit.widget.searchfield-1.1.0
## [1.0.1] - 2023-02-09
- Assets can be pre-configured with right-click menu options, or in the properties view.
- Drag-and-drop of pre-configured asset into the viewport
- Asset properties panel shows the following properties name, QCode, thumbnail, tags, physics behavior
- Asset properties panel updates when selected asset changes
- Supporting 158 assets on omniverse://simready.ov.nvidia.com/Projects/simready_content/common_assets/props/
## [1.0.0] - 2023-02-08
- Hierarchical Asset Categories are displayed
- Asset thumbnails are displayed, with tooltip showing asset name, dimensions and tags
- Asset property panel is displayed when user enables it through the top-right icon
- Search bar is populated with categories as tags when user clicks on a category in the Asset Category panel, and the assets are filtered
| 4,952 | Markdown | 40.621848 | 149 | 0.743336 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/docs/README.md | # Simready Explorer Extension [omni.simready.explorer]
A browser for SimReady Assets. | 86 | Markdown | 27.999991 | 54 | 0.813953 |
DigitalBotLab/App/kit-app/source/extensions/omni.simready.explorer/docs/Overview.md | # SimReady Explorer Developer Guide
## Overview
**SimReady Assets** are the building blocks of industrial virtual worlds. They are built on top of the Universal Scene Description (USD) platform and have accurate physical properties, behaviors, and connected data streams. They are made up of multiple files such as USD layers, material description files (.mdl), textures, thumbnails, etc.
The **SimReady Explorer extension** allows for working with libraries of SimReady Assets, by enabling users to:
- Find assets by words matched against tags and asset names
- Configure asset behavior, appearance, etc. in the browser, before they are assembled into a scene
- Assemble assets into virtual worlds through Omniverse applications such as USD Composer, DriveSim, Isaac Sim, etc.
Through the [**SimReady Explorer API**](./API.rst), developers can:
- Search for assets by matching search words against tags and asset names
- Configure various aspects of assets, as defined by the asset class
- Add the configured assets to a stage
## Finding assets
SimReady Assets have a name, tags, and labels. The labels are derived from the [Wiki Data](https://www.wikidata.org) database, and as such are backed by QCodes. Note that the labels and the QCode of an asset are also part of its list of tags. Both tags and labels can consist of multiple, space separated words.
Finding SimReady Assets is like querying a database. Each search term in the list is matched against the asset's name and tags. Partial matches of search terms in names, tags, and labels are also returned. For example, the search term "wood" would match names such as "recycledwoodpallete" and "crestwood_sofa".
```{eval-rst}
See the :doc:`find_assets() <omni.simready.explorer/omni.simready.explorer.find_assets>` API for details on how to programmatically search for assets.
```
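For example, a minimal query might look like the sketch below (the search words are illustrative, and `find_assets()` must be awaited from an async context):
```python
import asyncio
import omni.simready.explorer as sre

async def list_wooden_chairs():
    # Search words are matched against asset names, tags, and labels
    assets = await sre.find_assets(search_words=["residential", "chair", "wood"])
    for asset in assets:
        print(asset.name, asset.url)

asyncio.ensure_future(list_wooden_chairs())
```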
## Configuring assets and adding them to a stage
The `find_assets()` API returns a list of `SimreadyAsset` objects, which can be added to the current stage with the desired behaviors enabled. The behaviors supported currently are the USD variant sets exposed by the asset.
When adding assets to a stage, they can be inserted at a given location, can be parented to another prim, or can be added at or even replace a list of prims.
```{eval-rst}
See the :doc:`add_asset_to_stage() <omni.simready.explorer/omni.simready.explorer.add_asset_to_stage>`, and the :doc:`add_asset_to_stage_using_prims() <omni.simready.explorer/omni.simready.explorer.add_asset_to_stage_using_prims>` APIs for details on how to programmatically add assets to the current stage.
```
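As a minimal sketch (the search words, the `/World` parent path, and the `RigidBody` variant value are illustrative):
```python
import asyncio
import omni.simready.explorer as sre
from pxr import Sdf

async def add_first_match():
    assets = await sre.find_assets(search_words=["box"])
    if not assets:
        return
    # All SimReady Assets expose physics through the PhysicsVariant variantset
    variants = {"PhysicsVariant": "RigidBody"}
    res, prim_path = sre.add_asset_to_stage(assets[0].url, parent_path=Sdf.Path("/World"), variants=variants)
    if res:
        print(f"Added '{prim_path}'")

asyncio.ensure_future(add_first_match())
```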
# SimReady Explorer API Tutorial
The following code illustrates how to find assets, add them to the current scene, and even replace some of them with other assets. The code below can be executed from the Script Editor of any Omniverse application.
```{eval-rst}
.. literalinclude:: ../../../../source/extensions/omni.simready.explorer/samples/simready_explorer_api_sample.py
:language: python
:start-after: example-begin simready_explorer_api_sample
:end-before: example-end simready_explorer_api_sample
:dedent:
```
## Future enhancements
The SimReady Explorer API will be extended in the near future to allow defining custom asset classes with specific behaviors.
| 3,289 | Markdown | 68.999999 | 342 | 0.779264 |
DigitalBotLab/App/kit-app/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/__init__.py | # run startup tests first
from .test_app_startup import *
from .test import *
from .test_state_manager import *
| 112 | Python | 21.599996 | 33 | 0.75 |
DigitalBotLab/AssetProvider/README.md | # <img src="Images/logo.png" alt="Logo" width="50" height="50"> Digital Bot Lab: AssetProvider


# Experience thousands of robots using Nvidia Omniverse

The Digital Bot Lab's Asset Provider Extension is a cutting-edge solution designed to seamlessly connect our extensive digital robot collection from `ROS` with the powerful `NVIDIA Omniverse platform`. With our connector, users can effortlessly import digital robots in `.usd` format, enabling them to leverage the full potential of Omniverse applications.
## 1. Get Started
Experience the future of robotics with the Digital Bot Lab's Insiderobo Connector, where the connection between digital robots and Omniverse becomes effortless and transformative.

### 1.1 Install Omniverse USD Composer
This project currently targets `Omniverse USD Composer`. Please follow the instructions to install it first:
[USD Composer Overview](https://docs.omniverse.nvidia.com/composer/latest/index.html#:~:text=NVIDIA%20Omniverse%E2%84%A2%20USD%20Composer,is%20based%20on%20Pixar's%20USD.)
### 1.2 Import the extension
To install the extension to Omniverse USD Composer:
First, clone the respository
```bash
git clone https://github.com/DigitalBotLab/AssetProvider
```
Now open `Omniverse USD Composer` and go to `Menu Bar` -> `Window` -> `Extensions` -> `Options` -> `Settings`
Add your `<path_to_this_repository>/AssetProvider/dbl-exts-asset/exts` to the settings

### 1.3 Enable the extension

Under the `Third Party` filter, enable our extension.
Now, after opening the `Window` -> `Asset Store (Beta)` tab, you can see this extension by filtering the asset provider to `DIGITAL BOT LAB`.
## 2. Format: USD
Our digital robots are meticulously crafted and well-configured in .usd format, complete with physics, rigid bodies, and joints. This ensures a realistic and immersive experience when interacting with the robots within Omniverse.
## 3. ROS <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="Ros" width="70" height="70">
The Insiderobo Connector is built upon the foundation of the Robot Operating System (ROS), an open-source framework that empowers researchers and developers to easily build and reuse code across various robotics applications. This integration allows for enhanced collaboration, accelerated development, and seamless integration of digital robots into the Omniverse ecosystem.
## 4. License
Our project adheres to the Robot Operating System (ROS) framework, which enables us to develop and integrate robotic systems efficiently. We are proud to announce that our project is released under the BSD 3.0 license. This license ensures that our software is open-source, allowing users to freely use, modify, and distribute it while maintaining the necessary attribution and disclaimer requirements. By embracing ROS and the BSD 3.0 license, we aim to foster collaboration and innovation within the robotics community.
| 3,228 | Markdown | 50.253967 | 521 | 0.784077 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/config/extension.toml | [package]
authors = ["Digital Bot Lab"]
category = "services"
changelog = "docs/CHANGELOG.md"
version = "1.0.0"
title = "Digital Bot Lab Asset Provider"
description="Asset provider for Evermotion"
readme = "docs/README.md"
keywords = ["asset", "provider", "robot", "search", "digitalbotlab"]
icon = "data/logo.png"
preview_image = "data/preview.png"
repository = ""
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "omni.assetprovider.digitalbotlab"
| 605 | TOML | 27.857142 | 105 | 0.722314 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/config/extension.gen.toml | [package]
exchange = true
partner = true | 40 | TOML | 12.666662 | 15 | 0.75 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/constants.py | IN_RELEASE = True
SETTING_ROOT = "/exts/omni.assetprovider.template/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
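# Production endpoints are used when IN_RELEASE is True; otherwise a local development server is assumed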
STORE_URL = "http://api.digitalbotlab.com/api/omniverse/assets" if IN_RELEASE else "http://localhost:8000/api/omniverse/assets"
THUMBNAIL_URL = "http://api.digitalbotlab.com/image/" if IN_RELEASE else "http://localhost:8000/image/"
DBL_ASSETPROVIDER_INTRO = "\n The Digital Bot Lab's Insiderobo Connector is \n a cutting-edge solution designed to seamlessly \n connect our extensive digital robot collection \n with the powerful NVIDIA Omniverse platform. \n\n Learn more about us: https://digitalbotlab.com/ \n Learn more about Omniverse: https://www.nvidia.com/en-us/omniverse/ \n Learn more about Insiderobo Connector: https://digitalbotlab.com/omniverse/asset-provider \n \n Contact us: [email protected]" | 836 | Python | 75.090902 | 482 | 0.773923 |
DigitalBotLab/AssetProvider/dbl-exts-asset/exts/omni.assetprovider.digitalbotlab/omni/assetprovider/digitalbotlab/extension.py | import importlib
import carb
import carb.settings
import carb.tokens
import omni.ui as ui
import omni.ext
from omni.services.browser.asset import get_instance as get_asset_services
from .model import DBLAssetProvider
from .constants import SETTING_STORE_ENABLE, IN_RELEASE, DBL_ASSETPROVIDER_INTRO
import aiohttp
import asyncio
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniAssetproviderDigitalbotlabExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.assetprovider.digitalbotlab] omni assetprovider digitalbotlab startup")
self._asset_provider = DBLAssetProvider()
self._asset_service = get_asset_services()
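        # Register this provider with the asset browser service so it appears in the Asset Store (Beta) window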
self._asset_service.register_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, True)
print("what", carb.settings.get_settings().get(SETTING_STORE_ENABLE))
self._window = ui.Window("Digital Bot Lab: AssetProvider", width=300, height=300)
with self._window.frame:
with ui.VStack():
ui.ImageWithProvider(
f"{EXTENSION_FOLDER_PATH}/data/logo.png",
width=30,
height=30,
)
ui.Label("Introduction:", height = 20)
#intro_field = ui.StringField(multiline = True, readonly = True)
model = ui.SimpleStringModel(DBL_ASSETPROVIDER_INTRO)
field = ui.StringField(model, multiline=True, readonly=True, height=200)
# intro_field.model.set_value()
with ui.VStack(visible= not IN_RELEASE):
ui.Button("debug_authenticate", height = 20, clicked_fn = self.debug_authenticate)
ui.Button("debug_token", height = 20, clicked_fn = self.debug_token)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
def on_shutdown(self):
print("[omni.assetprovider.digitalbotlab] omni assetprovider digitalbotlab shutdown")
self._asset_service.unregister_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, False)
self._asset_provider = None
self._asset_service = None
def debug_authenticate(self):
async def authenticate():
params = {"email": "[email protected]", "password": "97654321abc"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/auth/signin", json=params) as response:
self._auth_params = await response.json()
print("auth_params", self._auth_params)
self.token = self._auth_params["token"]
asyncio.ensure_future(authenticate())
def debug_token(self):
async def verify_token():
params = {"token": self.token, "asset": "test"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/omniverse/download", json=params) as response:
response = await response.json()
print("response", response)
asyncio.ensure_future(verify_token())
def debug(self):
print("debug")
STORE_URL = "http://localhost:8000/api/omniverse/assets"
params = {}
params["page"] = 1
async def search():
# Uncomment once valid Store URL has been provided
async with aiohttp.ClientSession() as session:
async with session.get(f"{STORE_URL}", params=params) as resp:
result = await resp.read()
result = await resp.json()
items = result
print("items", items)
asyncio.ensure_future(search()) | 4,414 | Python | 40.650943 | 119 | 0.614182 |
DigitalBotLab/Robots/README.md | # Robots
Repostory for all robots
| 34 | Markdown | 10.666663 | 24 | 0.794118 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/extension.py | import omni.ext
import omni.ui as ui
import omni.timeline
import omni.kit.app
import carb
from typing import Optional, List
import numpy as np
from pxr import Gf
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
from .kinova.kinova import Kinova
from .kinova.coffee_controller import CoffeeMakerController
from .kinova.numpy_utils import euler_angles_to_quat, quat_mul
# UI
from .ui.style import julia_modeler_style
from .ui.custom_multifield_widget import CustomMultifieldWidget
from .ui.custom_bool_widget import CustomBoolWidget
class ControlExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[control] control startup")
self.ext_id = ext_id
# set up fps limit
carb.settings.get_settings().set_float("/app/runLoops/main/rateLimitFrequency", 30)
carb.settings.get_settings().set_float("/app/runLoops/present/rateLimitFrequency", 30)
carb.settings.get_settings().set_bool("/rtx/ecoMode/enabled", True)
# ui
self._window = ui.Window("Robot control", width=300, height=300)
self._window.frame.style = julia_modeler_style
with self._window.frame:
with ui.VStack():
# ui.Button("Set Robot", height = 20, clicked_fn=self.set_robot)
ui.Line(height = 2)
ui.Button("Register Physics Event", height = 50, clicked_fn=self.register_physics_event)
with ui.HStack(height = 20):
ui.Label("Robot Prim Path:", width = 200)
self.robot_path_widget = ui.StringField(width = 300)
self.robot_path_widget.model.set_value("/World/kinova_gen3_7_hand/kinova")
with ui.HStack(height = 20):
self.server_widget = CustomBoolWidget(label="Connect to Server", default_value=False)
ui.Spacer(height = 9)
ui.Label("End Effector", height = 20)
with ui.HStack(height = 20):
self.ee_pos_widget = CustomMultifieldWidget(
label="Transform",
default_vals=[0, 0, 0],
height = 20,
)
ui.Spacer(height = 9)
with ui.HStack(height = 20):
self.ee_ori_widget = CustomMultifieldWidget(
label="Orient (Euler)",
default_vals=[90, 0.0, 90],
height = 20,
)
ui.Spacer(height = 9)
ui.Button("Update EE Target", height = 20, clicked_fn=self.update_ee_target)
ui.Button("Open/Close Gripper", height = 20, clicked_fn=self.toggle_gripper)
ui.Spacer(height = 9)
ui.Line(height = 2)
with ui.HStack(height = 20):
self.joint_read_widget = CustomMultifieldWidget(
label="Joint Angle (read only):",
sublabels=["j1", "j2", "j3", "j4", "j5", "j6", "j7"],
default_vals=[0.0] * 7,
read_only= True
)
with ui.HStack(height = 20):
self.ee_pos_read_widget = CustomMultifieldWidget(
label="EE Position(read only):",
sublabels=["x", "y", "z"],
default_vals=[0, 0, 0],
read_only= True
)
with ui.HStack(height = 20):
self.ee_ori_quat_read_widget = CustomMultifieldWidget(
label="EE Quaternion(read only):",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
read_only= True
)
# with ui.HStack(height = 20):
# self.ee_ori_euler_read_widget = CustomMultifieldWidget(
# label="EE Euler Rot(read only):",
# sublabels=["x", "y", "z"],
# default_vals=[0, 0, 0],
# read_only= True
# )
# vision part
ui.Spacer(height = 9)
ui.Line(height = 2)
ui.Button("Test vision", height = 20, clicked_fn = self.test_vision)
ui.Button("Draw vision", height = 20, clicked_fn = self.draw_vision)
ui.Button("Draw vision 2", height = 20, clicked_fn = self.draw_vision2)
ui.Spacer(height = 9)
ui.Line(height = 2)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
ui.Button("Debug2", height = 20, clicked_fn = self.debug2)
ui.Button("yh Debug", height = 20, clicked_fn = self.yuanhong_debug)
ui.Spacer(height = 9)
ui.Line(height = 2)
# robot
self.robot = None
self.controller = None
self.event_t = 0.0
# stream
self._is_stopped = True
self._tensor_started = False
def on_shutdown(self):
print("[control] control shutdown")
########################## events #######################################################
def register_physics_event(self):
print("register_physics_event")
# timeline
stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event)
def _on_timeline_event(self, event):
if event.type == int(omni.timeline.TimelineEventType.PLAY):
self._physics_update_sub = omni.physx.get_physx_interface().subscribe_physics_step_events(self._on_physics_step)
self._is_stopped = False
elif event.type == int(omni.timeline.TimelineEventType.STOP):
self._physics_update_sub = None
self._timeline_sub = None
self._is_stopped = True
self._tensor_started = False
self.robot = None
self.controller = None
def _can_callback_physics_step(self) -> bool:
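        # Defer robot and controller setup to the first physics step after the timeline starts playing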
if self._is_stopped:
return False
if self._tensor_started:
return True
self._tensor_started = True
self.set_robot()
return True
def _on_physics_step(self, dt):
self.event_t += dt # update time
if not self._can_callback_physics_step():
return
if self.controller:
# print("_on_physics_step")
self.controller.forward()
if self.event_t >= 1.0:
# update joint info
self.update_robot_ui()
self.event_t = 0.0
############################################# Robot #######################################
def update_ee_target(self):
print("update_ee_target")
if self.controller:
self.controller.update_event("move")
current_pos, current_rot = self.robot.end_effector.get_world_pose()
pos = [self.ee_pos_widget.multifields[i].model.as_float for i in range(3)]
rot = [self.ee_ori_widget.multifields[i].model.as_float for i in range(3)]
pos = np.array(current_pos) + np.array(pos)
rot = euler_angles_to_quat(rot, degrees=True)
# current_rot = np.array([current_rot[1], current_rot[2], current_rot[3], current_rot[0]])
# rot = quat_mul(current_rot, rot)
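            # Reorder quaternion components (assuming euler_angles_to_quat returns x, y, z, w) to w, x, y, z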
rot = np.array([rot[3], rot[0], rot[1], rot[2]])
print("updating controller ee target:", pos, rot)
self.controller.update_ee_target(pos, rot)
def set_robot(self):
print("set_robot")
# set robot
prim_path = self.robot_path_widget.model.as_string
self.robot = Kinova(prim_path = prim_path, name = "kinova_robot")
self.robot.initialize()
print("kinova_info", self.robot.num_dof)
print("kinova_gripper", self.robot.gripper._gripper_joint_num)
# set controller
self.controller = CoffeeMakerController("task_controller", self.robot, connect_server=self.server_widget.value)
def toggle_gripper(self):
print("Toggle Gripper")
if self.controller:
event = "open" if self.controller.event == "close" else "close"
self.controller.update_event(event)
######################### ui #############################################################
def update_robot_ui(self):
"""
read robot joint angles and update ui
"""
assert self.robot, "robot is not initialized"
joint_angles = self.robot.get_joint_positions()
joint_angles = [np.rad2deg(joint_angles[i]) for i in range(7)]
self.joint_read_widget.update(joint_angles)
self.ee_pos_read_widget.update(self.robot.end_effector.get_world_pose()[0])
rot_quat = self.robot.end_effector.get_world_pose()[1]
self.ee_ori_quat_read_widget.update(rot_quat)
# rot_euler = quat_to_euler_angles(rot_quat, degrees=True)
# print("rot_euler:", rot_euler)
# self.ee_ori_euler_read_widget.update(rot_euler[0])
def debug(self):
print("debug")
# if self.robot:
# self.controller.apply_high_level_action("pick_up_capsule")
# self.controller.apply_high_level_action("move_capsule_to_coffee_machine")
if self.robot:
self.controller.apply_high_level_action("pick_up_box")
def debug2(self):
print("debug2")
if self.robot:
# self.controller.apply_high_level_action("pick_up_capsule")
# self.controller.apply_high_level_action("move_capsule_to_coffee_machine")
# self.controller.apply_high_level_action("pick_up_papercup")
# self.controller.apply_high_level_action("open_coffee_machine_handle")
self.controller.apply_high_level_action("close_coffee_machine_handle")
self.controller.apply_high_level_action("press_coffee_machine_button")
# from omni.isaac.core.prims import XFormPrim
# from .kinova.utils import get_transform_mat_from_pos_rot
# stage = omni.usd.get_context().get_stage()
# base_prim = XFormPrim("/World/capsule")
# base_world_pos, base_world_rot = base_prim.get_world_pose()
# base_mat = get_transform_mat_from_pos_rot(base_world_pos, base_world_rot)
def yuanhong_debug(self):
# target_mat = get_transform_mat_from_pos_rot([-0.083, 0.43895, 0], [0.5] * 4)
# rel_mat = target_mat * base_mat.GetInverse()
# print("base_mat:", base_mat)
# print("target_mat:", target_mat)
# print("rel_mat:", rel_mat.ExtractTranslation(), rel_mat.ExtractRotationQuat())
print("yuanhong_debug")
if self.robot:
self.controller.apply_high_level_action("pick_up_papercup")
self.controller.apply_high_level_action("move_papercup_to_coffee_machine")
#obtain_robot_state = self.controller.obtain_robot_state()
# print("obtain_robot_state:", obtain_robot_state)
# from pxr import UsdGeom, Usd
# stage = omni.usd.get_context().get_stage()
# cup_prim = stage.GetPrimAtPath("/World/Simple_Paper_Cup")
# xformable = UsdGeom.Xformable(cup_prim)
# mat0 = xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
# pos = mat0.ExtractTranslation()
# print("cup pos:", pos)
pass
########################## vision ########################################################
def test_vision(self):
print("test_vision")
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url=None, vision_folder="I:\\Temp")
# self.vision_helper.get_image_from_webcam()
        self.vision_helper.obtain_camera_transform(camera_path="/World/Camera")
t = self.vision_helper.camera_mat.ExtractTranslation()
print("camera offset", t)
        foc = 1000  # assumed focal length in pixels for this test camera
        world_d = self.vision_helper.get_world_direction_from_camera_point(0, 0, foc, foc)
        world_d = world_d.GetNormalized()
print("world_d:", world_d)
self.vision_helper.draw_debug_line(t, world_d)
self.vision_helper.get_hit_position(t, world_d, target_prim_path="/World/Desk")
# from omni.physx import get_physx_scene_query_interface
# t = carb.Float3(t[0], t[1], t[2])
# d = carb.Float3(world_d[0], world_d[1], world_d[2])
# get_physx_scene_query_interface().raycast_all(t, d, 100.0, self.report_all_hits)
# def report_all_hits(self, hit):
# stage = omni.usd.get_context().get_stage()
# from pxr import UsdGeom
# usdGeom = UsdGeom.Mesh.Get(stage, hit.rigid_body)
# print("hit:", hit.rigid_body, usdGeom.GetPrim().GetPath(), hit.position, hit.normal, hit.distance, hit.face_index)
# return True
def draw_vision(self):
# print("draw_vision")
# from omni.ui import scene as sc
# from omni.ui import color as cl
# from omni.kit.viewport.utility import get_active_viewport_window
# self._viewport_window = get_active_viewport_window()
# if hasattr(self, "scene_view"):
# self.scene_view.scene.clear()
# if self._viewport_window:
# self._viewport_window.viewport_api.remove_scene_view(self.scene_view)
# self.scene_view = None
# with self._viewport_window.get_frame(0):
# self.scene_view = sc.SceneView()
# self.scene_view.scene.clear()
# points_b = [[12500.0, 0, 0.0], [0.0, 0, 12500.0], [-12500.0, 0, 0.0], [-0.0, 0, -12500.0], [12500.0, 0, -0.0]]
# with self.scene_view.scene:
# transform = sc.Transform()
# # move_ges = MoveGesture(transform)
# with transform:
# for pt in points_b:
# sc.Curve([pt, [0, 0, 0]], thicknesses=[1.0], colors=[cl.green], curve_type=sc.Curve.CurveType.LINEAR)
# self._viewport_window.viewport_api.add_scene_view(self.scene_view)
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url="http://127.0.0.1:7860/run/predict",
vision_folder="I:\\Temp",
camera_prim_path="/World/Camera",
vision_model="fastsam")
self.vision_helper.capture_image(folder_path="I:\\Temp\\VisionTest", image_name="test")
return
def draw_vision2(self):
# print("draw_vision2")
from .vision.vision_helper import VisionHelper
self.vision_helper = VisionHelper(vision_url="http://127.0.0.1:7860/run/predict",
vision_folder="I:\\Temp",
camera_prim_path="/World/Camera",
vision_model="fastsam")
# self.vision_helper.capture_image(folder_path="I:\\Temp\\VisionTest", image_name="test")
# return
import cv2
import os
import numpy as np
from .vision.utils import find_bottom_point, find_left_point, get_projection, get_box_transform_from_point
img_path = None
print("os.listdir", os.listdir("I:\\Temp\\VisionTest"))
for item in os.listdir("I:\\Temp\\VisionTest"):
print("item:", item)
if item.endswith(".png") and item.startswith("test"):
img_path = os.path.join("I:\\Temp\\VisionTest", item)
break
assert img_path, "image not found"
print("img_path:", img_path)
image = cv2.imread(img_path)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_blue = np.array([90, 50, 50])
upper_blue = np.array([130, 255, 255])
mask = cv2.inRange(hsv_image, lower_blue, upper_blue)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        assert contours, "no blue contour detected in the captured image"
        contour = contours[0]
arclen = cv2.arcLength(contour, True)
# WARNING: 0.005 is a magic number
contour = cv2.approxPolyDP(contour, arclen*0.005, True)
cv2.drawContours(image, [contour], -1, (0, 255, 0), 2) # Green color, thickness 2
print("contour:", contour, len(contour))
# response_data = self.vision_helper.get_prediction_data("I:\\Temp\\0.jpg", "grey tea tower")
# print(response_data)
# response_data = {'data': ['[[[[736, 113]], [[608, 133]], [[591, 151]], [[590, 373]], [[620, 419]], [[646, 419]], [[741, 392]], [[790, 162]]]]'], 'is_generating': False, 'duration': 11.769976139068604, 'average_duration': 11.769976139068604}
# import json
# import numpy as np
# countour = json.loads(response_data["data"][0])
print("countour", contour)
points = np.array([p[0] for p in contour])
print("p0", points)
bottom_point = find_bottom_point(points)
left_point = find_left_point(points)
print("bottom_point", bottom_point)
        image = cv2.circle(image, tuple(bottom_point), radius=10, color=(255, 0, 255), thickness=-1)
        image = cv2.circle(image, tuple(left_point), radius=10, color=(255, 255, 0), thickness=-1)
cv2.imshow('Blue Contours', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#REFERENCE: Camera Calibration and 3D Reconstruction from Single Images Using Parallelepipeds
        self.vision_helper.obtain_camera_transform(camera_path="/World/Camera")
        camera_pos = self.vision_helper.camera_mat.ExtractTranslation()
        print("camera offset", camera_pos)
        foc = 910  # assumed focal length in pixels for the current render settings
        bottom_d = self.vision_helper.get_world_direction_from_camera_point(bottom_point[0], 1080 - bottom_point[1], foc, foc)
        bottom_d = bottom_d.GetNormalized()
        print("bottom_d:", bottom_d)
        left_d = self.vision_helper.get_world_direction_from_camera_point(left_point[0], 1080 - left_point[1], foc, foc)
        left_d = left_d.GetNormalized()
        print("left_d:", left_d)
self.vision_helper.draw_debug_line(camera_pos, left_d, length=10)
# self.vision_helper.get_hit_position(t, world_d, target_prim_path="/World/Desk")
box_transform, box_rotation = get_box_transform_from_point(camera_pos, bottom_d, left_d, affordance_z = -0.02)
print("box_transform:", box_transform)
print("box_rotation:", box_rotation)
stage = omni.usd.get_context().get_stage()
stage.DefinePrim("/World/box", "Xform")
mat = Gf.Matrix4d().SetScale(1) * \
Gf.Matrix4d().SetRotate(box_rotation) * \
Gf.Matrix4d().SetTranslate(Gf.Vec3d(box_transform[0], box_transform[1], box_transform[2]))
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/box",
new_transform_matrix=mat,
)
| 19,930 | Python | 41.862366 | 251 | 0.559508 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/rtc/test.py | import omni
import importlib
import carb.settings
class RTCTest():
def __init__(self):
manager = omni.kit.app.get_app().get_extension_manager()
self._webrtc_was_enabled = manager.is_extension_enabled("omni.services.streamclient.webrtc")
if not self._webrtc_was_enabled:
manager.set_extension_enabled_immediate("omni.services.streamclient.webrtc", True)
# self._webrtc_api = importlib.import_module("omni.physics.tensors")
def test_main(self):
from omni.services.client import AsyncClient
from omni.services.streamclient.webrtc.services.browser_frontend import example_page, redirect_url, router_prefix
frontend_port = carb.settings.get_settings().get_as_int("exts/omni.services.transport.server.http/port")
frontend_prefix = f"http://localhost:{frontend_port}{router_prefix}"
self._redirect_page_path = f"{frontend_prefix}{example_page}"
self._client_page_path = f"{frontend_prefix}{redirect_url}"
print("frontend_port", frontend_port)
print("frontend_prefix", frontend_prefix)
print("self._redirect_page_path", self._redirect_page_path)
print("self._client_page_path", self._client_page_path) | 1,230 | Python | 46.346152 | 121 | 0.692683 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/vision_helper.py | # send message to Kinova Server to control the real robot
try:
import cv2
except ImportError:
# omni.kit.pipapi extension is required
import omni.kit.pipapi
# It wraps `pip install` calls and reroutes package installation into user specified environment folder.
# That folder is added to sys.path.
# Note: This call is blocking and slow. It is meant to be used for debugging, development. For final product packages
# should be installed at build-time and packaged inside extensions.
omni.kit.pipapi.install(
package="opencv-python",
)
import cv2
from PIL import Image
import requests
import base64
import os
import omni.usd
import carb
from pxr import Gf, UsdGeom
import omni.timeline
import omni.graph.core as og
from omni.physx import get_physx_scene_query_interface
from omni.debugdraw import get_debug_draw_interface
CX = 1920/2 # principal point x
CY = 1080/2 # principal point y
class VisionHelper():
def __init__(self,
vision_url: str,
vision_folder:str,
camera_prim_path = "/OmniverseKit_Persp",
vision_model = "dino") -> None:
# vision
self.vision_url = vision_url
self.vision_folder = vision_folder
self.vision_model = vision_model
self.camera_prim_path = camera_prim_path
# stage
self.stage = omni.usd.get_context().get_stage()
def get_prediction_data(self, image_file: str, object_name: str):
"""
Get bounding box data from the Gradio server
"""
# Set the request payload
with open(image_file, "rb") as f:
encoded_string = base64.b64encode(f.read())
data_url = "data:image/png;base64," + encoded_string.decode("utf-8")
payload = {
"data": [
data_url, object_name
]
}
# Send the request to the Gradio server
response = requests.post(self.vision_url, json=payload)
# Get the response data as a Python object
response_data = response.json()
# Print the response data
# print(response_data)
return response_data
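    @staticmethod
    def _example_prediction_usage():
        """Hedged usage sketch, not called by the extension: assumes a
        FastSAM/DINO Gradio server on the default local port and a test
        image on disk; the response layout mirrors the sample recorded in
        the extension's draw_vision2 debug handler."""
        import json
        helper = VisionHelper(vision_url="http://127.0.0.1:7860/run/predict",
                              vision_folder="I:\\Temp")
        response = helper.get_prediction_data("I:\\Temp\\0.jpg", "grey tea tower")
        # response["data"][0] is a JSON-encoded contour, e.g. [[[736, 113]], ...]
        return json.loads(response["data"][0])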
def get_image_from_webcam(self, image_name = "0.jpg"):
"""
Get image from webcam
"""
cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if not ret:
            cap.release()
            raise RuntimeError("Failed to read a frame from the webcam")
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame, 'RGB')
image.save(self.vision_folder + f"/{image_name}")
print("Image saved at path: " + self.vision_folder + f"/{image_name}")
cap.release()
    def obtain_camera_transform(self, camera_path: str):
        """
        Cache the camera's local-to-world transform for later ray casting
        """
        camera_prim = omni.usd.get_context().get_stage().GetPrimAtPath(camera_path)
        xformable = UsdGeom.Xformable(camera_prim)
        self.camera_mat = xformable.ComputeLocalToWorldTransform(0)
def get_world_direction_from_camera_point(self, x, y, fx, fy):
"""
Get world direction from camera point
"""
# camera_point = Gf.Vec3d(x, y, 1)
# K = Gf.Matrix3d(fx, 0, 0, 0, fy, 0, CX, CY, 1)
# K_inverse = K.GetInverse()
Z = -1
R = self.camera_mat.ExtractRotationMatrix()
R_inverse = R.GetInverse()
# world_point = (camera_point * K_inverse - t) * R_inverse
D = Gf.Vec3d((CX - x) * Z / fx, (CY - y) * Z / fy, Z)
world_direction = R_inverse * D
return world_direction
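    def _example_center_ray(self):
        """Hedged sanity check, not called anywhere: the principal point
        (CX, CY) should map straight down the camera's viewing axis, since
        both pixel offsets vanish and D reduces to (0, 0, -1) in camera space."""
        self.obtain_camera_transform(self.camera_prim_path)
        direction = self.get_world_direction_from_camera_point(CX, CY, 1000.0, 1000.0)
        return direction.GetNormalized()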
def draw_debug_line(self, origin, direction, length = 1, node_path = "/World/PushGraph/make_array"):
"""
Draw debug line
"""
make_array_node = og.Controller.node(node_path)
if make_array_node.is_valid():
# print("draw debug line")
origin_attribute = make_array_node.get_attribute("inputs:input0")
target_attribute = make_array_node.get_attribute("inputs:input1")
size_attribute = make_array_node.get_attribute("inputs:arraySize")
# attr_value = og.Controller.get(attribute)
og.Controller.set(size_attribute, 2)
og.Controller.set(origin_attribute, [origin[0], origin[1], origin[2]])
og.Controller.set(target_attribute, [direction[0] * length + origin[0], direction[1] * length + origin[1], direction[2] * length + origin[2]])
# print("attr:", attr_value)
    def get_hit_position(self, origin, direction, target_prim_path = "/World/Desk"):
        """
        Get the raycast hit position on the target prim
        note: should be called while the timeline is playing
        """
        timeline = omni.timeline.get_timeline_interface()
        assert timeline.is_playing(), "timeline is not playing"
        hit_position = None
        def report_all_hits(hit):
            nonlocal hit_position
            usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
            print("hit:", hit.rigid_body, usdGeom.GetPrim().GetPath(), hit.position, hit.normal, hit.distance, hit.face_index)
            if usdGeom.GetPrim().GetPath().pathString == target_prim_path:
                hit_position = hit.position
            return True  # keep iterating so raycast_all reports every hit
        t = carb.Float3(origin[0], origin[1], origin[2])
        d = carb.Float3(direction[0], direction[1], direction[2])
        # print("t:", t, "d:", d)
        get_physx_scene_query_interface().raycast_all(t, d, 100.0, report_all_hits)
        return hit_position
############################################# action #############################################
def capture_image(self, folder_path = "I:\\Temp\\VisionTest", image_name = "test"):
from omni.kit.capture.viewport import CaptureOptions, CaptureExtension
options = CaptureOptions()
options.file_name = image_name
options.file_type = ".png"
options.output_folder = str(folder_path)
options.camera = self.camera_prim_path
        if not os.path.exists(options.output_folder):
            os.makedirs(options.output_folder)
images = os.listdir(options.output_folder)
for item in images:
if item.endswith(options.file_type) and item.startswith(options.file_name):
os.remove(os.path.join(options.output_folder, item))
capture_instance = CaptureExtension().get_instance()
capture_instance.options = options
capture_instance.start() | 6,362 | Python | 36.650887 | 154 | 0.595253 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/utils.py | import numpy as np
import cv2
from pxr import Gf
BOX_SIZE = [0.071, 0.0965, 0.1198] # box edge lengths in meters
def find_bottom_point(points):
"""
Find the bottom point from a list of points
"""
bottom_point = points[0]
for point in points:
if point[1] > bottom_point[1]:
bottom_point = point
return bottom_point
def find_left_point(points):
"""
Find the left point from a list of points
"""
left_point = points[0]
for point in points:
if point[0] < left_point[0]:
left_point = point
return left_point
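def _example_extreme_points():
    """Hedged sketch, not called by the extension: image coordinates grow
    downward, so the "bottom" point has the largest y value. The sample
    points are made up."""
    pts = np.array([[10, 5], [3, 8], [6, 20]])
    assert (find_bottom_point(pts) == np.array([6, 20])).all()
    assert (find_left_point(pts) == np.array([3, 8])).all()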
def get_projection(point, direction, z):
"""
Get projection
"""
t = (z - point[2]) / direction[2]
x = point[0] + direction[0] * t
y = point[1] + direction[1] * t
return np.array((x, y, z))
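def _example_projection():
    """Hedged sketch, not called by the extension: intersect a camera ray
    with the table plane z = 0. The position and direction are assumptions."""
    camera_position = np.array([0.0, 0.0, 1.0])
    ray_direction = np.array([0.0, 0.5, -1.0])
    hit = get_projection(camera_position, ray_direction, 0.0)
    # expect [0.0, 0.5, 0.0]: t = 1, so the ray advances 0.5 m in y while dropping 1 m in z
    return hit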
def get_box_transform_from_point(camera_position, bottom_direction, left_direction, affordance_z = 0):
"""
Get box points
"""
bottom_point = get_projection(camera_position, bottom_direction, affordance_z)
left_point = get_projection(camera_position, left_direction, affordance_z)
distance = np.linalg.norm(bottom_point - left_point)
closest_value = min(BOX_SIZE,key=lambda x:abs(x-distance))
print("distance: ", distance, bottom_point, left_point, "\n close to: ", closest_value)
direction = left_point - bottom_point
direction = direction / np.linalg.norm(direction)
direction = Gf.Vec3d(direction[0], direction[1], direction[2])
print("direction: ", direction)
# determine the box rotation
if closest_value == BOX_SIZE[0]:
direction_r = np.array([direction[1], -direction[0], 0])
right_point = bottom_point + direction_r * BOX_SIZE[1]
center_point = (left_point + right_point) / 2
rotation = Gf.Rotation(Gf.Vec3d(0, -1, 0), direction)
elif closest_value == BOX_SIZE[1]:
direction_r = np.array([direction[1], -direction[0], 0])
right_point = bottom_point + direction_r * BOX_SIZE[0]
center_point = (left_point + right_point) / 2
rotation = Gf.Rotation(Gf.Vec3d(-1, 0, 0), direction)
else:
center_point = (left_point + bottom_point) / 2
from_direction = Gf.Vec3d([BOX_SIZE[1], -BOX_SIZE[1], 0]).GetNormalized()
rotation = Gf.Rotation(from_direction, direction)
return center_point, rotation.GetQuat()
| 2,375 | Python | 32 | 102 | 0.620632 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/vision/gpt/chatgpt_apiconnect.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import omni.usd
import carb
import os
import aiohttp
import asyncio
from pxr import Sdf
from .prompts import system_input, user_input, assistant_input
from .deep_search import query_items
from .item_generator import place_greyboxes, place_deepsearch_results
async def chatGPT_call(prompt: str):
    # Load the API key from the extension's persistent carb setting
settings = carb.settings.get_settings()
apikey = settings.get_as_string("/persistent/exts/omni.example.airoomgenerator/APIKey")
my_prompt = prompt.replace("\n", " ")
    # Send a request to the API
try:
parameters = {
"model": "gpt-3.5-turbo",
"messages": [
{"role": "system", "content": system_input},
{"role": "user", "content": user_input},
{"role": "assistant", "content": assistant_input},
{"role": "user", "content": my_prompt}
]
}
chatgpt_url = "https://api.openai.com/v1/chat/completions"
headers = {"Authorization": "Bearer %s" % apikey}
# Create a completion using the chatGPT model
async with aiohttp.ClientSession() as session:
async with session.post(chatgpt_url, headers=headers, json=parameters) as r:
response = await r.json()
text = response["choices"][0]["message"]['content']
    except Exception as e:
        carb.log_error(f"An error has occurred: {e}")
        return None, str(e)
# Parse data that was given from API
try:
#convert string to object
data = json.loads(text)
except ValueError as e:
carb.log_error(f"Exception occurred: {e}")
return None, text
else:
# Get area_objects_list
object_list = data['area_objects_list']
return object_list, text
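def example_chatgpt_call(prompt: str = "a cozy reading corner with two chairs"):
    """Hedged usage sketch, not wired into any extension UI: runs the async
    helper from a plain script. Assumes no event loop is already running
    (inside Kit you would schedule it with asyncio.ensure_future instead)
    and that a valid API key is stored in the persistent carb setting above."""
    object_list, raw_text = asyncio.run(chatGPT_call(prompt))
    return object_list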
| 2,548 | Python | 35.942028 | 98 | 0.648744 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/ui/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["julia_modeler_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
ATTR_LABEL_WIDTH = 150
BLOCK_HEIGHT = 22
TAIL_WIDTH = 35
WIN_WIDTH = 400
WIN_HEIGHT = 930
# Pre-defined constants. It's possible to change them at runtime.
cl_window_bg_color = cl(0.2, 0.2, 0.2, 1.0)
cl_window_title_text = cl(.9, .9, .9, .9)
cl_collapsible_header_text = cl(.8, .8, .8, .8)
cl_collapsible_header_text_hover = cl(.95, .95, .95, 1.0)
cl_main_attr_label_text = cl(.65, .65, .65, 1.0)
cl_main_attr_label_text_hover = cl(.9, .9, .9, 1.0)
cl_multifield_label_text = cl(.65, .65, .65, 1.0)
cl_combobox_label_text = cl(.65, .65, .65, 1.0)
cl_field_bg = cl(0.18, 0.18, 0.18, 1.0)
cl_field_border = cl(1.0, 1.0, 1.0, 0.2)
cl_btn_border = cl(1.0, 1.0, 1.0, 0.4)
cl_slider_fill = cl(1.0, 1.0, 1.0, 0.3)
cl_revert_arrow_enabled = cl(.25, .5, .75, 1.0)
cl_revert_arrow_disabled = cl(.35, .35, .35, 1.0)
cl_transparent = cl(0, 0, 0, 0)
fl_main_label_attr_hspacing = 10
fl_attr_label_v_spacing = 3
fl_collapsable_group_spacing = 2
fl_outer_frame_padding = 15
fl_tail_icon_width = 15
fl_border_radius = 3
fl_border_width = 1
fl_window_title_font_size = 18
fl_field_text_font_size = 14
fl_main_label_font_size = 14
fl_multi_attr_label_font_size = 14
fl_radio_group_font_size = 14
fl_collapsable_header_font_size = 13
fl_range_text_size = 10
url_closed_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/closed.svg"
url_open_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/opened.svg"
url_revert_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/revert_arrow.svg"
url_checkbox_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_on.svg"
url_checkbox_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_off.svg"
url_radio_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_on.svg"
url_radio_btn_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_off.svg"
url_diag_bg_lines_texture = f"{EXTENSION_FOLDER_PATH}/icons/diagonal_texture_screenshot.png"
# D:\DBL\Robots\robot-exts-control\exts\control\icons\diagonal_texture_screenshot.png
print("url_revert_arrow_icon: ", EXTENSION_FOLDER_PATH, "-", url_revert_arrow_icon)
# The main style dict
julia_modeler_style = {
"Button::tool_button": {
"background_color": cl_field_bg,
"margin_height": 0,
"margin_width": 6,
"border_color": cl_btn_border,
"border_width": fl_border_width,
"font_size": fl_field_text_font_size,
},
"CollapsableFrame::group": {
"margin_height": fl_collapsable_group_spacing,
"background_color": cl_transparent,
},
# TODO: For some reason this ColorWidget style doesn't respond much, if at all (ie, border_radius, corner_flag)
"ColorWidget": {
"border_radius": fl_border_radius,
"border_color": cl(0.0, 0.0, 0.0, 0.0),
},
"Field": {
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"border_color": cl_field_border,
"border_width": fl_border_width,
},
"Field::attr_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": 2, # fl_field_text_font_size, # Hack to allow for a smaller field border until field padding works
},
"Field::attribute_color": {
"font_size": fl_field_text_font_size,
},
"Field::multi_attr_field": {
"padding": 4, # TODO: Hacky until we get padding fix
"font_size": fl_field_text_font_size,
},
"Field::path_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": fl_field_text_font_size,
},
"HeaderLine": {"color": cl(.5, .5, .5, .5)},
"Image::collapsable_opened": {
"color": cl_collapsible_header_text,
"image_url": url_open_arrow_icon,
},
"Image::collapsable_opened:hovered": {
"color": cl_collapsible_header_text_hover,
"image_url": url_open_arrow_icon,
},
"Image::collapsable_closed": {
"color": cl_collapsible_header_text,
"image_url": url_closed_arrow_icon,
},
"Image::collapsable_closed:hovered": {
"color": cl_collapsible_header_text_hover,
"image_url": url_closed_arrow_icon,
},
"Image::radio_on": {"image_url": url_radio_btn_on_icon},
"Image::radio_off": {"image_url": url_radio_btn_off_icon},
"Image::revert_arrow": {
"image_url": url_revert_arrow_icon,
"color": cl_revert_arrow_enabled,
},
"Image::revert_arrow:disabled": {"color": cl_revert_arrow_disabled},
"Image::checked": {"image_url": url_checkbox_on_icon},
"Image::unchecked": {"image_url": url_checkbox_off_icon},
"Image::slider_bg_texture": {
"image_url": url_diag_bg_lines_texture,
"border_radius": fl_border_radius,
"corner_flag": ui.CornerFlag.LEFT,
},
"Label::attribute_name": {
"alignment": ui.Alignment.RIGHT_TOP,
"margin_height": fl_attr_label_v_spacing,
"margin_width": fl_main_label_attr_hspacing,
"color": cl_main_attr_label_text,
"font_size": fl_main_label_font_size,
},
"Label::attribute_name:hovered": {"color": cl_main_attr_label_text_hover},
"Label::collapsable_name": {"font_size": fl_collapsable_header_font_size},
"Label::multi_attr_label": {
"color": cl_multifield_label_text,
"font_size": fl_multi_attr_label_font_size,
},
"Label::radio_group_name": {
"font_size": fl_radio_group_font_size,
"alignment": ui.Alignment.CENTER,
"color": cl_main_attr_label_text,
},
"Label::range_text": {
"font_size": fl_range_text_size,
},
"Label::window_title": {
"font_size": fl_window_title_font_size,
"color": cl_window_title_text,
},
"ScrollingFrame::window_bg": {
"background_color": cl_window_bg_color,
"padding": fl_outer_frame_padding,
"border_radius": 20 # Not obvious in a window, but more visible with only a frame
},
"Slider::attr_slider": {
"draw_mode": ui.SliderDrawMode.FILLED,
"padding": 0,
"color": cl_transparent,
# Meant to be transparent, but completely transparent shows opaque black instead.
"background_color": cl(0.28, 0.28, 0.28, 0.01),
"secondary_color": cl_slider_fill,
"border_radius": fl_border_radius,
"corner_flag": ui.CornerFlag.LEFT, # TODO: Not actually working yet OM-53727
},
# Combobox workarounds
"Rectangle::combobox": { # TODO: remove when ComboBox can have a border
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"border_color": cl_btn_border,
"border_width": fl_border_width,
},
"ComboBox::dropdown_menu": {
"color": cl_combobox_label_text, # label color
"padding_height": 1.25,
"margin": 2,
"background_color": cl_field_bg,
"border_radius": fl_border_radius,
"font_size": fl_field_text_font_size,
"secondary_color": cl_transparent, # button background color
},
"Rectangle::combobox_icon_cover": {"background_color": cl_field_bg}
}
| 7,702 | Python | 37.323383 | 121 | 0.636588 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/numpy_utils.py | import numpy as np
import math
def quat_to_euler_angles(q, degrees: bool = False):
"""Convert quaternion to Euler XYZ angles.
Args:
q (np.ndarray): quaternion (w, x, y, z).
degrees (bool, optional): Whether output angles are in degrees. Defaults to False.
Returns:
np.ndarray: Euler XYZ angles.
"""
q = q.reshape(-1, 4)
w, x, y, z = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
roll = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
pitch = np.arcsin(2 * (w * y - z * x))
yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
if degrees:
roll = np.degrees(roll)
pitch = np.degrees(pitch)
yaw = np.degrees(yaw)
return np.stack([roll, pitch, yaw], axis=-1)
def euler_angles_to_quat(euler_angles: np.ndarray, degrees: bool = False) -> np.ndarray:
"""Convert Euler XYZ angles to quaternion.
Args:
euler_angles (np.ndarray): Euler XYZ angles.
degrees (bool, optional): Whether input angles are in degrees. Defaults to False.
Returns:
np.ndarray: quaternion (w, x, y, z).
"""
roll, pitch, yaw = euler_angles
if degrees:
roll = math.radians(roll)
pitch = math.radians(pitch)
yaw = math.radians(yaw)
cr = np.cos(roll / 2.0)
sr = np.sin(roll / 2.0)
cy = np.cos(yaw / 2.0)
sy = np.sin(yaw / 2.0)
cp = np.cos(pitch / 2.0)
sp = np.sin(pitch / 2.0)
w = (cr * cp * cy) + (sr * sp * sy)
x = (sr * cp * cy) - (cr * sp * sy)
y = (cr * sp * cy) + (sr * cp * sy)
z = (cr * cp * sy) - (sr * sp * cy)
return np.array([w, x, y, z])
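def _example_euler_quat_roundtrip():
    """Hedged sanity check, not called in production: Euler XYZ -> quaternion
    -> Euler should round-trip while the pitch stays inside (-90, 90) degrees."""
    euler = np.array([30.0, -45.0, 60.0])
    q = euler_angles_to_quat(euler, degrees=True)         # (w, x, y, z)
    recovered = quat_to_euler_angles(q, degrees=True)[0]  # back to XYZ
    assert np.allclose(euler, recovered, atol=1e-5)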
# NOTE: unlike the Euler helpers above, which use (w, x, y, z), the batched
# quaternion helpers below follow the (x, y, z, w) layout used by Isaac Gym-style code.
def orientation_error(desired, current):
cc = quat_conjugate(current)
q_r = quat_mul(desired, cc)
return q_r[:, 0:3] * np.sign(q_r[:, 3])[:, None]
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = np.stack([x, y, z, w], axis=-1).reshape(shape)
return quat
def normalize(x, eps: float = 1e-9):
return x / np.clip(np.linalg.norm(x, axis=-1), a_min=eps, a_max=None)[:, None]
def quat_unit(a):
return normalize(a)
def quat_from_angle_axis(angle, axis):
theta = (angle / 2)[:, None]
xyz = normalize(axis) * np.sin(theta)
w = np.cos(theta)
return quat_unit(np.concatenate([xyz, w], axis=-1))
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0)[:, None]
b = np.cross(q_vec, v) * q_w[:, None] * 2.0
c = q_vec * np.sum(q_vec * v, axis=1).reshape(shape[0], -1) * 2.0
return a + b + c
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape)
def quat_axis(q, axis=0):
basis_vec = np.zeros((q.shape[0], 3))
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
| 3,329 | Python | 27.706896 | 90 | 0.50766 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova_gripper.py | from typing import List, Callable
import numpy as np
from omni.isaac.manipulators.grippers.gripper import Gripper
from omni.isaac.core.utils.types import ArticulationAction
import omni.kit.app
class KinovaGripper(Gripper):
def __init__(
self,
end_effector_prim_path: str,
joint_prim_names: List[str],
joint_opened_positions: np.ndarray,
joint_closed_positions: np.ndarray,
action_deltas: np.ndarray = None,
) -> None:
Gripper.__init__(self, end_effector_prim_path=end_effector_prim_path)
self._joint_prim_names = joint_prim_names
self._gripper_joint_num = 6
self._joint_dof_indicies = np.array([None] * self._gripper_joint_num)
self._joint_opened_positions = joint_opened_positions
self._joint_closed_positions = joint_closed_positions
self._get_joint_positions_func = None
self._set_joint_positions_func = None
self._action_deltas = action_deltas
self._articulation_num_dofs = None
self._close_ratio = 1.0
return
    @property
    def joint_opened_positions(self) -> np.ndarray:
        """
        Returns:
            np.ndarray: positions of the six gripper joints when the gripper is opened.
        """
        return self._joint_opened_positions
    @property
    def joint_closed_positions(self) -> np.ndarray:
        """
        Returns:
            np.ndarray: positions of the six gripper joints when the gripper is closed.
        """
        return self._joint_closed_positions
    @property
    def joint_dof_indicies(self) -> np.ndarray:
        """
        Returns:
            np.ndarray: dof indices of the six gripper joints in the articulation.
        """
        return self._joint_dof_indicies
    @property
    def joint_prim_names(self) -> List[str]:
        """
        Returns:
            List[str]: prim names of the six gripper joints.
        """
        return self._joint_prim_names
def initialize(
self,
articulation_apply_action_func: Callable,
get_joint_positions_func: Callable,
set_joint_positions_func: Callable,
dof_names: List,
physics_sim_view: omni.physics.tensors.SimulationView = None,
) -> None:
"""Create a physics simulation view if not passed and creates a rigid prim view using physX tensor api.
This needs to be called after each hard reset (i.e stop + play on the timeline) before interacting with any
of the functions of this class.
Args:
articulation_apply_action_func (Callable): apply_action function from the Articulation class.
get_joint_positions_func (Callable): get_joint_positions function from the Articulation class.
set_joint_positions_func (Callable): set_joint_positions function from the Articulation class.
dof_names (List): dof names from the Articulation class.
physics_sim_view (omni.physics.tensors.SimulationView, optional): current physics simulation view. Defaults to None
        Raises:
            Exception: if any gripper dof name cannot be resolved to a dof index.
        """
Gripper.initialize(self, physics_sim_view=physics_sim_view)
self._get_joint_positions_func = get_joint_positions_func
self._articulation_num_dofs = len(dof_names)
for index in range(len(dof_names)):
if dof_names[index] in self._joint_prim_names:
which_index = self._joint_prim_names.index(dof_names[index])
self._joint_dof_indicies[which_index] = index
# make sure that all gripper dof names were resolved
if None in self._joint_dof_indicies:
raise Exception("Not all gripper dof names were resolved to dof handles and dof indices.")
self._articulation_apply_action_func = articulation_apply_action_func
current_joint_positions = get_joint_positions_func()
if self._default_state is None:
self._default_state = np.array(
[0.0] * self._gripper_joint_num
)
self._set_joint_positions_func = set_joint_positions_func
return
def open(self) -> None:
"""Applies actions to the articulation that opens the gripper (ex: to release an object held).
"""
self._articulation_apply_action_func(self.forward(action="open"))
return
def close(self) -> None:
"""Applies actions to the articulation that closes the gripper (ex: to hold an object).
"""
self._articulation_apply_action_func(self.forward(action="close"))
return
    def set_action_deltas(self, value: np.ndarray) -> None:
        """
        Args:
            value (np.ndarray): per-joint position deltas to apply when opening
                or closing the gripper. Defaults to None.
        """
        self._action_deltas = value
        return
    def get_action_deltas(self) -> np.ndarray:
        """
        Returns:
            np.ndarray: per-joint position deltas applied when opening or
                closing the gripper. Defaults to None.
        """
        return self._action_deltas
    def set_default_state(self, joint_positions: np.ndarray) -> None:
        """Sets the default state of the gripper
        Args:
            joint_positions (np.ndarray): default positions of the six gripper joints.
        """
        self._default_state = joint_positions
        return
    def get_default_state(self) -> np.ndarray:
        """Gets the default state of the gripper
        Returns:
            np.ndarray: default positions of the six gripper joints.
        """
        return self._default_state
def post_reset(self):
Gripper.post_reset(self)
self._set_joint_positions_func(
positions=self._default_state, joint_indices=list(self._joint_dof_indicies)
)
return
    def set_joint_positions(self, positions: np.ndarray) -> None:
        """
        Args:
            positions (np.ndarray): target positions of the six gripper joints.
        """
        self._set_joint_positions_func(
            positions=positions, joint_indices=list(self._joint_dof_indicies)
        )
        return
    def get_joint_positions(self) -> np.ndarray:
        """
        Returns:
            np.ndarray: current positions of the six gripper joints.
        """
        return self._get_joint_positions_func(joint_indices=list(self._joint_dof_indicies))
def forward(self, action: str) -> ArticulationAction:
"""calculates the ArticulationAction for all of the articulation joints that corresponds to "open"
or "close" actions.
Args:
action (str): "open" or "close" as an abstract action.
Raises:
Exception: _description_
Returns:
ArticulationAction: articulation action to be passed to the articulation itself
(includes all joints of the articulation).
"""
if action == "open":
target_joint_positions = [None] * self._articulation_num_dofs
if self._action_deltas is None:
for i in range(self._gripper_joint_num):
target_joint_positions[self._joint_dof_indicies[i]] = self._joint_opened_positions[i]
else:
current_joint_positions = self._get_joint_positions_func()
for i in range(self._gripper_joint_num):
current_finger_position = current_joint_positions[self._joint_dof_indicies[i]]
next_position = self.regulate_joint_position(
current_finger_position + self._action_deltas[i],
self._joint_opened_positions[i],
self._joint_closed_positions[i]
)
target_joint_positions[self._joint_dof_indicies[i]] = (
next_position
)
elif action == "close":
target_joint_positions = [None] * self._articulation_num_dofs
if self._action_deltas is None:
for i in range(self._gripper_joint_num):
target_joint_positions[self._joint_dof_indicies[i]] = self._joint_closed_positions[i] * self._close_ratio
else:
current_joint_positions = self._get_joint_positions_func()
for i in range(self._gripper_joint_num):
current_finger_position = current_joint_positions[self._joint_dof_indicies[i]]
next_position = self.regulate_joint_position(
current_finger_position - self._action_deltas[i],
self._joint_opened_positions[i],
self._joint_closed_positions[i]
)
target_joint_positions[self._joint_dof_indicies[i]] = (
next_position
)
else:
raise Exception("action {} is not defined for ParallelGripper".format(action))
# print("target_joint_positions", target_joint_positions)
return ArticulationAction(joint_positions=target_joint_positions)
def regulate_joint_position(self, joint_pos, open_pos, close_pos):
"""
Regulates the joint position to be within the range of the open and close positions.
"""
if open_pos > close_pos:
open_pos, close_pos = close_pos, open_pos
if joint_pos < open_pos:
joint_pos = open_pos
elif joint_pos > close_pos:
joint_pos = close_pos
return joint_pos
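    # Hedged example with this repo's Robotiq values (opened 0.0, closed
    # 0.8757): regulate_joint_position(1.0, 0.0, 0.8757) clamps to 0.8757
    # and regulate_joint_position(-0.1, 0.0, 0.8757) clamps to 0.0.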
def apply_action(self, control_actions: ArticulationAction) -> None:
"""Applies actions to all the joints of an articulation that corresponds to the ArticulationAction of the finger joints only.
Args:
control_actions (ArticulationAction): ArticulationAction for the left finger joint and the right finger joint respectively.
"""
joint_actions = ArticulationAction()
if control_actions.joint_positions is not None:
joint_actions.joint_positions = [None] * self._articulation_num_dofs
for i in range(self._gripper_joint_num):
joint_actions.joint_positions[self._joint_dof_indicies[i]] = control_actions.joint_positions[i]
# if control_actions.joint_velocities is not None:
# joint_actions.joint_velocities = [None] * self._articulation_num_dofs
# joint_actions.joint_velocities[self._joint_dof_indicies[0]] = control_actions.joint_velocities[0]
# joint_actions.joint_velocities[self._joint_dof_indicies[1]] = control_actions.joint_velocities[1]
# if control_actions.joint_efforts is not None:
# joint_actions.joint_efforts = [None] * self._articulation_num_dofs
# joint_actions.joint_efforts[self._joint_dof_indicies[0]] = control_actions.joint_efforts[0]
# joint_actions.joint_efforts[self._joint_dof_indicies[1]] = control_actions.joint_efforts[1]
self._articulation_apply_action_func(control_actions=joint_actions)
return
def set_close_ratio(self, ratio):
"""
Sets the ratio of the closed position of the gripper.
"""
self._close_ratio = ratio | 11,753 | Python | 42.372694 | 135 | 0.610568 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova_socket.py | # send message to Kinova Server to control the real robot
import socket
class KinovaClient():
def __init__(self, HOST = "localhost", PORT = 9999) -> None:
# SOCK_DGRAM is the socket type to use for UDP sockets
self.host = HOST
self.port = PORT
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# send a test message to the server
message = "Hello: server!"
self.sock.sendto(message.encode(), (self.host, self.port))
self.sock.settimeout(10)
# wait for a response from the server
data, addr = self.sock.recvfrom(1024)
print("Socket Server and Client initialized")
# # check if the response is correct
# if data.decode() == "Hello, client!":
# print("Connected to UDPServer")
# else:
# print("Failed to connect to UDPServer")
def send_message(self, command: str, message: str):
print("Sent: {}".format(message))
self.sock.sendto(bytes(command + ":" + message + "\n", "utf-8"), (self.host, self.port))
received = str(self.sock.recv(1024), "utf-8")
print("received: {}".format(received))
return received | 1,222 | Python | 36.060605 | 96 | 0.590835 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/utils.py | import os
import json
import numpy as np
from pathlib import Path
import omni.kit.app
import omni.usd
from pxr import UsdGeom, Gf, Usd, UsdPhysics
EXTENSION_FOLDER_PATH = str(Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
).resolve())
def fix_damping_and_stiffness(kinova_path = "/World/kinova_gen3_7_hand/kinova", stiffness = 1e3, damping = 1e6):
print("fixing damping and stiffness")
# stiffness_name = "drive:angular:physics:stiffness"
# damping_name = "drive:angular:physics:damping"
stage = omni.usd.get_context().get_stage()
joint_prim_paths = [
"/base_link/Actuator1",
"/shoulder_link/Actuator2",
"/half_arm_1_link/Actuator3",
"/half_arm_2_link/Actuator4",
"/forearm_link/Actuator5",
"/spherical_wrist_1_link/Actuator6",
"/spherical_wrist_2_link/Actuator7",
]
for joint_prim_path in joint_prim_paths:
joint_prim = stage.GetPrimAtPath(kinova_path + joint_prim_path)
joint_driver = UsdPhysics.DriveAPI.Get(joint_prim, "angular")
joint_driver.GetStiffnessAttr().Set(stiffness)
joint_driver.GetDampingAttr().Set(damping)
def process_policy_config(mg_config_file):
"""
Process the policy config file to get the absolute path of the assets"""
mp_config_dir = os.path.dirname(mg_config_file)
with open(mg_config_file) as config_file:
config = json.load(config_file)
rel_assets = config.get("relative_asset_paths", {})
for k, v in rel_assets.items():
config[k] = os.path.join(mp_config_dir, v)
del config["relative_asset_paths"]
return config
def regulate_degree(degree: float, min_value: float = 0, max_value: float = 360, indegree: bool = True):
"""
Regulate the degree to be in the range of [min_value, max_value]"""
if not indegree:
degree = np.rad2deg(degree)
if degree < min_value:
degree += 360
elif degree > max_value:
degree -= 360
return degree
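def _example_regulate_degree():
    """Hedged sketch, not called by the extension: regulate_degree shifts an
    out-of-range angle by one full turn (it does not loop), optionally
    converting from radians first."""
    assert regulate_degree(-30.0) == 330.0
    assert np.isclose(regulate_degree(np.deg2rad(400.0), indegree=False), 40.0)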
def get_transform_mat_from_pos_rot(p, q):
"""
Get transform matrix from position and rotation
"""
trans = Gf.Transform()
rotation = Gf.Rotation(Gf.Quatd(float(q[0]), float(q[1]), float(q[2]), float(q[3])))
trans.SetRotation(rotation)
trans.SetTranslation(Gf.Vec3d(float(p[0]), float(p[1]), float(p[2])))
return trans.GetMatrix()
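def _example_compose_offset():
    """Hedged sketch, not called by the extension, of how CoffeeMakerController
    composes poses: an offset expressed in a prim's frame multiplied by the
    prim's world matrix yields the world-space target. The numbers are made up."""
    base_mat = get_transform_mat_from_pos_rot([0.5, 0.0, 0.2], [1, 0, 0, 0])
    offset_mat = get_transform_mat_from_pos_rot([0.0, -0.3, 0.0], [1, 0, 0, 0])
    target_mat = offset_mat * base_mat  # Gf matrices compose row-vector style, offset first
    return target_mat.ExtractTranslation()  # Gf.Vec3d(0.5, -0.3, 0.2)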
def get_prim_pickup_transform(stage, prim_path: str, offset: Gf.Vec3d):
"""
Get the pickup transform of the prim with offset"""
prim = stage.GetPrimAtPath(prim_path)
xformable = UsdGeom.Xformable(prim)
mat0 = xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
target_pos = mat0.ExtractTranslation()
xaxis = -offset / offset.GetLength()
yaxis = Gf.Cross(Gf.Vec3d(0, 0, 1), xaxis)
m = Gf.Matrix4d()
m.SetRow(0, Gf.Vec4d(xaxis[0], yaxis[0], 0, 0.0))
m.SetRow(1, Gf.Vec4d(xaxis[1], yaxis[1], 0, 0.0))
m.SetRow(2, Gf.Vec4d(xaxis[2], yaxis[2], 1, 0.0))
m.SetRow(3, Gf.Vec4d(0.0, 0.0, 0.0, 1.0))
eye_pos = target_pos + offset
m = m * Gf.Matrix4d().SetTranslate(eye_pos)
print("translation: ", eye_pos)
print("rotation: ", m.ExtractRotationQuat())
return eye_pos, m.ExtractRotationQuat()
def generate_slerp_action_sequence(ori_pos, ori_quat, rel_rot,
                                   sub_steps = 5, sub_duration = 50,
                                   slerp_last = True, slerp_offset = (0, 0, 0)):
"""
Generate slerp action sequence from relative position and rotation
"""
slerp_action_sequence = []
ori_pos = Gf.Vec3d(ori_pos[0], ori_pos[1], ori_pos[2])
rel_quat = Gf.Quatd(float(rel_rot[0]), float(rel_rot[1]), float(rel_rot[2]), float(rel_rot[3])).GetNormalized()
ori_quat = Gf.Quatd(float(ori_quat[0]), float(ori_quat[1]), float(ori_quat[2]), float(ori_quat[3])).GetNormalized()
identity_quat = Gf.Quatd(1, 0, 0, 0)
for i in range(1, sub_steps):
t = (i + int(slerp_last)) / sub_steps
quat_rel = Gf.Slerp(t, identity_quat, rel_quat)
p = (quat_rel * Gf.Quatd(0, ori_pos + Gf.Vec3d(*slerp_offset) * i) * quat_rel.GetInverse()).GetImaginary()
q = quat_rel * ori_quat
slerp_action_sequence.append(
{
'action_type': 'move',
'duration': sub_duration,
'position': [p[0], p[1], p[2]],
'orientation': [q.GetReal(), q.GetImaginary()[0], q.GetImaginary()[1], q.GetImaginary()[2]]
},
)
return slerp_action_sequence | 4,652 | Python | 36.224 | 119 | 0.60877 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/kinova.py | from typing import Optional, List
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
import carb
from pxr import UsdPhysics
from .kinova_gripper import KinovaGripper
class Kinova(Robot):
def __init__(
self,
prim_path: str = "/World/kinova",
name: str = "kinova_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
deltas: Optional[np.ndarray] = None,
) -> None:
# self.prim_path = prim_path
prim = get_prim_at_path(prim_path)
assert prim.IsValid(), "Please load Kinova into the environment first"
self._end_effector = None
self._gripper = None
self._end_effector_prim_name = end_effector_prim_name
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
self._end_effector_prim_path = prim_path + "/robotiq_85_base_link"
gripper_dof_names = [
"finger_joint", "right_outer_knuckle_joint",
"left_inner_knuckle_joint", "right_inner_knuckle_joint",
#"left_outer_finger_joint", "right_outer_finger_joint",
"left_inner_finger_joint", "right_inner_finger_joint",
]
gripper_open_position = np.zeros(6)
gripper_closed_position = np.array([0.8757, -0.8757, 0.8757, -0.8757, -0.8757, 0.8757])
deltas = None # -gripper_closed_position / 5.0
self._gripper = KinovaGripper(
end_effector_prim_path=self._end_effector_prim_path,
joint_prim_names=gripper_dof_names,
joint_opened_positions=gripper_open_position,
joint_closed_positions=gripper_closed_position,
action_deltas=deltas,
)
return
@property
def end_effector(self) -> RigidPrim:
"""[summary]
Returns:
RigidPrim: [description]
"""
return self._end_effector
@property
def gripper(self) -> KinovaGripper:
"""[summary]
Returns:
ParallelGripper: [description]
"""
return self._gripper
def initialize(self, physics_sim_view=None) -> None:
"""[summary]
"""
super().initialize(physics_sim_view)
self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector")
self._end_effector.initialize(physics_sim_view)
self._gripper.initialize(
physics_sim_view=physics_sim_view,
articulation_apply_action_func=self.apply_action,
get_joint_positions_func=self.get_joint_positions,
set_joint_positions_func=self.set_joint_positions,
dof_names=self.dof_names,
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
self._gripper.post_reset()
for i in range(self.gripper._gripper_joint_num):
self._articulation_controller.switch_dof_control_mode(
dof_index=self.gripper.joint_dof_indicies[i], mode="position"
)
return
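# Hedged usage sketch (mirrors the extension's set_robot): the Kinova USD must
# already be referenced into the stage and the timeline playing before
# initialize() is called:
#
#   robot = Kinova(prim_path="/World/kinova_gen3_7_hand/kinova")
#   robot.initialize()
#   robot.gripper.close()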
def fix_damping_and_stiffness(prim_path = "/World/kinova_gen3_7_hand/kinova", stiffness = 1e3, damping = 1e6):
print("fixing damping and stiffness")
# stiffness_name = "drive:angular:physics:stiffness"
# damping_name = "drive:angular:physics:damping"
joint_prim_paths = [
"/base_link/Actuator1",
"/shoulder_link/Actuator2",
"/half_arm_1_link/Actuator3",
"/half_arm_2_link/Actuator4",
"/forearm_link/Actuator5",
"/spherical_wrist_1_link/Actuator6",
"/spherical_wrist_2_link/Actuator7",
]
for joint_prim_path in joint_prim_paths:
joint_prim = get_prim_at_path(prim_path + joint_prim_path)
joint_driver = UsdPhysics.DriveAPI.Get(joint_prim, "angular")
joint_driver.GetStiffnessAttr().Set(stiffness)
joint_driver.GetDampingAttr().Set(damping)
| 4,842 | Python | 36.835937 | 116 | 0.584882 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/coffee_config.py | # task config for making coffee
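# Hedged schema notes, inferred from CoffeeMakerController.apply_high_level_action:
#   'base_prim': USD prim whose world pose anchors each step's offsets
#                (None means the robot's own base pose)
#   'steps': ordered dicts with
#       'action_type': 'move' | 'close' | 'open' | 'slerp'
#       'duration':    physics steps reserved for the event
#       'position' / 'orientation' (wxyz): offset pose in the base_prim frame
#       'ratio':       gripper close ratio for 'close' / 'open'
#       slerp only:    'sub_steps', 'relative_rotation', 'slerp_last', 'slerp_offset'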
kinova_action_config = {
"go_home": {
'base_prim': None,
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0.45666, 0.0, 0.43371],
'orientation': [0.5, 0.5, 0.5, 0.5], # wxyz
}
]
},
"open_coffee_machine_handle": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0, -0.4, 0],
'orientation': [-0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.29, 0],
'orientation': [-0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 100,
'ratio': 0.8,
},
# {
# 'action_type': 'slerp',
# 'duration': 300,
# 'sub_steps': 10,
# 'position': [0, -0.28, 0],
# 'orientation': [-0.5, 0.5, 0.5, 0.5],
# 'relative_rotation': [0.7372773, -0.6755902, 0, 0],
# 'slerp_last': True,
# 'slerp_offset': [0, -0.001, 0]
# },
# {
# 'action_type': 'close', #open
# 'duration': 100,
# 'ratio': 0.05,
# },
{
'action_type': 'move',
'duration': 200,
'position': [0.3, -0.5, 0.3],
'orientation': [0, 0.7071, 0.7071, 0],
},
]
},
"close_coffee_machine_handle": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'close', #open
'duration': 30,
'ratio': 0.95,
},
{
'action_type': 'move',
'duration': 200,
'position': [0, 0, 0.27],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'slerp',
'duration': 140,
'sub_steps': 7,
'position': [0, 0, 0.27],
'orientation': [0, 0.7071, 0.7071, 0],
'relative_rotation': [0.7372773, 0.675590, 0, 0],
'slerp_last': False,
'slerp_offset': [0, 0, 0]
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.35],
'orientation': [-0.4545, 0.5416, 0.5416, 0.4545],
},
]
},
"press_coffee_machine_button": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'close', #open
'duration': 30,
'ratio': 1.1,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.09, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 30,
'position': [-0, -0.09, 0.18],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 50,
'position': [-0, -0.09, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
{
'action_type': 'move',
'duration': 30,
'position': [-0, -0.3, 0.2],
'orientation': [0, 0.7071, 0.7071, 0],
},
]
},
"pick_up_capsule": {
'base_prim': '/World/k_cup',
'steps':[
{
'action_type': 'move',
'duration': 300,
'position': [-0.12, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.1],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.03],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 30,
'ratio': 0.6,
},
{
'action_type': 'move',
'duration': 60,
'position': [-0.12, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
]
},
"pick_up_papercup": {
'base_prim': '/World/papercup',
'steps':[
{
'action_type': 'move',
'duration': 300,
'position': [-0.15, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.1],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.00],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'close',
'duration': 60,
'ratio': 0.4,
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.15, 0.0, 0.3],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
]
},
"move_capsule_to_coffee_machine": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.02],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 60,
'position': [0, -0.218, 0.02],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'close', #open
'duration': 60,
'ratio': 0.05,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.3, 0.025],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.2, 0.4],
'orientation': [0, 0, 0.7071, 0.7071],
},
]
},
"move_papercup_to_coffee_machine": {
'base_prim': '/World/Keurig_1_5_add_hold/XformHandle',
'steps':[
{
'action_type': 'move',
'duration': 200,
'position': [0, -0.4, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.25, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'close', #open
'duration': 100,
'ratio': 0.05,
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.4, -0.2],
'orientation': [0, 0, 0.7071, 0.7071],
},
{
'action_type': 'move',
'duration': 100,
'position': [0, -0.4, 0.4],
'orientation': [0, 0, 0.7071, 0.7071],
},
]
},
##################################### blendid #########################################
"pick_up_box": {
'base_prim': '/World/tea_tower',
'steps':[
{
'action_type': 'close', #open
'duration': 50,
'ratio': 0.0,
},
{
'action_type': 'move',
'duration': 200,
'position': [-0.38, 0.0, 0.15],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
{
'action_type': 'move',
'duration': 100,
'position': [-0.28, 0.0, 0.15],
'orientation': [0.5, 0.5, 0.5, 0.5],
},
# {
# 'action_type': 'move',
# 'duration': 100,
# 'position': [-0.15, 0.0, 0.00],
# 'orientation': [0.5, 0.5, 0.5, 0.5],
# },
# {
# 'action_type': 'close',
# 'duration': 60,
# 'ratio': 0.4,
# },
# {
# 'action_type': 'move',
# 'duration': 100,
# 'position': [-0.15, 0.0, 0.3],
# 'orientation': [0.5, 0.5, 0.5, 0.5],
# },
]
},
} | 9,505 | Python | 29.565916 | 91 | 0.312151 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/coffee_controller.py | import omni.usd
from omni.isaac.core.controllers import BaseController
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.core.prims import XFormPrim
from .kinova import Kinova
from .rmpflow_controller import RMPFlowController
import numpy as np
from .numpy_utils import *
from .utils import regulate_degree, get_transform_mat_from_pos_rot, generate_slerp_action_sequence
import asyncio
from .kinova_socket import KinovaClient
from .coffee_config import kinova_action_config
class CoffeeMakerController(BaseController):
def __init__(self, name: str, kinova: Kinova, connect_server = False) -> None:
BaseController.__init__(self, name=name)
# env
self.stage = omni.usd.get_context().get_stage()
# event
self.event = "move" # action event
self.total_event_count = 0 # event time
self.event_elapsed = 0 # event elapsed time
self.event_pool = [] # event pool
self.robot = kinova
self.gripper = self.robot.gripper
self.cs_controller = RMPFlowController(name="cspace_controller", robot_articulation=self.robot)
# TODO:find height
self.ee_pos_target = np.array([0.0, 0.0, 1.0])
self.ee_ori_target = np.array([1.0, 0.0, 0.0, 0.0])
# connection
self.connect_server = connect_server
if connect_server:
self.client = KinovaClient()
self.sending_message = False
# add go home default action
# self.apply_high_level_action()
# self.sending_message = False
def add_event_to_pool(self, event: str, elapsed: int,
ee_pos: np.ndarray, ee_ori: np.ndarray, gripper_ratio: float = 1.0):
self.event_pool.append([event, elapsed, ee_pos, ee_ori, gripper_ratio])
def update_ee_target(self, pos, ori):
"""
Update End-Effector Target position and orientation
"""
self.ee_pos_target = pos
self.ee_ori_target = ori
def update_event(self, event: str):
"""
Update robot high-level event
"""
if event != self.event:
self.event = event
self.total_event_count = 0
################################## sync robot ##################################
def synchronize_robot(self):
"""
Send message to the Server to
"""
if not self.sending_message:
# get joint positions and gripper degree
all_positions = self.robot.get_joint_positions()
            gripper_degree = all_positions[7] / 0.8757  # normalize by the closed finger_joint angle
joint_positions = [regulate_degree(e, indegree=False) for e in all_positions[:7]]
joint_positions = joint_positions + [gripper_degree]
assert len(joint_positions) == 8, "Invalid number of joint positions"
# send message
message = " ".join([str(e) for e in joint_positions])
self.sending_message = True
self.client.send_message("Control", message)
self.sending_message = False
def obtain_robot_state(self):
"""
Get robot state from the Server
"""
if not self.sending_message:
self.sending_message = True
answer_message = self.client.send_message("GetJoints", "NA")
self.sending_message = False
return [float(e) for e in answer_message.split(" ")]
def apply_high_level_action(self, action_name: str = "go_home"):
"""
Apply high-level action to the robot
"""
action = kinova_action_config[action_name]
if action['base_prim'] is None:
base_world_pos, base_world_rot = self.robot.get_world_pose()
else:
base_prim = XFormPrim(action['base_prim'])
base_world_pos, base_world_rot = base_prim.get_world_pose()
base_mat = get_transform_mat_from_pos_rot(base_world_pos, base_world_rot)
print("base_mat", base_mat)
for action_step in action['steps']:
step_type = action_step['action_type']
duration = action_step['duration']
if step_type == "move":
offset_mat = get_transform_mat_from_pos_rot(action_step['position'], action_step['orientation'])
print("offset_mat", offset_mat)
target_mat = offset_mat * base_mat
print("target_mat", target_mat.ExtractTranslation(), target_mat.ExtractRotationQuat())
target_pos = target_mat.ExtractTranslation()
target_rot = target_mat.ExtractRotationQuat()
pos_array = np.array([target_pos[0], target_pos[1], target_pos[2]])
rot_array = np.array([target_rot.GetReal(), target_rot.GetImaginary()[0], target_rot.GetImaginary()[1], target_rot.GetImaginary()[2]])
self.add_event_to_pool(step_type, duration, pos_array, rot_array)
elif step_type in ["close", "open"]:
gripper_ratio = action_step['ratio']
self.add_event_to_pool(step_type, duration, None, None, gripper_ratio)
elif step_type == "slerp":
slerp_action_sequence = generate_slerp_action_sequence(
action_step['position'],
action_step['orientation'],
action_step['relative_rotation'],
sub_steps=action_step['sub_steps'],
sub_duration=action_step['duration'] // action_step['sub_steps'],
slerp_last=action_step['slerp_last'],
slerp_offset=action_step['slerp_offset']
)
print("action_sequence", slerp_action_sequence)
for sub_action in slerp_action_sequence:
offset_mat = get_transform_mat_from_pos_rot(sub_action['position'], sub_action['orientation'])
target_mat = offset_mat * base_mat
target_pos = target_mat.ExtractTranslation()
target_rot = target_mat.ExtractRotationQuat()
pos_array = np.array([target_pos[0], target_pos[1], target_pos[2]])
rot_array = np.array([target_rot.GetReal(), target_rot.GetImaginary()[0], target_rot.GetImaginary()[1], target_rot.GetImaginary()[2]])
self.add_event_to_pool(sub_action['action_type'], sub_action['duration'], pos_array, rot_array)
def forward(self):
"""
Main function to update the robot
"""
# update event
if len(self.event_pool) > 0:
if self.event_elapsed <= 0:
event, elapsed, ee_pos, ee_ori, gripper_ratio = self.event_pool.pop(0)
print("event, elapsed, ee_pos, ee_ori ", event, elapsed, ee_pos, ee_ori, gripper_ratio)
self.update_event(event)
if self.event == "move":
self.update_ee_target(ee_pos, ee_ori)
elif self.event == "close":
self.gripper.set_close_ratio(gripper_ratio)
if self.connect_server:
self.synchronize_robot()
self.event_elapsed = elapsed
else:
if self.connect_server:
if self.total_event_count > 200 and self.total_event_count % (60 * 3) == 0:
self.synchronize_robot()
# print("coffee control event", self.event, self.event_elapsed)
if self.event == "move":
actions = self.cs_controller.forward(
target_end_effector_position=self.ee_pos_target,
target_end_effector_orientation=self.ee_ori_target)
elif self.event == "close":
actions = self.gripper.forward(action="close")
elif self.event == "open":
actions = self.gripper.forward(action="open")
self.robot.apply_action(actions)
# from omni.isaac.core.utils.types import ArticulationAction
# joint_actions = ArticulationAction()
# joint_actions.joint_positions = [0, 15, 180, -130, 0, 55, 90] + [0.8] * 6
# for i in range(13):
# joint_actions.joint_positions[i] = np.deg2rad(joint_actions.joint_positions[i])
# print("joint_actions", joint_actions)
# self.robot.apply_action(joint_actions)
self.total_event_count += 1 # update event time
self.event_elapsed -= 1 # update event elapsed time
# synchronize
# if self.connect_server:
# if self.total_event_count % 60 == 0:
# self.synchronize_robot()
# return actions
| 8,724 | Python | 39.393518 | 154 | 0.568317 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/validator.py | # validate real and simulated robot
class Validator():
def __init__(self, robot, robot_client) -> None:
"""
Initialize the validator
"""
self.robot = robot
self.robot_client = robot_client
| 246 | Python | 16.642856 | 52 | 0.552846 |
DigitalBotLab/Robots/robot-exts-control/exts/control/control/kinova/rmpflow/robot_descriptor.yaml | # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# The robot descriptor defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF, except when otherwise specified below under
# cspace_urdf_bridge
cspace:
- Actuator1
- Actuator2
- Actuator3
- Actuator4
- Actuator5
- Actuator6
root_link: Base_Link
default_q: [
0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]
acceleration_limits: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
jerk_limits: [150.0, 150.0, 150.0, 150.0, 150.0, 150.0] | 1,026 | YAML | 31.093749 | 79 | 0.739766 |
DigitalBotLab/Robots/RobotServices/kinova_control.py | import sys
import os
import time
import threading
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.messages import Base_pb2, BaseCyclic_pb2, Common_pb2
# Maximum allowed waiting time during actions (in seconds)
TIMEOUT_DURATION = 20
# Create closure to set an event after an END or an ABORT
def check_for_end_or_abort(e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check
def angular_action_movement(base, joint_angles):
print("Starting angular action movement ...")
action = Base_pb2.Action()
action.name = "Example angular action movement"
action.application_data = ""
actuator_count = base.GetActuatorCount()
# move to specified location
for joint_id in range(actuator_count.count):
joint_angle = action.reach_joint_angles.joint_angles.joint_angles.add()
joint_angle.joint_identifier = joint_id
joint_angle.value = joint_angles[joint_id]
e = threading.Event()
notification_handle = base.OnNotificationActionTopic(
check_for_end_or_abort(e),
Base_pb2.NotificationOptions()
)
print("Executing action")
base.ExecuteAction(action)
print("Waiting for movement to finish ...")
finished = e.wait(TIMEOUT_DURATION)
base.Unsubscribe(notification_handle)
if finished:
print("Angular movement completed")
else:
print("Timeout on action notification wait")
return finished
def cartesian_action_movement(base, base_cyclic):
print("Starting Cartesian action movement ...")
action = Base_pb2.Action()
action.name = "Example Cartesian action movement"
action.application_data = ""
feedback = base_cyclic.RefreshFeedback()
cartesian_pose = action.reach_pose.target_pose
cartesian_pose.x = feedback.base.tool_pose_x # (meters)
cartesian_pose.y = feedback.base.tool_pose_y - 0.1 # (meters)
cartesian_pose.z = feedback.base.tool_pose_z - 0.2 # (meters)
cartesian_pose.theta_x = feedback.base.tool_pose_theta_x # (degrees)
cartesian_pose.theta_y = feedback.base.tool_pose_theta_y # (degrees)
cartesian_pose.theta_z = feedback.base.tool_pose_theta_z # (degrees)
e = threading.Event()
notification_handle = base.OnNotificationActionTopic(
check_for_end_or_abort(e),
Base_pb2.NotificationOptions()
)
print("Executing action")
base.ExecuteAction(action)
print("Waiting for movement to finish ...")
finished = e.wait(TIMEOUT_DURATION)
base.Unsubscribe(notification_handle)
if finished:
print("Cartesian movement completed")
else:
print("Timeout on action notification wait")
return finished
def GripperCommand(base, target_position):
# Create the GripperCommand we will send
gripper_command = Base_pb2.GripperCommand()
finger = gripper_command.gripper.finger.add()
# Close the gripper with position increments
gripper_command.mode = Base_pb2.GRIPPER_POSITION
finger.finger_identifier = 1
finger.value = target_position
# print("Going to position {:0.2f}...".format(finger.value))
base.SendGripperCommand(gripper_command)
return True
class GripperCommandExample:
def __init__(self, router, proportional_gain = 2.0):
self.proportional_gain = proportional_gain
self.router = router
# Create base client using TCP router
self.base = BaseClient(self.router)
def ExampleSendGripperCommands(self, target_position):
# Create the GripperCommand we will send
gripper_command = Base_pb2.GripperCommand()
finger = gripper_command.gripper.finger.add()
# Close the gripper with position increments
gripper_command.mode = Base_pb2.GRIPPER_POSITION
finger.finger_identifier = 1
finger.value = target_position
# print("Going to position {:0.2f}...".format(finger.value))
self.base.SendGripperCommand(gripper_command)
return True
# # Set speed to open gripper
# print ("Opening gripper using speed command...")
# gripper_command.mode = Base_pb2.GRIPPER_SPEED
# finger.value = 0.1
# self.base.SendGripperCommand(gripper_command)
# gripper_request = Base_pb2.GripperRequest()
# # Wait for reported position to be opened
# gripper_request.mode = Base_pb2.GRIPPER_POSITION
# while True:
# gripper_measure = self.base.GetMeasuredGripperMovement(gripper_request)
# if len (gripper_measure.finger):
# print("Current position is : {0}".format(gripper_measure.finger[0].value))
# if gripper_measure.finger[0].value < 0.01:
# break
# else: # Else, no finger present in answer, end loop
# break
# # Set speed to close gripper
# print ("Closing gripper using speed command...")
# gripper_command.mode = Base_pb2.GRIPPER_SPEED
# finger.value = -0.1
# self.base.SendGripperCommand(gripper_command)
# # Wait for reported speed to be 0
# gripper_request.mode = Base_pb2.GRIPPER_SPEED
# while True:
# gripper_measure = self.base.GetMeasuredGripperMovement(gripper_request)
# if len (gripper_measure.finger):
# print("Current speed is : {0}".format(gripper_measure.finger[0].value))
# if gripper_measure.finger[0].value == 0.0:
# break
# else: # Else, no finger present in answer, end loop
# break
class GripperFeedback:
def __init__(self, base, base_cyclic, proportional_gain = 2.0, force_min = 10, force_max = 30):
"""
GripperFeedback class constructor.
Inputs:
            base: BaseClient created over the TCP router
            base_cyclic: BaseCyclicClient created over the UDP (real-time) router
float proportional_gain: Proportional gain used in control loop (default value is 2.0)
Outputs:
None
Notes:
- Actuators and gripper initial position are retrieved to set initial positions
- Actuator and gripper cyclic command objects are created in constructor. Their
references are used to update position and speed.
"""
self.proportional_gain = proportional_gain
###########################################################################################
# UDP and TCP sessions are used in this example.
# TCP is used to perform the change of servoing mode
# UDP is used for cyclic commands.
#
# 2 sessions have to be created: 1 for TCP and 1 for UDP
###########################################################################################
# Create base client using TCP router
self.base = base
# Create base cyclic client using UDP router.
self.base_cyclic = base_cyclic
# Create base cyclic command object.
self.base_command = BaseCyclic_pb2.Command()
self.base_command.frame_id = 0
self.base_command.interconnect.command_id.identifier = 0
self.base_command.interconnect.gripper_command.command_id.identifier = 0
# Add motor command to interconnect's cyclic
self.motorcmd = self.base_command.interconnect.gripper_command.motor_cmd.add()
# Set gripper's initial position velocity and force
base_feedback = self.base_cyclic.RefreshFeedback()
self.motorcmd.position = base_feedback.interconnect.gripper_feedback.motor[0].position
self.motorcmd.velocity = 0
self.motorcmd.force = force_min
self.force_min = force_min
self.force_max = force_max
for actuator in base_feedback.actuators:
self.actuator_command = self.base_command.actuators.add()
self.actuator_command.position = actuator.position
self.actuator_command.velocity = 0.0
self.actuator_command.torque_joint = 0.0
self.actuator_command.command_id = 0
print("Position = ", actuator.position)
# Save servoing mode before changing it
self.previous_servoing_mode = self.base.GetServoingMode()
# Set base in low level servoing mode
servoing_mode_info = Base_pb2.ServoingModeInformation()
servoing_mode_info.servoing_mode = Base_pb2.LOW_LEVEL_SERVOING
self.base.SetServoingMode(servoing_mode_info)
def Cleanup(self):
"""
Restore arm's servoing mode to the one that
was effective before running the example.
Inputs:
None
Outputs:
None
Notes:
None
"""
# Restore servoing mode to the one that was in use before running the example
self.base.SetServoingMode(self.previous_servoing_mode)
def grip(self, target_position):
if target_position > 100.0:
target_position = 100.0
if target_position < 0.0:
target_position = 0.0
self.motorcmd.position = target_position
# self.motorcmd.force = self.force_max
return True
def Goto(self, target_position):
"""
        Position the gripper at a requested target position using a simple
        proportional feedback loop that sets gripper velocity according to the
        error between the target position and the current gripper position
        Inputs:
            float target_position: position (0% - 100%) to send gripper to.
        Outputs:
            Returns True if the gripper was positioned successfully, returns False
            otherwise.
Notes:
- This function blocks until position is reached.
- If target position exceeds 100.0, its value is changed to 100.0.
- If target position is below 0.0, its value is set to 0.0.
"""
if target_position > 100.0:
target_position = 100.0
if target_position < 0.0:
target_position = 0.0
while True:
try:
base_feedback = self.base_cyclic.Refresh(self.base_command)
# Calculate speed according to position error (target position VS current position)
position_error = target_position - base_feedback.interconnect.gripper_feedback.motor[0].position
print("target pos:", target_position)
# If positional error is small, stop gripper
if abs(position_error) < 1.5:
position_error = 0
self.motorcmd.velocity = 0
self.base_cyclic.Refresh(self.base_command)
return True
else:
self.motorcmd.velocity = self.proportional_gain * abs(position_error)
if self.motorcmd.velocity > 100.0:
self.motorcmd.velocity = 100.0
self.motorcmd.position = target_position
except Exception as e:
print(str(e))
return False
time.sleep(0.001)
return True
| 12,010 | Python | 36.652038 | 112 | 0.618068 |
DigitalBotLab/Robots/RobotServices/kinova_server.py | import socketserver
import utilities
import sys, os
from numpy import interp
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.messages import Base_pb2
from kinova_control import angular_action_movement, GripperFeedback, GripperCommand
# import files
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
# initialize connection argument
class KinovaUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def setup(self):
self.joint_target = 0.0
def handle(self):
# obtain message from Isaac Sim
data = self.request[0].strip()
socket = self.request[1]
print("recieving data from omniverse:", data)
command, message = data.split(b':')
if command.startswith(b'Hello'):
response = "Connect with isaac sim"
print("establish connection with isaac sim")
elif command.startswith(b'Control'):
joint_positions = self.process_data(message)
success = "succeed" if self.control_robot(joint_positions) else "failed"
response = f"The action {success}"
        elif command.startswith(b'GetJoints'):
            joint_angles = self.get_joint_status()
            response = " ".join([str(e) for e in joint_angles])
        else:
            response = "Unknown command"
        socket.sendto(response.encode('utf-8'), self.client_address)
def process_data(self, data: str):
"""
        Parse the payload into a Kinova command for the real robot;
        the data comprises 7 (body) + 1 (gripper) dimensions
"""
joint_positions = [float(e) for e in data.split()]
return joint_positions
def control_robot(self, joint_positions):
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
base = BaseClient(router)
# base_cyclic = BaseCyclicClient(router_real_time)
# gripper = GripperFeedback(base, base_cyclic)
success = True
success &= angular_action_movement(base, joint_positions[:7])
# gripper.Cleanup()
print("go to position", joint_positions[7])
joint_target = min(max(0, joint_positions[7]), 1)
# if joint_target != self.joint_target:
# self.joint_target = joint_target
success &= GripperCommand(base, joint_target)
# gripper.Cleanup()
# gripper_request = Base_pb2.GripperRequest()
# gripper_request.mode = Base_pb2.GRIPPER_POSITION
# gripper_measure = base.GetMeasuredGripperMovement(gripper_request)
# print("gripper position is at", gripper_measure)
return success
def get_joint_status(self):
# Create connection to the device and get the router
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
# Create required services
base = BaseClient(router)
joint_angles = base.GetMeasuredJointAngles().joint_angles
# print("Joint angles: ", len(joint_angles), joint_angles[0], joint_angles)
joint_angles = [e.value for e in joint_angles]
gripper_request = Base_pb2.GripperRequest()
gripper_request.mode = Base_pb2.GRIPPER_POSITION
gripper_measure = base.GetMeasuredGripperMovement(gripper_request)
# print("gripper position is at", gripper_measure)
print("joint_angles and gripper position", joint_angles, gripper_measure)
return joint_angles + [gripper_measure]
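# Minimal client sketch (assumptions: server running locally on port 9999 and
# the "<Command>:<payload>" format parsed in handle() above):
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"Control:0 15 180 -130 0 55 90 0.5", ("localhost", 9999))
#   reply, _ = sock.recvfrom(4096)
#   print(reply.decode("utf-8"))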
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
args = utilities.parseConnectionArguments()
with socketserver.UDPServer((HOST, PORT), KinovaUDPHandler) as server:
server.serve_forever()
| 4,417 | Python | 41.07619 | 91 | 0.63233 |
DigitalBotLab/Robots/RobotServices/utilities.py | import argparse
from kortex_api.TCPTransport import TCPTransport
from kortex_api.UDPTransport import UDPTransport
from kortex_api.RouterClient import RouterClient, RouterClientSendOptions
from kortex_api.SessionManager import SessionManager
from kortex_api.autogen.messages import Session_pb2
def parseConnectionArguments(parser = argparse.ArgumentParser()):
parser.add_argument("--ip", type=str, help="IP address of destination", default="192.168.1.10")
parser.add_argument("-u", "--username", type=str, help="username to login", default="admin")
parser.add_argument("-p", "--password", type=str, help="password to login", default="admin")
return parser.parse_args()
class DeviceConnection:
TCP_PORT = 10000
UDP_PORT = 10001
@staticmethod
def createTcpConnection(args):
"""
returns RouterClient required to create services and send requests to device or sub-devices,
"""
return DeviceConnection(args.ip, port=DeviceConnection.TCP_PORT, credentials=(args.username, args.password))
@staticmethod
def createUdpConnection(args):
"""
returns RouterClient that allows to create services and send requests to a device or its sub-devices @ 1khz.
"""
return DeviceConnection(args.ip, port=DeviceConnection.UDP_PORT, credentials=(args.username, args.password))
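    # Usage sketch (mirrors kinova_server.py; BaseClient comes from the Kortex API):
    #   args = parseConnectionArguments()
    #   with DeviceConnection.createTcpConnection(args) as router:
    #       base = BaseClient(router)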
def __init__(self, ipAddress, port=TCP_PORT, credentials = ("","")):
self.ipAddress = ipAddress
self.port = port
self.credentials = credentials
self.sessionManager = None
# Setup API
self.transport = TCPTransport() if port == DeviceConnection.TCP_PORT else UDPTransport()
self.router = RouterClient(self.transport, RouterClient.basicErrorCallback)
# Called when entering 'with' statement
def __enter__(self):
self.transport.connect(self.ipAddress, self.port)
if (self.credentials[0] != ""):
session_info = Session_pb2.CreateSessionInfo()
session_info.username = self.credentials[0]
session_info.password = self.credentials[1]
session_info.session_inactivity_timeout = 10000 # (milliseconds)
session_info.connection_inactivity_timeout = 2000 # (milliseconds)
self.sessionManager = SessionManager(self.router)
print("Logging as", self.credentials[0], "on device", self.ipAddress)
self.sessionManager.CreateSession(session_info)
return self.router
# Called when exiting 'with' statement
def __exit__(self, exc_type, exc_value, traceback):
        if self.sessionManager is not None:
router_options = RouterClientSendOptions()
router_options.timeout_ms = 1000
self.sessionManager.CloseSession(router_options)
self.transport.disconnect()
| 2,888 | Python | 36.51948 | 116 | 0.674169 |
DigitalBotLab/Robots/VisionServices/segment_anything/utils.py | import numpy as np
import matplotlib.pyplot as plt
def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30/255, 144/255, 255/255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image)
def show_points(coords, labels, ax, marker_size=375):
pos_points = coords[labels==1]
neg_points = coords[labels==0]
ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
def show_box(box, ax):
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) | 966 | Python | 42.954544 | 128 | 0.606625 |
DigitalBotLab/Robots/VisionServices/segment_anything/sam_app.py | import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
import os
import gradio as gr
import json
from segment_anything import sam_model_registry, SamPredictor
SEGMENT_ANYTHING_FOLDER = "C:\\Users\\zhaoy\\Downloads"#"I:/Research/semgent-anything"
MODEL_TYPE = "vit_b" #"vit_b"
SAM_CHECKPOINT = os.path.join(SEGMENT_ANYTHING_FOLDER, "sam_vit_b_01ec64.pth") # sam_vit_h_4b8939 # sam_vit_b_01ec64
device = "cuda"
sam = sam_model_registry[MODEL_TYPE](checkpoint=SAM_CHECKPOINT)
sam.to(device=device)
predictor = SamPredictor(sam)
def segment_with_points(
image,
input_point_x,
input_point_y,
shape = "cuboid",
input_label = np.array([1]),
shape_contour_count = 6,
debug_plot = True,
):
predictor.set_image(image)
input_points = np.array([[input_point_x, input_point_y]])
masks, scores, logits = predictor.predict(
point_coords=input_points,
point_labels=input_label,
multimask_output=True,
)
print("mask", masks.shape, "scores", scores.shape, "logits", logits.shape)
    # start from the first mask, then keep the mask whose segmentation
    # yields the fewest contours (i.e. the cleanest one)
    target_mask = masks[0].astype(np.uint8)
    target_contours, _ = cv2.findContours(target_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    target_contour_count = len(target_contours)
    for mask in masks:
        # get contours
        contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) > 0 and len(contours) < target_contour_count:
            target_mask = mask
            target_contours = contours
            target_contour_count = len(contours)
if debug_plot:
cv2.drawContours(image, target_contours, -1, (255, 255, 255), 2)
cv2.imshow('target_contours', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
mask = target_mask
contour = max(target_contours, key=cv2.contourArea)
arclen = cv2.arcLength(contour, True)
if shape == "cuboid":
        for ratio in [0.01, 0.02, 0.005, 0.05, 0.1]:
epsilon = ratio * arclen
approx = cv2.approxPolyDP(contour, epsilon, True)
if len(approx) == shape_contour_count:
break
else: # bounding box
x, y, w, h = cv2.boundingRect(contour)
approx = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
print("approx", approx, approx.shape)
if debug_plot:
temp = cv2.drawContours(image, [approx], -1, (255, 0, 0), 1)
temp = cv2.resize(temp, (960, 540))
cv2.imshow('Final Contours', temp)
cv2.waitKey(0)
cv2.destroyAllWindows()
return json.dumps(approx.tolist())
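# The endpoint returns the fitted polygon serialized as JSON, e.g. (illustrative
# values only): "[[[120, 45]], [[380, 52]], [[372, 410]], [[118, 402]]]",
# i.e. an OpenCV-style (N, 1, 2) point array converted with tolist().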
cond_img_e = gr.Image(label="Input image", type='numpy', image_mode = "RGB")
input_point_x = gr.Number(label="input h", value = 0)
input_point_y = gr.Number(label="input w", value = 0)
if __name__ == "__main__":
demo = gr.Interface(
segment_with_points,
inputs=[cond_img_e,
input_point_x,
input_point_y,
"text"
],
outputs="text",
title="Segment Anything",
)
demo.launch(share = False)
| 3,184 | Python | 29.625 | 116 | 0.608668 |
DigitalBotLab/Robots/VisionServices/dino/dino_app.py | import argparse
import numpy as np
import gradio as gr
import torch
# Grounding DINO
from GroundingDINO.groundingdino.models import build_model
from GroundingDINO.groundingdino.util.slconfig import SLConfig
from GroundingDINO.groundingdino.util.utils import clean_state_dict
from GroundingDINO.groundingdino.util.inference import predict
from GroundingDINO.groundingdino.datasets import transforms as T
# segment anything
# from segment_anything import build_sam, SamPredictor
# import cv2
from huggingface_hub import hf_hub_download
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filenmae = "groundingdino_swinb_cogcoor.pth"
ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
def load_model_hf(repo_id, filename, ckpt_config_filename, device='cpu'):
    cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)
    args = SLConfig.fromfile(cache_config_file)
    model = build_model(args)
    args.device = device

    cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
    checkpoint = torch.load(cache_file, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(cache_file, log))
    _ = model.eval()
    return model
def transform_image(image_pil):
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image
def run_gdino(image, text_prompt, box_threshold, text_threshold):
w, h = image.size
print(image.size)
image_pil = image.convert("RGB")
image = transform_image(image_pil)
groundingdino_model = load_model_hf(ckpt_repo_id, ckpt_filenmae, ckpt_config_filename)
boxes, scores, labels = predict(
model=groundingdino_model,
image=image,
caption=text_prompt,
box_threshold=box_threshold,
text_threshold=text_threshold
)
    def to_center(x):
        # convert a normalized (cx, cy, w, h) box to absolute (x1, y1, x2, y2)
        x *= np.array([w, h, w, h])
        a = x[2] / 2
        b = x[3] / 2
        return np.array([x[0] - a, x[1] - b, x[0] + a, x[1] + b])
if boxes.shape[0] == 0:
return ""
boxes = boxes.cpu().detach().numpy()
pixel_coord = np.apply_along_axis(to_center, 1, boxes)
scores = scores.cpu().detach().numpy()
print(list(pixel_coord), list(scores))
record = []
for box, score, label in zip(list(np.around(pixel_coord).astype("int")), list(scores), labels):
# print("box", box)
# print("score", score)
record.append(str(list(box)) + "_" + "{:.3f}".format(score) + "_" + str(label))
return str(record)
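# Each record follows "<xyxy box>_<score>_<label>", e.g. (illustrative values):
#   "['[120, 45, 380, 410]_0.532_tea tower']"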
if __name__ == "__main__":
demo = gr.Interface(
run_gdino,
inputs=[gr.Image(source='upload', type="pil"), "text", gr.Slider(0, 1, value=0.3), gr.Slider(0, 1, value=0.25)],
outputs="text",
title="Grounded Dino",
examples=[
],
)
demo.launch(share = True)
| 3,487 | Python | 28.811966 | 117 | 0.641812 |
DigitalBotLab/Robots/VisionServices/owl_vit/owl_app.py | import torch
import cv2
import gradio as gr
import numpy as np
from transformers import OwlViTProcessor, OwlViTForObjectDetection
# Use GPU if available
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32").to(device)
model.eval()
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
def query_image(img, text_queries, score_threshold):
    text_queries = text_queries.split(",")
target_sizes = torch.Tensor([img.shape[:2]])
inputs = processor(text=text_queries, images=img, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model(**inputs)
outputs.logits = outputs.logits.cpu()
outputs.pred_boxes = outputs.pred_boxes.cpu()
results = processor.post_process(outputs=outputs, target_sizes=target_sizes)
boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
# font = cv2.FONT_HERSHEY_SIMPLEX
# for box, score, label in zip(boxes, scores, labels):
# box = [int(i) for i in box.tolist()]
# if score >= score_threshold:
# img = cv2.rectangle(img, pt1 = (box[0], box[1]), pt2 = (box[2], box[3]), color = (255,0,0), thickness = 5)
# if box[3] + 25 > 768:
# y = box[3] - 10
# else:
# y = box[3] + 25
# img = cv2.putText(
# img, text_queries[label], (box[0], y), font, 1, (255,0,0), 2, cv2.LINE_AA
# )
records = []
for box, score, label in zip(boxes, scores, labels):
# print(box, score, label)
if score >= score_threshold:
records.append(str(box.long().tolist()) + "_" + "{:.3f}".format(score.item()) + "_" + str(label.item()))
return str(records)
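# Each record follows "<xyxy box>_<score>_<label index>", where the label index
# points into text_queries, e.g. (illustrative): "['[120, 45, 380, 410]_0.312_0']"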
description = """
Gradio demo for <a href="https://huggingface.co/docs/transformers/main/en/model_doc/owlvit">OWL-ViT</a>,
introduced in <a href="https://arxiv.org/abs/2205.06230">Simple Open-Vocabulary Object Detection
with Vision Transformers</a>.
\n\nYou can use OWL-ViT to query images with text descriptions of any object.
To use it, simply upload an image and enter comma separated text descriptions of objects you want to query the image for. You
can also use the score threshold slider to set a threshold to filter out low probability predictions.
\n\nOWL-ViT is trained on text templates,
hence you can get better predictions by querying the image with text templates used in training the original model: *"photo of a star-spangled banner"*,
*"image of a shoe"*. Refer to the <a href="https://arxiv.org/abs/2103.00020">CLIP</a> paper to see the full list of text templates used to augment the training data.
\n\n<a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb">Colab demo</a>
"""
demo = gr.Interface(
query_image,
inputs=[gr.Image(), "text", gr.Slider(0, 1, value=0.1)],
outputs="text",
title="Zero-Shot Object Detection with OWL-ViT",
description=description,
examples=[
],
)
demo.launch(share = True) | 3,249 | Python | 38.634146 | 165 | 0.659588 |
DigitalBotLab/Robots/VisionServices/owl_vit/README.md | ---
title: OWL-ViT Demo
emoji: 🔥
colorFrom: yellow
colorTo: yellow
sdk: gradio
sdk_version: 3.1.3
app_file: app.py
pinned: false
license: apache-2.0
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
| 251 | Markdown | 16.999999 | 96 | 0.752988 |
DigitalBotLab/Robots/VisionServices/fastsam/fastsam_app.py | from ultralytics import YOLO
import gradio as gr
import torch
from utils.my_tools import fast_process
from utils.tools import format_results, box_prompt, point_prompt, text_prompt
from PIL import ImageDraw
import numpy as np
import os
import json
FASTSAM_FOLDER = "I:/Research/FastSAM/"
# Load the pre-trained model
model = YOLO(os.path.join(FASTSAM_FOLDER,'weights/FastSAM.pt'))
device = torch.device(
"cuda"
if torch.cuda.is_available()
else "mps"
if torch.backends.mps.is_available()
else "cpu"
)
def segment_everything(
input,
text="",
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
withContours=True,
use_retina=True,
mask_random_color=True,
):
    input_size = int(input_size)  # ensure imgsz is an integer
# Thanks for the suggestion by hysts in HuggingFace.
w, h = input.size
scale = input_size / max(w, h)
new_w = int(w * scale)
new_h = int(h * scale)
input = input.resize((new_w, new_h))
results = model(input,
device=device,
retina_masks=True,
iou=iou_threshold,
conf=conf_threshold,
imgsz=input_size,)
if len(text) > 0:
results = format_results(results[0], 0)
annotations, _ = text_prompt(results, text, input, device=device,
clip_model_path=os.path.join(FASTSAM_FOLDER,'weights/CLIP_ViT_B_32.pt')
)
annotations = np.array([annotations])
else:
annotations = results[0].masks.data
contour_str = fast_process(annotations=annotations,
image=input,
device=device,
scale=(1024 // input_size),
better_quality=better_quality,
mask_random_color=mask_random_color,
bbox=None,
use_retina=use_retina,
)
return json.dumps(contour_str.tolist())
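# NOTE: my_tools.fast_process (as patched in this repo) returns the simplified
# approxPolyDP contours instead of a rendered image, so the endpoint replies
# with the detected polygons serialized as JSON.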
cond_img_e = gr.Image(label="Input", type='pil')
cond_img_p = gr.Image(label="Input with points", type='pil')
cond_img_t = gr.Image(label="Input with text", type='pil',
value = os.path.join(FASTSAM_FOLDER,"examples/0.jpg"))
segm_img_e = gr.Image(label="Segmented Image", interactive=False, type='pil')
segm_img_p = gr.Image(label="Segmented Image with points", interactive=False, type='pil')
segm_img_t = gr.Image(label="Segmented Image with text", interactive=False, type='pil')
global_points = []
global_point_label = []
if __name__ == "__main__":
demo = gr.Interface(
segment_everything,
inputs=[cond_img_t,
gr.Textbox(label="text prompt", value="grey tea tower"),
# 1024,
# 0.7,
# 0.25,
# False,
# True,
# True,
],
outputs="text",
title="FastSAM",
)
demo.launch(share = False)
| 3,071 | Python | 28.538461 | 107 | 0.554217 |
DigitalBotLab/Robots/VisionServices/fastsam/README.md | ---
title: FastSAM
emoji: 🐠
colorFrom: pink
colorTo: indigo
sdk: gradio
sdk_version: 3.35.2
app_file: app_gradio.py
pinned: false
license: apache-2.0
---
# Fast Segment Anything
Official PyTorch implementation of <a href="https://github.com/CASIA-IVA-Lab/FastSAM">FastSAM</a>.
The **Fast Segment Anything Model (FastSAM)** is a CNN-based Segment Anything Model trained on only 2% of the SA-1B dataset published by the SAM authors. FastSAM achieves
performance comparable to SAM at **50× higher run-time speed**.
## License
The model is licensed under the [Apache 2.0 license](LICENSE).
## Acknowledgement
- [Segment Anything](https://segment-anything.com/) provides the SA-1B dataset and the base codes.
- [YOLOv8](https://github.com/ultralytics/ultralytics) provides codes and pre-trained models.
- [YOLACT](https://arxiv.org/abs/2112.10003) provides powerful instance segmentation method.
- [Grounded-Segment-Anything](https://huggingface.co/spaces/yizhangliu/Grounded-Segment-Anything) provides a useful web demo template.
## Citing FastSAM
If you find this project useful for your research, please consider citing the following BibTeX entry.
```
@misc{zhao2023fast,
title={Fast Segment Anything},
author={Xu Zhao and Wenchao Ding and Yongqi An and Yinglong Du and Tao Yu and Min Li and Ming Tang and Jinqiao Wang},
year={2023},
eprint={2306.12156},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
``` | 1,451 | Markdown | 30.565217 | 192 | 0.740868 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/tools.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
import os
import sys
import clip
def convert_box_xywh_to_xyxy(box):
x1 = box[0]
y1 = box[1]
x2 = box[0] + box[2]
y2 = box[1] + box[3]
return [x1, y1, x2, y2]
def segment_image(image, bbox):
image_array = np.array(image)
segmented_image_array = np.zeros_like(image_array)
x1, y1, x2, y2 = bbox
segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
segmented_image = Image.fromarray(segmented_image_array)
black_image = Image.new("RGB", image.size, (255, 255, 255))
# transparency_mask = np.zeros_like((), dtype=np.uint8)
transparency_mask = np.zeros(
(image_array.shape[0], image_array.shape[1]), dtype=np.uint8
)
transparency_mask[y1:y2, x1:x2] = 255
transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
black_image.paste(segmented_image, mask=transparency_mask_image)
return black_image
def format_results(result, filter=0):
annotations = []
n = len(result.masks.data)
for i in range(n):
annotation = {}
mask = result.masks.data[i] == 1.0
if torch.sum(mask) < filter:
continue
annotation["id"] = i
annotation["segmentation"] = mask.cpu().numpy()
annotation["bbox"] = result.boxes.data[i]
annotation["score"] = result.boxes.conf[i]
annotation["area"] = annotation["segmentation"].sum()
annotations.append(annotation)
return annotations
def filter_masks(annotations): # filter the overlap mask
annotations.sort(key=lambda x: x["area"], reverse=True)
to_remove = set()
for i in range(0, len(annotations)):
a = annotations[i]
for j in range(i + 1, len(annotations)):
b = annotations[j]
if i != j and j not in to_remove:
# check if
if b["area"] < a["area"]:
if (a["segmentation"] & b["segmentation"]).sum() / b[
"segmentation"
].sum() > 0.8:
to_remove.add(j)
return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove
def get_bbox_from_mask(mask):
mask = mask.astype(np.uint8)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
x1, y1, w, h = cv2.boundingRect(contours[0])
x2, y2 = x1 + w, y1 + h
if len(contours) > 1:
for b in contours:
x_t, y_t, w_t, h_t = cv2.boundingRect(b)
            # merge multiple bboxes into one
x1 = min(x1, x_t)
y1 = min(y1, y_t)
x2 = max(x2, x_t + w_t)
y2 = max(y2, y_t + h_t)
h = y2 - y1
w = x2 - x1
return [x1, y1, x2, y2]
def fast_process(
annotations, args, mask_random_color, bbox=None, points=None, edges=False
):
if isinstance(annotations[0], dict):
annotations = [annotation["segmentation"] for annotation in annotations]
result_name = os.path.basename(args.img_path)
image = cv2.imread(args.img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
original_h = image.shape[0]
original_w = image.shape[1]
if sys.platform == "darwin":
plt.switch_backend("TkAgg")
plt.figure(figsize=(original_w/100, original_h/100))
# Add subplot with no margin.
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(image)
if args.better_quality == True:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(
mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
)
annotations[i] = cv2.morphologyEx(
mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
)
if args.device == "cpu":
annotations = np.array(annotations)
fast_show_mask(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
points=points,
point_label=args.point_label,
retinamask=args.retina,
target_height=original_h,
target_width=original_w,
)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
fast_show_mask_gpu(
annotations,
plt.gca(),
random_color=args.randomcolor,
bbox=bbox,
points=points,
point_label=args.point_label,
retinamask=args.retina,
target_height=original_h,
target_width=original_w,
)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
if args.withContours == True:
contour_all = []
temp = np.zeros((original_h, original_w, 1))
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask["segmentation"]
annotation = mask.astype(np.uint8)
if args.retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, hierarchy = cv2.findContours(
annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
for contour in contours:
contour_all.append(contour)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
plt.imshow(contour_mask)
save_path = args.output
if not os.path.exists(save_path):
os.makedirs(save_path)
plt.axis("off")
fig = plt.gcf()
plt.draw()
try:
buf = fig.canvas.tostring_rgb()
except AttributeError:
fig.canvas.draw()
buf = fig.canvas.tostring_rgb()
cols, rows = fig.canvas.get_width_height()
    img_array = np.frombuffer(buf, dtype=np.uint8).reshape(rows, cols, 3)
cv2.imwrite(os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
# CPU post process
def fast_show_mask(
annotation,
ax,
random_color=False,
bbox=None,
points=None,
point_label=None,
retinamask=True,
target_height=960,
target_width=960,
):
    mask_sum = annotation.shape[0]
    height = annotation.shape[1]
    width = annotation.shape[2]
    # sort annotations by area
    areas = np.sum(annotation, axis=(1, 2))
    sorted_indices = np.argsort(areas)
    annotation = annotation[sorted_indices]
    index = (annotation != 0).argmax(axis=0)
    if random_color:
        color = np.random.random((mask_sum, 1, 1, 3))
    else:
        color = np.ones((mask_sum, 1, 1, 3)) * np.array(
            [30 / 255, 144 / 255, 255 / 255]
        )
    transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
    visual = np.concatenate([color, transparency], axis=-1)
    mask_image = np.expand_dims(annotation, -1) * visual
    show = np.zeros((height, width, 4))
    h_indices, w_indices = np.meshgrid(
        np.arange(height), np.arange(width), indexing="ij"
    )
    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update show with vectorized indexing
    show[h_indices, w_indices, :] = mask_image[indices]
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
# draw point
if points is not None:
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
s=20,
c="y",
)
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
s=20,
c="m",
)
if retinamask == False:
show = cv2.resize(
show, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
ax.imshow(show)
def fast_show_mask_gpu(
annotation,
ax,
random_color=False,
bbox=None,
points=None,
point_label=None,
retinamask=True,
target_height=960,
target_width=960,
):
    mask_sum = annotation.shape[0]
    height = annotation.shape[1]
    width = annotation.shape[2]
    areas = torch.sum(annotation, dim=(1, 2))
    sorted_indices = torch.argsort(areas, descending=False)
    annotation = annotation[sorted_indices]
    # find the index of the first non-zero mask at each pixel
    index = (annotation != 0).to(torch.long).argmax(dim=0)
    if random_color:
        color = torch.rand((mask_sum, 1, 1, 3)).to(annotation.device)
    else:
        color = torch.ones((mask_sum, 1, 1, 3)).to(annotation.device) * torch.tensor(
            [30 / 255, 144 / 255, 255 / 255]
        ).to(annotation.device)
    transparency = torch.ones((mask_sum, 1, 1, 1)).to(annotation.device) * 0.6
    visual = torch.cat([color, transparency], dim=-1)
    mask_image = torch.unsqueeze(annotation, -1) * visual
    # gather by index: for each pixel, take the color/alpha of the mask
    # selected above, collapsing mask_image into a single RGBA image
    show = torch.zeros((height, width, 4)).to(annotation.device)
    h_indices, w_indices = torch.meshgrid(
        torch.arange(height), torch.arange(width), indexing="ij"
    )
    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update show with vectorized indexing
    show[h_indices, w_indices, :] = mask_image[indices]
show_cpu = show.cpu().numpy()
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
# draw point
if points is not None:
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
s=20,
c="y",
)
plt.scatter(
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
s=20,
c="m",
)
if retinamask == False:
show_cpu = cv2.resize(
show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
ax.imshow(show_cpu)
# clip
@torch.no_grad()
def retriev(
model, preprocess, elements, search_text: str, device
) -> torch.Tensor:
preprocessed_images = [preprocess(image).to(device) for image in elements]
tokenized_text = clip.tokenize([search_text]).to(device)
stacked_images = torch.stack(preprocessed_images)
image_features = model.encode_image(stacked_images)
text_features = model.encode_text(tokenized_text)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
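    # cosine similarity of the L2-normalized embeddings, scaled by CLIP's
    # conventional logit factor of 100 and softmaxed over the crops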
probs = 100.0 * image_features @ text_features.T
return probs[:, 0].softmax(dim=0)
def crop_image(annotations, image_like):
if isinstance(image_like, str):
image = Image.open(image_like)
else:
image = image_like
ori_w, ori_h = image.size
mask_h, mask_w = annotations[0]["segmentation"].shape
if ori_w != mask_w or ori_h != mask_h:
image = image.resize((mask_w, mask_h))
cropped_boxes = []
cropped_images = []
not_crop = []
filter_id = []
# annotations, _ = filter_masks(annotations)
# filter_id = list(_)
for _, mask in enumerate(annotations):
if np.sum(mask["segmentation"]) <= 100:
filter_id.append(_)
continue
        bbox = get_bbox_from_mask(mask["segmentation"])  # bbox of the mask
        cropped_boxes.append(segment_image(image, bbox))  # keep the cropped image
        # cropped_boxes.append(segment_image(image,mask["segmentation"]))
        cropped_images.append(bbox)  # keep the bbox of the cropped image
return cropped_boxes, cropped_images, not_crop, filter_id, annotations
def box_prompt(masks, bbox, target_height, target_width):
h = masks.shape[1]
w = masks.shape[2]
if h != target_height or w != target_width:
bbox = [
int(bbox[0] * w / target_width),
int(bbox[1] * h / target_height),
int(bbox[2] * w / target_width),
int(bbox[3] * h / target_height),
]
bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0
bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0
bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w
bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h
# IoUs = torch.zeros(len(masks), dtype=torch.float32)
bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
orig_masks_area = torch.sum(masks, dim=(1, 2))
union = bbox_area + orig_masks_area - masks_area
IoUs = masks_area / union
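    # masks_area counts each mask's pixels inside the bbox (the intersection),
    # so this is the standard IoU = |A∩B| / (|A| + |B| - |A∩B|)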
max_iou_index = torch.argmax(IoUs)
return masks[max_iou_index].cpu().numpy(), max_iou_index
def point_prompt(masks, points, point_label, target_height, target_width):  # numpy-based processing
h = masks[0]["segmentation"].shape[0]
w = masks[0]["segmentation"].shape[1]
if h != target_height or w != target_width:
points = [
[int(point[0] * w / target_width), int(point[1] * h / target_height)]
for point in points
]
onemask = np.zeros((h, w))
masks = sorted(masks, key=lambda x: x['area'], reverse=True)
for i, annotation in enumerate(masks):
if type(annotation) == dict:
mask = annotation['segmentation']
else:
mask = annotation
for i, point in enumerate(points):
if mask[point[1], point[0]] == 1 and point_label[i] == 1:
onemask[mask] = 1
if mask[point[1], point[0]] == 1 and point_label[i] == 0:
onemask[mask] = 0
onemask = onemask >= 1
return onemask, 0
def text_prompt(annotations, text, img_path, device, clip_model_path = "./weights/CLIP_ViT_B_32.pt"):
cropped_boxes, cropped_images, not_crop, filter_id, annotations_ = crop_image(
annotations, img_path
)
clip_model, preprocess = clip.load(clip_model_path, device=device)
scores = retriev(
clip_model, preprocess, cropped_boxes, text, device=device
)
max_idx = scores.argsort()
max_idx = max_idx[-1]
max_idx += sum(np.array(filter_id) <= int(max_idx))
return annotations_[max_idx]["segmentation"], max_idx
| 15,038 | Python | 33.974419 | 102 | 0.579864 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/my_tools.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
def fast_process(
annotations,
image,
device,
scale,
better_quality=False,
mask_random_color=True,
bbox=None,
use_retina=True,
# withContours=True, # must true
):
if isinstance(annotations[0], dict):
annotations = [annotation['segmentation'] for annotation in annotations]
original_h = image.height
original_w = image.width
if better_quality:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
if device == 'cpu':
annotations = np.array(annotations)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
contour_all = []
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask['segmentation']
annotation = mask.astype(np.uint8)
if use_retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
arclen = cv2.arcLength(contour, True)
# WARNING: 0.005 is a magic number
approx = cv2.approxPolyDP(contour, arclen*0.005, True)
print("approx!!", approx.shape)
contour_all.append(approx)
print("contour_all!!!", contour_all)
return np.array(contour_all)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
image = image.convert('RGBA')
overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_contour, (0, 0), overlay_contour)
return image
| 2,438 | Python | 31.959459 | 111 | 0.608696 |
DigitalBotLab/Robots/VisionServices/fastsam/utils/tools_gradio.py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import torch
def fast_process(
annotations,
image,
device,
scale,
better_quality=False,
mask_random_color=True,
bbox=None,
use_retina=True,
withContours=True,
):
if isinstance(annotations[0], dict):
annotations = [annotation['segmentation'] for annotation in annotations]
original_h = image.height
original_w = image.width
if better_quality:
if isinstance(annotations[0], torch.Tensor):
annotations = np.array(annotations.cpu())
for i, mask in enumerate(annotations):
mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
if device == 'cpu':
annotations = np.array(annotations)
inner_mask = fast_show_mask(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
retinamask=use_retina,
target_height=original_h,
target_width=original_w,
)
else:
if isinstance(annotations[0], np.ndarray):
annotations = torch.from_numpy(annotations)
inner_mask = fast_show_mask_gpu(
annotations,
plt.gca(),
random_color=mask_random_color,
bbox=bbox,
retinamask=use_retina,
target_height=original_h,
target_width=original_w,
)
if isinstance(annotations, torch.Tensor):
annotations = annotations.cpu().numpy()
if withContours:
contour_all = []
temp = np.zeros((original_h, original_w, 1))
for i, mask in enumerate(annotations):
if type(mask) == dict:
mask = mask['segmentation']
annotation = mask.astype(np.uint8)
if use_retina == False:
annotation = cv2.resize(
annotation,
(original_w, original_h),
interpolation=cv2.INTER_NEAREST,
)
contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
arclen = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, arclen*0.005, True)
contour_all.append(approx)
print("contour_all", contour_all)
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
contour_mask = temp / 255 * color.reshape(1, 1, -1)
image = image.convert('RGBA')
overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_inner, (0, 0), overlay_inner)
if withContours:
overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
image.paste(overlay_contour, (0, 0), overlay_contour)
return image
# CPU post process
def fast_show_mask(
annotation,
ax,
random_color=False,
bbox=None,
retinamask=True,
target_height=960,
target_width=960,
):
mask_sum = annotation.shape[0]
height = annotation.shape[1]
weight = annotation.shape[2]
    # sort annotations by area
    areas = np.sum(annotation, axis=(1, 2))
    sorted_indices = np.argsort(areas)
annotation = annotation[sorted_indices]
index = (annotation != 0).argmax(axis=0)
if random_color == True:
color = np.random.random((mask_sum, 1, 1, 3))
else:
color = np.ones((mask_sum, 1, 1, 3)) * np.array([30 / 255, 144 / 255, 255 / 255])
transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
visual = np.concatenate([color, transparency], axis=-1)
mask_image = np.expand_dims(annotation, -1) * visual
mask = np.zeros((height, weight, 4))
h_indices, w_indices = np.meshgrid(np.arange(height), np.arange(weight), indexing='ij')
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
mask[h_indices, w_indices, :] = mask_image[indices]
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1))
if retinamask == False:
mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)
return mask
def fast_show_mask_gpu(
annotation,
ax,
random_color=False,
bbox=None,
retinamask=True,
target_height=960,
target_width=960,
):
device = annotation.device
mask_sum = annotation.shape[0]
height = annotation.shape[1]
weight = annotation.shape[2]
areas = torch.sum(annotation, dim=(1, 2))
sorted_indices = torch.argsort(areas, descending=False)
annotation = annotation[sorted_indices]
    # find the index of the first non-zero mask at each pixel
index = (annotation != 0).to(torch.long).argmax(dim=0)
if random_color == True:
color = torch.rand((mask_sum, 1, 1, 3)).to(device)
else:
color = torch.ones((mask_sum, 1, 1, 3)).to(device) * torch.tensor(
[30 / 255, 144 / 255, 255 / 255]
).to(device)
transparency = torch.ones((mask_sum, 1, 1, 1)).to(device) * 0.6
visual = torch.cat([color, transparency], dim=-1)
mask_image = torch.unsqueeze(annotation, -1) * visual
    # gather by index: for each pixel, take the color/alpha of the mask
    # selected above, collapsing mask_image into a single RGBA image
    mask = torch.zeros((height, weight, 4)).to(device)
    h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight), indexing="ij")
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # update mask with vectorized indexing
mask[h_indices, w_indices, :] = mask_image[indices]
mask_cpu = mask.cpu().numpy()
if bbox is not None:
x1, y1, x2, y2 = bbox
ax.add_patch(
plt.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
)
)
if retinamask == False:
mask_cpu = cv2.resize(
mask_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
)
return mask_cpu
| 6,221 | Python | 33.566666 | 111 | 0.596046 |
DigitalBotLab/InsideroboConnector/README.md | # <img src="Images/logo.png" alt="Logo" width="50" height="50"> Digital Bot Lab: InsideroboConnector


# Overview: Bridging the Gap Between Digital Robots and Omniverse

The Digital Bot Lab's Insiderobo Connector is a cutting-edge solution designed to seamlessly connect our extensive digital robot collection with the powerful NVIDIA Omniverse platform. With our connector, users can effortlessly import digital robots in .usd format, enabling them to leverage the full potential of Omniverse applications.
## 1. Get Started
Experience the future of robotics with the Digital Bot Lab's Insiderobo Connector, where the connection between digital robots and Omniverse becomes effortless and transformative.

### 1.1 Install Omniverse USD Composer
This project currently targets `Omniverse USD Composer`. Please follow the instructions to install it first:
[USD Composer Overview](https://docs.omniverse.nvidia.com/composer/latest/index.html)
### 1.2 Import the extension
To install the extension to Omniverse USD Composer:
First, clone the repository:
```bash
git clone https://github.com/DigitalBotLab/InsideroboConnector
```
Now open `Omniverse USD Composer`, go to `Menu Bar` -> `Windows` -> `Extensions` -> `+`, and
add `<path_to_this_repository>/AssetProvider/dbl-exts-asset/exts` to the extension search paths.
## 2. Format: USD
Our digital robots are meticulously crafted and well-configured in .usd format, complete with physics, rigid bodies, and joints. This ensures a realistic and immersive experience when interacting with the robots within Omniverse.
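As a quick illustration, a downloaded robot asset can be referenced into the current stage from the Script Editor. The snippet below is a minimal sketch; the destination prim path and the local asset path are hypothetical placeholders:
```python
import omni.usd

# Reference the downloaded robot into the current stage
stage = omni.usd.get_context().get_stage()
robot_prim = stage.DefinePrim("/World/Robot", "Xform")  # hypothetical prim path
robot_prim.GetReferences().AddReference("/path/to/downloaded/robot.usd")  # hypothetical asset path
```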
## 3. ROS <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="Ros" width="70" height="70">
The Insiderobo Connector is built upon the foundation of the Robot Operating System (ROS), an open-source framework that empowers researchers and developers to easily build and reuse code across various robotics applications. This integration allows for enhanced collaboration, accelerated development, and seamless integration of digital robots into the Omniverse ecosystem.
## 4. License
Our project builds on the Robot Operating System (ROS) framework, which lets us develop and integrate robotic systems efficiently, and is released under the BSD 3-Clause license. This license keeps the software open source, allowing users to freely use, modify, and distribute it while preserving the required attribution and disclaimer notices. By embracing ROS and the BSD 3-Clause license, we aim to foster collaboration and innovation within the robotics community.
| 2,893 | Markdown | 52.592592 | 521 | 0.790529 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/config/extension.toml | [package]
authors = ["NVIDIA"]
category = "services"
changelog = "docs/CHANGELOG.md"
description = "Digital Bot Lab Asset Provider"
icon = "data/digitalbotlab.png"
keywords = ["asset", "provider", "sketchfab"]
preview_image = "data/preview.png"
readme = "docs/README.md"
repository = ""
title = "Asset Provider for Digital Bot Lab"
version = "1.0.10"
[dependencies]
"omni.services.browser.asset" = {}
[[python.module]]
name = "omni.asset_provider.sketchfab"
[settings]
exts."omni.asset_provider.digitalbotlab".enable = true
exts."omni.asset_provider.digitalbotlab".providerId = "Digital_Bot_Lab"
exts."omni.asset_provider.digitalbotlab".keepOriginalPageSize = true
exts."omni.asset_provider.digitalbotlab".maxCountPerPage = 24
exts."omni.asset_provider.digitalbotlab".minThumbnailSize = 256
exts."omni.asset_provider.digitalbotlab".searchUrl = "https://api.sketchfab.com/v3/search"
exts."omni.asset_provider.digitalbotlab".modelsUrl = "https://api.sketchfab.com/v3/models"
exts."omni.asset_provider.digitalbotlab".authorizeUrl = "https://sketchfab.com/oauth2/authorize/"
exts."omni.asset_provider.digitalbotlab".accessTokenUrl = "https://sketchfab.com/oauth2/token/"
exts."omni.asset_provider.digitalbotlab".clientId = "eQcrihd32CeYmF9evsYEGXZr8vynHA82DW7SzJw2"
exts."omni.asset_provider.digitalbotlab".clientSecret = ""
[[test]]
dependencies = ["omni.services.client", "omni.client"]
| 1,389 | TOML | 38.714285 | 97 | 0.768179 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/config/extension.gen.toml |
[package]
archivePath = "http://d4i3qtqj3r0z5.cloudfront.net/omni.asset_provider.digitalbotlab-1.0.10.zip"
repository = "https://gitlab-master.nvidia.com/omniverse/kit-extensions/kit-browsers"
[package.publish]
date = 1669118155
kitVersion = "104.0+release.95826.4b36ab32.tc"
buildNumber = "101.1.0+master.1103.15b266ed.tc"
repoName = "kit-browsers"
[package.authors]
0 = "Yizhou Zhao <[email protected]>"
| 472 | TOML | 32.785712 | 100 | 0.677966 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/constants.py | SETTING_ROOT = "/exts/omni.asset_provider.digitalbotlab/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
| 105 | Python | 34.333322 | 57 | 0.761905 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/extension.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
import carb.settings
import omni.ui as ui
from omni.services.browser.asset import get_instance as get_asset_services
from .sketchfab import SketchFabAssetProvider
from .constants import SETTING_STORE_ENABLE
import asyncio
import aiohttp
class DigitalBotLabAssetProviderExtension(omni.ext.IExt):
""" Sketchfab Asset Provider extension.
"""
def on_startup(self, ext_id):
self._asset_provider = SketchFabAssetProvider()
self._asset_service = get_asset_services()
self._asset_service.register_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, True)
self._window = ui.Window("DBL Debug", width=300, height=300)
with self._window.frame:
with ui.VStack():
#ui.Label("Prim Path:", width = 100)
ui.Button("Debug", height = 20, clicked_fn = self.debug)
ui.Button("Debug", height = 20, clicked_fn = self.debug_token)
def on_shutdown(self):
self._asset_service.unregister_store(self._asset_provider)
carb.settings.get_settings().set(SETTING_STORE_ENABLE, False)
self._asset_provider = None
self._asset_service = None
def debug(self):
async def authenticate():
params = {"email": "[email protected]", "password": "97654321abc"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/auth/signin", json=params) as response:
self._auth_params = await response.json()
print("auth_params", self._auth_params)
self.token = self._auth_params["token"]
asyncio.ensure_future(authenticate())
def debug_token(self):
async def verify_token():
params = {"token": self.token, "asset": "test"}
async with aiohttp.ClientSession() as session:
async with session.post("http://localhost:8000/api/omniverse/download", json=params) as response:
response = await response.json()
print("response", response)
asyncio.ensure_future(verify_token()) | 2,660 | Python | 41.238095 | 113 | 0.644737 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/sketchfab.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""SketchFab asset store implementation."""
from typing import Dict, List, Optional, Union, Tuple, Callable
import carb
import carb.settings
import os
import asyncio
import omni.client
from omni.services.browser.asset import BaseAssetStore, AssetModel, SearchCriteria, ProviderModel
from .constants import SETTING_ROOT, SETTING_STORE_ENABLE
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data")
class SketchFabAssetProvider(BaseAssetStore):
"""
SketchFab asset provider implementation.
For documentation on the search API, see the online interactive API at:
https://docs.sketchfab.com/data-api/v3/index.html#!/search/get_v3_search_type_models
    .. note::
        SketchFab does not return search results if no search query has been provided. In other words, navigating
        through the pre-defined categories will not display any results from SketchFab, as no search terms have been
        submitted in that context.
"""
def __init__(self) -> None:
"""
Constructor.
Returns:
None
"""
print("DigitalBotLabAssetProvider.__init__")
settings = carb.settings.get_settings()
self._provider_id = settings.get_as_string(SETTING_ROOT + "providerId")
super().__init__(store_id=self._provider_id)
self._keep_page_size = settings.get_as_bool(SETTING_ROOT + "keepOriginalPageSize")
self._max_count_per_page = settings.get_as_int(SETTING_ROOT + "maxCountPerPage")
self._min_thumbnail_size = settings.get_as_int(SETTING_ROOT + "minThumbnailSize")
self._search_url = settings.get_as_string(SETTING_ROOT + "searchUrl")
self._models_url = settings.get_as_string(SETTING_ROOT + "modelsUrl")
self._authorize_url = settings.get_as_string(SETTING_ROOT + "authorizeUrl")
self._access_token_url = settings.get_as_string(SETTING_ROOT + "accessTokenUrl")
self._client_id = settings.get_as_string(SETTING_ROOT + "clientId")
self._client_secret = settings.get_as_string(SETTING_ROOT + "clientSecret")
self._auth_params = None
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(
name=self._store_id, icon=f"{DATA_PATH}/logo.png", enable_setting=SETTING_STORE_ENABLE
)
def authorized(self) -> bool:
if self._auth_params:
            return self._auth_params.get("access_token") is not None
return False
async def authenticate(self, username: str, password: str):
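        # OAuth2 "password" grant against the Sketchfab token endpoint; on
        # success the JSON response carries an "access_token" that _download
        # later sends as a Bearer token.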
params = {"grant_type": "password", "client_id": self._client_id, "username": username, "password": password}
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.post(self._access_token_url, params=params) as response:
self._auth_params = await response.json()
def get_access_token(self) -> str:
if self._auth_params:
return self._auth_params.get("access_token", None)
return None
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
assets: List[AssetModel] = []
if self._keep_page_size:
required_count = (
search_criteria.page.size
if search_criteria.page.size < self._max_count_per_page
else self._max_count_per_page
)
else:
required_count = search_criteria.page.size
params = {
"type": "models",
"q": "",
"downloadable": "true",
"cursor": (search_criteria.page.number - 1) * required_count,
"sort_by": "-likeCount"
}
if search_criteria.keywords:
params["q"] = " ".join(search_criteria.keywords)
if search_criteria.filter.categories:
category = self._pick_category(categories=search_criteria.filter.categories)
if category:
if params["q"] == "":
params["q"] = category.lower()
else:
params["q"] += f" {category.lower()}"
# if search_criteria.sort and len(search_criteria.sort) >= 2:
# sort_field, sort_order = search_criteria.sort
# # Add other properties if SketchFab supports more sorting options in the future.
# if sort_field in ["published_at"]:
# params["sort_by"] = sort_field
# if sort_order.lower() == "desc":
# params["sort_by"] = f"-likeCount"
# The SketchFab API limits the number of search results per page to at most 24
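        # Page through results with the API cursor until enough assets have
        # been collected or the cursor reports that no further pages exist.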
to_continue = True
while required_count > 0:
params["count"] = min(self._max_count_per_page, required_count)
(page_assets, to_continue) = await self._search_one_page(params)
if page_assets:
params["cursor"] += params["count"]
required_count -= params["count"]
assets.extend(page_assets)
if not to_continue:
break
else:
break
return (assets, to_continue)
async def _search_one_page(self, params: Dict) -> Tuple[List[AssetModel], bool]:
"""
Search one page. Max 24 assets.
Args:
params (Dict): Search parameters.
Returns:
List[AssetModel]: List of searched assets.
bool: True means more results to be searched. False means end of search.
"""
to_continue = True
items = []
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(self._search_url, params=params) as response:
results = await response.json()
cursors = results.get("cursors", {})
                # If there are no more results, stop paging
to_continue = cursors["next"] is not None
items = results.get("results", [])
assets: List[AssetModel] = []
for item in items:
item_categories = [x.get("name", "") for x in item.get("categories", [])]
item_tags = [x.get("name", "") for x in item.get("tags", [])]
item_thumbnails = [x for x in item.get("thumbnails", {}).get("images", [])]
item_thumbnail = self._pick_most_appropriate_thumbnail(item_thumbnails)
if item.get("isDownloadable"):
download_url = f"{self._models_url}/{item.get('uid')}/download"
else:
download_url = ""
if item_thumbnail is not None:
assets.append(
AssetModel(
identifier=item.get("uid"),
name=item.get("name"),
version="",
published_at=item.get("publishedAt"),
categories=item_categories,
tags=item_tags,
vendor=self._provider_id,
download_url=download_url,
product_url=item.get("viewerUrl", ""),
# price=0.0, # SketchFab does not display price for assets when using the search API.
thumbnail=item_thumbnail,
)
)
return (assets, to_continue)
def _sanitize_categories(self, categories: List[str]) -> List[str]:
"""
Sanitize the given list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories to sanitize.
Returns:
List[str]: Sanitized category names from the given list of categories.
"""
sanitized_categories: List[str] = []
for category in categories:
if category.startswith("/"):
category = category[1:]
category_keywords = category.split("/")
sanitized_categories.extend(category_keywords)
return sanitized_categories
def _pick_category(self, categories: List[str]) -> Optional[str]:
"""
Pick the most appropriate category from the list of ``SearchCriteria`` categories.
Args:
categories (List[str]): List of ``SearchCriteria`` categories from which to pick the most appropriate
category for a search.
Returns:
Optional[str]: The most appropriate category from the given list of ``SearchCriteria`` categories, or
``None`` if no category could be identified.
"""
sanitized_categories = self._sanitize_categories(categories=categories)
if sanitized_categories:
return sanitized_categories[-1]
return None
def _pick_most_appropriate_thumbnail(self, thumbnails: List[Dict[str, Union[str, int]]]) -> Optional[str]:
"""
        Pick the most appropriate thumbnail URL from the list of provided image metadata about the asset.
Args:
            thumbnails (List[Dict[str, Union[str, int]]]): List of image metadata about the asset.
Returns:
Optional[str]: The URL of the image thumbnail to use for the asset, or ``None`` if no suitable thumbnail was
found.
"""
high_res_thumbnails: List[Dict[str, Union[str, int]]] = []
low_res_thumbnails: List[Dict[str, Union[str, int]]] = []
# Sort the thumbnails in 2 buckets (whether higher resolution than desired, or lower than desired):
for thumbnail in thumbnails:
thumbnail_width: Optional[int] = thumbnail.get("width")
thumbnail_height: Optional[int] = thumbnail.get("height")
if thumbnail_width is not None and thumbnail_height is not None:
if thumbnail_width >= self._min_thumbnail_size and thumbnail_height >= self._min_thumbnail_size:
high_res_thumbnails.append(thumbnail)
else:
low_res_thumbnails.append(thumbnail)
# Pick the most appropriate thumbnail within the list of high-res candidates:
if high_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = high_res_thumbnails[0]
for thumbnail in high_res_thumbnails:
if thumbnail.get("width") < candidate_thumbnail.get("width") and thumbnail.get(
"height"
) < candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
# Pick the largest thumbnail within the list of low-res candidates:
if low_res_thumbnails:
candidate_thumbnail: Dict[str, Union[str, int]] = low_res_thumbnails[0]
for thumbnail in low_res_thumbnails:
if thumbnail.get("width") > candidate_thumbnail.get("width") and thumbnail.get(
"height"
) > candidate_thumbnail.get("height"):
candidate_thumbnail = thumbnail
return candidate_thumbnail.get("url")
return None
async def _download(self, asset: AssetModel, dest_url: str, on_progress_fn: Callable[[float], None] = None) -> Dict:
""" Downloads an asset from the asset store.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `download` function that will wrap this function in a timeout.
"""
ret_value = {"url": None}
if not (asset and asset.download_url):
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
return ret_value
import aiohttp
async with aiohttp.ClientSession() as session:
headers = {"Authorization": "Bearer %s" % self.get_access_token()}
async with session.get(asset.download_url, headers=headers) as response:
results = await response.json()
# Parse downloaded response; see https://sketchfab.com/developers/download-api/downloading-models
if "usdz" in results:
download_url = results["usdz"].get("url")
else:
ret_value["status"] = omni.client.Result.ERROR_NOT_FOUND
carb.log_error(f"[{asset.name}] Invalid download url: {asset.download_url}!")
carb.log_info(f"addtional result: {results}")
return ret_value
content = bytearray()
# Download content from the given url
downloaded = 0
async with session.get(download_url) as response:
size = int(response.headers.get("content-length", 0))
if size > 0:
async for chunk in response.content.iter_chunked(1024 * 512):
content.extend(chunk)
downloaded += len(chunk)
if on_progress_fn:
on_progress_fn(float(downloaded) / size)
else:
if on_progress_fn:
on_progress_fn(0)
content = await response.read()
if on_progress_fn:
on_progress_fn(1)
if response.ok:
# Write to destination
filename = os.path.basename(download_url.split("?")[0])
dest_url = f"{dest_url}/{filename}"
(result, list_entry) = await omni.client.stat_async(dest_url)
if result == omni.client.Result.OK:
                        # If the dest file already exists, append the asset identifier to make the filename unique
dest_url = dest_url[:-5] + "_" + str(asset.identifier) + ".usdz"
ret_value["status"] = await omni.client.write_file_async(dest_url, content)
ret_value["url"] = dest_url
else:
carb.log_error(f"[{asset.name}] access denied: {download_url}")
ret_value["status"] = omni.client.Result.ERROR_ACCESS_DENIED
return ret_value
def destroy(self):
self._auth_params = None
| 14,630 | Python | 41.906158 | 120 | 0.582707 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/omni/asset_provider/sketchfab/tests/test_sketchfab.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from typing import Any
import omni.client
import omni.kit.test
import aiohttp
from unittest.mock import patch
from typing import Dict
from omni.services.browser.asset import SearchCriteria, AssetModel
from ..sketchfab import SketchFabAssetProvider
class MockHeader:
def __init__(self):
pass
def get(self, attr: str, default: Any):
return default
class MockResponse:
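    """Minimal stand-in for an aiohttp response: supports async context
    management plus the json()/read() accessors used by the provider."""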
def __init__(self, json: Dict = {}, data: str = ""):
self._json = json
self._data = data
self.headers = MockHeader()
@property
def ok(self):
return True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
async def json(self) -> Dict:
return self._json
async def read(self) -> str:
return self._data
class SketchfabTestCase(omni.kit.test.AsyncTestCaseFailOnLogError):
VALID_USERNAME = "username"
VALID_PASSWORD = "password"
VALID_ACCESS_TOKEN = "access_token"
DOWNLOADED_CONTENT = "abc def"
def _mock_aiohttp_post_impl(self, url: str, params: Dict = None):
import carb.settings
from ..constants import SETTING_ROOT
settings = carb.settings.get_settings()
if url == settings.get_as_string(SETTING_ROOT + "accessTokenUrl"):
# Auth endpoint
if params["username"] == self.VALID_USERNAME and params["password"] == self.VALID_PASSWORD:
return MockResponse(json={"access_token": self.VALID_ACCESS_TOKEN})
else:
return MockResponse(json={"error": "invalid_grant", "error_description": "Invalid credentials given."})
return MockResponse(json={})
def _mock_aiohttp_get_impl(self, url: str, headers: Dict = None):
if headers is not None:
self.assertTrue(self.VALID_ACCESS_TOKEN in headers["Authorization"])
if url.endswith("download"):
return MockResponse(json={"usdz": {"url": url.split("?")[0]}})
else:
return MockResponse(data=self.DOWNLOADED_CONTENT)
async def _mock_write_file_impl(self, url: str, buffer):
return omni.client.Result.OK
    # NOTE: this test is disabled by default to avoid reaching out to Sketchfab continuously during our tests.
async def notest_search_no_criteria(self):
"""Test listing first page assets."""
store = SketchFabAssetProvider()
RESULTS_COUNT = 50
(result, *_) = await store.search(search_criteria=SearchCriteria(), search_timeout=60)
self.assertEqual(len(result), RESULTS_COUNT)
async def test_authentication_succeeds(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = self.VALID_PASSWORD
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
self.assertTrue(under_test.authorized())
async def test_authentication_fails(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = "invalid_password"
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
self.assertFalse(under_test.authorized())
async def test_download_succeeds(self):
"""Test listing first page assets."""
under_test = SketchFabAssetProvider()
username = self.VALID_USERNAME
password = self.VALID_PASSWORD
with patch.object(aiohttp.ClientSession, "post", side_effect=self._mock_aiohttp_post_impl):
await under_test.authenticate(username, password)
with patch.object(aiohttp.ClientSession, "get", side_effect=self._mock_aiohttp_get_impl):
with patch("omni.client.write_file_async", side_effect=self._mock_write_file_impl) as mock_write_file:
asset = AssetModel(
identifier="1c54053d-49dd-4e18-ba46-abbe49a905b0",
name="car-suv-1",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/suv"],
tags=["vehicle", "cars", "suv"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-suv-1.usdz?download",
product_url="https://acme.org/products/purchase/car-suv-1",
price=10.99,
thumbnail="https://images.com/thumbnails/256x256/car-suv-1.png",
)
dest_url = "C:/Users/user/Downloads"
results = await under_test.download(asset, dest_url)
expected_filename = os.path.basename(asset.download_url.split("?")[0])
expected_url = f"{dest_url}/{expected_filename}"
mock_write_file.assert_called_once_with(expected_url, self.DOWNLOADED_CONTENT)
self.assertEqual(results["status"], omni.client.Result.OK)
self.assertEqual(results["url"], expected_url)
| 5,692 | Python | 38.811189 | 119 | 0.636683 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.0.1] - 2023-09-13
### Changed
- Initialize extension
| 312 | Markdown | 30.299997 | 168 | 0.730769 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/README.md | # Insiderobo Asset Provider [omni.asset_provider.digitalbotlab] | 63 | Markdown | 62.999937 | 63 | 0.84127 |
DigitalBotLab/InsideroboConnector/Exts/~omni.asset_provider.digitalbotlab/docs/index.rst | Insiderobo Asset Provider
#########################

This extension implements the Digital Bot Lab asset provider for the Omniverse asset browser.

.. automodule:: omni.asset_provider.sketchfab
    :platform: windows-x86_64, linux-x86_64
    :members:
    :private-members:
    :special-members:
    :undoc-members:
    :show-inheritance:
    :imported-members:
    :exclude-members: contextmanager
| 304 | reStructuredText | 20.785713 | 60 | 0.661184 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/insiderobo/license/extension.py | import omni.ext
import omni.ui as ui
import omni.usd
from pxr import Sdf
import os
# import omni.kit.window.file
from .params import LICENSE2PATH
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[insiderobo.license] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class InsideroboLicenseExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[insiderobo.license] insiderobo license startup")
self._count = 0
self._window = ui.Window("Insiderobo License", width=300, height=300)
with self._window.frame:
with ui.VStack():
with ui.HStack(height = 20):
ui.Label("Prim Path:", width = 100)
self.prim_path_ui = ui.StringField()
with ui.HStack(height = 20):
ui.Label("License Name:", width = 100)
self.license_name_ui = ui.StringField()
self.license_name_ui.model.set_value("kinova")
with ui.HStack(height = 20):
ui.Label("License Path:", width = 100)
# self.license_path_ui = ui.StringField()
# self.license_name = self.license_name_ui.model.get_value_as_string()
# self.license_path_ui.model.set_value(LICENSE2PATH[self.license_name])
ui.Button("Add License to Prim", height = 20, clicked_fn=self.add_license)
    def add_license(self):
        print("adding license")
        stage = omni.usd.get_context().get_stage()
        prim_path = self.prim_path_ui.model.get_value_as_string()
        # If the prim path is empty, fall back to the default prim path
        if prim_path == "":
            prim_path = stage.GetDefaultPrim().GetPath().pathString
        self.license_name = self.license_name_ui.model.get_value_as_string()
        license_path = LICENSE2PATH[self.license_name]
        prim = stage.GetPrimAtPath(prim_path)
        # Load the license file into a string
        with open(license_path, "r") as license_file:
            license_text = license_file.read()
        print("license text: ", license_text)
        attribute_name = f"{self.license_name}_license"
        if not prim.HasAttribute(attribute_name):
            # Create a new string attribute on the prim holding the license text
            prim.CreateAttribute(attribute_name, Sdf.ValueTypeNames.String, False).Set(license_text)
        # Save the stage
        stage.Save()
def debug(self):
print("[insiderobo.license] insiderobo license debug: ")
def on_shutdown(self):
print("[insiderobo.license] insiderobo license shutdown")
| 3,364 | Python | 38.588235 | 119 | 0.615933 |
DigitalBotLab/InsideroboConnector/Exts/insiderobo.license/insiderobo/license/params.py | # parameters
import os
# Get the absolute path of the current script file
script_path = os.path.abspath(__file__)
# Get the directory of the script file
script_directory = os.path.dirname(script_path)
LICENSE2PATH = {
"kinova": os.path.join(script_directory, "licences", "KINOVA_LICENSE.txt"),
"ufactory": os.path.join(script_directory, "licences", "UFACTORY_LICENSE.txt"),
"digitalbotlab": os.path.join(script_directory, "licences", "DIGITALBOTLAB_EXTENDED_LICENSE.txt"),
"universal_robot": os.path.join(script_directory, "licences", "UR_LICENESE_INFO.txt"),
"franka": os.path.join(script_directory, "licences", "FRANKA_LICENSE.txt"),
} | 660 | Python | 40.312497 | 102 | 0.719697 |
DigitalBotLab/3DRotationCalculator/Extension/exts/rotaiton.calculator/rotaiton/calculator/extension.py | import omni.ext
import omni.ui as ui
from .ui.custom_multifield_widget import CustomMultifieldWidget
from pxr import Gf
import numpy as np
from .numpy_utils import euler_angles_to_quat
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class RotaitonCalculatorExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[rotaiton.calculator] rotaiton calculator startup")
self._count = 0
self._window = ui.Window("3D Rotation Calculator", width=300, height=100, visible=True)
with self._window.frame:
with ui.VStack():
with ui.CollapsableFrame("Quaternion mul", collapsed=False, height = 0):
with ui.VStack():
with ui.HStack(height = 20):
self.q1_widget = CustomMultifieldWidget(
label="Quaternion 1:",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
)
with ui.HStack(height = 20):
self.q2_widget = CustomMultifieldWidget(
label="Quaternion 2:",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
)
ui.Line(height = 5)
with ui.HStack(height = 20):
self.q_widget = CustomMultifieldWidget(
label="Result:",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
read_only= True
)
self.q_str_widget = ui.StringField(width = 200, height = 20)
ui.Line(height = 5)
with ui.HStack():
ui.Button("Quaternion mul", height = 20, clicked_fn=self.quaternioin_mul)
with ui.CollapsableFrame("Euler to Quaternion", collapsed=False, height = 0):
with ui.VStack():
with ui.HStack(height = 20):
self.euler_widget = CustomMultifieldWidget(
label="Euler angles (degree):",
sublabels=[ "roll", "pitch", "yaw"],
default_vals=[0, 0, 0],
)
ui.Line(height = 5)
with ui.HStack(height = 20):
self.quat_widget = CustomMultifieldWidget(
label="Quaternion:",
sublabels=[ "w", "x", "y", "z"],
default_vals=[1, 0, 0, 0],
read_only= True
)
ui.Button("Euler to Quat", height = 20, clicked_fn=self.euler2quat)
def on_shutdown(self):
print("[rotaiton.calculator] rotaiton calculator shutdown")
    def quaternion_mul(self):
        print("quaternion_mul")
q1 = [self.q1_widget.multifields[i].model.as_float for i in range(4)] # wxyz
q1 = Gf.Quatf(q1[0], q1[1], q1[2], q1[3])
q2 = [self.q2_widget.multifields[i].model.as_float for i in range(4)] # wxyz
q2 = Gf.Quatf(q2[0], q2[1], q2[2], q2[3])
q = q1 * q2
self.q_widget.update([q.GetReal(), *q.GetImaginary()])
self.q_str_widget.model.set_value(str(q))
def euler2quat(self):
print("euler2quat")
euler = [self.euler_widget.multifields[i].model.as_float for i in range(3)] # roll pitch yaw
q = euler_angles_to_quat(euler, degrees=True).tolist()
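        # e.g. (roll, pitch, yaw) = (0, 0, 90) yields q ~ (0.7071, 0, 0, 0.7071) in (w, x, y, z)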
self.quat_widget.update(q) | 4,260 | Python | 49.129411 | 119 | 0.482629 |
DigitalBotLab/3DRotationCalculator/Extension/exts/rotaiton.calculator/rotaiton/calculator/ui/controller.py | # controller
import carb
class UIController():
w = False
s = False
a = False
d = False
q = False
e = False
up = False
down = False
left = False
right = False
# Controller.scale = 0.1
left_control = False
def __init__(self) -> None:
self.user_control = 0.25
self.network_control = 0.25
UIController.reset_movement()
@classmethod
def reset_movement(cls):
UIController.w = False
UIController.s = False
UIController.a = False
UIController.d = False
UIController.q = False
UIController.e = False
UIController.up = False
UIController.down = False
UIController.left = False
UIController.right = False
# Controller.left_control = False
    # Map carb keyboard inputs to the UIController flags they toggle
    KEY_MAP = {
        carb.input.KeyboardInput.W: "w",
        carb.input.KeyboardInput.S: "s",
        carb.input.KeyboardInput.A: "a",
        carb.input.KeyboardInput.D: "d",
        carb.input.KeyboardInput.Q: "q",
        carb.input.KeyboardInput.E: "e",
        carb.input.KeyboardInput.UP: "up",
        carb.input.KeyboardInput.DOWN: "down",
        carb.input.KeyboardInput.LEFT: "left",
        carb.input.KeyboardInput.RIGHT: "right",
        carb.input.KeyboardInput.LEFT_CONTROL: "left_control",
    }

    def handle_keyboard_event(self, event):
        if (
            event.type == carb.input.KeyboardEventType.KEY_PRESS
            or event.type == carb.input.KeyboardEventType.KEY_REPEAT
        ):
            attr = UIController.KEY_MAP.get(event.input)
            if attr is not None:
                setattr(UIController, attr, True)
        if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            attr = UIController.KEY_MAP.get(event.input)
            if attr is not None:
                setattr(UIController, attr, False)
def PoolUserControl(self):
return self.user_control
def PoolNetworkControl(self):
return 0.1 if UIController.w else 0.25
def QueryMove(self):
move = [0, 0, 0]
if UIController.w:
move[0] += 1
if UIController.s:
move[0] -= 1
if UIController.a:
move[1] += 1
if UIController.d:
move[1] -= 1
if UIController.q:
move[2] -= 1
if UIController.e:
move[2] += 1
return move
def QueryRotation(self):
rotation = [0, 0]
if UIController.up:
rotation[0] += 1
if UIController.down:
rotation[0] -= 1
if UIController.left:
rotation[1] += 1
if UIController.right:
rotation[1] -= 1
return rotation
def QueryGripper(self):
if not UIController.left_control:
return 1 # open
else:
return -1 # close | 4,435 | Python | 30.239436 | 68 | 0.544081 |
NVIDIA-AI-IOT/isaac_camera_benchmark/camera_benchmark.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import carb
from omni.isaac.kit import SimulationApp
import sys
import csv
from datetime import datetime
import json
import argparse
parser = argparse.ArgumentParser(description="Isaac Sim camera benchmark")
parser.add_argument('--config_path',
                    default='config.json',
                    help='Path to the JSON configuration file.')
args, unknown = parser.parse_known_args()
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
DEFAULT_CONFIG = {
'record': False,
'simulation': {"renderer": "RayTracedLighting", "headless": False},
'camera': [
{'translate': [-1, 5, 1], 'resolution': [640, 480]},
{'translate': [-1, 1, 6], 'resolution': [640, 480]},
{'translate': [-1, 7, 3], 'resolution': [640, 480]},
# {'translate': [1, 2, 3], 'resolution': [640, 480]},
]
}
def read_config(filename):
try:
with open(filename, 'r') as file:
config_data = json.load(file)
except FileNotFoundError:
print(
f"Config file '{filename}' not found. Using default configuration.")
return DEFAULT_CONFIG
# Update default config with values from the file
config = DEFAULT_CONFIG.copy()
config.update(config_data)
return config
# Load config file
config = read_config(args.config_path)
simulation_app = SimulationApp(config['simulation'])
import omni
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import stage, extensions, nucleus
from pxr import Gf, UsdGeom, Usd
from omni.kit.viewport.utility import get_active_viewport
import omni.graph.core as og
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
import threading
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import Image
def create_camera(translate=[-1, 5, 1], resolution=[640, 480], number_camera=0):
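    # Builds a USD camera prim at /Camera<N> plus an on-demand OmniGraph push
    # graph that publishes rgb, depth and camera_info on the ROS2 topics
    # /Camera<N>/rgb, /Camera<N>/depth and /Camera<N>/camera_info.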
camera_stage_path = "/Camera" + f"{number_camera}"
ros_camera_graph_path = ROS_CAMERA_GRAPH_PATH + f"{number_camera}"
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(camera_stage_path, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(translate[0], translate[1], translate[2]))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
viewport_name = f"Viewport{number_camera}" if number_camera != 0 else ""
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ros_camera_graph_path,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setViewportResolution", "omni.isaac.core_nodes.IsaacSetViewportResolution"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:execOut", "setViewportResolution.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("createViewport.outputs:viewport", "setViewportResolution.inputs:viewport"),
("setViewportResolution.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:name", viewport_name),
("createViewport.inputs:viewportId", number_camera),
("setViewportResolution.inputs:width", resolution[0]),
("setViewportResolution.inputs:height", resolution[1]),
("setCamera.inputs:cameraPrim", f"{camera_stage_path}"),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", f"/Camera{number_camera}/rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", f"/Camera{number_camera}/camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", f"/Camera{number_camera}/depth"),
("cameraHelperDepth.inputs:type", "depth"),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
return xform_api
class BenchmarkCamera(Node):
def __init__(self, config):
super().__init__("benchmark_camera_node")
# Run ROS2 node in a separate thread
executor = rclpy.executors.MultiThreadedExecutor()
executor.add_node(self)
executor_thread = threading.Thread(target=executor.spin, daemon=True)
executor_thread.start()
        # Check if recording is enabled
current_date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
self.file_path = f'benchmark_camera_{current_date}.csv' if 'record' in config and config['record'] else ''
# Init variables
self.last_printed_tn = 0
self.msg_t0 = -1
self.msg_tn = 0
        self.window_size = 10000  # max number of message intervals kept for the rate estimate
self.times = []
self.fps = 0
# Get camera list from config file
self.xform_api_camera = []
self.simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
        # Load the warehouse environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
        if 'camera' not in config or len(config['camera']) == 0:
            carb.log_error("There are no cameras in the list, please add at least one in the config file")
simulation_app.close()
sys.exit()
for idx, camera in enumerate(config['camera']):
self.xform_api_camera += [create_camera(translate=camera['translate'], resolution=camera['resolution'], number_camera=idx)]
        self.subscription = self.create_subscription(
            Image,
            '/Camera0/rgb',
            self.callback_hz,
            qos_profile_sensor_data,
        )
def callback_hz(self, msg):
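        # Works like `ros2 topic hz`: store message inter-arrival times (in
        # nanoseconds) in a sliding window; plot_benchmark derives the average
        # rate from their mean.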
curr_rostime = self.get_clock().now()
if curr_rostime.nanoseconds == 0:
if len(self.times) > 0:
print('time has reset, resetting counters')
self.times = []
return
curr = curr_rostime.nanoseconds
msg_t0 = self.msg_t0
if msg_t0 < 0 or msg_t0 > curr:
self.msg_t0 = curr
self.msg_tn = curr
self.times = []
else:
self.times.append(curr - self.msg_tn)
self.msg_tn = curr
if len(self.times) > self.window_size:
self.times.pop(0)
def plot_benchmark(self, fps):
if not self.times:
return
elif self.last_printed_tn == 0:
self.last_printed_tn = self.msg_tn
return
elif self.msg_tn < self.last_printed_tn + 1e9:
return
        # Update the rate estimate at most once per second
n = len(self.times)
mean = sum(self.times) / n
rate = 1. / mean if mean > 0. else 0
self.last_printed_tn = self.msg_tn
# Print benchmark
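        # `rate` is in messages per nanosecond; scale by 1e9 to report Hz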
rate_print = rate * 1e9
        self.get_logger().info(
            f"ROS avg: {rate_print:.3f} Hz - Isaac SIM FPS: {fps:.2f}")
# Print benchmark to csv file
if self.file_path:
self.csv_writer.writerow([rate_print, fps])
def run_simulation(self):
# Need to initialize physics getting any articulation..etc
self.simulation_context.initialize_physics()
self.simulation_context.play()
frame = 0
# Dock all viewports
        n_viewports = len(self.xform_api_camera)
        if n_viewports > 1:
            viewport = omni.ui.Workspace.get_window('Viewport')
            for idx in reversed(range(1, n_viewports)):
viewport_idx = omni.ui.Workspace.get_window(f"Viewport{idx}")
viewport_idx.dock_in(viewport, omni.ui.DockPosition.RIGHT)
# Open csv file
if self.file_path:
csv_file = open(self.file_path, 'w', newline='')
# Create a CSV writer object
self.csv_writer = csv.writer(csv_file)
self.get_logger().info(f'Recording benchmark to {self.file_path}')
# Run simulation
while simulation_app.is_running():
# Run with a fixed step size
self.simulation_context.step(render=True)
# rclpy.spin_once(self, timeout_sec=0.0)
if self.simulation_context.is_playing():
if self.simulation_context.current_time_step_index == 0:
self.simulation_context.reset()
# Get viewport fps and plot benchmark
viewport_api = get_active_viewport()
self.plot_benchmark(viewport_api.fps)
                # Rotate every camera by 0.25 degrees per frame
for xform_api in self.xform_api_camera:
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
frame = frame + 1
# Cleanup
if self.file_path:
csv_file.close()
self.simulation_context.stop()
simulation_app.close()
if __name__ == "__main__":
rclpy.init()
# Start simulation
subscriber = BenchmarkCamera(config)
subscriber.run_simulation()
# Cleanup
rclpy.shutdown()
# EOF
| 13,144 | Python | 41.540453 | 135 | 0.629793 |
NVIDIA-AI-IOT/isaac_camera_benchmark/README.md | # isaac_camera_benchmark
This tool runs a simple test to check the performance of your desktop with Isaac SIM.

You can run multiple tests and measure:
* Camera performance, from a single camera up to many
* Different camera resolutions
* ROS 2 publishing
This tool prints the average ROS topic frequency and the Isaac SIM FPS to your terminal.
It spawns a set of cameras in your Isaac SIM environment and starts rotating every camera autonomously.
## Hardware required
Workstation:
1. x86/64 machine
2. Install Ubuntu 20.04 or Ubuntu 22.04
3. NVIDIA Graphic card with RTX
4. Display
5. Keyboard and Mouse
### Run demo
Clone this repository and move into the repository folder:
```console
git clone https://github.com/NVIDIA-AI-IOT/isaac_camera_benchmark.git
cd isaac_camera_benchmark
```
Run the installer
```console
./run_camera_benchmark.sh
```
#### NVIDIA Isaac SIM
Follow the documentation on NVIDIA Isaac SIM [Workstation install](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html)
1. Download the [Omniverse Launcher](https://www.nvidia.com/en-us/omniverse/)
2. [Install Omniverse Launcher](https://docs.omniverse.nvidia.com/prod_launcher/prod_launcher/installing_launcher.html)
3. Install [Cache](https://docs.omniverse.nvidia.com/prod_nucleus/prod_utilities/cache/installation/workstation.html) from the Omniverse Launcher
4. Install [Nucleus](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/workstation/installation.html) from the Omniverse Launcher
Open Omniverse Launcher

Go to Library, choose "Omniverse Isaac SIM", and download the latest 2023.1 version

## Setup Isaac Camera Benchmark
To add a new camera or change the benchmark simulation, you can simply create a new file `config.json` that overrides the default configuration.
### Add a new camera
Add a field called `camera` and, for each camera, set these fields:
* `translate` - the camera position inside the environment
* `resolution` - the camera resolution; suggested values:
* 640 x 480
* 1024 x 768
* **FHD** - 1920 x 1080
* **2K** - 2560 x 1440
* **4K** - 3840 x 2160
For example, a config file can be composed this way:
```json
{
"camera": [
{"translate": [-1, 5, 1], "resolution": [640, 480]},
{"translate": [0, 0, 0], "resolution": [1024, 768]},
]
}
```
### Change simulation type
In the `simulation` field you can change the simulation configuration, for example make it headless as in the example below:
```json
{
"simulation": {"renderer": "RayTracedLighting", "headless": true}
}
```
### Export benchmark to csv file
If you want to export the output to a csv file, you can enable the option that automatically generates a file named `benchmark_camera_<CURRENT DATE>.csv`:
```json
{
"record": true
}
```
## Record camera output
If you want to record all cameras to a ROS 2 bag file, simply run:
```console
./ros2record.sh
```
This exports all cameras into a **rosbag2** folder; the output will look like the picture below.

All ROS 2 bag files will be available in the folder `isaac_camera_benchmark/rosbag`.
| 3,365 | Markdown | 27.525423 | 155 | 0.743239 |