terrylincn/omniskeleton/README.md

# omniskeleton

This project is no longer maintained; use terrylincn/cn.appincloud.skeleton instead.
terrylincn/omniskeleton/cn/appincloud/skeleton/hand_tracker.py

import cv2
import mediapipe as mp
import sys

def hand_process(cam_id=0):
    mp_drawing = mp.solutions.drawing_utils
    mp_drawing_styles = mp.solutions.drawing_styles
    mp_hands = mp.solutions.hands
    # For webcam input:
    cap = cv2.VideoCapture(cam_id)
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                continue
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            results = hands.process(image)
            # Draw the hand annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    print(hand_landmarks)
                    mp_drawing.draw_landmarks(
                        image,
                        hand_landmarks,
                        mp_hands.HAND_CONNECTIONS,
                        mp_drawing_styles.get_default_hand_landmarks_style(),
                        mp_drawing_styles.get_default_hand_connections_style())
            # Flip the image horizontally for a selfie-view display.
            cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
            if cv2.waitKey(5) & 0xFF == 27:
                break
    cap.release()

def landmark2text(landmark):
    """Serialize landmarks as one line of space-separated "x y z" triples."""
    txt = ""
    for mark in landmark:
        one = "{} {} {}".format(mark.x, mark.y, mark.z)
        if txt != "":
            txt += " "
        txt += one
    return txt + "\n"
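
# Illustration of the output format (hypothetical values): for two landmarks
# (0.1, 0.2, 0.3) and (0.4, 0.5, 0.6), landmark2text returns the single line
# "0.1 0.2 0.3 0.4 0.5 0.6\n", so handpose2kpts below emits one line of
# x y z triples per video frame in which the tracked hand is found.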

def handpose2kpts(videofile):
    txt = ""
    mp_hands = mp.solutions.hands
    # For video file input:
    cap = cv2.VideoCapture(videofile)
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                # End of the video file.
                break
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            results = hands.process(image)
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_hand_landmarks:
                handedness = [
                    handedness.classification[0].label
                    for handedness in results.multi_handedness
                ]
                if "Left" not in handedness:
                    continue
                index = 0
                for hand_landmarks in results.multi_hand_landmarks:
                    # Only output the right hand. MediaPipe assigns handedness
                    # assuming a mirrored (selfie) input, so the "Left" label
                    # here corresponds to the physical right hand.
                    if handedness.index("Left") == index:
                        txt += landmark2text(hand_landmarks.landmark)
                    index += 1
    cap.release()
    return txt
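
# Reading the output back (a sketch with an assumed file name; MediaPipe Hands
# yields 21 landmarks per hand, so each line holds 21 * 3 = 63 floats):
#   with open("keypoints.txt") as fp:
#       frames = [[float(v) for v in line.split()] for line in fp]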

class HandTracker:
    def __init__(self):
        mp_hands = mp.solutions.hands
        self._hands = mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5)

    def process(self, image):
        # Incomplete: left_hand/right_hand are never populated from the
        # results; this class currently only prints the detected landmarks.
        left_hand = None
        right_hand = None
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = self._hands.process(image)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                print(hand_landmarks)
        return left_hand, right_hand

if __name__ == "__main__":
    # Usage: python hand_tracker.py <input_video> <output_keypoints_txt>
    #cam_id = int(sys.argv[1])
    #hand_process(cam_id)
    filepath = sys.argv[1]
    outfile = sys.argv[2]
    txt = handpose2kpts(filepath)
    with open(outfile, "w") as fp:
        fp.write(txt)

terrylincn/omniskeleton/cn/appincloud/skeleton/extension.py

import omni.ext
import omni.ui as ui
from .skeletonutils import addRootToUsd, copyAnim, copyAnimToUsd, copyRotation, copySkel
import uuid
import carb
import os
from pxr import Usd, UsdSkel, AnimationSchema
from omni.kit.window.popup_dialog import MessageDialog

# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[cn.appincloud.skeleton] MyExtension startup")
        self._window = ui.Window("Avatar convert", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                ui.Label("add root joint to skeleton")

                def on_add_joint_click():
                    print("add clicked!")
                    self._on_assign_selected()

                def _copy_skel_click():
                    print("copy skel clicked!")
                    self._copy_skel()

                def _copy_anim_click():
                    print("copy anim clicked!")
                    self._copy_anim()

                def _copy_rot_click():
                    print("copy rot clicked!")
                    self._copy_rot()

                ui.Button("Add Root Joint", clicked_fn=lambda: on_add_joint_click())
                ui.Button("Copy Skeleton", clicked_fn=lambda: _copy_skel_click())
                ui.Button("Copy Animation", clicked_fn=lambda: _copy_anim_click())
                ui.Button("Copy Rotation", clicked_fn=lambda: _copy_rot_click())

    def on_shutdown(self):
        print("[cn.appincloud.skeleton] MyExtension shutdown")

    def _on_assign_selected(self):
        # find the currently selected joint
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        selection = usd_context.get_selection()
        selected_prims = selection.get_selected_prim_paths()
        skeleton = None
        if len(selected_prims) > 0:
            prim = stage.GetPrimAtPath(selected_prims[0])
            print(prim)
            if AnimationSchema.SkelJoint(prim):
                skeleton, joint_token = AnimationSchema.SkelJoint(prim).GetJoint()
            elif UsdSkel.Skeleton(prim):
                print("skeleton")
                #skeleton = UsdSkel.Skeleton(prim)
                #addJoint(skeleton)
                #print(skeleton.GetRestTransformsAttr().Get())
            elif UsdSkel.Root(prim):
                print("skeleton root", selected_prims[0])
                file_url = usd_context.get_stage_url()
                prim_url = omni.usd.get_url_from_prim(prim)
                print(file_url, prim_url)
                if prim_url is not None and file_url != prim_url:
                    usd_context.open_stage(prim_url)
                    stage = usd_context.get_stage()
                    prim_path = "/World/Hips0"
                    prim = stage.GetPrimAtPath(prim_path)
                else:
                    prim_url = file_url
                if prim_url is None or prim_url.startswith("omniverse:"):
                    tmp_dir = carb.tokens.get_tokens_interface().resolve("${shared_documents}/capture/temp")
                    tmp_fname = "stage_test_" + str(uuid.uuid4()) + ".usda"
                    tmp_fpath = os.path.normpath(os.path.abspath(os.path.join(tmp_dir, tmp_fname)))
                else:
                    tmp_fpath = prim_url.replace(".usd", ".usda")
                root = UsdSkel.Root(prim)
                prims = prim.GetChildren()
                for subprim in prims:
                    if UsdSkel.Skeleton(subprim):
                        skel = UsdSkel.Skeleton(subprim)
                        addRootToUsd(skel, tmp_fpath)
                        #stage = Usd.Stage.Open(tmp_fpath)
                        print("loading...")
                        stage = usd_context.open_stage(tmp_fpath)
                        usd_context.attach_stage_with_callback(stage)
                        break
        else:
            dialog = MessageDialog(title="no prim selected", message="please select a prim", disable_okay_button=True, disable_cancel_button=False)
            dialog.show()

    def _copy_skel(self):
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        selection = usd_context.get_selection()
        selected_prims = selection.get_selected_prim_paths()
        skeleton = None
        if len(selected_prims) > 1:
            prim1 = stage.GetPrimAtPath(selected_prims[0])
            prim2 = stage.GetPrimAtPath(selected_prims[1])
            print(prim1, prim2)
            copySkel(prim1, prim2)

    def _copy_anim(self):
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        selection = usd_context.get_selection()
        selected_prims = selection.get_selected_prim_paths()
        if len(selected_prims) > 1:
            _source_skeleton = UsdSkel.Skeleton(stage.GetPrimAtPath(selected_prims[0]))
            _target_skeleton = UsdSkel.Skeleton(stage.GetPrimAtPath(selected_prims[1]))
            copyAnim(_source_skeleton, _target_skeleton, "/World/testanim")
        else:
            return

    def _copy_rot(self):
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        selection = usd_context.get_selection()
        selected_prims = selection.get_selected_prim_paths()
        if len(selected_prims) > 0:
            _source_skeleton = UsdSkel.Skeleton(stage.GetPrimAtPath(selected_prims[0]))
            copyRotation(_source_skeleton, "/World/testanim")
        else:
            return

terrylincn/omniskeleton/cn/appincloud/skeleton/skeletonutils.py

import omni
from pxr import Usd, UsdSkel, Vt, Gf
import numpy as np
import copy
from typing import List
from omni.anim.retarget.core.scripts.utils import (
    convert_matrix_to_trans_rots,
    convert_trans_rots_to_pxr
)
import carb
import os
import uuid

# 4x4 identity transform, used as the rest/bind transform of the inserted root joint.
root_rest_translations = ((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1))

def addJoint(skel):
    def translation2transform(vec):
        t = np.eye(4)
        t[:-1, -1] = vec
        return t.T

    skel_cache = UsdSkel.Cache()
    skel_query = skel_cache.GetSkelQuery(skel)
    joint_tokens = skel.GetJointsAttr().Get()  #skel_query.GetJointOrder()
    root_t = copy.deepcopy(root_rest_translations)
    # Prepend the identity root transform to the existing rest/bind transforms.
    rest_translations = np.concatenate(([root_t], np.asarray(skel.GetRestTransformsAttr().Get())))
    bind_translations = np.concatenate(([root_t], np.asarray(skel.GetBindTransformsAttr().Get())))
    rest_transforms = Vt.Matrix4dArray.FromNumpy(
        rest_translations
        #np.array([translation2transform(x) for x in rest_translations])
    )
    bind_transforms = Vt.Matrix4dArray.FromNumpy(
        bind_translations
        #np.array([translation2transform(x) for x in bind_translations])
    )
    joint_tokens = ["root"] + ["root/" + token for token in joint_tokens]
    skel_cache.Clear()
    skel.GetRestTransformsAttr().Set(rest_transforms)
    skel.GetBindTransformsAttr().Set(bind_transforms)
    skel.GetJointsAttr().Set(joint_tokens)
    """
    anim = UsdSkel.Animation.Define(stage, root_path + "/Skeleton/Anim")
    anim.GetJointsAttr().Set(joint_tokens)
    binding = UsdSkel.BindingAPI.Apply(skel.GetPrim())
    binding.CreateAnimationSourceRel().SetTargets([anim.GetPrim().GetPath()])
    binding = UsdSkel.BindingAPI.Apply(skel_root.GetPrim())
    binding.CreateSkeletonRel().SetTargets([skel.GetPrim().GetPath()])
    """

def save_as_usda(fpath):
    omni.usd.get_context().save_as_stage(fpath)

def add_root(content, skelroot="Hips0", joint="Hips"):
    # Textual patch of an exported .usda file: renames the skel root, reparents
    # every joint path under a new "Root" joint, and prepends an identity matrix
    # to the rest/bind transform arrays so the joint count stays consistent.
    content = content.replace(skelroot, "skelroot")
    content = content.replace(joint, "Root/" + joint)
    content = content.replace("uniform matrix4d[] restTransforms = [", "uniform matrix4d[] restTransforms = [((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)),")
    content = content.replace("uniform matrix4d[] bindTransforms = [", "uniform matrix4d[] bindTransforms = [((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)),")
    content = content.replace('uniform token[] joints = [', 'uniform token[] joints = ["Root",')
    return content
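
# Illustration on a hypothetical USDA fragment (values invented): with the
# default arguments, add_root turns
#     uniform token[] joints = ["Hips", "Hips/Spine"]
# into
#     uniform token[] joints = ["Root", "Root/Hips", "Root/Hips/Spine"]
# and prepends the 4x4 identity to restTransforms/bindTransforms, so the new
# "Root" joint becomes the parent of the original hierarchy. Being plain string
# replacement, it assumes the joint name does not occur elsewhere in the file.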

def load_usda(fpath):
    with open(fpath, 'r') as fp:
        content = fp.read()
    return content

def save_usda(fpath, content):
    with open(fpath, 'w') as fp:
        fp.write(content)

def addRootToUsd(skel, fpath):
    save_as_usda(fpath)
    content = load_usda(fpath)
    content = add_root(content)
    save_usda(fpath, content)

def copySkel(source_prim, target_prim):
    #interpolation = "vertex"
    elementSize = 4
    source_binding = UsdSkel.BindingAPI.Apply(source_prim)
    target_binding = UsdSkel.BindingAPI.Apply(target_prim)
    joints = source_binding.GetJointsAttr()
    target_binding.CreateJointsAttr().Set(joints.Get())
    jointIndices = source_binding.GetJointIndicesAttr()
    target_binding.CreateJointIndicesAttr().Set(jointIndices.Get())
    target_binding.CreateJointIndicesPrimvar(constant=False, elementSize=elementSize)
    jointWeights = source_binding.GetJointWeightsAttr()
    target_binding.CreateJointWeightsAttr().Set(jointWeights.Get())
    target_binding.CreateJointWeightsPrimvar(constant=False, elementSize=elementSize)
    geomBind = source_binding.GetGeomBindTransformAttr()
    target_binding.CreateGeomBindTransformAttr().Set(geomBind.Get())
    skelRel = source_binding.GetSkeletonRel().GetTargets()
    target_binding.CreateSkeletonRel().SetTargets(skelRel)

def convert_to_trans_rots(translations1, rotations1):
    # Note on ordering (as used below): carb.Float4 stores the quaternion as
    # (x, y, z, w), i.e. imaginary parts first, while Gf quaternions expose
    # `real` and `imaginary` separately.
    translations: List[carb.Float3] = []
    rotations: List[carb.Float4] = []
    for trans in translations1:
        translations.append(carb.Float3(trans[0], trans[1], trans[2]))
    for quat in rotations1:
        rotations.append(carb.Float4(quat.imaginary[0], quat.imaginary[1], quat.imaginary[2], quat.real))
    return translations, rotations

def copyAnim(source_skeleton, target_skeleton, prim_path):
    retarget_controller = omni.anim.retarget.core.RetargetController(
        "",
        source_skeleton.GetPath().pathString,
        target_skeleton.GetPath().pathString
    )
    if (source_skeleton and target_skeleton):
        stage = omni.usd.get_context().get_stage()
        time_code = omni.timeline.get_timeline_interface().get_current_time() * stage.GetTimeCodesPerSecond()
        # copy source transform
        source_skel_cache = UsdSkel.Cache()
        source_skel_query = source_skel_cache.GetSkelQuery(source_skeleton)
        source_transforms = source_skel_query.ComputeJointLocalTransforms(time_code)
        source_translations, source_rotations = convert_matrix_to_trans_rots(source_transforms)
        joint_tokens = target_skeleton.GetJointsAttr().Get()
        #skel_cache = UsdSkel.Cache()
        #skel_query = skel_cache.GetSkelQuery(target_skeleton)
        target_translations, target_rotations = retarget_controller.retarget(source_translations, source_rotations)
        t1, t2, t3 = convert_trans_rots_to_pxr(target_translations, target_rotations)
        """ this only gets one
        source_binding = UsdSkel.BindingAPI.Apply(source_skeleton.GetPrim())
        source_prim = source_binding.GetAnimationSource()
        source_anim = UsdSkel.Animation.Get(stage, source_prim.GetPath())
        source_translations = source_anim.GetTranslationsAttr().Get()
        source_rotations = source_anim.GetRotationsAttr().Get()
        source_translations, source_rotations = convert_to_trans_rots(source_translations, source_rotations)
        """
        source_anim_query = source_skel_query.GetAnimQuery()
        #source_skel_anim = UsdSkel.Animation(source_anim_query.GetPrim())
        jtt = source_anim_query.GetJointTransformTimeSamples()
        prim = source_anim_query.GetPrim()
        # Note: the passed-in prim_path is overridden here.
        prim_path = prim.GetPath().pathString + "_target"
        carb.log_info("jtt:{}".format(jtt))
        target_anim = UsdSkel.Animation.Define(stage, prim_path)
        target_anim.CreateJointsAttr().Set(joint_tokens)
        transAttr = target_anim.CreateTranslationsAttr()
        rotAttr = target_anim.CreateRotationsAttr()
        scalAttr = target_anim.CreateScalesAttr()
        tt1 = {}
        tt2 = {}
        for jt in jtt:
            source_transforms = source_anim_query.ComputeJointLocalTransforms(Usd.TimeCode(jt))
            source_translations, source_rotations = convert_matrix_to_trans_rots(source_transforms)
            carb.log_info("time:{} source_translations:{} source_rotations:{}".format(jt, source_translations, source_rotations))
            # retarget
            target_translations: List[carb.Float3] = []
            target_rotations: List[carb.Float4] = []
            target_translations, target_rotations = retarget_controller.retarget(source_translations, source_rotations)
            tt1[jt] = target_translations
            tt2[jt] = target_rotations
            target_translations, target_rotations, target_scales = convert_trans_rots_to_pxr(target_translations, target_rotations)
            transAttr.Set(target_translations, Usd.TimeCode(jt))
            rotAttr.Set(target_rotations, Usd.TimeCode(jt))
            scalAttr.Set(target_scales, Usd.TimeCode(jt))
        """
        omni.usd.get_context().new_stage()
        new_stage = omni.usd.get_context().get_stage()
        target_anim = UsdSkel.Animation.Define(new_stage, "/World")
        target_anim.CreateJointsAttr().Set(joint_tokens)
        target_anim.CreateTranslationsAttr().Set(t1)
        target_anim.CreateRotationsAttr().Set(t2)
        target_anim.CreateScalesAttr().Set(t3)
        """
        return tt1, tt2

def add_timesamples(translations, rotations, content):
    content = content.replace('SkelAnimation "World"', 'SkelAnimation "World"(apiSchemas = ["AnimationSkelBindingAPI"])')
    txt1 = '\nfloat3[] translations.timeSamples = {'
    for key, translation in translations.items():
        txt1 += '{}:{},\n'.format(key, translation).replace("carb.Float3", "")
    txt1 += '}\n'
    txt2 = '\nquatf[] rotations.timeSamples = {'
    for key, rotation in rotations.items():
        txt2 += '{}:{},\n'.format(key, rotation).replace("carb.Float4", "")
    txt2 += '}\n'
    carb.log_info(txt1)
    carb.log_info(txt2)
    newcontent = content[:content.rfind("}")-1] + txt1 + txt2 + '}'
    return newcontent

def copyAnimToUsd(source_skeleton, target_skeleton, prim_path):
    tmp_dir = carb.tokens.get_tokens_interface().resolve("${shared_documents}/capture/temp")
    tmp_fname = "stage_test_" + str(uuid.uuid4()) + ".usda"
    fpath = os.path.normpath(os.path.abspath(os.path.join(tmp_dir, tmp_fname)))
    tt1, tt2 = copyAnim(source_skeleton, target_skeleton, prim_path)
    save_as_usda(fpath)
    content = load_usda(fpath)
    content = add_timesamples(tt1, tt2, content)
    save_usda(fpath, content)

def extract_transforms(source_transforms):
    source_translations = []
    source_rotations = []
    for source_transform in source_transforms:
        source_translations.append(source_transform.ExtractTranslation())
        source_rotations.append(source_transform.ExtractRotation())
    return source_translations, source_rotations

def copyRotation(source_skeleton, prim_path):
    if (source_skeleton):
        stage = omni.usd.get_context().get_stage()
        time_code = omni.timeline.get_timeline_interface().get_current_time() * stage.GetTimeCodesPerSecond()
        # copy source transform
        source_skel_cache = UsdSkel.Cache()
        source_skel_query = source_skel_cache.GetSkelQuery(source_skeleton)
        #source_transforms = source_skel_query.ComputeJointLocalTransforms(time_code)
        #source_translations, source_rotations = convert_matrix_to_trans_rots(source_transforms)
        # Use the rest transforms (the bind transforms were fetched here
        # originally, but were immediately overwritten).
        source_transforms = source_skeleton.GetRestTransformsAttr().Get()
        source_translations, source_rotations = convert_matrix_to_trans_rots(source_transforms)
        source_translations, source_rotations, source_scales = convert_trans_rots_to_pxr(source_translations, source_rotations)
        joint_tokens = source_skeleton.GetJointsAttr().Get()
        target_anim = UsdSkel.Animation.Define(stage, prim_path)
        target_anim.CreateJointsAttr().Set(joint_tokens)
        target_anim.CreateTranslationsAttr().Set(source_translations)
        target_anim.CreateRotationsAttr().Set(source_rotations)
        target_anim.CreateScalesAttr().Set(source_scales)

tangkangqi/omnivese-ai-kit-IAT-course/README.md

# omnivese-ai-kit-IAT-course

## Course Goals

This course introduces developers to building AI extension plugins with NVIDIA Omniverse Kit. By covering Kit's core concepts and APIs, environment configuration, microservices, calling AI applications, and plugin publishing, participants will be able to develop their own AI text-to-texture plugin.

## Course Length

Total: 2 hours

## Course Schedule

Part 1: Kit Fundamentals (30 minutes)
- Kit core components and architecture overview (10 minutes)
- Introduction to the Kit API (10 minutes)
- Introduction to Kit USD (10 minutes)

Part 2: Kit Development (45 minutes)
- Kit UI and interaction development (15 minutes)
  - Demonstrates how to build a user interface, including basic UI components and interaction logic.
- Introduction to extension.toml (10 minutes)
  - How to configure and manage a Kit extension via the extension.toml file.
- Using AI features (20 minutes)
  - How Kit installs Python pip packages (5 minutes)
    - Installing and managing Python dependencies in the Kit environment.
  - Kit microservice development (5 minutes)
    - How to develop and use Kit microservices to support complex AI features.
  - Creating and importing Kit plugins (10 minutes)
    - Demonstrates how to create a Kit plugin and import it into Omniverse.

Part 3: Case Study: Developing an AI Text-to-Texture Plugin (45 minutes)
- Introduction to the SDXL text-to-image code (10 minutes)
  - How to generate images with AI.
- Creating objects and applying textures in Kit (10 minutes)
  - Demonstrates how to create objects and apply textures with the Kit Python API.
- Plugin UI development and debugging (15 minutes)
- Publishing the text-to-image plugin (10 minutes)

Note that the course content and time allocation may be adjusted as needed. Participants will receive the related learning materials and code samples.

## Appendix: Detailed Course Environment Setup

To ensure the course runs smoothly, the detailed environment requirements are:

System environment:
- OS: Windows 10
- GPU driver: version 537.13
- CUDA version: 12.3

Omniverse environment:
- OVE installed
- Omniverse USD Composer version 2023.2.0

Development environment:
- Visual Studio Code (VS Code), latest version as of 2024-03-12

AI development environment:
- Python environment management:
  - Miniconda, latest version as of 2024-03-12
  - Create a Python 3.12 environment in Miniconda
- Deep learning libraries:
  - Install PyTorch 2.2.0 and related libraries; see the installation guide: https://pytorch.org/get-started/previous-versions/
  - pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cu121
- Text-to-image model download:
  - Download the LCM model from Huggingface: https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7/tree/main

Network environment:
- A network environment with access to Google is required, so that the AI code can reach GitHub, Huggingface, and the official pip index at high speed.

Notes:
- Before installing and configuring the environment, make sure your system meets all of the requirements above.
- When installing CUDA and the GPU driver, follow the official guides and check version compatibility.
- When configuring the Python environment, use the environment created with Miniconda at the specified version to avoid version conflicts.
- When downloading and using Huggingface models, comply with the relevant usage agreements and terms.
- Make sure the network is stable so that model downloads and library installs complete smoothly.
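
A quick way to verify the AI environment described above is the following sketch (an assumption-laden check, not part of the course materials; it assumes the Miniconda Python environment is active and PyTorch was installed with the command above):

```python
import torch

print(torch.__version__)            # expected: 2.2.0+cu121
print(torch.cuda.is_available())    # expected: True with the driver/CUDA setup above
print(torch.cuda.get_device_name(0))
```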

Original course document: https://vj0y7kxa0d.feishu.cn/docx/TKqKdBqHioxUFvx36j9cjZpOnFd

tangkangqi/omnivese-ai-kit-IAT-course/exts/iat.diffuser/iat/diffuser/extension.py

import omni.ext
import omni.kit.app
import omni.kit.commands
import omni.kit.notification_manager as notifier
import omni.kit.viewport.utility as vp_utils
import omni.kit.window.filepicker
import omni.ui as ui
import omni.usd
from pathlib import Path
from PIL import Image
from pxr import UsdGeom, UsdShade, UsdLux, Vt, Gf, Sdf, Usd, UsdUtils, Tf
import asyncio
import requests
import numpy as np
import shutil
import os
import random
import string
import logging

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))

logger = logging.getLogger(__name__)

# import omni.kit.pipapi
# omni.kit.pipapi.install("requests")

# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    print("[iat.diffuser] some_public_function was called with x: ", x)
    return x ** x

# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class IatDiffuserExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def __init__(self) -> None:
        super().__init__()
        self._name = 'IATDiffuser'
        self._path = omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
        self.outdir = Path(self._path) / '_output_images'
        self.outdir.mkdir(exist_ok=True, parents=True)
        self.cachedir = Path(self._path) / '_model_cache'
        self.cachedir.mkdir(exist_ok=True, parents=True)
        self.model_id = ''
        self.prev_imgpath = Path(self._path) / "data/dummy.png"
        self.imgpath = Path(self._path) / "data/dummy.png"
        self._image_shape = (512, 512)
        self.prev_texture_image = None
        self.texture_batch = []
        self.texture_image = self._load_image_from_file(self.imgpath)
        self.texture_mask = None
        self.diffuser_pipe = None
        self._seamless = True
        self._loaded_scene = False
        # self._loaded_scene = True
        self._logged_in = False
        self._image_provider = ui.ByteImageProvider()
        self.i2i_strength = 0.5
        self.batch_size = 4
        self.mini_image_providers = [ui.ByteImageProvider() for _ in range(self.batch_size)]
        self.imgpath_batch = ['' for _ in range(self.batch_size)]
        self._usd_context = omni.usd.get_context()
        self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_event
        )

    def on_startup(self, ext_id):
        # ext_id is the current extension id. It can be used with the extension manager to query additional information,
        # such as where this extension is located in the filesystem.
        print(f"{self} startup", flush=True)
        self._window = ui.Window(self._name, width=500, height=500)
        self.build_ui()

    def on_shutdown(self):
        print("[iat.diffuser] iat diffuser shutdown")

    def build_ui(self):
        with self._window.frame:
            with ui.VStack():
                with ui.Frame(height=50):
                    with ui.VStack():
                        self.text_box = ui.StringField(style={'font_size': 30})
                        self.text_box.model.set_value('an old tree bark')
                        with ui.HStack():
                            generate_button = ui.Button('Text to Image', height=40)
                            generate_button.set_clicked_fn(self.inference)
                            i2i_button = ui.Button('Image to Image', height=40)
                            i2i_button.set_clicked_fn(self.i2i_inference)
                            undo_button = ui.Button('Undo', height=40)
                            undo_button.set_clicked_fn(self._on_undo_click)
                        with ui.HStack():
                            ui.Label("diffusion service url:", style={'font_size': 15})
                            self.url_box = ui.StringField(style={'font_size': 20}, width=450)
                            self.url_box.model.set_value('http://127.0.0.1:8000')
                        ui.Spacer(height=5)
                        with ui.HStack():
                            ui.Label('Scale')
                            ui.Spacer(width=5)
                            ui.Label('Strength')
                            ui.Spacer(width=5)
                            ui.Label('Batch size')
                with ui.Frame(height=50):
                    with ui.VStack():
                        # ui.Spacer(height=10)
                        with ui.HStack():
                            model_button = ui.Button(f"Load models", height=40)
                            model_button.set_clicked_fn(self._load_model)
                            ui.Spacer(width=10)
                            image_button = ui.Button(f"Select image", height=40)
                            image_button.set_clicked_fn(self._on_select_image_click)
                with ui.Frame(height=450):
                    with ui.VStack():
                        image_provider = ui.ImageWithProvider(self._image_provider)  #, fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT)
                        self.set_image_provider(self.texture_image)

    ### STAGE & USD ###
    def _on_stage_event(self, evt):
        if evt.type == int(omni.usd.StageEventType.OPENED):
            print(f"{self} Stage opened")
            self._on_stage_opened()
        elif evt.type == int(omni.usd.StageEventType.ASSETS_LOADED):
            self._hide_stage_defaults()

    def _on_stage_opened(self):
        self._load_scene()

    def _load_scene(self):
        if self._loaded_scene:
            return
        self._usd_context = omni.usd.get_context()
        stage = self._usd_context.get_stage()
        preset_path = str(Path(self._path) / "data/scene.usd")
        root_layer = stage.GetRootLayer()
        root_layer.subLayerPaths = [preset_path]
        # HACK: move prims to /World (default prim) from /Environment to allow changes to visibility
        cube = stage.GetPrimAtPath("/Environment/Cube")
        if cube:
            omni.kit.commands.execute("MovePrimCommand", path_from="/Environment/Cube", path_to="/World/Cube")
            cube = stage.GetPrimAtPath("/World/Cube")
            cube.GetAttribute("visibility").Set("inherited")
        sphere = stage.GetPrimAtPath("/Environment/Sphere")
        if sphere:
            omni.kit.commands.execute("MovePrimCommand", path_from="/Environment/Sphere", path_to="/World/Sphere")
            sphere = stage.GetPrimAtPath("/World/Sphere")
            sphere.GetAttribute("visibility").Set("inherited")
        vp = vp_utils.get_active_viewport()
        vp.set_active_camera("/World/Camera")
        self._loaded_scene = True

    def _hide_stage_defaults(self):
        stage = omni.usd.get_context().get_stage()
        ground_plane = stage.GetPrimAtPath("/Environment/GroundPlane")
        if ground_plane:
            print(f"{self} hiding /Environment/GroundPlane")
            ground_plane.GetAttribute("visibility").Set("invisible")  # hide ground plane
        ground_plane = stage.GetPrimAtPath("/Environment/Plane")
        if ground_plane:
            print(f"{self} hiding /Environment/Plane")
            ground_plane.GetAttribute("visibility").Set("invisible")  # hide ground plane

    ## Update Materials
    def _update_material(self, material_path, params):
        stage = self._usd_context.get_stage()
        # ctx = omni.usd.get_context()
        # stage = ctx.get_stage()
        # selection = ctx.get_selection().get_selected_prim_paths()
        material = stage.GetPrimAtPath(material_path)
        logger.warning(f"{self} material: {material}")
        shader = UsdShade.Shader(omni.usd.get_shader_from_material(material.GetPrim(), True))
        logger.warning(f"{self} shader: {shader}")
        # For each parameter, write to material
        for param, value in params.items():
            logger.warning(f"{self} creating & getting input: {param}")
            shader.CreateInput(param, Sdf.ValueTypeNames.Asset)
            shader.GetInput(param).Set(value)

    ## Click select image
    def _on_select_image_click(self):
        """Show filepicker after load image is clicked"""
        self._filepicker = omni.kit.window.filepicker.FilePickerDialog(
            f"{self}/Select Image",
            click_apply_handler=lambda f, d: asyncio.ensure_future(self._on_image_selection(f, d)),
        )
        try:
            self._filepicker.navigate_to(os.path.expanduser("~/"))
        except Exception:
            print(f"could not find {os.path.expanduser('~')}")
        self._filepicker.refresh_current_directory()

    def _on_undo_click(self):
        print(self.imgpath)
        print(self.prev_imgpath)
        self.imgpath = self.prev_imgpath
        self.texture_image = self._load_image_from_file(self.imgpath)
        self.set_image_provider(self.texture_image)
        self._update_material('/Environment/Looks/OmniPBR', {"diffuse_texture": str(self.imgpath)})

    def _on_model_select_click(self):
        pass

    async def _on_image_selection(self, filename, dirname):
        """Load the selected image."""
        selections = self._filepicker.get_current_selections()
        if os.path.isfile(selections[0]):
            self.imgpath = selections[0]
        else:
            print('Select a valid image file.')
            return
        print(f"{self} Loading image from: {self.imgpath}")
        self.texture_image = self._load_image_from_file(self.imgpath)
        self.set_image_provider(self.texture_image)
        self._update_material('/Environment/Looks/OmniPBR', {"diffuse_texture": str(self.imgpath)})
        self._filepicker.hide()
        self._window.frame.rebuild()

    def _load_image_from_file(self, imgpath):
        img = Image.open(imgpath)
        # w, h = img.size
        # min_size = min(w, h)
        img = img.resize(self._image_shape)
        return img

    def refresh_image(self):
        # Note: self.image_box is not created anywhere in this class; this
        # helper appears to be unused legacy code.
        self.image_box.set_style({'image_url': str(self.imgpath)})

    ### MODEL ###
    def _load_model(self, new_model_id=None, new_model_inpaint_id=None):
        pass

    def i2i_inference(self):
        pass

    def generate(self, prompt):
        base_url = self.url_box.model.get_value_as_string()
        url = "%s/txt2img/%s" % (base_url, prompt)
        # res = {'file_name': 'res.jpg', 'prompt': 'how are you', 'time': 11.930987119674683}
        res = requests.get(url).json()
        print(res)
        im_url = "%s/download/%s" % (base_url, res["file_name"])
        r = requests.get(im_url)
        with open(os.path.join(SCRIPT_PATH, "data", "res.jpg"), "wb") as fp:
            fp.write(r.content)
        # shutil.copy(os.path.join(SCRIPT_PATH, "data", "res.jpg"), os.path.join(self.outdir, res["file_name"]))
        # random_name_str = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
        image_num = len(os.listdir(self.outdir))
        fpath = os.path.join(self.outdir, "%04d-%s" % (image_num, res["file_name"]))
        shutil.copy(os.path.join(SCRIPT_PATH, "data", "res.jpg"), fpath)
        return r.content

    def inference(self):
        prompt = self.text_box.model.get_value_as_string()
        print(f"{self} {prompt}")
        # self.texture_batch = self.diffuser_pipe([prompt]*self.batch_size).images
        self.texture_batch = self.generate(prompt=prompt)
        # texture update
        self.prev_imgpath = self.imgpath
        self.imgpath = os.path.join(SCRIPT_PATH, "data", "res.jpg")
        self.texture_image = self._load_image_from_file(os.path.join(SCRIPT_PATH, "data", "res.jpg"))
        logger.warning("diffusion images: %s" % (self.imgpath))
        self._load_scene()
        self._update_material('/Environment/Looks/OmniPBR', {"diffuse_texture": str(self.imgpath)})
        self.set_image_provider(self.texture_image)

    def set_image_provider(self, img):
        if isinstance(img, Image.Image):
            img = np.asarray(img, dtype=np.uint8)
        elif isinstance(img, np.ndarray):
            pass
        else:
            print('Unknown image format.')
        # Create alpha channel since ImageProvider expects a 4-channel image
        alpha_channel = np.ones_like(img[:, :, [0]], dtype=np.uint8) * 255
        if img.shape[2] == 3:
            img = np.concatenate([img, alpha_channel], axis=2)
        print('updating image provider')
        # set_data_array expects the size as (width, height).
        self._image_provider.set_data_array(img, (img.shape[1], img.shape[0]))

tangkangqi/omnivese-ai-kit-IAT-course/iat_diffuser_server/pipline.py

from fastapi import FastAPI
from fastapi import File, UploadFile
from fastapi.responses import FileResponse
import time
import shutil
import torch
from diffusers import DiffusionPipeline

app = FastAPI()

def load_lcm_pipe():
    pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")
    # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
    pipe.to(torch_device="cuda", torch_dtype=torch.float32)
    return pipe

pipe = load_lcm_pipe()

@app.get("/download/{name_file}")
def download_file(name_file: str):
    return FileResponse(path="data/" + name_file, media_type='application/octet-stream', filename=name_file)

def gen_image(prompt):
    num_inference_steps = 4
    images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0, lcm_origin_steps=50, output_type="pil").images
    print(len(images))
    image = images[0]
    fname = "%s.jpg" % ("-".join(prompt.split(" ")))
    image.save("data/" + fname)
    shutil.copy("data/" + fname, "data/res.jpg")
    return fname

def test_infer():
    prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
    gen_image(prompt)

@app.get("/txt2img/{prompt}")
def get_image(prompt: str):
    t0 = time.time()
    fname = gen_image(prompt)
    return {"file_name": fname, "prompt": prompt, "time": time.time() - t0}
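
# A minimal client sketch for this service (not part of the original file; the
# host/port and output file name are assumptions, e.g. after launching the app
# with `uvicorn pipline:app --host 0.0.0.0 --port 8000`):
if __name__ == "__main__":
    import requests
    base_url = "http://127.0.0.1:8000"
    # Request a generation, then download the resulting image by file name.
    res = requests.get("%s/txt2img/%s" % (base_url, "an old tree bark")).json()
    print(res)  # e.g. {'file_name': 'an-old-tree-bark.jpg', 'prompt': ..., 'time': ...}
    content = requests.get("%s/download/%s" % (base_url, res["file_name"])).content
    with open("res.jpg", "wb") as fp:
        fp.write(content)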

j3soon/omni-nerf-extension/compose.yaml

version: "3"
services:
  nerfstudio-renderer:
    image: j3soon/nerfstudio-renderer
    build:
      context: nerfstudio_renderer
      args:
        - CUDA_VERSION=11.8.0
        - CUDA_ARCHITECTURES=86
        - OS_VERSION=22.04
        - SERVER_PORT=10001
    container_name: nerfstudio-renderer
    ports:
      - "10001:10001"
    environment:
      - DISPLAY=$DISPLAY
    volumes:
      - /tmp/.X11-unix:/tmp/.X11-unix
      - ./nerfstudio_renderer/src:/src:ro
      - ./assets:/workspace:ro
      - cache:/home/user/.cache
    shm_size: '6gb'
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
  pygame-viewer:
    image: j3soon/pygame-viewer
    build:
      context: pygame_viewer
    container_name: pygame-viewer
    stdin_open: true
    tty: true
    network_mode: host
    environment:
      - DISPLAY=$DISPLAY
    volumes:
      - /tmp/.X11-unix:/tmp/.X11-unix
      - ./pygame_viewer:/src:ro
  isaac-sim-viewer:
    # Ref: https://github.com/j3soon/isaac-extended?tab=readme-ov-file#docker-container-with-display
    image: j3soon/isaac-sim-viewer
    build:
      context: extension
    container_name: isaac-sim-viewer
    entrypoint: [bash]
    stdin_open: true
    tty: true
    network_mode: host
    environment:
      - ACCEPT_EULA=Y
      - PRIVACY_CONSENT=Y
      - DISPLAY=$DISPLAY
    volumes:
      - ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw
      - ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw
      - ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw
      - ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw
      - ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw
      - ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
      - ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw
      - ~/docker/isaac-sim/documents:/root/Documents:rw
      - /tmp/.X11-unix:/tmp/.X11-unix
      - ./assets:/workspace
      - ./extension:/src
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
volumes:
  cache:

j3soon/omni-nerf-extension/extension/exts/omni.nerf.viewport/omni/nerf/viewport/extension.py

import platform
import cv2
import numpy as np
import omni.ext
import omni.ui as ui
import omni.usd
import rpyc
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom

# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    print("[omni.nerf.viewport] some_public_function was called with x: ", x)
    return x ** x

# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniNerfViewportExtension(omni.ext.IExt):

    def __init__(self):
        super().__init__()
        self.is_python_supported: bool = platform.python_version().startswith("3.10")
        """The Python version must match the backend version for RPyC to work."""
        self.camera_position: Gf.Vec3d = None
        self.camera_rotation: Gf.Vec3d = None

    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        # To see the Python print output in Omniverse Code, open the `Script Editor`.
        # In Isaac Sim, see the startup console instead.
        print("[omni.nerf.viewport] omni nerf viewport startup")
        # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd/stage/get-current-stage.html
        self.usd_context = omni.usd.get_context()
        # Subscribe to event streams
        # Ref: https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/event_streams.html
        # Ref: https://docs.omniverse.nvidia.com/kit/docs/kit-manual/104.0/carb.events/carb.events.IEventStream.html#carb.events.IEventStream.create_subscription_to_pop_by_type
        # Listen to rendering events. Only triggered when the viewport rendering is updated.
        # Will not be triggered when no viewport is visible on the screen.
        # Examples on using `get_rendering_event_stream` can be found by installing Isaac Sim
        # and searching for `get_rendering_event_stream` under `~/.local/share/ov/pkg/isaac_sim-2023.1.1`.
        self.rendering_event_stream = self.usd_context.get_rendering_event_stream()
        self.rendering_event_delegate = self.rendering_event_stream.create_subscription_to_pop(
            self._on_rendering_event, name="NeRF Viewport Update"
        )
        # TODO: Consider subscribing to update events
        # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/events.html#subscribe-to-update-events
        # Allocate memory
        self.rgba_w, self.rgba_h = 1280, 720  # Follow default camera resolution 1280x720
        self.rgba = np.ones((self.rgba_h, self.rgba_w, 4), dtype=np.uint8) * 128
        """RGBA image buffer. The shape is (H, W, 4), following the NumPy convention."""
        self.rgba[:,:,3] = 255
        # Init RPyC connection
        if self.is_python_supported:
            self.init_rpyc()
        # Build UI
        self.build_ui()

    def init_rpyc(self):
        # TODO: Make the following configurable
        host = 'localhost'
        port = 10001
        model_config_path = '/workspace/outputs/poster/nerfacto/DATE_TIME/config.yml'
        model_checkpoint_path = '/workspace/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt'
        device = 'cuda'
        self.rpyc_conn = rpyc.classic.connect(host, port)
        self.rpyc_conn.execute('from nerfstudio_renderer import NerfStudioRenderQueue')
        self.rpyc_conn.execute('from pathlib import Path')
        self.rpyc_conn.execute('import torch')
        self.rpyc_conn.execute(f'rq = NerfStudioRenderQueue(model_config_path=Path("{model_config_path}"), checkpoint_path="{model_checkpoint_path}", device=torch.device("{device}"))')

    def build_ui(self):
        """Build the UI. Should be called upon startup."""
        # Please refer to the `Omni::UI Doc` tab in Omniverse Code for efficient development.
        # Ref: https://youtu.be/j1Pwi1KRkhk
        # Ref: https://github.com/NVIDIA-Omniverse
        # Ref: https://youtu.be/dNLFpVhBrGs
        self.ui_window = ui.Window("NeRF Viewport", width=self.rgba_w, height=self.rgba_h)
        with self.ui_window.frame:
            with ui.ZStack():
                # Camera Viewport
                # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.kit.viewport.docs/latest/overview.html#simplest-example
                # Don't create a new viewport widget as below, since the viewport widget will often flicker.
                # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/release-notes/known-limits.html
                # ```
                # from omni.kit.widget.viewport import ViewportWidget
                # self.ui_viewport_widget = ViewportWidget(
                #     resolution = (640, 360),
                #     width = 640,
                #     height = 360,
                # )
                # self.viewport_api = self.ui_viewport_widget.viewport_api
                # ```
                # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/python-snippets/viewport/change-viewport-active-camera.html
                # Instead, the viewport is obtained from the active viewport in new renderings.
                # NeRF Viewport
                # Examples on using ByteImageProvider can be found by installing Isaac Sim
                # and searching for `set_bytes_data` under `~/.local/share/ov/pkg/isaac_sim-2023.1.1`.
                # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html
                # Ref: https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ImageWithProvider.html
                self.ui_nerf_provider = ui.ByteImageProvider()
                # TODO: Potentially optimize with `set_bytes_data_from_gpu`
                self.ui_nerf_img = ui.ImageWithProvider(
                    self.ui_nerf_provider,
                    width=ui.Percent(100),
                    height=ui.Percent(100),
                )
                # TODO: Larger image size?
                with ui.VStack(height=0):
                    self.ui_lbl_py = ui.Label("(To Be Updated)")
                    state = "supported" if platform.python_version().startswith("3.10") else "NOT supported"
                    self.ui_lbl_py.text = f"Python {platform.python_version()} is {state}"
                    # UI for setting the NeRF mesh
                    # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/scatter_tool.html
                    with ui.HStack():
                        self.ui_lbl_mesh = ui.Label("NeRF Mesh", width=65)
                        # Ref: https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/ui/widgets/stringfield.html
                        self._mesh_prim_model = ui.SimpleStringModel()
                        ui.StringField(model=self._mesh_prim_model)
                        ui.Button(
                            " S ",
                            width=0,
                            height=0,
                            clicked_fn=self._on_btn_set_click,
                            tooltip="Get From Selection",
                        )
                    ui.Button("Reset Camera", width=20, clicked_fn=self.on_btn_reset_click)
        self.update_ui()

    def update_ui(self):
        print("[omni.nerf.viewport] Updating UI")
        # Ref: https://forums.developer.nvidia.com/t/refresh-window-ui/221200
        self.ui_window.frame.rebuild()

    def _on_btn_set_click(self):
        self._mesh_prim_model.as_string = self._get_selected_prim_path()

    def on_btn_reset_click(self):
        # TODO: Allow resetting the camera to a specific position
        # Below doesn't seem to work
        # stage: Usd.Stage = self.usd_context.get_stage()
        # prim: Usd.Prim = stage.GetPrimAtPath('/OmniverseKit_Persp')
        # # `UsdGeom.Xformable(prim).SetTranslateOp` doesn't seem to exist
        # prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(0, 0, 0.1722))
        # prim.GetAttribute("xformOp:rotateXYZ").Set(Gf.Vec3d(0, -152, 0))
        # print("translateOp", prim.GetAttribute("xformOp:translate").Get())
        # print("rotateXYZOp", prim.GetAttribute("xformOp:rotateXYZ").Get())
        print("[omni.nerf.viewport] (TODO) Reset Camera")

    def _get_selected_prim_path(self):
        """Get the selected prim. Return '' if no prim is selected."""
        # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/object_info.html#step-5-get-the-selected-prims-data
        selected_prim_paths = self.usd_context.get_selection().get_selected_prim_paths()
        if not selected_prim_paths:
            return ''
        return selected_prim_paths[0]

    def _on_rendering_event(self, event):
        """Called by rendering_event_stream."""
        # No need to check event type, since there is only one event type: `NEW_FRAME`.
        if self.is_python_supported and self._mesh_prim_model.as_string != '':
            viewport_api = get_active_viewport()
            # We chose to use Viewport instead of Isaac Sim's Camera Sensor to avoid dependency on Isaac Sim.
            # We want the extension to work with any Omniverse app, not just Isaac Sim.
            # Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/features/sensors_simulation/isaac_sim_sensors_camera.html
            camera_to_world_mat: Gf.Matrix4d = viewport_api.transform
            object_to_world_mat: Gf.Matrix4d = Gf.Matrix4d()
            if self._mesh_prim_model.as_string != '':
                stage: Usd.Stage = self.usd_context.get_stage()
                selected_prim: Usd.Prim = stage.GetPrimAtPath(self._mesh_prim_model.as_string)
                selected_xform: UsdGeom.Xformable = UsdGeom.Xformable(selected_prim)
                object_to_world_mat = selected_xform.GetLocalTransformation()
            # In USD, pre-multiplication is used for matrices.
            # Ref: https://openusd.org/dev/api/usd_geom_page_front.html#UsdGeom_LinAlgBasics
            world_to_object_mat: Gf.Matrix4d = object_to_world_mat.GetInverse()
            camera_to_object_mat: Gf.Matrix4d = camera_to_world_mat * world_to_object_mat
            camera_to_object_pos: Gf.Vec3d = camera_to_object_mat.ExtractTranslation()
            # I suspect that the `Decompose` function will extract the rotation in the order of the input axes.
            # So for EulerXYZ, we want to first extract and remove the Z rotation, then Y, then X.
            # Then we reverse the order to get the XYZ rotation.
            # I haven't spent time looking into the source code to confirm this hypothesis though.
            # Ref: https://forums.developer.nvidia.com/t/how-to-get-euler-angle-of-the-prim-through-script-with-script-editor/269704/3
            # Ref: https://github.com/PixarAnimationStudios/OpenUSD/blob/2864f3d04f396432f22ec5d6928fc37d34bb4c90/pxr/base/gf/rotation.cpp#L108
            # must remove scale before rotation
            camera_to_object_mat.Orthonormalize()
            camera_to_object_rot: Gf.Vec3d = Gf.Vec3d(*reversed(camera_to_object_mat.ExtractRotation().Decompose(*reversed(Gf.Matrix3d()))))
            # TODO: Consider using viewport camera projection matrix `viewport_api.projection`?
            # Not same as below due to the potential difference in rotation matrix representation
            # ```
            # from scipy.spatial.transform import Rotation as R
            # camera_rotation: Gf.Vec3d = R.from_matrix(camera_mat.ExtractRotationMatrix()).as_euler('xyz', degrees=True) # in degrees
            # ```
            # TODO: Consider object transform (if it is moved or rotated)
            # No need to transform from Isaac Sim space to Nerfstudio space, since they are both in the same space.
            # Ref: https://github.com/j3soon/coordinate-system-conventions
            if camera_to_object_pos != self.camera_position or camera_to_object_rot != self.camera_rotation:
                self.camera_position = camera_to_object_pos
                self.camera_rotation = camera_to_object_rot
                print("[omni.nerf.viewport] New camera position:", camera_to_object_pos)
                print("[omni.nerf.viewport] New camera rotation:", camera_to_object_rot)
                self.rpyc_conn.execute(f'rq.update_camera({list(camera_to_object_pos)}, {list(np.deg2rad(camera_to_object_rot))})')
            image = self.rpyc_conn.eval('rq.get_rgb_image()')
            if image is None:
                return
            print("[omni.nerf.viewport] NeRF viewport updated")
            image = np.array(image)  # received with shape (H*, W*, 3)
            image = cv2.resize(image, (self.rgba_w, self.rgba_h), interpolation=cv2.INTER_LINEAR)  # resize to (H, W, 3)
            self.rgba[:,:,:3] = image * 255
        else:
            # If python version is not supported, render the dummy image.
            self.rgba[:,:,:3] = (self.rgba[:,:,:3] + np.ones((self.rgba_h, self.rgba_w, 3), dtype=np.uint8)) % 256
        self.ui_nerf_provider.set_bytes_data(self.rgba.flatten().tolist(), (self.rgba_w, self.rgba_h))

    def on_shutdown(self):
        print("[omni.nerf.viewport] omni nerf viewport shutdown")
        if self.is_python_supported:
            self.rpyc_conn.execute('del rq')

    def destroy(self):
        # Ref: https://docs.omniverse.nvidia.com/workflows/latest/extensions/object_info.html#step-3-4-use-usdcontext-to-listen-for-selection-changes
        self.rendering_event_stream = None
        self.rendering_event_delegate.unsubscribe()

j3soon/omni-nerf-extension/nerfstudio_renderer/README.md

# Nerfstudio Renderer
The following instructions assume you are in the `/nerfstudio_renderer` directory under the git repository root:
```sh
git clone https://github.com/j3soon/omni-nerf-extension.git
cd omni-nerf-extension
cd nerfstudio_renderer
```
## Launching NeRF Studio
> You can skip this section if you want to download the example poster model checkpoint and mesh.
Follow the [installation](https://docs.nerf.studio/quickstart/installation.html#use-docker-image) guide, specifically:
```sh
mkdir data
docker run --rm -it --gpus all \
-u $(id -u) \
-v $(pwd)/data:/workspace/ \
-v $HOME/.cache/:/home/user/.cache/ \
-p 7007:7007 \
--shm-size=12gb \
dromni/nerfstudio:0.3.4
```
The following subsections assume you have launched the container and using its interactive shell.
### Training a NeRF Model
Follow the [training model](https://docs.nerf.studio/quickstart/first_nerf.html) guide, specifically:
```sh
# in the container
# Download some test data:
ns-download-data nerfstudio --capture-name=poster
# Train model without normal prediction (used in the provided example poster assets for simplicity)
ns-train nerfacto --data data/nerfstudio/poster
# or train model with normal prediction (preferred)
ns-train nerfacto --data data/nerfstudio/poster --pipeline.model.predict-normals True
# wait for training to finish
```
> If you have trouble downloading the dataset, please refer to [this pull request](https://github.com/nerfstudio-project/nerfstudio/pull/3045).
### View the NeRF Model
```sh
# in the container
# change the DATE_TIME to the actual value
DATE_TIME=2023-12-30_111633
# View model
ns-viewer --load-config outputs/poster/nerfacto/$DATE_TIME/config.yml
# open the printed URL
```
### Exporting a Mesh
Follow the [export geometry](https://docs.nerf.studio/quickstart/export_geometry.html) guide, specifically:
```sh
# in the container
# change the DATE_TIME to the actual value
DATE_TIME=2023-12-30_111633
# Export mesh
# center is (-0.2, 0.1, -0.2), the midpoint of the bounding-box min/max below
ns-export tsdf --load-config outputs/poster/nerfacto/$DATE_TIME/config.yml --output-dir exports/mesh/ --target-num-faces 50000 --num-pixels-per-side 2048 --use-bounding-box True --bounding-box-min -0.55 -0.25 -0.55 --bounding-box-max 0.15 0.45 0.15
```
> Or use [Poisson Surface Reconstruction](https://docs.nerf.studio/quickstart/export_geometry.html#poisson-surface-reconstruction) instead, if the network supports predicting normals.
### View the Mesh
Open the mesh (`mesh.obj`) in Blender or any other 3D viewer.
## Download Model Checkpoint and Mesh
> You can skip this section if you want to train the example poster model checkpoint and extract mesh by yourself.
(TODO: Add a link to download a pre-trained model in a release)
## Rename the Model Directory and the Checkpoint File
Rename the timestamp and checkpoint files to the same name as the placeholder for simplicity:
```sh
# change the DATE_TIME to the name of the placeholder
DATE_TIME=2023-12-30_111633
CHECKPOINT_NAME=step-000029999
cp -r ./data/outputs/poster/nerfacto/$DATE_TIME ./data/outputs/poster/nerfacto/DATE_TIME
mv ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/$CHECKPOINT_NAME.ckpt ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt
```
You can check if the renaming succeeded with the following commands:
```sh
ls ./data/outputs/poster/nerfacto/DATE_TIME/config.yml
ls ./data/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt
```
## Running with Docker Compose
Run the PyGame test window with the following commands:
```sh
xhost +local:docker
docker compose up
# in new shell
docker exec -it pygame-window /workspace/run.sh
# the initial execution might result in a delay due to the download of the pre-trained torch model.
# please re-run the script if the script times out.
```
> There seems to be an issue in `nerfstudio-renderer` that uses old code
> upon restart. I'm not aware of a reliable fix for this issue yet.
> However, running `docker compose down && docker rm $(docker ps -aq)`
> seems to fix the issue (`docker compose down` isn't enough). I believe
> it is due to the `pip install` in the docker entrypoint. Please keep this in
> mind when modifying the renderer code.
For development purposes, you can run the following command to run the
PyGame test window directly in the `nerfstudio-renderer` container:
```sh
docker compose build
xhost +local:docker
docker compose up
# in new shell
docker exec -it nerfstudio-renderer /workspace/tests/run_local.sh
```
The `run_local.sh` script will re-copy and re-install the package
before launching the PyGame window, so this method will not encounter
the old code issue mentioned above.
## Running Inside Docker
Alternatively, it is possible to connect to the server with [rpyc](https://github.com/tomerfiliba-org/rpyc) in the `pygame-window` container.
```python
import rpyc
import random
import time
# Make connection
conn = rpyc.classic.connect('localhost', port=10001)
# Imports
conn.execute('import nerfstudio_renderer')
conn.execute('from pathlib import Path')
conn.execute('import torch')
# Create a NerfStudioRenderQueue
# For some reason, netref-based methods keep resulting in timeouts.
conn.execute('rq = nerfstudio_renderer.NerfStudioRenderQueue(model_config_path=Path("/workspace/outputs/poster/nerfacto/DATE_TIME/config.yml"), checkpoint_path="/workspace/outputs/poster/nerfacto/DATE_TIME/nerfstudio_models/CHECKPOINT_NAME.ckpt", device=torch.device("cuda"))')
# Update camera pose
position = [random.random() for _ in range(3)]
rotation = [0., -152, 0.]
conn.execute(f'rq.update_camera({position}, {rotation})')
# Wait for some time...
time.sleep(3)
# Obtain a rendered image
image = conn.eval('rq.get_rgb_image()')
# Delete remote render queue
conn.execute('del rq')
```
Please note that the use of rpyc does not perfectly decouple the client and server. The client must be using the same Python version as the server, otherwise, there will be compatibility issues.
## Notes
- `NerfStudioRenderQueue.update_camera` can be called whenever needed. The renderer will progressively render better images serially. Each update to the camera will result in an asynchronous rendering series.
- `NerfStudioRenderQueue.get_rgb_image` will always return a newly rendered image.
- These two calls need not to be paired.
- After a call to `NerfStudioRenderQueue.get_rgb_image`, its return value will become `None` until:
1. Another image from a *newer* camera update is completed.
2. Another image from the same camera update is completed, in higher quality than the previous ones, and no images from newer updates have been ready at that point.
- **No-Way-Back Guarantee**: If an image from a newer update (say, the 10-th update) is ready at `NerfStudioRenderQueue.get_rgb_image` (even if it is never retrieved), it is guaranteed no image from the 1-st to 9-th updates will be given by future calls.
- Therefore, it is safe to call `NerfStudioRenderQueue.get_rgb_image` multiple times just to check if a newer render is done between these calls.
- You may not immediately get newest renders, but you will never get two renders in reversed time ordering.
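
As an illustration of these semantics, here is a minimal polling sketch (assuming the `conn` connection and remote `rq` render queue were set up as in the rpyc example above, and that the client Python version matches the server's):

```python
import time

# Request a render from a new camera pose.
conn.execute('rq.update_camera([0.1, 0.2, 0.3], [0., -152, 0.])')

latest = None
for _ in range(10):  # poll for up to ~5 seconds
    image = conn.eval('rq.get_rgb_image()')
    if image is not None:
        # Guaranteed to be newer, or a higher-quality render of the same
        # update, than anything returned before.
        latest = image
    time.sleep(0.5)
```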
j3soon/omni-nerf-extension/nerfstudio_renderer/src/setup.py

from setuptools import setup

setup(
    name='nerfstudio_renderer',
    version='0.1',
    packages=['nerfstudio_renderer'],
    install_requires=['rpyc'],
)

j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/renderer.py

from collections import defaultdict
from typing import Dict
import torch
import yaml
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.data.scene_box import SceneBox
from nerfstudio_renderer.utils import *
class NerfStudioRenderer():
"""
The class is responsible for giving rendered images,
given the position, rotation, width, height, and fov
of a camera.
"""
def __init__(self, model_config_path, checkpoint_path, device):
"""
Parameters
----------
model_config_path : Path
The path to model configuration .yml file.
checkpoint_path : Path or str
The path to model checkpoint .ckpt file.
device : torch.device
Device for the model to run on. Usually CUDA or CPU.
"""
# Originally, `nerfstudio.utils.eval_setup` is used to load the entire Pipeline, which takes as input a TrainerConfig yml file.
# During the TrainerConfig setup (`nerfstudio.configs.base_config`) process, the constructor of VanillaPipeline is called.
# It will set up several components to form a complete pipeline, including a DataManager.
# The DataManager (VanillaDataManager) will perform operations to obtain DataParser outputs.
# During the setup process of the DataParser (NerfstudioDataParser), an assert is made, which forces the presence of training dataset.
# See: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L86
# Thus, even when performing inference, training dataset is needed.
# The following code is a workaround that doesn't require to set up the entire Pipeline.
# It load solely the model checkpoint with regard to its TrainerConfig YAML, without needing to set up the entire Pipeline.
# Note that all code below are based on the v0.3.4 tag: https://github.com/nerfstudio-project/nerfstudio/tree/v0.3.4
self.device = device
# 1. Entrypoint `eval_setup`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L68
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L88
config = yaml.load(model_config_path.read_text(), Loader=yaml.Loader)
# Using zero or average appearance embedding is a inference-time choice,
# not a training-time choice (that would require inference-time to match such a choice).
# Therefore, we simply choose to use zero appearance embedding
# See Section B of the NeRF-W paper's supplementary material
# Ref: https://arxiv.org/abs/2008.02268v3
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L247-L254
if config.pipeline.model.use_average_appearance_embedding:
print("WARNING: Forcing zero appearance embedding, although model config specifies to use average appearance embedding.")
config.pipeline.model.use_average_appearance_embedding = False
# Disable predict normals
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L116
if config.pipeline.model.predict_normals:
print("WARNING: Forcing not predicting normals.")
config.pipeline.model.predict_normals = False
# TODO: Support configuring `eval_num_rays_per_chunk`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L92-L93
# 1.1. Call to `VanillaPipelineConfig.setup`, which inherits `InstantiateConfig.setup`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L103
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L52
# 1.2. Call to `VanillaPipelineConfig._target`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L224
# 1.3. Call to `VanillaPipeline.__init__`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L224
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L251
# 1.3.1. Call to `VanillaDataManagerConfig.setup`, which inherits `InstantiateConfig.setup`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L263-L265
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54
# 1.3.2. Call to `VanillaDataManagerConfig._target`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/configs/base_config.py#L54
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L320
# 1.3.3. Call to `VanillaDataManager.__init__`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L320
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L378
# 1.3.4. Call to `get_dataparser_outputs`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/datamanagers/base_datamanager.py#L403
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/base_dataparser.py#L155
# 1.3.5. `_generate_dataparser_outputs`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/base_dataparser.py#L165
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L85
# Gather model-related arguments
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L256-L263
# in x,y,z order
# assumes that the scene is centered at the origin
aabb_scale = config.pipeline.datamanager.dataparser.scene_scale
scene_box = SceneBox(
aabb=torch.tensor(
[[-aabb_scale, -aabb_scale, -aabb_scale], [aabb_scale, aabb_scale, aabb_scale]], dtype=torch.float32
)
)
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#L319-L322
        metadata = {
"depth_filenames": None, # depth filenames are only required during training
"depth_unit_scale_factor": config.pipeline.datamanager.dataparser.depth_unit_scale_factor,
}
# 1.4. Call to `VanillaPipeline.setup`
# Setting num_train_data to 0 is fine, since we are not using average appearance embedding.
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L272
num_train_data = 0
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L275
grad_scaler = None # only required during training
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L270-L276
self.model = config.pipeline.model.setup(
scene_box=scene_box,
num_train_data=num_train_data,
metadata=metadata,
device=device,
grad_scaler=grad_scaler,
)
# Move model to device
self.model.to(device)
# 2. Call to `pipeline.eval()`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L105
self.model.eval()
# 3. Call to `eval_load_checkpoint`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L108
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L35
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L62
loaded_state = torch.load(checkpoint_path, map_location="cpu")
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L63
loaded_state, step = loaded_state["pipeline"], loaded_state["step"]
# 4. Call to `VanillaPipeline.load_pipeline`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/utils/eval_utils.py#L63
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L423
# Alter loaded model state dict for loading and update model to step
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L430-L433
state = {
(key[len("module.") :] if key.startswith("module.") else key): value for key, value in loaded_state.items()
}
self.model.update_to_step(step)
# 5. Call to `Pipeline.load_state_dict`
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L434
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L109
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L110-L119
is_ddp_model_state = True
model_state = {}
for key, value in state.items():
if key.startswith("_model."):
# remove the "_model." prefix from key
model_state[key[len("_model.") :]] = value
# make sure that the "module." prefix comes from DDP,
# rather than an attribute of the model named "module"
if not key.startswith("_model.module."):
is_ddp_model_state = False
        # Drop the appearance embedding layer, whose shape depends on the number of
        # training images, since we are not using the average appearance embedding anyway.
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/fields/nerfacto_field.py#L112
model_state = { key: value for key, value in model_state.items() if 'embedding_appearance' not in key }
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L120-L122
# remove "module." prefix added by DDP
if is_ddp_model_state:
model_state = { key[len("module.") :]: value for key, value in model_state.items() }
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/pipelines/base_pipeline.py#L130
self.model.load_state_dict(model_state, strict=False)
# Ref: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/models/base_model.py#L175
@torch.no_grad()
def get_outputs_for_camera_ray_bundle(self, model, camera_ray_bundle: RayBundle, invalidated_fn) -> Dict[str, torch.Tensor]:
"""Takes in camera parameters and computes the output of the model.
Args:
camera_ray_bundle: ray bundle to calculate outputs over
"""
num_rays_per_chunk = model.config.eval_num_rays_per_chunk
image_height, image_width = camera_ray_bundle.origins.shape[:2]
num_rays = len(camera_ray_bundle)
outputs_lists = defaultdict(list)
for i in range(0, num_rays, num_rays_per_chunk):
if invalidated_fn():
return None
start_idx = i
end_idx = i + num_rays_per_chunk
ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)
outputs = model.forward(ray_bundle=ray_bundle)
for output_name, output in outputs.items(): # type: ignore
if not torch.is_tensor(output):
# TODO: handle lists of tensors as well
continue
outputs_lists[output_name].append(output)
outputs = {}
for output_name, outputs_list in outputs_lists.items():
outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore
return outputs
def render_at(self, position, rotation, width, height, fov, invalidated_fn):
"""
Parameters
----------
        position : list[float]
            A 3-element list specifying the camera position.
        rotation : list[float]
            A 3-element list specifying the camera rotation, in Euler angles (radians).
        width : int
            The width of the rendered image, in pixels.
        height : int
            The height of the rendered image, in pixels.
        fov : float
            The vertical field-of-view of the camera, in degrees.
        invalidated_fn : Callable[[], bool]
            Function that returns whether the request has been invalidated.
        Returns
        ----------
        np.array or None
            An array of RGB values of shape (height, width, 3), with values in [0, 1],
            or None if the request was invalidated mid-render.
"""
# Obtain a Cameras object, and transform it to the same device as the model.
c2w_matrix = camera_to_world_matrix(position, rotation)
cameras = create_cameras(c2w_matrix, width, height, fov).to(self.device)
# Obtain a ray bundle with this Cameras
ray_bundle = cameras.generate_rays(camera_indices=0, aabb_box=None)
# Inference
with torch.no_grad():
# See: https://github.com/nerfstudio-project/nerfstudio/blob/c87ebe34ba8b11172971ce48e44b6a8e8eb7a6fc/nerfstudio/models/base_model.py#L175
outputs = self.get_outputs_for_camera_ray_bundle(self.model, ray_bundle, invalidated_fn)
if outputs is None:
            # Allow early return between ray-bundle chunks when the request has been invalidated.
return None
# Return results
return outputs['rgb'].cpu().numpy() | 15,697 | Python | 60.803149 | 172 | 0.707524 |
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/__init__.py | from .renderer import NerfStudioRenderer
from .render_queue import RendererCameraConfig, NerfStudioRenderQueue
| 111 | Python | 36.333321 | 69 | 0.882883 |
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/render_queue.py | import json
import threading
import time
from collections import deque
from nerfstudio_renderer.renderer import *
class RendererCameraConfig:
"""
    This class loads camera configurations for the
    NerfStudioRenderQueue to use.
    A configuration is a list of dicts.
    The NerfStudioRenderQueue can then render differently
    sized images, one per camera configuration, e.g. to
    trade image quality for latency.
"""
def __init__(self, cameras_config):
"""
Parameters
----------
cameras_config : list[dict]
A list of dicts that describes different camera configurations.
Each element is of the form {
'width': int, # The rendered image width (in pixels)
'height': int, # The rendered image height (in pixels)
'fov': float, # The vertical field-of-view of the camera
}
"""
self.cameras = cameras_config
    @staticmethod
    def default_config():
        """
        Returns a default configuration containing 5 cameras:
        four at reduced resolutions for fast, progressively refined
        rendering, and one at full resolution for display.
Returns
----------
RendererCameraConfig
A default config.
"""
# These configurations are chosen empirically, and may be subject to change.
# Nerfstudio camera defaults:
# - vertical FoV: 50 degrees
# Isaac Sim camera defaults:
# - Size: 1280x720
# - Focal Length: 18.14756
# - Horizontal Aperture: 20.955
# - Vertical Aperture: (Value Unused)
# - (Calculated) horizontal FoV = math.degrees(2 * math.atan(20.955 / (2 * 18.14756))) = 60
# The following vertical FoV is chosen to follow the Isaac Sim camera defaults.
# Some useful equations:
# - focal_length = width / (2 * math.tan(math.radians(fov_horizontal) / 2))
# - focal_length = height / (2 * math.tan(math.radians(fov_vertical) / 2))
# - fov_vertical = math.degrees(2 * math.atan(height / (2 * focal_length)))
# - fov_horizontal = math.degrees(2 * math.atan(width / (2 * focal_length)))
# - fov_horizontal = math.degrees(2 * math.atan(horiz_aperture / (2 * focal_length)))
# Ref: https://forums.developer.nvidia.com/t/change-intrinsic-camera-parameters/180309/6
# - aspect_ratio = width / height
# - fov_vertical = math.degrees(2 * math.atan((height / width) * math.tan(math.radians(fov_horizontal) / 2)))
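        # A minimal, executable sanity check of the value used below (illustrative):
        # plugging the Isaac Sim defaults (1280x720, 60-degree horizontal FoV) into
        # the aspect-ratio equation above reproduces the hard-coded vertical FoV.
        import math
        assert abs(math.degrees(2 * math.atan((720 / 1280) * math.tan(math.radians(60) / 2)))
                   - 35.98339777135764) < 1e-4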
return RendererCameraConfig([
# fov_vertical = math.degrees(2 * math.atan((height / width) * math.tan(math.radians(fov_horizontal) / 2)))
# = 35.98339777135764
# 0.05x resolution
{ 'width': 64, 'height': 36, 'fov': 35.98339777135764 },
# 0.1x resolution
{ 'width': 128, 'height': 72, 'fov': 35.98339777135764 },
# 0.25x resolution
{ 'width': 320, 'height': 180, 'fov': 35.98339777135764 },
# 0.5x resolution
{ 'width': 640, 'height': 360, 'fov': 35.98339777135764 },
# 1x resolution
{ 'width': 1280, 'height': 720, 'fov': 35.98339777135764 },
])
    @staticmethod
    def load_config(file_path=None):
"""
Returns a configuration defined by a json-formatted file.
Parameters
----------
file_path : str, optional
The path to the config file.
Returns
----------
RendererCameraConfig
A config specified by `file_path`, or a default one.
"""
if file_path is None:
return RendererCameraConfig.default_config()
with open(file_path, 'r') as f:
return RendererCameraConfig(json.load(f))
class NerfStudioRenderQueue():
"""
The class encapsulates NerfStudioRenderer and provides
a mechanism that aims at minimizing rendering latency,
via an interface that allows registration of rendering
requests. The render queue attempts to deliver
rendering results of the latest request in time, so
requests are not guaranteed to be served.
Attributes
----------
camera_config : RendererCameraConfig
The different configurations of cameras (different qualities, etc.).
renderer : NerfStudioRenderer
The NerfStudioRenderer used to actually give rendered images.
"""
def __init__(self,
model_config_path,
checkpoint_path,
device,
thread_count=3,
camera_config_path=None):
"""
Parameters
----------
        model_config_path : str
            The path to model configuration .yml file.
        checkpoint_path : Path or str
            The path to model checkpoint .ckpt file.
        device : torch.device
            Device for the model to run on. Usually CUDA or CPU.
        thread_count : int, optional
            The number of worker threads used to serve render requests.
        camera_config_path : str, optional
            The path to the camera config file.
            Uses `RendererCameraConfig.default_config()` when not assigned.
"""
# Construct camera config and renderer
self.camera_config = RendererCameraConfig.load_config(camera_config_path)
self.renderer = NerfStudioRenderer(model_config_path, checkpoint_path, device)
# Data maintained for optimization:
self._last_request_camera_position = (-np.inf, -np.inf, -np.inf)
"""The camera position of the last accepted request."""
self._last_request_camera_rotation = (-np.inf, -np.inf, -np.inf)
"""The camera rotation of the last accepted request."""
self._request_deque = deque(maxlen=thread_count)
"""The queue/buffer of render requests. Since we want to drop
stale requests/responses, the max size of the deque is simply
set as the thread count. The deque acts like a request buffer
instead of a task queue, which drops older requests when full.
"""
self._request_deque_pop_lock = threading.Lock()
"""The lock for the request deque. Although deque is
thread-safe, we still need to lock it when popping the deque
while empty to create blocking behavior.
"""
self._last_request_timestamp = time.time()
"""The timestamp of the last accepted request."""
self._last_request_timestamp_lock = threading.Lock()
"""The timestamp lock for the last request timestamp."""
self._last_response_timestamp = time.time()
"""The timestamp of the last sent response."""
self._last_response_timestamp_lock = threading.Lock()
"""The timestamp lock for the last response timestamp."""
self._image_response_buffer = None
"""The latest rendered image buffer, which will be cleared
immediately after retrieval."""
self._image_response_buffer_lock = threading.Lock()
"""The image lock for the image response buffer."""
for i in range(thread_count):
t = threading.Thread(target=self._render_task)
t.daemon = True
t.start()
# We choose to use threading here instead of multiprocessing
# due to lower overhead. We are aware of the GIL, but since
# the bottleneck should lie in the rendering process, which
# is implemented in C++ by PyTorch, the GIL should be released
# during PyTorch function calls.
# Ref: https://discuss.pytorch.org/t/can-pytorch-by-pass-python-gil/55498
# After going through some documents, we conclude that switching
# to multiprocessing may not be a good idea, since the overhead
# of inter-process communication may be high, and the
# implementation is not trivial.
def get_rgb_image(self):
"""
Retrieve the most recently ready rgb image.
If no rgb images have been rendered since last call of `get_rgb_image`, returns None.
Returns
----------
np.array or None
If applicable, returns an np array of size (width, height, 3) and with values ranging from 0 to 1.
Otherwise, returns None.
"""
with self._image_response_buffer_lock:
image = self._image_response_buffer
self._image_response_buffer = None
return image
def update_camera(self, position, rotation):
"""
Notifies an update to the camera pose.
This may or may not result in a new render request.
Parameters
----------
position : list[float]
A 3-element list specifying the camera position.
rotation : list[float]
A 3-element list specifying the camera rotation, in euler angles.
"""
if self._is_input_similar(position, rotation):
return
self._last_request_camera_position = position.copy()
self._last_request_camera_rotation = rotation.copy()
now = time.time()
with self._last_request_timestamp_lock:
self._last_request_timestamp = now
# Queue this render request, with request timestamp attached.
self._request_deque.append((position, rotation, now))
def _render_task(self):
while True:
with self._request_deque_pop_lock:
if len(self._request_deque) == 0:
time.sleep(0.05)
continue
task = self._request_deque.pop()
position, rotation, timestamp = task
# For each render request, render lower quality images first, and then higher quality ones.
# This rendering request and response may be dropped, as newer requests/responses invalidate older ones.
for camera in self.camera_config.cameras:
# A request can be invalidated if there are newer requests.
with self._last_request_timestamp_lock:
if timestamp - self._last_request_timestamp < 0:
continue
# Render the image
# Early return if the request is invalidated.
def request_invalidated():
with self._last_request_timestamp_lock:
return timestamp - self._last_request_timestamp < 0
image = self.renderer.render_at(position, rotation, camera['width'], camera['height'], camera['fov'], request_invalidated)
if image is None:
continue
# A response must be dropped if there are newer responses.
with self._last_response_timestamp_lock:
if timestamp - self._last_response_timestamp < 0:
continue
self._last_response_timestamp = timestamp
with self._image_response_buffer_lock:
self._image_response_buffer = image
    # Checks if the camera pose is identical to that of the last accepted request.
def _is_input_similar(self, position, rotation):
return position == self._last_request_camera_position and rotation == self._last_request_camera_rotation
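# A minimal usage sketch (paths and device are illustrative placeholders;
# see pygame_viewer/pygame_test.py for a complete example):
#   rq = NerfStudioRenderQueue(model_config_path=Path("config.yml"),
#                              checkpoint_path="model.ckpt",
#                              device=torch.device("cuda"))
#   rq.update_camera([0.0, 0.0, 0.5], [0.0, 0.0, 0.0])
#   image = rq.get_rgb_image()  # None until some render request completes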
| 11,091 | Python | 41.992248 | 138 | 0.600126 |
j3soon/omni-nerf-extension/nerfstudio_renderer/src/nerfstudio_renderer/utils.py | import numpy as np
import torch
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
from scipy.spatial.transform import Rotation as R
def camera_to_world_matrix(position, rotation):
"""
Constructs a camera-to-world (c2w) transformation matrix,
based on the position and rotation of the camera.
Parameters
----------
position : list[float]
A 3-element list of floats representing the position of the camera.
rotation : list[float]
A 3-element list of floats representing the rotation of the camera, in euler angles.
Returns
----------
np.array
A 4x4 camera-to-world matrix.
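    Example
    ----------
    Illustrative usage; the rotation is given in radians.
    >>> camera_to_world_matrix([1.0, 2.0, 0.5], [0.0, 0.0, 0.0]).shape
    (4, 4)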
"""
camera_to_world_matrix = np.eye(4)
rot_matrix = R.from_euler('xyz', rotation).as_matrix()
camera_to_world_matrix[:3, :3] = rot_matrix
camera_to_world_matrix[:3, 3] = position
return camera_to_world_matrix
def create_cameras(camera_to_world_matrix, width, height, fov):
"""
Constructs a Cameras object based on a c2w matrix,
and a camera configuration from RendererCameraConfig.
Parameters
----------
    camera_to_world_matrix : np.array
        A 4x4 camera-to-world transformation matrix.
    width : int
        The width of the camera image, in pixels.
    height : int
        The height of the camera image, in pixels.
    fov : float
        The vertical field-of-view of the camera, in degrees.
Returns
----------
Cameras
A Cameras object.
"""
# Compute camera focal length
focal_length = three_js_perspective_camera_focal_length(fov, height)
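    # Note: this is equivalent to the pinhole relation listed in render_queue.py:
    #   focal_length = height / (2 * math.tan(math.radians(fov) / 2))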
# Only use the first 3 rows of the c2w matrix, as the last row is always [0 0 0 1].
camera_to_worlds = torch.tensor(camera_to_world_matrix)[:3].view(1, 3, 4)
return Cameras(
fx=torch.tensor([focal_length]),
fy=torch.tensor([focal_length]),
cx=width/2,
cy=height/2,
camera_to_worlds=camera_to_worlds,
camera_type=CameraType.PERSPECTIVE,
times=None,
)
| 2,071 | Python | 28.183098 | 92 | 0.654273 |
j3soon/omni-nerf-extension/pygame_viewer/pygame_test.py | import argparse
import time
import cv2
import numpy as np
import pygame
import rpyc
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default='localhost')
parser.add_argument("--port", type=int, default=10001)
parser.add_argument("--model_config_path", type=str, required=True)
parser.add_argument("--model_checkpoint_path", type=str, required=True)
parser.add_argument("--device", type=str, choices=['cpu', 'cuda'], default='cuda')
parser.add_argument("--rpyc", type=bool, default=False)
args = parser.parse_args()
return args
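# Example invocation (paths are illustrative placeholders):
#   python pygame_test.py --model_config_path config.yml \
#       --model_checkpoint_path model.ckpt --device cuda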
def main(args):
if not args.rpyc:
# Remote: Make Connection & Import
conn = rpyc.classic.connect(args.host, args.port)
conn.execute('from nerfstudio_renderer import NerfStudioRenderQueue')
conn.execute('from pathlib import Path')
conn.execute('import torch')
else:
from nerfstudio_renderer import NerfStudioRenderQueue
from pathlib import Path
import torch
if not args.rpyc:
# Create a Remote NerfStudioRenderQueue
conn.execute(f'rq = NerfStudioRenderQueue(model_config_path=Path("{args.model_config_path}"), checkpoint_path="{args.model_checkpoint_path}", device=torch.device("{args.device}"))')
else:
rq = NerfStudioRenderQueue(
model_config_path=Path(args.model_config_path),
checkpoint_path=args.model_checkpoint_path,
device=torch.device(args.device),
)
# Initialize Pygame
pygame.init()
# Set the width and height of the window
width, height = 640, 360
window_size = (width, height)
# Create a Pygame window
screen = pygame.display.set_mode(window_size)
# Create a clock to control the frame rate
clock = pygame.time.Clock()
# Camera curve time & global screen buffer
camera_curve_time = 0
screen_buffer = np.zeros((width, height, 3), dtype=np.uint8)
# Camera pose for the poster NeRF model
camera_position = [0, 0, 0]
camera_rotation = [0, 0, 0]
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Retrieve image
if not args.rpyc:
image = conn.eval('rq.get_rgb_image()')
else:
image = rq.get_rgb_image()
if image is not None:
image = np.array(image) # received with shape (H*, W*, 3)
image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR) # resize to (H, W, 3)
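            # transpose to (width, height, 3), since pygame surfaces are indexed (x, y)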
image = np.transpose(image, (1, 0, 2))
screen_buffer[:] = image * 255
animation_progress = (np.sin(camera_curve_time) + 1) / 2
# Cover the screen buffer with an indicator of camera position
hud_width, hud_height = 100, 50
bar_x, bar_y = 20, 24
bar_w, bar_h = 60, 2
# white background
camera_position_indicator = np.ones((hud_width, hud_height, 3)) * 255
# horizontal line
camera_position_indicator[bar_x:bar_x+bar_w, bar_y:bar_y+bar_h, :] = 0
# square indicator of current position
hud_x = round(bar_x + bar_w * animation_progress)
camera_position_indicator[hud_x-5:hud_x+5, 20:30, :] = 0
screen_buffer[width-hud_width:, height-hud_height:, :] = camera_position_indicator
# Convert the NumPy array to a Pygame surface
image_surface = pygame.surfarray.make_surface(screen_buffer)
# Blit the surface to the screen
screen.blit(image_surface, (0, 0))
pygame.display.flip()
# Control the frame rate
clock.tick(30)
# Move Camera
camera_position[2] = animation_progress
# Update Camera
if not args.rpyc:
conn.execute(f'rq.update_camera({camera_position}, {camera_rotation})')
else:
rq.update_camera(camera_position, camera_rotation)
if int(time.time()) % 5 == 0:
camera_curve_time += 1.0 / 30.0
if not args.rpyc:
# Delete remote render queue
conn.execute('del rq')
# Quit Pygame
pygame.quit()
if __name__ == '__main__':
main(parse_args())
| 4,242 | Python | 32.409449 | 189 | 0.615747 |
yizhouzhao/OpenAnyDrawer/README.md | # OpenAnyDrawer
Windows running guide:
C:/Users/zhaoy/AppData/Local/ov/pkg/isaac_sim-2022.1.0/python.bat
E:\researches\OpenAnyDrawer\open-any-drawer\exts\open.any.drawer\open\any\drawer
result_file_path = "F:\\allegro_exp_learning823.txt"
MODEL_PATH = "F:\\fasterrcnn_resnet50_fpn823.pth"
usd_path = "F:\\scene1.usd"
env = OpenEnv(load_nucleus=False)
open_env.py lines 134-139: change to load locally
experiment.py line
result_file_path = "F:\\shadowhand_exp823.txt"
SHOW_IMAGE = False
usd_path = "F:\\scene2.usd"
MODEL_PATH = "F:\\fasterrcnn_resnet50_fpn823.pth"
Linux running guide:
# go to directory: open-any-drawer/exts/open.any.drawer/open/any/drawer/
# # start notebook from: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/jupyter_notebook.sh
# start python: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
# next paper about body language
| 891 | Markdown | 30.857142 | 97 | 0.735129 |
yizhouzhao/OpenAnyDrawer/learning/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class IdentityBlock(nn.Module):
def __init__(self, in_planes, filters, kernel_size, stride=1, final_relu=True, batchnorm=True):
super(IdentityBlock, self).__init__()
self.final_relu = final_relu
self.batchnorm = batchnorm
filters1, filters2, filters3 = filters
self.conv1 = nn.Conv2d(in_planes, filters1, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(filters1) if self.batchnorm else nn.Identity()
self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, dilation=1,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(filters2) if self.batchnorm else nn.Identity()
self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(filters3) if self.batchnorm else nn.Identity()
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += x
if self.final_relu:
out = F.relu(out)
return out
class ConvBlock(nn.Module):
def __init__(self, in_planes, filters, kernel_size, stride=1, final_relu=True, batchnorm=True):
super(ConvBlock, self).__init__()
self.final_relu = final_relu
self.batchnorm = batchnorm
filters1, filters2, filters3 = filters
self.conv1 = nn.Conv2d(in_planes, filters1, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(filters1) if self.batchnorm else nn.Identity()
self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, dilation=1,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(filters2) if self.batchnorm else nn.Identity()
self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(filters3) if self.batchnorm else nn.Identity()
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, filters3,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(filters3) if self.batchnorm else nn.Identity()
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
if self.final_relu:
out = F.relu(out)
return out
class ResNet43_8s(nn.Module):
def __init__(self, input_shape, output_dim, cfg, device, preprocess):
super(ResNet43_8s, self).__init__()
self.input_shape = input_shape
self.input_dim = input_shape[-1]
self.output_dim = output_dim
self.cfg = cfg
self.device = device
self.batchnorm = self.cfg['train']['batchnorm']
self.preprocess = preprocess
self.layers = self._make_layers()
def _make_layers(self):
layers = nn.Sequential(
# conv1
nn.Conv2d(self.input_dim, 64, stride=1, kernel_size=3, padding=1),
nn.BatchNorm2d(64) if self.batchnorm else nn.Identity(),
nn.ReLU(True),
# fcn
ConvBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
ConvBlock(64, [128, 128, 128], kernel_size=3, stride=2, batchnorm=self.batchnorm),
IdentityBlock(128, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
ConvBlock(128, [256, 256, 256], kernel_size=3, stride=2, batchnorm=self.batchnorm),
IdentityBlock(256, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
ConvBlock(256, [512, 512, 512], kernel_size=3, stride=2, batchnorm=self.batchnorm),
IdentityBlock(512, [512, 512, 512], kernel_size=3, stride=1, batchnorm=self.batchnorm),
# head
ConvBlock(512, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(256, [256, 256, 256], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
ConvBlock(256, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(128, [128, 128, 128], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
# conv2
ConvBlock(64, [16, 16, self.output_dim], kernel_size=3, stride=1,
final_relu=False, batchnorm=self.batchnorm),
IdentityBlock(self.output_dim, [16, 16, self.output_dim], kernel_size=3, stride=1,
final_relu=False, batchnorm=self.batchnorm),
)
return layers
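    # A minimal usage sketch of this network (illustrative; `cfg` and `preprocess`
    # here are stand-in assumptions, see `forward` below):
    #   cfg = {'train': {'batchnorm': True}}
    #   preprocess = lambda x, dist: x  # identity stand-in for the real preprocessing
    #   net = ResNet43_8s((320, 160, 6), output_dim=1, cfg=cfg, device='cpu', preprocess=preprocess)
    #   out = net(torch.zeros(1, 6, 320, 160))  # -> [1, 1, 320, 160]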
def forward(self, x):
x = self.preprocess(x, dist='transporter')
out = self.layers(x)
return out | 5,290 | Python | 43.091666 | 99 | 0.60983 |
yizhouzhao/OpenAnyDrawer/learning/custom_cliport.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import torch
import torch.nn as nn
import torch.nn.functional as F
from fusion import *
from resnet import IdentityBlock, ConvBlock
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels), # (Mohit): argh... forgot to remove this batchnorm
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels), # (Mohit): argh... forgot to remove this batchnorm
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class CustomCliport(nn.Module):
def __init__(self, clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json",
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')) -> None:
"""
        Builds the language-conditioned decoder; call `set_prediction_mode()`
        afterwards to load the vision backbone and cached CLIP text features.
"""
super().__init__()
self.batchnorm = True
self.clip_text_feature_path = clip_text_feature_path
self.device = device
self.proj_input_dim = 512
self.lang_proj1 = nn.Linear(self.proj_input_dim, 512)
self.lang_proj2 = nn.Linear(self.proj_input_dim, 256)
self.lang_proj3 = nn.Linear(self.proj_input_dim, 128)
self.lang_fuser1 = FusionMult(512)
self.lang_fuser2 = FusionMult(256)
self.lang_fuser3 = FusionMult(128)
self.up1 = nn.Sequential(
nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
DoubleConv(512, 512, 512)
)
self.up2 = nn.Sequential(
nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
DoubleConv(512, 256, 512)
)
self.up3 = nn.Sequential(
nn.Upsample(scale_factor= 2, mode='bilinear', align_corners=True),
DoubleConv(256, 128, 256)
)
self.layer1 = nn.Sequential(
ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
)
self.layer2 = nn.Sequential(
ConvBlock(64, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(32, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
)
self.layer3 = nn.Sequential(
ConvBlock(32, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
IdentityBlock(16, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
nn.UpsamplingBilinear2d(scale_factor=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, 1, kernel_size=1),
)
# in prediction
def set_prediction_mode(self):
# load vision model
from transformers import AutoFeatureExtractor, ResNetModel
self.feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-18")
self.resnet_model = ResNetModel.from_pretrained("microsoft/resnet-18").to(self.device)
# load language feature
import json
self.text2clip_feature = json.load(open(self.clip_text_feature_path,'r', encoding='utf-8'))
def pred_box_pos_and_dir(self, image, text):
"""
        Predict the handle box center position (h, w) and the handle direction.
"""
# image features
inputs = self.feature_extractor(image, return_tensors="pt").to(self.device)
with torch.no_grad():
image_features = self.resnet_model(**inputs).last_hidden_state # [1, 512, 7, 7]
text_feautures = torch.tensor(self.text2clip_feature[text]).float().unsqueeze(0).to(self.device) # [1, 512]
pred_y = self.forward(image_features, text_feautures)
        pred_max_index = torch.argmax(pred_y[0].cpu().data).item()
        h, w = pred_max_index // 256, pred_max_index % 256
        # get direction from the heatmap response around the peak
        top_bound = max(h - 5, 0)
        bottom_bound = min(h + 5, 255)
        left_bound = max(w - 5, 0)
        right_bound = min(w + 5, 255)
        # mean response along the vertical direction (rows) at the peak column
        v_mean = torch.mean(pred_y[0][top_bound:bottom_bound, w]).item()
        # mean response along the horizontal direction (columns) at the peak row
        h_mean = torch.mean(pred_y[0][h, left_bound:right_bound]).item()
        handle_dir = "horizontal" if v_mean > h_mean else "vertical" # if the vertical response dominates, the handle is horizontal
return (h,w), handle_dir
def forward(self, x, l):
"""
x: image features [B x 512 x 7 x 7]
l: language features [B x 512]
"""
x = self.up1(x)
x = self.lang_fuser1(x, l, x2_proj = self.lang_proj1)
x = self.up2(x)
x = self.lang_fuser2(x, l, x2_proj = self.lang_proj2)
x = self.up3(x)
x = self.lang_fuser3(x, l, x2_proj = self.lang_proj3)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
x = F.interpolate(x, size=(256, 256), mode='bilinear')
x = F.relu(x)
x = x.squeeze(1)
return x
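# A minimal usage sketch (shapes follow the `forward` docstring; values are illustrative):
#   model = CustomCliport()
#   x = torch.randn(2, 512, 7, 7)  # ResNet-18 image features
#   l = torch.randn(2, 512)        # CLIP text features
#   y = model(x, l)                # -> [2, 256, 256] heatmap over the image plane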
| 6,850 | Python | 34.497409 | 139 | 0.589343 |
yizhouzhao/OpenAnyDrawer/learning/custom_dataset.py | import numpy as np
import cv2
import os
from PIL import Image, ImageDraw
import json
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
# load vision model
from transformers import AutoFeatureExtractor, ResNetModel
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-18")
resnet_model = ResNetModel.from_pretrained("microsoft/resnet-18").to("cuda")
# load language feature
import pickle
text2clip_feature = pickle.load(open("text2clip_feature.pickle",'rb'))
def collate_fn(batch):
image_list = []
target_list = []
text_feature_list = []
for (image, target, text) in batch:
image_list.append(image)
target_list.append(torch.tensor(target))
text_feature_list.append(text2clip_feature[text])
# image features
inputs = feature_extractor(image_list, return_tensors="pt").to("cuda")
with torch.no_grad():
image_features = resnet_model(**inputs).last_hidden_state
# targets
targets = torch.stack(target_list).to("cuda")
text_feautures = torch.stack(text_feature_list).to("cuda")
return image_features.float(), targets.float(), text_feautures.float()
class HandleDataset4Cliport(Dataset):
def __init__(self, image_dir, num_frames = 5, is_train = True, transforms=None):
super().__init__()
self.image_dir = image_dir
self.num_frames = num_frames # randomized frames in rendering
self.transforms = transforms
self.is_train = is_train
self.get_img_ids()
def get_img_ids(self):
self.image_ids = []
for image_id in tqdm(sorted(os.listdir(self.image_dir), key = lambda x: int(x))):
if self.is_train:
if int(image_id) > 150:
continue
else: # test
if int(image_id) <= 150:
continue
# print("image_id", image_id)
for i in range(self.num_frames):
boxes_np = np.load(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_{i}.npy')
lang_json = json.load(open(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_labels_{i}.json'))
if boxes_np.shape[0] > 0:
boxes = np.array([ list(e) for e in boxes_np])
boxes = boxes[...,1:] # 0 is the class index
boxes[:, :2] -= 1 # make min a little smaller
                    boxes[:, 2:] += 1 # make max a little larger
for j, key in enumerate(lang_json):
self.image_ids.append([image_id, boxes[j], i, lang_json[key]['class']]) # image, box, frame, text
def __len__(self):
return len(self.image_ids)
def __getitem__(self, index: int):
"""
        return:
            image: the RGB image (PIL.Image)
            box_image: a float mask in [0, 1] with the handle bounding box filled
            text: the handle description, e.g. "top left drawer"
"""
image_id, box, frame, text = self.image_ids[index]
image = Image.open(f'{self.image_dir}/{image_id}/rgb_{frame}.png')
image = image.convert('RGB')
box_image = Image.new('L', image.size)
draw_image = ImageDraw.Draw(box_image)
draw_image.rectangle(list(box), fill ="#FFFFFF")
box_image = np.array(box_image) / 255.0
text = text.replace("_"," ").replace("-"," ").replace(" ", " ").strip()
return image, box_image, text
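# A minimal usage sketch (the image directory is an illustrative placeholder):
#   dataset = HandleDataset4Cliport("data/images", is_train=True)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
#   image_features, targets, text_features = next(iter(loader))
#   # image_features: [B, 512, 7, 7], targets: [B, H, W], text_features: [B, 512]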
| 3,353 | Python | 30.942857 | 117 | 0.583656 |
yizhouzhao/OpenAnyDrawer/learning/utils.py | # get text clip encoding
def get_text_embeddings():
"""
Get text embeddings from clip language model
"""
ALL_SEMANTIC_TYPES = [f"{v_desc}_{h_desc}_{cabinet_type}" for v_desc in ["", "bottom", "second-bottom", "middle", "second-top", "top"] for h_desc in ["", "right", "second-right", "middle", "second-left", "left"] for cabinet_type in ["drawer", "door"]]
ALL_SEMANTIC_TYPES = [f"{v_desc}_{h_desc}_{cabinet_type}" for v_desc in ["", "bottom", "second-bottom", "middle", "second-top", "top"] for h_desc in ["", "right", "second-right", "middle", "second-left", "left"] for cabinet_type in ["drawer", "door"]]
    all_texts = [t.replace("_", " ").replace("-", " ").replace("  ", " ").strip() for t in ALL_SEMANTIC_TYPES]
# all_texts
from transformers import CLIPTokenizer, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
inputs = tokenizer(all_texts, padding=True, return_tensors="pt")
text_features = model.get_text_features(**inputs)
    text2feature = {all_texts[i]: text_features[i].data for i in range(len(all_texts))}
import pickle
# save dictionary to pickle file
with open('text2clip_feature.pickle', 'wb') as file:
pickle.dump(text2feature, file, protocol=pickle.HIGHEST_PROTOCOL)
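    # The cached features can then be loaded at training time,
    # e.g. as done in custom_dataset.py:
    #   text2clip_feature = pickle.load(open('text2clip_feature.pickle', 'rb'))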
| 1,357 | Python | 42.80645 | 255 | 0.647752 |
yizhouzhao/OpenAnyDrawer/learning/dataset.py | import numpy as np
import cv2
import os
import torch
from torch.utils.data import DataLoader, Dataset
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from tqdm.auto import tqdm
# Albumentations
def get_train_transform():
return A.Compose([
A.Resize(224, 224),
A.Flip(0.5),
ToTensorV2(p=1.0)
], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
def get_valid_transform():
return A.Compose([
A.Resize(224, 224),
ToTensorV2(p=1.0)
], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
def collate_fn(batch):
return tuple(zip(*batch))
class HandleDataset(Dataset):
def __init__(self, image_dir, num_frames = 5, is_train = True, transforms=None):
super().__init__()
self.image_dir = image_dir
self.num_frames = num_frames # randomized frames in rendering
self.transforms = transforms
self.is_train = is_train
self.get_img_ids()
def get_img_ids(self):
self.image_ids = []
for image_id in tqdm(os.listdir(self.image_dir)):
if self.is_train:
if int(image_id) > 150:
continue
else: # test
if int(image_id) <= 150:
continue
for i in range(self.num_frames):
boxes_np = np.load(f'{self.image_dir}/{image_id}/bounding_box_2d_tight_{i}.npy')
if boxes_np.shape[0] > 0:
boxes = np.array([ list(e) for e in boxes_np])
boxes = boxes[...,1:] # 0 is the class index
                boxes[:, 2:] += 1 # make max a little larger
self.image_ids.append([image_id, boxes, i])
def __len__(self):
return len(self.image_ids)
def __getitem__(self, index: int):
image_id, boxes, frame = self.image_ids[index]
image = cv2.imread(f'{self.image_dir}/{image_id}/rgb_{frame}.png', cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
area = torch.as_tensor(area, dtype=torch.float32)
# there is only one class
labels = torch.ones((len(boxes),), dtype=torch.int64)
# suppose all instances are not crowd
iscrowd = torch.zeros((len(boxes),), dtype=torch.int64)
target = {}
target['boxes'] = boxes
target['labels'] = labels
# target['masks'] = None
target['image_id'] = torch.tensor([index])
target['area'] = area
target['iscrowd'] = iscrowd
if self.transforms:
sample = {
'image': image,
'bboxes': target['boxes'],
'labels': labels
}
sample = self.transforms(**sample)
image = sample['image']
target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)
return image, target, image_id
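# A minimal usage sketch (the image directory is an illustrative placeholder):
#   dataset = HandleDataset("data/images", is_train=True, transforms=get_train_transform())
#   loader = DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=collate_fn)
#   images, targets, image_ids = next(iter(loader))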
| 3,162 | Python | 28.839622 | 105 | 0.538267 |
yizhouzhao/OpenAnyDrawer/learning/fusion.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DotAttn(nn.Module):
""" Dot-Attention """
def forward(self, inp, h):
score = self.softmax(inp, h)
return score.expand_as(inp).mul(inp).sum(1), score
def softmax(self, inp, h):
raw_score = inp.bmm(h.unsqueeze(2))
score = F.softmax(raw_score, dim=1)
return score
class ScaledDotAttn(nn.Module):
""" Scaled Dot-Attention """
def forward(self, inp, h):
score = self.softmax(inp, h)
return score.expand_as(inp).mul(inp).sum(1), score
def softmax(self, inp, h):
raw_score = inp.bmm(h.unsqueeze(2)) / np.sqrt(h.shape[-1])
score = F.softmax(raw_score, dim=1)
return score
class Fusion(nn.Module):
""" Base Fusion Class"""
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
def tile_x2(self, x1, x2, x2_proj=None):
if x2_proj:
x2 = x2_proj(x2)
x2 = x2.unsqueeze(-1).unsqueeze(-1)
x2 = x2.repeat(1, 1, x1.shape[-2], x1.shape[-1])
return x2
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
raise NotImplementedError()
class FusionAdd(Fusion):
""" x1 + x2 """
def __init__(self, input_dim=3):
super(FusionAdd, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
return x1 + x2
class FusionMult(Fusion):
""" x1 * x2 """
def __init__(self, input_dim=3):
super(FusionMult, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
return x1 * x2
class FusionMax(Fusion):
""" max(x1, x2) """
def __init__(self, input_dim=3):
super(FusionMax, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
return torch.max(x1, x2)
class FusionConcat(Fusion):
""" [x1; x2] """
def __init__(self, input_dim=3):
super(FusionConcat, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
return torch.cat([x1, x2], dim=1)
class FusionConv(Fusion):
""" 1x1 convs after [x1; x2] """
def __init__(self, input_dim=3):
super(FusionConv, self).__init__(input_dim=input_dim)
self.conv = nn.Sequential(
nn.ReLU(True),
nn.Conv2d(input_dim * 2, input_dim, kernel_size=1, bias=False)
)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
x = torch.cat([x1, x2], dim=1) # [B, 2C, H, W]
x = self.conv(x) # [B, C, H, W]
return x
class FusionConvLat(Fusion):
""" 1x1 convs after [x1; x2] for lateral fusion """
def __init__(self, input_dim=3, output_dim=3):
super(FusionConvLat, self).__init__(input_dim=input_dim)
self.conv = nn.Sequential(
nn.ReLU(True),
nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False)
)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
x = torch.cat([x1, x2], dim=1) # [B, input_dim, H, W]
x = self.conv(x) # [B, output_dim, H, W]
return x
## ------------- NOTE ----------------
## The following are various fusion types I experimented with.
## Most of them didn't work well ¯\_(ツ)_/¯
## But it doesn't mean there isn't a better way of
## doing lateral and multi-modal (language+vision) fusion.
class FusionFiLM(Fusion):
""" FiLM (Perez et. al, https://arxiv.org/abs/1709.07871).
Note: This is not used inside a Residual block before ReLU.
I had a version this in UpBlock with FiLM, which didn't seem to work at all.
"""
def __init__(self, input_dim=3, output_dim=3):
super(FusionFiLM, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, gamma, beta):
g = self.tile_x2(x1, x2, gamma)
b = self.tile_x2(x1, x2, beta)
return x1 * g + b
class FusionDeepConv(Fusion):
""" Multi-Layer 1x1 convs after [x1; x2] """
def __init__(self, input_dim=3):
super(FusionDeepConv, self).__init__(input_dim=input_dim)
self.conv = nn.Sequential(
nn.ReLU(True),
nn.Conv2d(input_dim * 2, input_dim, kernel_size=1, bias=False),
nn.ReLU(True),
nn.Conv2d(input_dim, input_dim, kernel_size=1, bias=False),
nn.ReLU(True),
nn.Conv2d(input_dim, input_dim, kernel_size=1, bias=False),
)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
x = torch.cat([x1, x2], dim=1) # [B, 2C, H, W]
x = self.conv(x) # [B, C, H, W]
return x
class FusionMultWord(nn.Module):
""" Product with weighted-sum of words """
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
B, D, H, W = x1.shape
x2_len = int(x2_mask.count_nonzero())
weighted_x1 = torch.zeros_like(x1)
for t in range(x2_len):
x2_t = x2_proj(x2[:,t]) if x2_proj else x2[:,t]
x2_t = x2_t.unsqueeze(-1).unsqueeze(-1).repeat(B, 1, H, W)
weighted_x1 += x1 * x2_t
weighted_x1 /= x2_len
return weighted_x1
class FusionWordAttention(nn.Module):
""" Word Attention """
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
self.dot_attn = DotAttn()
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
B, D, H, W = x1.shape
x1_flat = x1.reshape(B, D, H*W)
x2_len = int(x2_mask.count_nonzero())
# TODO: batch this unrolling?
weight_sum_x1_flat = torch.zeros_like(x1_flat)
for t in range(x2_len):
x2_t = x2_proj(x2[:,t]) if x2_proj else x2[:,t]
x2_t = x2_t.repeat(B, 1)
_, attn_x1 = self.dot_attn(x1_flat.transpose(1, 2), x2_t)
weight_sum_x1_flat += x1_flat * attn_x1.transpose(1, 2)
weight_sum_x1_flat /= x2_len
x2 = weight_sum_x1_flat.reshape(B, D, H, W)
return x2
class FusionSentenceAttention(nn.Module):
""" Sentence Attention """
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
self.dot_attn = ScaledDotAttn()
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
B, D, H, W = x1.shape
x1_flat = x1.reshape(B, D, H*W)
x2_t = x2_proj(x2) if x2_proj else x2
x2_t = x2_t.repeat(B, 1)
_, attn_x1 = self.dot_attn(x1_flat.transpose(1, 2), x2_t)
weight_sum_x1_flat = x1_flat * attn_x1.transpose(1, 2)
x2 = weight_sum_x1_flat.reshape(B, D, H, W)
return x2
class CrossModalAttention2d(nn.Module):
""" Cross-Modal Attention. Adapted from: https://github.com/openai/CLIP/blob/main/clip/model.py#L56 """
def __init__(self, spacial_dim=7, embed_dim=1024, num_heads=32,
output_dim=1024, lang_dim=512, lang_max_tokens=77):
super().__init__()
self.embed_dim = embed_dim
self.lang_dim = lang_dim
self.lang_max_tokens = lang_max_tokens
self.num_heads = num_heads
self.lang_proj = nn.Linear(self.lang_dim, embed_dim)
self.vision_positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2, embed_dim) / embed_dim ** 0.5)
self.lang_positional_embedding = nn.Parameter(torch.randn(lang_max_tokens, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
def forward(self, x, l, l_mask):
# reshape vision features
x_shape = x.shape
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = x + self.vision_positional_embedding[:x.shape[0], None, :].to(x.dtype) # (HW)NC
# project language
l = l.permute(1, 0, 2)
l_shape = l.shape
l = l.reshape(-1, self.lang_dim)
l = self.lang_proj(l)
l = l.reshape(l_shape[0], l_shape[1], self.embed_dim)
l = l + self.lang_positional_embedding[:, None, :].to(l.dtype)
# hard language mask
l_len = int(l_mask.count_nonzero())
l = l[:l_len]
l = l.repeat(1, x.shape[1], 1)
x, _ = F.multi_head_attention_forward(
query=x, key=l, value=l,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
x = x.permute(1, 2, 0)
x = x.reshape(x_shape)
return x
class FusionMultiHeadedWordAttention(nn.Module):
""" Multi-Headed Word Attention that uses Cross Modal Attention at different scales """
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
self.attn1 = CrossModalAttention2d(spacial_dim=7, embed_dim=1024, output_dim=1024)
self.attn2 = CrossModalAttention2d(spacial_dim=14, embed_dim=512, output_dim=512)
self.attn3 = CrossModalAttention2d(spacial_dim=28, embed_dim=256, output_dim=256)
self.multi_headed_attns = {
1024: self.attn1,
512: self.attn2,
256: self.attn3,
}
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
emb_dim = x1.shape[1]
x = self.multi_headed_attns[emb_dim](x1, x2, x2_mask)
return x
names = {
'add': FusionAdd,
'mult': FusionMult,
'mult_word': FusionMultWord,
'film': FusionFiLM,
'max': FusionMax,
'concat': FusionConcat,
'conv': FusionConv,
'deep_conv': FusionDeepConv,
'word_attn': FusionWordAttention,
'sent_attn': FusionSentenceAttention,
'multi_headed_word_attn': FusionMultiHeadedWordAttention,
}
| 11,360 | Python | 31.646552 | 116 | 0.563292 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand_common.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
default_grasp_profile = {
"position_offset":{
"vertical": [0,0,0],
"horizontal": [0,0,0],
},
"rotation":{
"vertical": [0,0,0,1], # XYZW
"horizontal": [0,0,0,1],
}
}
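# Hand-specific subclasses are presumably expected to override `grasp_profile`
# with their own position offsets and grasp rotations; the zero offsets and
# identity quaternions above act as a neutral default.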
class HandBase():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None
) -> None:
# init hand helper
# self.hander_helper = HandHelper()
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
self.grasp_profile = default_grasp_profile
def start(self):
# simulation context
self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def calculate_grasp_location(self, keyword = "handle_", verticle = True):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
        min_x = bboxes_list[0][0][0]  # min x of the first handle's bounding box
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
pos_offset = self.grasp_profile["position_offset"]
if verticle:
v_pos_offset = pos_offset["vertical"]
grasp_list = [[min_x - v_pos_offset[0], c[1] - v_pos_offset[1], c[2] - v_pos_offset[2]] for c in center_list]
else:
h_pos_offset = pos_offset["horizontal"]
grasp_list = [[min_x - h_pos_offset[0], c[1] - h_pos_offset[1], c[2] - h_pos_offset[2]] for c in center_list]
        grasp_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = self.grasp_profile["rotation"]["vertical"] if verticle else self.grasp_profile["rotation"]["horizontal"]
        grasp_rot = np.array([base_rotation], dtype=np.float32)  # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
        return grasp_pos, grasp_rot
def calculate_grasp_location_from_pred_box(self, box, center = None, verticle = True):
"""
Calculate the grasp location for the handle
box: [y_0, z_0, y_1, z_1]
center: [y, z]
"""
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
        min_x = 0.618  # hardcoded approach distance along x (presumably the cabinet front in this setup)
handle_y = 0.5 * (box[0] + box[2])
handle_z = 0.5 * (box[1] + box[3])
pos_offset = self.grasp_profile["position_offset"]
if verticle:
v_pos_offset = pos_offset["vertical"]
grasp_list = [[min_x - v_pos_offset[0], handle_y - v_pos_offset[1], handle_z - v_pos_offset[2]]]
else:
h_pos_offset = pos_offset["horizontal"]
grasp_list = [[min_x - h_pos_offset[0], handle_y - h_pos_offset[1], handle_z - h_pos_offset[2]]]
        grasp_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = self.grasp_profile["rotation"]["vertical"] if verticle else self.grasp_profile["rotation"]["horizontal"]
        grasp_rot = np.array([base_rotation], dtype=np.float32)  # XYZW
        return grasp_pos, grasp_rot | 4,445 | Python | 34.285714 | 129 | 0.593926 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/open_env.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import omni
import pxr
from pxr import Gf, Sdf
from omni.isaac.franka import Franka
from omni.isaac.core.utils.stage import set_stage_up_axis
from omni.isaac.core import World, SimulationContext
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
import numpy as np
from pathlib import Path
from PIL import Image
from numpy_utils import *
from utils import get_bounding_box
ROOT = str(Path(__file__).parent.joinpath("../../../../../../").resolve())
class OpenEnv():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None,
load_nucleus = True,
) -> None:
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
self.load_nucleus = load_nucleus
def add_camera(self):
self.stage = omni.usd.get_context().get_stage()
# Create prim
prim = self.stage.GetPrimAtPath("/World/Camera")
# delete if exist
if prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Camera"])
omni.kit.commands.execute("CreatePrimWithDefaultXform", prim_type="Camera", prim_path = "/World/Camera")
prim = self.stage.GetPrimAtPath("/World/Camera")
mat = Gf.Matrix4f().SetRotate(Gf.Quatf(0.5, 0.5, -0.5, -0.5)) * Gf.Matrix4f().SetTranslate(Gf.Vec3f(-1, 0, 0.5))
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/Camera",
new_transform_matrix=mat,
)
# Setup missing ftheta params
prim.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token)
prim.CreateAttribute("fthetaPolyA", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyB", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyC", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyD", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaPolyE", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaCx", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaCy", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaWidth", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaHeight", Sdf.ValueTypeNames.Float)
prim.CreateAttribute("fthetaMaxFov", Sdf.ValueTypeNames.Float)
camera_properties = {
"focalLength": 24.0,
"focusDistance": 400.0,
"fStop":0.0,
"horizontalAperture":20.955,
"horizontalApertureOffset":0.0,
"verticalApertureOffset":0.0,
"clippingRange":(1.0, 1000000.0),
"cameraProjectionType":"pinhole",
"fthetaWidth":1936.0,
"fthetaHeight":1216.0,
"fthetaCx":970.94244,
"fthetaCy":600.37482,
"fthetaMaxFov":200.0,
"fthetaPolyA":0.0,
"fthetaPolyB":0.00245,
"fthetaPolyC":0.0,
"fthetaPolyD":0.0,
"fthetaPolyE":0.0,
}
for attribute, attribute_value in camera_properties.items():
prim.GetAttribute(attribute).Set(attribute_value)
# import omni.replicator.core as rep
# camera = rep.create.camera(position=(-1, 0, 0.5), rotation=(90, 0, -90))
def add_robot(self):
print("add robot")
self.stage = omni.usd.get_context().get_stage()
self.game_path_str = "/World/Game"
xform_game = self.stage.GetPrimAtPath(self.game_path_str)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.game_path_str)
set_stage_up_axis("z")
# import robot
self.robot = Franka("/World/Game/Franka")
def add_object(self, obj_idx = 0, x_offset = 6, scale = 1):
from utils import get_bounding_box, add_physical_material_to, fix_linear_joint
print("add object")
self.stage = omni.usd.get_context().get_stage()
self.game_path_str = "/World/Game"
xform_game = self.stage.GetPrimAtPath(self.game_path_str)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.game_path_str)
# move obj to the correct place
mobility_prim_path = xform_game.GetPath().pathString + "/mobility"
prim = self.stage.GetPrimAtPath(mobility_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(mobility_prim_path)
# loading asset from Omniverse Nucleus or local
if self.load_nucleus:
asset_root = "omniverse://localhost/Users/yizhou"
r = omni.client.list(os.path.join(asset_root, "Asset/Sapien/StorageFurniture/"))
print("loading asset from omni nucleus")
object_ids = sorted([e.relative_path for e in r[1]])
else:
asset_root = ROOT
object_ids = sorted(os.listdir(os.path.join(asset_root, "Asset/Sapien/StorageFurniture/")))
obj_usd_path = os.path.join(asset_root, f"Asset/Sapien/StorageFurniture/{object_ids[obj_idx]}/mobility.usd")
success_bool = prim.GetReferences().AddReference(obj_usd_path)
assert success_bool, f"Import error at usd {obj_usd_path}"
xform = pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * \
pxr.Gf.Matrix4d().SetTranslate([0,0,0]) * \
pxr.Gf.Matrix4d().SetScale([7.0 * scale,7.0 *scale,7.0 * scale])
omni.kit.commands.execute(
"TransformPrimCommand",
path=mobility_prim_path,
new_transform_matrix=xform,
)
# get obj bounding box
bboxes = get_bounding_box(mobility_prim_path)
position = [-bboxes[0][0] + x_offset * scale, 0, -bboxes[0][2]]
xform.SetTranslateOnly(position)
omni.kit.commands.execute(
"TransformPrimCommand",
path=mobility_prim_path,
new_transform_matrix=xform,
)
# add physical material to the handle meshes
add_physical_material_to("handle_")
# fix linear joint
fix_linear_joint()
def start(self):
# simulation context
self.simulation_context = SimulationContext(backend=self.backend, device=self.device)
print("simulation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def move_to_target(self, goal_pos, goal_rot):
"""
Compute joint position targets that move the hand toward the goal pose
"""
# get end effector transforms
hand_pos, hand_rot = self.xforms.get_world_poses()
hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - hand_pos
orn_err = orientation_error(goal_rot, hand_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to franka hand
franka_hand_index = 8  # index of panda_hand among the articulation links
j_eef = jacobians[:, franka_hand_index - 1, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, 9)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
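# The update above is damped least-squares (Levenberg-Marquardt style) IK:
#   u = J^T (J J^T + d^2 I)^{-1} dpose
# A minimal numpy shape-check (made-up values, num_envs = 1, 9 DOFs):
#   J = np.zeros((1, 6, 9)); dpose = np.zeros((1, 6, 1)); d = 0.05
#   u = J.transpose(0, 2, 1) @ np.linalg.inv(J @ J.transpose(0, 2, 1) + (d**2) * np.eye(6)) @ dpose
#   u.shape -> (1, 9, 1), one joint-space step per env
# The damping d keeps the pseudo-inverse well conditioned near singular
# poses; the "- 1" on franka_hand_index is presumably because the fixed
# base link carries no Jacobian row.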
##################################################################################################
# -------------------------------------- Calculation --------------------------------------------#
##################################################################################################
def get_mesh_bboxes(self, keyword: str):
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
bboxes_list = []
for prim in prim_list:
bboxes = get_bounding_box(prim.GetPath().pathString)
bboxes_list.append(bboxes)
return bboxes_list
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.086):
"""
Calculate the grasp location for the handle
"""
bboxes_list = self.get_mesh_bboxes(keyword)
assert len(bboxes_list) == self.num_envs, "expected exactly one handle per environment!"
# get the box centers and the minimum x (front face of the handles)
min_x = bboxes_list[0][0][0]  # front-face x of the first handle
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
grasp_list = [[min_x - x_offset, c[1], c[2]] for c in center_list]
grasp_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0)  # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return grasp_pos, grasp_rot
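# Sanity check on the base rotations (XYZW): [0.5, 0.5, 0.5, 0.5] is the
# 120-degree rotation about (1,1,1)/sqrt(3), which permutes the gripper
# axes x->y->z->x; [0, 0.70711, 0, 0.70711] is a 90-degree turn about y,
# matching the "0, 90, 0" Euler note above.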
def calculate_pull_location(self, start_pos, start_rot, theta, r, clock_wise = False):
"""
Calculate the pull trajectory that opens the cabinet door
"""
clock_wise = float(2 * clock_wise - 1)
# position
pos_offset = np.tile(np.array([-r * np.sin(theta), clock_wise * r * (1 - np.cos(theta)), 0]), (self.num_envs, 1))
target_pos = start_pos + pos_offset
# rotate
rot_offset = np.tile(np.array([np.sin(clock_wise * theta / 2), 0, 0, np.cos( - clock_wise * theta / 2)]), (self.num_envs, 1))
target_rot = quat_mul(start_rot, rot_offset)
return target_pos, target_rot
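# Geometry sketch: the handle is assumed to travel on a circle of radius r
# around the door hinge. After opening by theta, the hand retreats by
# r*sin(theta) along x and shifts sideways by r*(1 - cos(theta)) along y
# (sign from clock_wise, which float(2*b - 1) maps True -> +1, False -> -1),
# while the wrist rotates by theta about x so the grip stays flush with the
# door face. E.g. r = 0.4, theta = 30 deg -> offset ~ [-0.20, +/-0.054, 0].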
##################################################################################################
# -------------------------------------- Render ------------------------------------------------#
##################################################################################################
def setup_viewport(self, camera_path = "/World/Camera", resolution = [256, 256]):
viewport = omni.kit.viewport_legacy.get_viewport_interface()
viewport_handle = viewport.get_instance("Viewport")
self.viewport_window = viewport.get_viewport_window(viewport_handle)
self.viewport_window.set_texture_resolution(*resolution)
self.viewport_window.set_active_camera(camera_path) # /OmniverseKit_Persp
self.sd_helper = SyntheticDataHelper()
self.sd_helper.initialize(sensor_names=["rgb",'depthLinear'], viewport=self.viewport_window)
def get_image(self, return_array = False, world = None):
if world:
world.step(render=True)
world.render()
gt = self.sd_helper.get_groundtruth(
["rgb", "depthLinear"], self.viewport_window, verify_sensor_init=False, wait_for_sensor_data= 0
)
if return_array:
return gt['rgb']
return Image.fromarray(gt['rgb'])
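# Typical capture loop (a sketch; assumes a running omni.isaac.core World
# named `world` and that add_camera()/setup_viewport() were called first):
#   env = OpenEnv()
#   env.add_camera(); env.setup_viewport(resolution=[256, 256])
#   img = env.get_image(world=world)                      # PIL.Image
#   arr = env.get_image(return_array=True, world=world)   # numpy array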
| 12,285 | Python | 36.571865 | 133 | 0.572324 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/usd_utils.py | from pxr import Gf
import omni
def calcuate_rotation_axis(q, axis = 2, direction = 0):
"""
Project the rotated basis axis (x, y, or z) of quaternion q onto a world direction (x, y, or z)
q: [x,y,z,w]
"""
mat = Gf.Matrix4f().SetRotate(Gf.Quatf(float(q[3]), float(q[0]), float(q[1]), float(q[2])))
return mat.GetRow(axis)[direction]
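# Example (hypothetical quaternion): for the identity rotation the local
# z-axis is still world z, so projecting axis 2 onto direction 2 gives 1:
#   calcuate_rotation_axis([0.0, 0.0, 0.0, 1.0], axis=2, direction=2)  # -> 1.0
#   calcuate_rotation_axis([0.0, 0.0, 0.0, 1.0], axis=2, direction=0)  # -> 0.0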
| 324 | Python | 28.545452 | 95 | 0.62037 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/extension.py | import omni.ext
import omni.ui as ui
from .open_env import OpenEnv
# NOTE:
# go to directory: open-any-drawer/exts/open.any.drawer/open/any/drawer/
# # start notebook from: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/jupyter_notebook.sh
# start python: /home/yizhou/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
# next paper about body language
# hand helper
import carb
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
from .hand.limiter import *
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[open.any.drawer] MyExtension startup")
self.env = OpenEnv()
self._window = ui.Window("Open any drawer", width=300, height=300)
with self._window.frame:
with ui.VStack():
with ui.HStack(height = 20):
ui.Button("Add Franka Robot", clicked_fn= self.env.add_robot)
with ui.HStack(height = 20):
ui.Label("object index: ", width = 80)
self.object_id_ui = omni.ui.IntField(height=20, width = 40, style={ "margin": 2 })
self.object_id_ui.model.set_value(0)
ui.Label("object scale: ", width = 80)
self.object_scale_ui = omni.ui.FloatField(height=20, width = 40, style={ "margin": 2 })
self.object_scale_ui.model.set_value(0.1)
ui.Button("Add Object", clicked_fn=self.add_object)
with ui.HStack(height = 20):
ui.Button("Add Ground", clicked_fn=self.add_ground)
ui.Button("Add Camera", clicked_fn=self.add_camera)
with ui.HStack(height = 20):
# ui.Button("Add hand from copying", clicked_fn= self.debug)
ui.Button("Add hand from helper", clicked_fn= self.rig_hand2)
ui.Button("Rig D6", clicked_fn= self.debug_rig_d6)
ui.Button("Add drivers to joint", clicked_fn = self._add_driver_to_revolve_joint)
with ui.HStack(height = 20):
ui.Button("Test instructor", clicked_fn= self.debug_instructor)
ui.Button("Batch generation", clicked_fn= self.debug_batch_gen)
with ui.HStack(height = 20):
ui.Button("Test task checker", clicked_fn= self.debug_task_checker)
with ui.HStack(height = 20):
ui.Button("Test Load FastRCNN", clicked_fn= self.debug_load_model)
def add_ground(self):
from utils import add_ground_plane
add_ground_plane("/World/Game")
def add_camera(self):
self.env.add_camera()
self.env.setup_viewport()
def add_object(self):
object_id = self.object_id_ui.model.get_value_as_int()
object_scale = self.object_scale_ui.model.get_value_as_float()
self.env.add_object(object_id, scale = object_scale)
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/game", True, True, True, True)
viewport = omni.kit.viewport_legacy.get_viewport_interface()
if viewport:
viewport.get_viewport_window().focus_on_selected()
def on_shutdown(self):
print("[open.any.drawer] MyExtension shutdown")
def rig_hand2(self):
print("debug2")
from .hand.helper import HandHelper
self.hand_helper = HandHelper()
def debug_rig_d6(self):
self._stage = omni.usd.get_context().get_stage()
self._damping = 5 # 1e4
self._stiffness = 5e1 # 2e5
# create anchor:
self._anchorXform = UsdGeom.Xform.Define(
self._stage, Sdf.Path("/World/AnchorXform")
)
# these are global coords because world is the xform's parent
xformLocalToWorldTrans = Gf.Vec3f(0)
xformLocalToWorldRot = Gf.Quatf(1.0)
self._anchorXform.AddTranslateOp().Set(xformLocalToWorldTrans)
self._anchorXform.AddOrientOp().Set(xformLocalToWorldRot)
xformPrim = self._anchorXform.GetPrim()
physicsAPI = UsdPhysics.RigidBodyAPI.Apply(xformPrim)
physicsAPI.CreateRigidBodyEnabledAttr(True)
physicsAPI.CreateKinematicEnabledAttr(True)
# setup joint to floating hand base
component = UsdPhysics.Joint.Define(
self._stage, Sdf.Path("/World/AnchorToHandBaseD6") # allegro/
)
# "/World/Hand/Bones/l_carpal_mid" # "/World/allegro/allegro_mount" # "/World/shadow_hand/robot0_hand_mount"
# "/World/Franka/panda_link8"
self._articulation_root = self._stage.GetPrimAtPath("/World/Hand/Bones/l_carpal_mid")
baseLocalToWorld = UsdGeom.Xformable(self._articulation_root).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointPosition = baseLocalToWorld.GetInverse().Transform(xformLocalToWorldTrans)
jointPose = Gf.Quatf(baseLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
component.CreateExcludeFromArticulationAttr().Set(True)
component.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0))
component.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
component.CreateBody0Rel().SetTargets([self._anchorXform.GetPath()])
component.CreateBody1Rel().SetTargets([self._articulation_root.GetPath()])
component.CreateLocalPos1Attr().Set(jointPosition)
component.CreateLocalRot1Attr().Set(jointPose)
component.CreateBreakForceAttr().Set(sys.float_info.max)
component.CreateBreakTorqueAttr().Set(sys.float_info.max)
rootJointPrim = component.GetPrim()
for dof in ["transX", "transY", "transZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, dof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(self._damping)
driveAPI.CreateStiffnessAttr(self._stiffness)
for rotDof in ["rotX", "rotY", "rotZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, rotDof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(self._damping)
driveAPI.CreateStiffnessAttr(self._stiffness)
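# Once rigged, the hand is steered by animating the kinematic anchor xform
# each frame; the six drives then pull the hand base toward it like a
# spring-damper. A sketch (attribute name as created by AddTranslateOp above):
#   xform_prim = self._stage.GetPrimAtPath("/World/AnchorXform")
#   xform_prim.GetAttribute("xformOp:translate").Set(Gf.Vec3f(0.1, 0.0, 0.3))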
def _add_driver_to_revolve_joint(self):
stage = omni.usd.get_context().get_stage()
joint_prim_list = [ item for item in list(stage.TraverseAll()) if item.GetTypeName() == 'PhysicsRevoluteJoint'] #
for joint in joint_prim_list:
# if not UsdPhysics.DriveAPI.Get(stage, joint.GetPath()):
driveAPI = UsdPhysics.DriveAPI.Apply(joint, "angular")
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e6)
driveAPI.CreateStiffnessAttr(1e8)
def debug_instructor(self):
print("debug instru")
from task.instructor import SceneInstructor
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
self.scene_instr.add_semantic_to_handle()
self.scene_instr.export_data()
def debug_batch_gen(self):
print("debug_batch_gen")
from .task.instructor import SceneInstructor
import omni.replicator.core as rep
# object_id = self.object_id_ui.model.set_value(4)
object_id = self.object_id_ui.model.get_value_as_int()
object_scale = self.object_scale_ui.model.get_value_as_float()
self.env.add_object(object_id, scale = object_scale)
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
print("scene_instr.is_obj_valid: ", self.scene_instr.is_obj_valid)
if self.scene_instr.is_obj_valid:
self.scene_instr.add_semantic_to_handle()
self.scene_instr.output_path = f"/home/yizhou/Research/temp0/"
self.scene_instr.export_data()
# print("print(rep.orchestrator.get_is_started())", rep.orchestrator.get_is_started())
############ task check #####################################################################
def debug_task_checker(self):
print("debug_task_checker")
from task.checker import TaskChecker
from task.instructor import SceneInstructor
self.env.add_robot()
object_id = self.object_id_ui.model.get_value_as_int()
object_scale = self.object_scale_ui.model.get_value_as_float()
self.env.add_object(object_id, scale = object_scale)
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
# self.task_checker = TaskChecker("mobility", "joint_0", "PhysicsRevoluteJoint")
############ deep learning #####################################################################
def debug_load_model(self):
print("load_model")
from task.instructor import SceneInstructor
self.scene_instr = SceneInstructor()
self.scene_instr.analysis()
self.scene_instr.build_handle_desc_ui()
print("scene_instr.is_obj_valid: ", self.scene_instr.is_obj_valid)
if self.scene_instr.is_obj_valid:
# print("valid_handle_list", self.scene_instr.valid_handle_list)
self.scene_instr.load_model()
self.scene_instr.predict_bounding_boxes(image_path="/home/yizhou/Research/temp0/rgb_0.png")
print("pred bboxes", self.scene_instr.pred_boxes)
handle_list = list(self.scene_instr.valid_handle_list.keys())
for HANDLE_INDEX in range(len(handle_list)):
handle_path_str = handle_list[HANDLE_INDEX]
v_desc = self.scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = self.scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
print("handle_path_str", handle_path_str, "v desc: ", v_desc, "h desc:", h_desc)
the_box = self.scene_instr.get_box_from_desc(v_desc, h_desc)
print("the_box:", the_box)
del self.scene_instr.model | 11,224 | Python | 42.507752 | 123 | 0.622951 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand_env.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
from hand.helper import HandHelper
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
class HandEnv():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None
) -> None:
# init hand helper
# self.hander_helper = HandHelper()
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
def start(self):
# simulation context
self.simulation_context = SimulationContext(backend=self.backend, device=self.device)
print("simulation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.1):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get the box centers and the minimum x (front face of the handles)
min_x = bboxes_list[0][0][0]  # front-face x of the first handle
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
if verticle:
grasp_list = [[min_x - x_offset, c[1], c[2] - 0.12] for c in center_list]
else:
grasp_list = [[min_x - x_offset, c[1] + 0.12, c[2]] for c in center_list]
grasp_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [0.38268, 0, 0, 0.92388] if verticle else [0.3036, 0.23296, -0.56242, 0.73296]
grasp_rot = np.array([base_rotation], dtype=np.float32)  # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return grasp_pos, grasp_rot
def move_to_target(self, goal_pos, goal_rot, finger = "thumb"):
"""
Compute joint position targets that move the chosen fingertip toward the goal pose
"""
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - finger_pos
orn_err = orientation_error(goal_rot, finger_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to correct finger
if finger == "thumb":
finger_index = 14
elif finger == "index":
finger_index = 15
elif finger == "middle":
finger_index = 16
elif finger == "pinky":
finger_index = 17
else: # ring
finger_index = 18
j_eef = jacobians[:, finger_index, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, -1)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
##################################################################################################
# -------------------------------------- Control ------------------------------------------------#
##################################################################################################
def move_finger_to_fast(self, target_pos, target_rot, world, finger = "thumb", max_step = 100):
"""
Quickly move the robot hand to the target position and rotation
"""
for i in range(max_step):
world.step(render=True)
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
print("finger_pos", finger_pos)
orient_error = quat_mul(target_rot[0], quat_conjugate(finger_rot[0]))
# print("orient_error", orient_error)
# if abs(orient_error[3] - 1) < 0.02 and \
# np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
# np.sqrt(np.sum((target_pos[0] - finger_pos[0])**2)) < 0.01:
# print("Done rotation, position", finger_pos, finger_rot)
# return
u = self.move_to_target(target_pos, target_rot)
# u[:,[-2, -1]] = 0.05 if open_gripper else 0
self.robots.set_joint_position_targets(u)
print("Not Done rotation, position", finger_pos, finger_rot)
def calculate_grasp_location_from_pred_box(self, box, verticle = True, x_offset = 0.1):
"""
Calculate the grasp location for the handle
"""
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = 0.618
handle_y = 0.5 * (box[0] + box[2])
handle_z = 0.5 * (box[1] + box[3])
if verticle:
grasp_list = [[min_x - x_offset, handle_y, handle_z - 0.12]]
else:
grasp_list = [[min_x - x_offset, handle_y + 0.12, handle_z]]
grasp_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [0.38268, 0, 0, 0.92388] if verticle else [0.3036, 0.23296, -0.56242, 0.73296]
grasp_rot = np.array([base_rotation], dtype=np.float32)  # XYZW
return grasp_pos, grasp_rot | 6,608 | Python | 36.338983 | 122 | 0.548123 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/utils.py | # utility
import numpy as np
import omni
from omni.physx.scripts import physicsUtils
from pxr import UsdGeom, Usd, UsdShade, UsdPhysics, Gf
def add_ground_plane(prim_path = "/World/game", invisible = False):
stage = omni.usd.get_context().get_stage()
purposes = [UsdGeom.Tokens.default_]
bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
prim = stage.GetPrimAtPath(prim_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
z = bboxes.ComputeAlignedRange().GetMin()[2]
physicsUtils.add_ground_plane(stage, "/groundPlane", "Z", 10.0, Gf.Vec3f(0.0, 0.0, z), Gf.Vec3f(0.2))
if invisible:
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
for prim in prim_list:
prim.GetAttribute('visibility').Set('invisible')
def get_bounding_box(prim_path: str):
"""
Get the bounding box of a prim
"""
stage = omni.usd.get_context().get_stage()
purposes = [UsdGeom.Tokens.default_]
bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
prim = stage.GetPrimAtPath(prim_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()]
return game_bboxes
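# Example (hypothetical prim path): returns the axis-aligned world-space
# [min, max] corners as two Gf.Vec3d values:
#   lo, hi = get_bounding_box("/World/Game/mobility")
#   extent = hi - lo   # width / depth / height of the prim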
def get_mesh_bboxes(keyword: str):
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
bboxes_list = []
for prim in prim_list:
bboxes = get_bounding_box(prim.GetPath().pathString)
bboxes_list.append(bboxes)
return bboxes_list
def add_physical_material_to(keyword:str):
"""
Set up physical material
"""
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
for prim in prim_list:
setup_physics_material(prim)
print("add physics material to handle")
# setStaticCollider(prim, approximationShape = "convexDecomposition")
def setup_physics_material(prim):
"""
Set up a physics material for the prim at the given path
"""
# def _setup_physics_material(self, path: Sdf.Path):
stage = omni.usd.get_context().get_stage()
_material_static_friction = 100.0
_material_dynamic_friction = 100.0
_material_restitution = 0.0
_physicsMaterialPath = None
if _physicsMaterialPath is None:
# _physicsMaterialPath = stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
_physicsMaterialPath = prim.GetPath().AppendChild("physicsMaterial")
UsdShade.Material.Define(stage, _physicsMaterialPath)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(_physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(_material_static_friction)
material.CreateDynamicFrictionAttr().Set(_material_dynamic_friction)
material.CreateRestitutionAttr().Set(_material_restitution)
collisionAPI = UsdPhysics.CollisionAPI.Get(stage, prim.GetPath())
# prim = stage.GetPrimAtPath(path)
if not collisionAPI:
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# apply material
physicsUtils.add_physics_material_to_prim(stage, prim, _physicsMaterialPath)
print("physics material: path: ", _physicsMaterialPath)
def fix_linear_joint(fix_driver = True, damping_coefficient = 0.0):
stage = omni.usd.get_context().get_stage()
prim_list = stage.TraverseAll()
for prim in prim_list:
if "joint_" in str(prim.GetPath()):
if fix_driver:
# find linear drive
joint_driver = UsdPhysics.DriveAPI.Get(prim, "linear")
if joint_driver:
joint_driver.CreateDampingAttr(damping_coefficient)
# find angular drive
joint_driver = UsdPhysics.DriveAPI.Get(prim, "angular")
if joint_driver:
joint_driver.CreateDampingAttr(damping_coefficient)
# find linear joint upperlimit
joint = UsdPhysics.PrismaticJoint.Get(stage, prim.GetPath())
if joint:
upper_limit = joint.GetUpperLimitAttr().Get() #GetAttribute("xformOp:translate").Get()
# print(prim.GetPath(), "upper_limit", upper_limit)
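# The prismatic upper limit appears to be authored in the asset's
# centimeter-scale units, so it is rescaled below by the mobility xform's
# uniform scale (3rd xform op) over the cm->m factor of 100.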
mobility_prim = prim.GetParent().GetParent()
mobility_xform = UsdGeom.Xformable.Get(stage, mobility_prim.GetPath())
scale_factor = mobility_xform.GetOrderedXformOps()[2].Get()[0]
# print("scale_factor", scale_factor)
joint.CreateUpperLimitAttr(upper_limit * scale_factor / 100) | 4,968 | Python | 41.110169 | 129 | 0.657407 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand/limiter.py | # hand limiter
import carb
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
# helpers
def computeMeshWorldBoundsFromPoints(mesh: UsdGeom.Mesh) -> Vt.Vec3fArray:
mesh_pts = mesh.GetPointsAttr().Get()
extent = UsdGeom.PointBased.ComputeExtent(mesh_pts)
transform = mesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
for i in range(len(extent)):
extent[i] = transform.Transform(extent[i])
return extent
def get_quat_from_extrinsic_xyz_rotation(angleXrad: float = 0.0, angleYrad: float = 0.0, angleZrad: float = 0.0):
# angles are in radians
rotX = rotate_around_axis(1, 0, 0, angleXrad)
rotY = rotate_around_axis(0, 1, 0, angleYrad)
rotZ = rotate_around_axis(0, 0, 1, angleZrad)
return rotZ * rotY * rotX
def rotate_around_axis(x: float, y: float, z: float, angle: float) -> Gf.Quatf:
s = math.sin(0.5 * angle)
return Gf.Quatf(math.cos(0.5 * angle), s * x, s * y, s * z)
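# Both helpers build on the axis-angle form q = (cos(a/2), sin(a/2) * axis).
# E.g. rotate_around_axis(0, 0, 1, math.pi) is (up to float rounding)
# Gf.Quatf(0, 0, 0, 1), the 180-degree turn about z, and the extrinsic
# helper applies the X, then Y, then Z rotations in the fixed world frame
# (hence the rotZ * rotY * rotX product).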
class QuaternionRateOfChangeLimiter:
def __init__(self, initQuat: Gf.Quatf = Gf.Quatf(1.0), maxRateRad: float = 0.01, iirAlpha: float = 0.0):
self.maxDeltaPerFrame = maxRateRad
self.cosThreshdold = math.cos(maxRateRad)
self.resetValue = initQuat
self.reset()
self.alpha = 1.0 - iirAlpha # 1 - alpha due to slerp (alpha = 0 -> immediate step to new goal)
def reset(self):
self.targetValue = self.resetValue
self.currentValue = self.resetValue
self.filteredTarget = None
def set_target(self, targetValue: Gf.Quatf):
self.targetValue = targetValue
def set_target_and_update(self, targetValue: Gf.Quatf):
self.targetValue = targetValue
self.update()
@property
def current_value(self):
return self.currentValue
def update(self):
if self.filteredTarget is None:
self.filteredTarget = self.targetValue
else:
self.filteredTarget = Gf.Quatf(Gf.Slerp(self.alpha, self.filteredTarget, self.targetValue))
toTarget = self.currentValue.GetInverse() * self.filteredTarget
# shortest rotation
if toTarget.GetReal() < 0.0:
toTarget = -toTarget
angle = math.acos(max(-1, min(1, toTarget.GetReal()))) * 2.0
if angle > self.maxDeltaPerFrame:
angle = self.maxDeltaPerFrame
axis = toTarget.GetImaginary()
axis.Normalize()
sin = math.sin(0.5 * angle)
toTarget = Gf.Quatf(math.cos(angle * 0.5), sin * axis[0], sin * axis[1], sin * axis[2])
self.currentValue = self.currentValue * toTarget
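# Usage sketch (hypothetical targets): one limiter per driven transform,
# ticked once per physics step so the hand never snaps to a far-away pose:
#   limiter = QuaternionRateOfChangeLimiter(Gf.Quatf(1.0), maxRateRad=0.02)
#   limiter.set_target_and_update(goal_quat)  # e.g. goal from a tracker
#   smoothed = limiter.current_value          # rotates <= 0.02 rad/frame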
class JointGeometry:
def __init__(self, bbCenterWeight=None, quat=None, posOffsetW=None, axis=None, limits=None, joint_type="revolute"):
self.bbCenterWeight = bbCenterWeight
self.quat = quat
self.posOffsetW = posOffsetW
self.axis = axis
self.limits = limits
self.type = joint_type
self.defaultDriveAngles = {"rotX": 0.0, "rotY": 0.0, "rotZ": 0.0}
class VectorRateOfChangeLimiter:
def __init__(self, initVector: Gf.Vec3f = Gf.Vec3f(0.0), maxRatePerAxis: float = 0.01, iirAlpha: float = 0.0):
self.maxDeltaPerFrame = maxRatePerAxis
self.resetValue = initVector
self.reset()
self.alpha = iirAlpha
def reset(self):
# need to copy to avoid creating just a ref
self.targetValue = copy(self.resetValue)
self.currentValue = copy(self.resetValue)
self.filteredTarget = None
def set_target(self, targetValue: Gf.Vec3f):
self.targetValue = targetValue
def set_target_and_update(self, targetValue: Gf.Vec3f):
self.targetValue = targetValue
self.update()
@property
def current_value(self):
return self.currentValue
def update(self):
if self.filteredTarget is None:
self.filteredTarget = self.targetValue
else:
self.filteredTarget = self.alpha * self.filteredTarget + (1.0 - self.alpha) * self.targetValue
for i in range(3):
toTarget = self.filteredTarget[i] - self.currentValue[i]
if abs(toTarget) > self.maxDeltaPerFrame:
if toTarget < 0.0:
toTarget = -self.maxDeltaPerFrame
else:
toTarget = self.maxDeltaPerFrame
self.currentValue[i] += toTarget
class JointAngleRateOfChangeLimiter:
def __init__(self, jointDriveAPI, initValue: float = 0.0, maxRateRad: float = 0.01):
self.maxDeltaPerFrame = maxRateRad
self.jointDriveAPI = jointDriveAPI
self.resetValue = initValue
self.reset()
def set_current_angle_in_drive_api(self):
targetDeg = self.currentValue * 180.0 / math.pi
self.jointDriveAPI.GetTargetPositionAttr().Set(targetDeg)
def reset(self):
self.targetValue = self.resetValue
self.currentValue = self.resetValue
def set_target(self, targetValue):
self.targetValue = targetValue
def set_target_and_update(self, targetValue):
self.targetValue = targetValue
self.update()
def update(self):
toTarget = self.targetValue - self.currentValue
if abs(toTarget) > self.maxDeltaPerFrame:
if toTarget < 0:
toTarget = -self.maxDeltaPerFrame
else:
toTarget = self.maxDeltaPerFrame
self.currentValue += toTarget | 5,611 | Python | 36.165563 | 119 | 0.641597 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/hand/helper.py | # hand helper
import carb
import omni
import sys
from pxr import Usd, Sdf, PhysxSchema, UsdPhysics, Vt, Gf, UsdGeom, UsdShade
from omni.physx.scripts import physicsUtils, particleUtils
from omni.physx.scripts import deformableUtils, utils
import math
from copy import copy
from .limiter import *
class HandHelper():
def __init__(self) -> None:
self.stage = omni.usd.get_context().get_stage()
self._scale = 1
#########################################################
################### constants ###########################
#########################################################
self._physicsMaterialPath = None
self._material_static_friction = 1.0
self._material_dynamic_friction = 1.0
self._material_restitution = 0.0
# Joint drives / params:
radToDeg = 180.0 / math.pi
self._drive_max_force = 1e20
self._revolute_drive_stiffness = 10000000 / radToDeg # 50000.0
self._spherical_drive_stiffness = 22000000 / radToDeg # 50000.0
self._revolute_drive_damping = 0.2 * self._revolute_drive_stiffness
self._spherical_drive_damping = 0.2 * self._spherical_drive_stiffness
self._maxJointVelocity = 3.0 * radToDeg
self._jointFriction = 0.01
self._finger_mass = 0.1
mHand = self._finger_mass * 20.0 + self._finger_mass + self._finger_mass
dh = 0.05
self._d6LinearSpring = mHand * 100 / dh
self._d6LinearDamping = 20 * math.sqrt(self._d6LinearSpring * mHand)
self._d6RotationalSpring = self._d6LinearSpring * 100.0 * 100.0 / radToDeg
self._d6RotationalDamping = self._d6LinearDamping * 100.0 * 50.0 / radToDeg
self._jointAngleRateLimitRad = 150 / 60 * math.pi / 180.0
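# Gain rationale (a reading of the numbers above): the linear spring uses
# k = m*100/dh with mHand ~ 2.2 and dh = 0.05, and the damping is set to
# 10x the critically-damped value c = 2*sqrt(k*m) (factor 20 vs 2), so the
# tracked hand settles without oscillating; the rotational gains rescale
# the same constants into the degree-based drive units.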
# driving and dofs
self._drives = []
self._driveGuards = []
self._numDofs = 0
self._thumbIndices = []
#########################################################
################### hand ###########################
#########################################################
self._handInitPos = Gf.Vec3f(0.0)
self.import_hand()
self._setup_geometry()
self._setup_mesh_tree()
self._rig_hand()
#! disable tips
# tips_prim = self.stage.GetPrimAtPath(self._tips_root_path.pathString)
# tips_prim.SetActive(False)
self._rig_D6_anchor()
# self._setup_skeleton_hand_db_tips(self.stage)
def import_hand(self):
# import skeleton hand
default_prim_path = Sdf.Path("/World") # stage.GetDefaultPrim().GetPath()
self._hand_prim_path = default_prim_path.AppendPath("Hand")
self._bones_root_path = default_prim_path.AppendPath("Hand/Bones")
self._tips_root_path = default_prim_path.AppendPath("Hand/Tips")
abspath = "/home/yizhou/Desktop/hand1.usd"
# "https://omniverse-content-staging.s3.us-west-2.amazonaws.com/DoNotDelete/PhysicsDemoAssets/103.1/DeformableHand/skeleton_hand_with_tips.usd"
assert self.stage.DefinePrim(self._hand_prim_path).GetReferences().AddReference(abspath)
self._hand_prim = self.stage.GetPrimAtPath(self._hand_prim_path.pathString)
hand_xform = UsdGeom.Xformable(self._hand_prim)
hand_xform.ClearXformOpOrder()
precision = UsdGeom.XformOp.PrecisionFloat
hand_xform.AddTranslateOp(precision=precision).Set(self._handInitPos)
hand_xform.AddOrientOp(precision=precision).Set(Gf.Quatf(1,0,0,0))
hand_xform.AddScaleOp(precision=precision).Set(Gf.Vec3f(self._scale))
# Physics scene
# physicsScenePath = default_prim_path.AppendChild("physicsScene")
# scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
# scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
# scene.CreateGravityMagnitudeAttr().Set(9.81)
# utils.set_physics_scene_asyncsimrender(scene.GetPrim())
# physxAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
# physxAPI.CreateSolverTypeAttr("TGS")
# physxAPI.CreateGpuMaxNumPartitionsAttr(4)
def _setup_geometry(self):
boneNames = ["proximal", "middle", "distal"]
self._jointGeometry = {}
revoluteLimits = (-20, 120)
# Thumb:
metacarpal = JointGeometry()
metacarpal.bbCenterWeight = 0.67
metacarpal.posOffsetW = Gf.Vec3d(-1.276, 0.28, 0.233)
# this quaternion is the joint pose in the inertial coordinate system
# and will be converted to the bone frame in the joint rigging
angleY = -0.45
angleZ = -0.5
quat = get_quat_from_extrinsic_xyz_rotation(angleYrad=angleY, angleZrad=angleZ)
metacarpal.quat = quat # first y then z, extrinsic
metacarpal.type = "spherical"
metacarpal.axis = "X"
metacarpal.limits = (90, 90)
metacarpal.defaultDriveAngles["rotY"] = angleY
metacarpal.defaultDriveAngles["rotZ"] = angleZ
proximal = JointGeometry()
proximal.bbCenterWeight = 0.67
proximal.quat = Gf.Quatf(0, 0, 0, 1) * quat
proximal.axis = "Y"
proximal.limits = revoluteLimits
distal = copy(proximal)
distal.bbCenterWeight = 0.55
self._jointGeometry["Thumb"] = {
"metacarpal": copy(metacarpal),
"proximal": copy(proximal),
"distal": copy(distal),
}
sphericalLimits = (60, 90)
# Index:
proximal = JointGeometry()
proximal.bbCenterWeight = 0.67
proximal.quat = Gf.Quatf(1.0)
proximal.type = "spherical"
proximal.axis = "X"
proximal.limits = sphericalLimits
middle = JointGeometry()
middle.bbCenterWeight = 0.67
xAngleRad = 5.0 * math.pi / 180.0
middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad)
middle.axis = "Z"
middle.limits = revoluteLimits
distal = copy(middle)
distal.bbCenterWeight = 0.55
geoms = [copy(g) for g in [proximal, middle, distal]]
self._jointGeometry["Index"] = dict(zip(boneNames, geoms))
# middle:
proximal = JointGeometry()
proximal.bbCenterWeight = 0.67
proximal.quat = Gf.Quatf(1.0)
proximal.type = "spherical"
proximal.limits = sphericalLimits
proximal.axis = "X"
middle = JointGeometry()
middle.bbCenterWeight = 0.67
middle.quat = Gf.Quatf(1.0)
middle.axis = "Z"
middle.limits = revoluteLimits
distal = copy(middle)
distal.bbCenterWeight = 0.55
geoms = [copy(g) for g in [proximal, middle, distal]]
self._jointGeometry["Middle"] = dict(zip(boneNames, geoms))
# ring:
proximal = JointGeometry()
proximal.bbCenterWeight = 0.67
proximal.quat = Gf.Quatf(1.0)
proximal.type = "spherical"
proximal.limits = sphericalLimits
proximal.axis = "X"
middle = JointGeometry()
middle.bbCenterWeight = 0.6
middle.quat = Gf.Quatf(1.0)
middle.limits = revoluteLimits
xAngleRad = -5.0 * math.pi / 180.0
middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad)
middle.axis = "Z"
distal = copy(middle)
distal.bbCenterWeight = 0.55
geoms = [copy(g) for g in [proximal, middle, distal]]
self._jointGeometry["Ring"] = dict(zip(boneNames, geoms))
# pinky:
proximal = JointGeometry()
proximal.bbCenterWeight = 0.67
yAngleRad = 8.0 * math.pi / 180.0
proximal.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad, angleYrad=yAngleRad)
proximal.type = "spherical"
proximal.limits = sphericalLimits
proximal.axis = "X"
proximal.defaultDriveAngles["rotY"] = yAngleRad
middle = JointGeometry()
middle.bbCenterWeight = 0.67
middle.quat = Gf.Quatf(1.0)
middle.limits = revoluteLimits
middle.axis = "Z"
yAngleRad = 8.0 * math.pi / 180.0
xAngleRad = -5.0 * math.pi / 180.0
middle.quat = get_quat_from_extrinsic_xyz_rotation(angleXrad=xAngleRad, angleYrad=yAngleRad)
distal = copy(middle)
distal.bbCenterWeight = 0.55
geoms = [copy(g) for g in [proximal, middle, distal]]
self._jointGeometry["Pinky"] = dict(zip(boneNames, geoms))
def _setup_mesh_tree(self):
self._baseMesh = UsdGeom.Mesh.Get(self.stage, self._bones_root_path.AppendChild("l_carpal_mid"))
assert self._baseMesh
boneNames = ["metacarpal", "proximal", "middle", "distal"]
fingerNames = ["Thumb", "Index", "Middle", "Ring", "Pinky"]
self._fingerMeshes = {}
for fingerName in fingerNames:
self._fingerMeshes[fingerName] = {}
groupPath = self._bones_root_path.AppendChild(f"l_{fingerName.lower()}Skeleton_grp")
for boneName in boneNames:
if fingerName == "Thumb" and boneName == "middle":
continue
bonePath = groupPath.AppendChild(f"l_{boneName}{fingerName}_mid")
boneMesh = UsdGeom.Mesh.Get(self.stage, bonePath)
assert boneMesh, f"Mesh {bonePath.pathString} invalid"
self._fingerMeshes[fingerName][boneName] = boneMesh
################################## rigging #########################################
def _rig_hand(self):
self._set_bones_to_rb()
self._rig_articulation_root()
self._setup_physics_material(self._baseMesh.GetPath())
self._rig_hand_base()
self._rig_fingers()
def _rig_articulation_root(self):
self.hand_prim = self.stage.GetPrimAtPath("/World/Hand")
self.bone_prim = self.stage.GetPrimAtPath("/World/Hand/Bones")
self.tip_prim = self.stage.GetPrimAtPath("/World/Hand/Tips")
# reset bone XForm and tip Xform
mat = Gf.Matrix4d()
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/Hand/Bones",
new_transform_matrix=mat,
)
# self.bone_prim.GetAttribute("xformOp:transform").Set(mat)
# if self.tip_prim :
# self.tip_prim.GetAttribute("xformOp:transform").Set(mat)
ropt_prim = self._baseMesh.GetPrim()
UsdPhysics.ArticulationRootAPI.Apply(ropt_prim)
physxArticulationAPI = PhysxSchema.PhysxArticulationAPI.Apply(ropt_prim)
physxArticulationAPI.GetSolverPositionIterationCountAttr().Set(15)
physxArticulationAPI.GetSolverVelocityIterationCountAttr().Set(0)
# fixedJointPath = ropt_prim.GetPath().AppendChild(f"rootJoint")
# fixedJoint = UsdPhysics.FixedJoint.Define(self.stage, fixedJointPath)
# fixedJoint.CreateBody0Rel().SetTargets([])
# fixedJoint.CreateBody1Rel().SetTargets([Sdf.Path("/World/Hand/Bones/l_carpal_mid")])
# parentWorldBB = computeMeshWorldBoundsFromPoints(self._baseMesh)
# self._base_mesh_world_pos = Gf.Vec3f(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
# # fixedJoint.CreateLocalPos0Attr().Set(Gf.Vec3f(0))
# # fixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
# fixedJoint.CreateLocalPos1Attr().Set(-self._base_mesh_world_pos)
# fixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0))
# print("rootJoint", self._base_mesh_world_pos)
def _rig_hand_base(self):
basePath = self._baseMesh.GetPath()
parentWorldBB = computeMeshWorldBoundsFromPoints(self._baseMesh)
self._base_mesh_world_pos = Gf.Vec3f(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
baseLocalToWorld = self._baseMesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
for fingerName, finger in self._fingerMeshes.items():
if fingerName == "Thumb":
# skip thumb
continue
for boneName, bone in finger.items():
if boneName == "metacarpal":
fixedJointPath = bone.GetPath().AppendChild(f"{fingerName}_baseFixedJoint")
fixedJoint = UsdPhysics.FixedJoint.Define(self.stage, fixedJointPath)
fixedJoint.CreateBody0Rel().SetTargets([basePath])
fixedJoint.CreateBody1Rel().SetTargets([bone.GetPath()])
childWorldBB = computeMeshWorldBoundsFromPoints(bone)
childWorldPos = Gf.Vec3f(0.5 * (childWorldBB[0] + childWorldBB[1]))
childLocalToWorld = bone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointWorldPos = 0.5 * (self._base_mesh_world_pos + childWorldPos)
jointParentPosition = baseLocalToWorld.GetInverse().Transform(jointWorldPos)
jointChildPosition = childLocalToWorld.GetInverse().Transform(jointWorldPos)
fixedJoint.CreateLocalPos0Attr().Set(jointParentPosition)
fixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
fixedJoint.CreateLocalPos1Attr().Set(jointChildPosition)
fixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0))
fixedJoint.CreateBreakForceAttr().Set(sys.float_info.max)
fixedJoint.CreateBreakTorqueAttr().Set(sys.float_info.max)
def _rig_joint(self, boneName, fingerName, parentBone):
if boneName not in self._jointGeometry[fingerName]:
return
childBone = self._fingerMeshes[fingerName][boneName]
jointGeom = self._jointGeometry[fingerName][boneName]
jointType = jointGeom.type.lower()
# print("jointType", parentBone, jointType, childBone, jointType)
parentWorldBB = computeMeshWorldBoundsFromPoints(parentBone)
parentWorldPos = Gf.Vec3d(0.5 * (parentWorldBB[0] + parentWorldBB[1]))
parentLocalToWorld = parentBone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
childWorldBB = computeMeshWorldBoundsFromPoints(childBone)
childWorldPos = Gf.Vec3d(0.5 * (childWorldBB[0] + childWorldBB[1]))
childLocalToWorld = childBone.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointWorldPos = parentWorldPos + jointGeom.bbCenterWeight * (childWorldPos - parentWorldPos)
# print("jointWorldPos", jointWorldPos, parentWorldPos)
if jointGeom.posOffsetW is not None:
jointWorldPos += jointGeom.posOffsetW * 0.01
# print("jointGeom.posOffsetW", jointGeom.posOffsetW)
jointParentPosition = parentLocalToWorld.GetInverse().Transform(jointWorldPos)
jointChildPosition = childLocalToWorld.GetInverse().Transform(jointWorldPos)
if jointType == "revolute":
jointPath = childBone.GetPath().AppendChild(f"{fingerName}_{boneName}_RevoluteJoint")
joint = UsdPhysics.RevoluteJoint.Define(self.stage, jointPath)
elif jointType == "spherical":
# sphericals get a constraint joint here; the external D6 drive is added below
jointPath = childBone.GetPath().AppendChild(f"{fingerName}_{boneName}_SphericalJoint")
joint = UsdPhysics.SphericalJoint.Define(self.stage, jointPath)
joint.CreateBody0Rel().SetTargets([parentBone.GetPath()])
joint.CreateBody1Rel().SetTargets([childBone.GetPath()])
joint.CreateAxisAttr(jointGeom.axis)
# for the sphericals, the relative orientation does not matter as they are externally driven.
# for the revolutes, it is key that they are oriented correctly and that parent and child are identical
# in order to avoid offsets - offsets will be added in the joint commands
jointPose = Gf.Quatf(parentLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
jointPose *= jointGeom.quat
# this is assuming that parent and child's frames coincide
joint.CreateLocalPos0Attr().Set(jointParentPosition)
joint.CreateLocalRot0Attr().Set(jointPose)
joint.CreateLocalPos1Attr().Set(jointChildPosition)
joint.CreateLocalRot1Attr().Set(jointPose)
physxJointAPI = PhysxSchema.PhysxJointAPI.Apply(joint.GetPrim())
physxJointAPI.GetMaxJointVelocityAttr().Set(self._maxJointVelocity)
physxJointAPI.GetJointFrictionAttr().Set(self._jointFriction)
if jointType == "revolute":
# for revolute create drive
driveAPI = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), "angular")
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateDampingAttr(self._revolute_drive_damping)
driveAPI.CreateStiffnessAttr(self._revolute_drive_stiffness)
dofIndex = len(self._drives)
self._numDofs += 1
if fingerName == "Thumb":
self._thumbIndices.append(dofIndex)
self._drives.append(driveAPI)
targetAngle = jointGeom.defaultDriveAngles["rot" + jointGeom.axis]
self._driveGuards.append(
JointAngleRateOfChangeLimiter(driveAPI, targetAngle, self._jointAngleRateLimitRad)
)
elif jointType == "spherical":
# add 6d external joint and drive:
d6path = childBone.GetPath().AppendChild(f"{fingerName}_{boneName}_D6DriverJoint")
d6j = UsdPhysics.Joint.Define(self.stage, d6path)
# d6j.CreateExcludeFromArticulationAttr().Set(True)
d6j.CreateBody0Rel().SetTargets([parentBone.GetPath()])
d6j.CreateBody1Rel().SetTargets([childBone.GetPath()])
# d6j.CreateExcludeFromArticulationAttr().Set(True)
d6j.CreateLocalPos0Attr().Set(jointParentPosition)
parentWorldToLocal = Gf.Quatf(parentLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
# print("D6DriverJoint parentWorldToLocal", jointParentPosition, jointChildPosition)
d6j.CreateLocalRot0Attr().Set(parentWorldToLocal)
d6j.CreateLocalPos1Attr().Set(jointChildPosition)
childPose = parentWorldToLocal * jointGeom.quat
d6j.CreateLocalRot1Attr().Set(childPose)
# d6j.CreateBreakForceAttr().Set(1e20)
# d6j.CreateBreakTorqueAttr().Set(1e20)
axes = [x for x in "XYZ" if jointGeom.axis != x]
assert len(axes) == 2, "Error in spherical drives setup"
drives = ["rot" + x for x in axes]
# lock the joint axis:
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), "rot" + jointGeom.axis)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transY)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transZ)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6j.GetPrim(), UsdPhysics.Tokens.transX)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
for d in drives:
driveAPI = UsdPhysics.DriveAPI.Apply(d6j.GetPrim(), d)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateDampingAttr(self._spherical_drive_damping)
driveAPI.CreateStiffnessAttr(self._spherical_drive_stiffness)
dofIndex = len(self._drives)
self._numDofs += 1
if fingerName == "Thumb":
self._thumbIndices.append(dofIndex)
self._drives.append(driveAPI)
targetAngle = jointGeom.defaultDriveAngles[d]
self._driveGuards.append(
JointAngleRateOfChangeLimiter(driveAPI, targetAngle, self._jointAngleRateLimitRad)
)
def _rig_fingers(self):
for fingerName, finger in self._fingerMeshes.items():
# print("fingerName", fingerName)
parentBone = self._baseMesh
for boneName, bone in finger.items():
self._rig_joint(boneName, fingerName, parentBone)
parentBone = bone
# return
def _rig_D6_anchor(self):
# create anchor:
self._anchorXform = UsdGeom.Xform.Define(
self.stage, self.stage.GetDefaultPrim().GetPath().AppendChild("AnchorXform")
)
# these are global coords because world is the xform's parent
xformLocalToWorldTrans = self._handInitPos
xformLocalToWorldRot = Gf.Quatf(1.0)
self._anchorXform.AddTranslateOp().Set(xformLocalToWorldTrans)
self._anchorXform.AddOrientOp().Set(xformLocalToWorldRot)
self._anchorPositionRateLimiter = VectorRateOfChangeLimiter(
xformLocalToWorldTrans, 0.01666, 0.5 ** (1 / 6) #! change max movement per frame
)
self._anchorQuatRateLimiter = QuaternionRateOfChangeLimiter(
xformLocalToWorldRot, 0.01666, 0.5 ** (1 / 6)
)
xformPrim = self._anchorXform.GetPrim()
physicsAPI = UsdPhysics.RigidBodyAPI.Apply(xformPrim)
physicsAPI.CreateRigidBodyEnabledAttr(True)
physicsAPI.CreateKinematicEnabledAttr(True)
# setup joint to floating hand base
component = UsdPhysics.Joint.Define(
self.stage, self.stage.GetDefaultPrim().GetPath().AppendChild("AnchorToHandBaseD6")
)
if not hasattr(self, "_baseMesh"):
self._baseMesh = UsdGeom.Mesh.Get(self.stage, self._bones_root_path.AppendChild("l_carpal_mid"))
baseLocalToWorld = self._baseMesh.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
jointPosition = baseLocalToWorld.GetInverse().Transform(xformLocalToWorldTrans)
jointPose = Gf.Quatf(baseLocalToWorld.GetInverse().RemoveScaleShear().ExtractRotationQuat())
component.CreateExcludeFromArticulationAttr().Set(True)
component.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0))
component.CreateLocalRot0Attr().Set(Gf.Quatf(1.0))
component.CreateBody0Rel().SetTargets([self._anchorXform.GetPath()])
component.CreateBody1Rel().SetTargets([self._baseMesh.GetPath()]) #
component.CreateLocalPos1Attr().Set(jointPosition)
component.CreateLocalRot1Attr().Set(jointPose)
component.CreateBreakForceAttr().Set(sys.float_info.max)
component.CreateBreakTorqueAttr().Set(sys.float_info.max)
rootJointPrim = component.GetPrim()
for dof in ["transX", "transY", "transZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, dof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e4)
driveAPI.CreateStiffnessAttr(1e5)
for rotDof in ["rotX", "rotY", "rotZ"]:
driveAPI = UsdPhysics.DriveAPI.Apply(rootJointPrim, rotDof)
driveAPI.CreateTypeAttr("force")
# driveAPI.CreateMaxForceAttr(self._drive_max_force)
driveAPI.CreateTargetPositionAttr(0.0)
driveAPI.CreateDampingAttr(1e4)
driveAPI.CreateStiffnessAttr(1e5)
# limitAPI = UsdPhysics.LimitAPI.Apply(rootJointPrim, rotDof)
# limitAPI.CreateLowAttr(1.0)
# limitAPI.CreateHighAttr(-1.0)
########################################## physics ###################################
def _setup_physics_material(self, path: Sdf.Path):
if self._physicsMaterialPath is None:
self._physicsMaterialPath = self.stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
UsdShade.Material.Define(self.stage, self._physicsMaterialPath)
material = UsdPhysics.MaterialAPI.Apply(self.stage.GetPrimAtPath(self._physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(self._material_static_friction)
material.CreateDynamicFrictionAttr().Set(self._material_dynamic_friction)
material.CreateRestitutionAttr().Set(self._material_restitution)
# collisionAPI = UsdPhysics.CollisionAPI.Get(self.stage, path)
prim = self.stage.GetPrimAtPath(path)
# if not collisionAPI:
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# apply material
physicsUtils.add_physics_material_to_prim(self.stage, prim, self._physicsMaterialPath)
def _apply_mass(self, mesh: UsdGeom.Mesh, mass: float):
massAPI = UsdPhysics.MassAPI.Apply(mesh.GetPrim())
massAPI.GetMassAttr().Set(mass)
def _setup_rb_parameters(self, prim, restOffset, contactOffset):
physxCollisionAPI = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self._setup_physics_material(prim.GetPath())
assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
assert prim.CreateAttribute("physxMeshCollision:minThickness", Sdf.ValueTypeNames.Float).Set(0.001)
physxRBAPI = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
physxRBAPI.CreateSolverPositionIterationCountAttr().Set(15)
physxRBAPI.CreateSolverVelocityIterationCountAttr().Set(0)
def _set_bone_mesh_to_rigid_body_and_config(self, mesh: UsdGeom.Mesh, approximationShape="convexHull"):
prim = mesh.GetPrim()
utils.setRigidBody(prim, approximationShape=approximationShape, kinematic=False)
# self._setup_rb_parameters(prim, restOffset=0.0, contactOffset= 0.01)
# omni.kit.commands.execute(
# "SetRigidBodyCommand",
# path=prim.GetPath().pathString,
# approximationShape="convexHull",
# kinematic=False
# )
def _set_bones_to_rb(self):
# utils.setRigidBody(self.stage.GetPrimAtPath("/World/Hand"), approximationShape="convexHull", kinematic=False)
# return
self._set_bone_mesh_to_rigid_body_and_config(self._baseMesh)
# self._apply_mass(self._baseMesh, self._finger_mass)
for _, finger in self._fingerMeshes.items():
for _, bone in finger.items():
self._set_bone_mesh_to_rigid_body_and_config(bone)
self._setup_physics_material(bone.GetPrim().GetPath()) #! add physical material
# self._apply_mass(bone, self._finger_mass)
########################### soft body #################################################
def _setup_skeleton_hand_db_tips(self, stage):
# SB and fluid:
self._sb_hand_schema_parameters = {
"youngsModulus": 1.0e5,
"poissonsRatio": 0.3,
"dampingScale": 1.0,
"dynamicFriction": 1.0,
"solver_position_iteration_count": 15,
"collisionRestOffset": 0.1,
"collisionContactOffset": 0.5,
"self_collision": False,
"vertex_velocity_damping": 0.005,
"sleep_damping": 0.001, # disable
"sleep_threshold": 0.001, # disable
"settling_threshold": 0.001, # disable
}
self._sb_tips_schema_parameters = copy(self._sb_hand_schema_parameters)  # copy, so the tip override below does not leak into the hand params
self._sb_tips_schema_parameters["collisionRestOffset"] = 0.00001
self._sb_tips_resolution = 8
self._sb_hand_resolution = 20
# create and attach softbodies
sbTipsStringPaths = [
"LeftHandThumbTipScaled/geom",
"LeftHandIndexTipScaled/geom",
"LeftHandMiddleTipScaled/geom",
"LeftHandRingTipScaled/geom",
"LeftHandPinkyTipScaled/geom",
]
sbTipsPaths = [self._tips_root_path.AppendPath(x) for x in sbTipsStringPaths]
sbTips_material_path = omni.usd.get_stage_next_free_path(stage, "/sbTipsMaterial", True)
deformableUtils.add_deformable_body_material(
stage,
sbTips_material_path,
youngs_modulus=self._sb_tips_schema_parameters["youngsModulus"],
poissons_ratio=self._sb_tips_schema_parameters["poissonsRatio"],
damping_scale=self._sb_tips_schema_parameters["dampingScale"],
dynamic_friction=self._sb_tips_schema_parameters["dynamicFriction"],
)
self._deformableTipMass = 0.01
for sbTipPath in sbTipsPaths:
self.set_softbody(
sbTipPath,
self._sb_tips_schema_parameters,
sbTips_material_path,
self._deformableTipMass,
self._sb_tips_resolution,
)
# rigid attach
attachmentBoneStringPaths = [
"l_thumbSkeleton_grp/l_distalThumb_mid",
"l_indexSkeleton_grp/l_distalIndex_mid",
"l_middleSkeleton_grp/l_distalMiddle_mid",
"l_ringSkeleton_grp/l_distalRing_mid",
"l_pinkySkeleton_grp/l_distalPinky_mid",
"l_thumbSkeleton_grp/l_metacarpalThumb_mid",
"l_indexSkeleton_grp/l_metacarpalIndex_mid",
"l_middleSkeleton_grp/l_metacarpalMiddle_mid",
"l_ringSkeleton_grp/l_metacarpalRing_mid",
"l_pinkySkeleton_grp/l_metacarpalPinky_mid",
"l_thumbSkeleton_grp/l_proximalThumb_mid",
"l_indexSkeleton_grp/l_proximalIndex_mid",
"l_middleSkeleton_grp/l_proximalMiddle_mid",
"l_ringSkeleton_grp/l_proximalRing_mid",
"l_pinkySkeleton_grp/l_proximalPinky_mid",
"l_indexSkeleton_grp/l_middleIndex_mid",
"l_middleSkeleton_grp/l_middleMiddle_mid",
"l_ringSkeleton_grp/l_middleRing_mid",
"l_pinkySkeleton_grp/l_middlePinky_mid",
"l_carpal_mid",
]
# color of tips:
color_rgb = [161, 102, 94]
sbColor = Vt.Vec3fArray([Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2]) / 256.0])
attachmentBonePaths = [self._bones_root_path.AppendPath(x) for x in attachmentBoneStringPaths]
for sbTipPath, bonePath in zip(sbTipsPaths, attachmentBonePaths):
sbMesh = UsdGeom.Mesh.Get(stage, sbTipPath)
sbMesh.CreateDisplayColorAttr(sbColor)
boneMesh = UsdGeom.Mesh.Get(stage, bonePath)
self.create_softbody_rigid_attachment(sbMesh, boneMesh, 0)
softbodyGroupPath = "/World/physicsScene/collisionGroupSoftBodyTips"
boneGroupPath = "/World/physicsScene/collisionGroupHandBones"
softbodyGroup = UsdPhysics.CollisionGroup.Define(stage, softbodyGroupPath)
boneGroup = UsdPhysics.CollisionGroup.Define(stage, boneGroupPath)
filteredRel = softbodyGroup.CreateFilteredGroupsRel()
filteredRel.AddTarget(boneGroupPath)
filteredRel = boneGroup.CreateFilteredGroupsRel()
filteredRel.AddTarget(softbodyGroupPath)
for sbTipPath in sbTipsPaths:
self.assign_collision_group(sbTipPath, softbodyGroupPath)
# filter all SB tips vs bone rigid bodies collisions
self.assign_collision_group(self._baseMesh.GetPath(), boneGroupPath)
for finger in self._fingerMeshes.values():
for bone in finger.values():
self.assign_collision_group(bone.GetPath(), boneGroupPath)
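        # The two CreateFilteredGroupsRel() targets above make the filtering
        # mutual: softbody fingertips never collide with the rigid bone meshes
        # they are attached to, while everything else keeps colliding normally.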
def assign_collision_group(self, primPath: Sdf.Path, groupPath: Sdf.Path):
stage = self.stage
physicsUtils.add_collision_to_collision_group(stage, primPath, groupPath)
def set_softbody(
self, mesh_path: Sdf.Path, schema_parameters: dict, material_path: Sdf.Path, mass: float, resolution: int
):
success = omni.kit.commands.execute(
"AddDeformableBodyComponentCommand",
skin_mesh_path=mesh_path,
voxel_resolution=resolution,
solver_position_iteration_count=schema_parameters["solver_position_iteration_count"],
self_collision=schema_parameters["self_collision"],
vertex_velocity_damping=schema_parameters["vertex_velocity_damping"],
sleep_damping=schema_parameters["sleep_damping"],
sleep_threshold=schema_parameters["sleep_threshold"],
settling_threshold=schema_parameters["settling_threshold"],
)
prim = self.stage.GetPrimAtPath(mesh_path)
physxCollisionAPI = PhysxSchema.PhysxCollisionAPI.Apply(prim)
assert physxCollisionAPI.CreateRestOffsetAttr().Set(schema_parameters["collisionRestOffset"])
assert physxCollisionAPI.CreateContactOffsetAttr().Set(schema_parameters["collisionContactOffset"])
massAPI = UsdPhysics.MassAPI.Apply(prim)
massAPI.CreateMassAttr().Set(mass)
physicsUtils.add_physics_material_to_prim(self.stage, self.stage.GetPrimAtPath(mesh_path), material_path)
assert success
def create_softbody_rigid_attachment(self, soft_body, gprim, id):
assert PhysxSchema.PhysxDeformableBodyAPI(soft_body)
assert UsdPhysics.CollisionAPI(gprim)
# get attachment to set parameters:
attachmentPath = soft_body.GetPath().AppendChild(f"rigid_attachment_{id}")
attachment = PhysxSchema.PhysxPhysicsAttachment.Define(self.stage, attachmentPath)
attachment.GetActor0Rel().SetTargets([soft_body.GetPath()])
attachment.GetActor1Rel().SetTargets([gprim.GetPath()])
PhysxSchema.PhysxAutoAttachmentAPI.Apply(attachment.GetPrim())
attachment = PhysxSchema.PhysxAutoAttachmentAPI.Get(self.stage, attachmentPath)
attachment.GetEnableDeformableVertexAttachmentsAttr().Set(True)
attachment.GetEnableRigidSurfaceAttachmentsAttr().Set(True) | 33,932 | Python | 44.486595 | 151 | 0.630349 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/franka/control.py | import os
import sys
sys.path.append(os.path.dirname(__file__))
import omni
import pxr
from pxr import Gf
from omni.isaac.franka import Franka
from omni.isaac.core.utils.stage import set_stage_up_axis
from omni.isaac.core import World, SimulationContext
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
import numpy as np
from pathlib import Path
from numpy_utils import *
from utils import get_mesh_bboxes
ROOT = str(Path(__file__).parent.joinpath("../../../../../../").resolve())
class FrankaControl():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None) -> None:
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
def start(self):
# simulation context
        self.simulation_context = SimulationContext(backend=self.backend, device=self.device)
        print("simulation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def move_to_target(self, goal_pos, goal_rot):
"""
        Compute joint position targets that drive the hand toward the goal pose
"""
# get end effector transforms
hand_pos, hand_rot = self.xforms.get_world_poses()
        hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - hand_pos
orn_err = orientation_error(goal_rot, hand_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to franka hand
        franka_hand_index = 8  # index of the panda hand link in the articulation
j_eef = jacobians[:, franka_hand_index - 1, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, 9)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
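    # A minimal numeric sketch of the damped least-squares step above (toy
    # shapes, illustrative only -- none of these names exist in this class):
    #   J = np.random.randn(1, 6, 9)        # (num_envs, 6D pose error, 9 joints)
    #   dpose = np.random.randn(1, 6, 1)
    #   J_T = np.transpose(J, (0, 2, 1))
    #   lmbda = np.eye(6) * (0.05 ** 2)     # damping keeps J @ J_T + lmbda invertible
    #   u = (J_T @ np.linalg.inv(J @ J_T + lmbda) @ dpose).reshape(1, 9)
    # u is the joint-position delta added to the current dof positions.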
##################################################################################################
# -------------------------------------- Calculation --------------------------------------------#
##################################################################################################
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.086):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
        assert len(bboxes_list) == self.num_envs, "expected exactly one handle per environment!"
# get center and min x axis
min_x = bboxes_list[0][0][0] #
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
grasp_list = [[min_x - x_offset, c[1], c[2]] for c in center_list]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0) # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
def calculate_pull_location(self, start_pos, start_rot, theta, r, clock_wise = False):
"""
        Calculate how to pull to open the cabinet
"""
clock_wise = float(2 * clock_wise - 1)
# position
pos_offset = np.tile(np.array([-r * np.sin(theta), clock_wise * r * (1 - np.cos(theta)), 0]), (self.num_envs, 1))
target_pos = start_pos + pos_offset
# rotate
rot_offset = np.tile(np.array([np.sin(clock_wise * theta / 2), 0, 0, np.cos( - clock_wise * theta / 2)]), (self.num_envs, 1))
target_rot = quat_mul(start_rot, rot_offset)
return target_pos, target_rot
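    # Geometry behind the pull above (illustrative numbers): the handle travels
    # on a circle of radius r around the hinge, so after rotating by theta the
    # offset from the start pose is dx = -r * sin(theta) and
    # |dy| = r * (1 - cos(theta)), with the sign of dy set by clock_wise.
    # E.g. r = 0.3, theta = pi / 2 gives dx = -0.3 and |dy| = 0.3.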
##################################################################################################
# -------------------------------------- Control ------------------------------------------------#
##################################################################################################
def move_hand_to_fast(self, target_pos, target_rot, world, open_gripper = True, max_step = 300):
"""
Quickly move the robot hands to the target position and rotation
"""
for i in range(max_step):
world.step(render=True)
# get end effector transforms
hand_pos, hand_rot = self.xforms.get_world_poses()
hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
orient_error = quat_mul(target_rot[0], quat_conjugate(hand_rot[0]))
# print("orient_error", orient_error)
if abs(orient_error[3] - 1) < 0.02 and \
np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
np.sqrt(np.sum((target_pos[0] - hand_pos[0])**2)) < 0.01:
print("Done rotation, position", hand_pos, hand_rot)
return
u = self.move_to_target(target_pos, target_rot)
u[:,[-2, -1]] = 0.05 if open_gripper else 0
self.robots.set_joint_position_targets(u)
print("Not Done rotation, position", hand_pos, hand_rot)
def move_hand_to_slow(self, target_pos, target_rot, world, open_gripper = True, step = 60):
"""
Continuously and slowly move robot hands to the target position and rotation
target_pos, target_rot: [x,y,z], [x, y, z, w]
"""
hand_pos, hand_rot = self.xforms.get_world_poses() # [x,y,z], [w, x, y, z]
hand_rot = hand_rot[:,[1,2,3,0]] # WXYZ -> XYZW
inter_pos, inter_rot = np.zeros_like(hand_pos), np.zeros_like(hand_rot)
start_pos, start_rot = [], []
target_pos_gf, target_rot_gf = [], []
# init
for i in range(self.num_envs):
start_pos.append(Gf.Vec3f(float(hand_pos[i][0]), float(hand_pos[i][1]), float(hand_pos[i][2])))
start_rot.append(Gf.Quatf(float(hand_rot[i][3]),float(hand_rot[i][0]),float(hand_rot[i][1]),float(hand_rot[i][2])))
target_pos_gf.append(Gf.Vec3f(float(target_pos[i][0]), float(target_pos[i][1]), float(target_pos[i][2])))
target_rot_gf.append(Gf.Quatf(float(target_rot[i][3]),float(target_rot[i][0]),float(target_rot[i][1]),float(target_rot[i][2])))
# gripper
dof_pos = self.robots.get_joint_positions()
init_gripper_close = dof_pos[...,-1][0] <= 0.015
# step
for t in range(step):
world.step(render=True)
for i in range(self.num_envs):
inter_pos_i = Gf.Lerp(t / (step - 1), start_pos[i], target_pos_gf[i])
inter_pos[i] = [inter_pos_i[0], inter_pos_i[1], inter_pos_i[2]]
inter_rot_i = Gf.Slerp(t / (step - 1), start_rot[i], target_rot_gf[i])
inter_rot_i_imaginary = inter_rot_i.GetImaginary()
inter_rot[i] = [inter_rot_i_imaginary[0], inter_rot_i_imaginary[1], inter_rot_i_imaginary[2], inter_rot_i.GetReal()]
u = self.move_to_target(inter_pos, inter_rot)
if init_gripper_close and not open_gripper:
gripper_target = -0.5
else:
                gripper_target = 0.5 if open_gripper else 0.5 - 1.0 / (step - 1) * t  # close gradually from 0.5 to -0.5
# print("gripper_target", gripper_target)
u[:,[-2, -1]] = gripper_target
self.robots.set_joint_position_targets(u)
# final adjustment
for t in range(step // 10):
world.step(render=True)
u = self.move_to_target(target_pos, target_rot)
u[:,[-2, -1]] = 0.5 if open_gripper else -0.5
self.robots.set_joint_position_targets(u)
world.step(render=True)
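    # The loop above interpolates position linearly (Gf.Lerp) and orientation
    # spherically (Gf.Slerp) so the IK target sweeps smoothly from the current
    # pose to the goal; halfway through the loop the hand is commanded to the
    # halfway pose.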
############################### SLAM #########################################################
def calculate_grasp_location_from_bbox(self, box,
resolution = 256, D = -293, camera_pos = [-1, 0, 0.5], handle_x = 0.61857):
"""
Calculate the grasp location for the handle
        box: [x_min, y_min, x_max, y_max] 2D bounding box in camera
resolution: camera resolution
D: depth of field
camera_pos: camera_position
handle_x: object offset
"""
delta_w = (box[0] + box[2]) / 2 - resolution / 2
delta_h = (box[1] + box[3]) / 2 - resolution / 2
handle_z = (handle_x - camera_pos[0]) * delta_h / D + camera_pos[2]
handle_y = (handle_x - camera_pos[0]) * delta_w / D + camera_pos[1]
graps_pos = np.array([[handle_x, handle_y, handle_z]], dtype=np.float32)
verticle = delta_w < delta_h
base_rotation = [0.5, 0.5, 0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32).repeat(self.num_envs, axis = 0) # XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
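    # Back-projection sanity check (hypothetical values): with the defaults
    # resolution = 256, D = -293, camera_pos = [-1, 0, 0.5] and
    # handle_x = 0.61857, a box centered at pixel (128, 128) gives
    # delta_w = delta_h = 0, so the grasp point is simply
    # [handle_x, camera_pos[1], camera_pos[2]] = [0.61857, 0.0, 0.5].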
| 10,251 | Python | 38.430769 | 139 | 0.534094 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/frankahand_experiment_learning.py | import numpy as np
from PIL import Image
## Get object indexes
# import os
# OBJ_INDEX_LIST = []
# for i in os.listdir("/home/yizhou/Research/temp/"):
# if str(i).isdigit():
# OBJ_INDEX_LIST.append(i)
# print(sorted(OBJ_INDEX_LIST, key = lambda x: int(x)))
from exp.params import OBJ_INDEX_LIST
SUCCESS_PERCENTAGE = 20
result_file_path = "/home/yizhou/Research/Data/frankahand_exp_learning.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn.pth"
SHOW_IMAGE = False
import getpass
user = getpass.getuser()
from omni.isaac.kit import SimulationApp
# "/home/yizhou/Research/OpenAnyDrawer/scene0.usd" #
usd_path = f"omniverse://localhost/Users/{user}/scene4.usd"
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from franka.gripper import GripperHandEnv
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv()
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
controller = GripperHandEnv("/World/Franka/panda_link8", "/World/AnchorXform")
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath("/World/Franka")
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep learning model
from exp.model import load_vision_model
model = load_vision_model(model_path = MODEL_PATH, model_name = "fasterrcnn_resnet50_fpn")
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[:1]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
world.scene.add(mobility_obj)
world.reset()
world.render()
scene_instr = SceneInstructor()
scene_instr.analysis()
# export data and load model
# scene_instr.output_path = "/home/yizhou/Research/temp0/"
# scene_instr.export_data()
# omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
world.render()
world.render()
world.render()
    image_array = env.get_image(return_array=True)
scene_instr.model = model
scene_instr.predict_bounding_boxes(image_array[:,:,:3])
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# if no valid predicted boundbox
if not scene_instr.is_pred_valid:
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX}, invalid prediction\n")
world.scene.remove_object(mobility_obj_name)
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
continue
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
# handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
the_box = scene_instr.get_box_from_desc(v_desc, h_desc)
handle_direction = "horizontal" if (the_box[2] - the_box[0]) > (the_box[3] - the_box[1]) else "vertical"
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", graps_pos, grasp_rot )
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(200):
world.step(render=False)
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=False)
# close
pos = np.array([[0.0, 0.0]])
for _ in range(100):
pos -= 0.01
controller.robots.set_joint_position_targets(pos)
world.step(render=False)
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(pos)
pos += 0.015
world.step(render=False)
        # check task success
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
        task_success = open_ratio > SUCCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{v_desc}|{h_desc}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 7,458 | Python | 34.018779 | 175 | 0.633414 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/experiment_learning_common_cliport.py | import numpy as np
from PIL import Image
from exp.params import OBJ_INDEX_LIST, GRASP_PROFILES
import getpass
user = getpass.getuser()
ROBOT_NAME = "frankahand" #"skeletonhand" # "shadowhand" # "allegro"
grasp_profile = GRASP_PROFILES[ROBOT_NAME]
SUCCESS_PERCENTAGE = 20
print("SUCCESS_PERCENTAGE: ", SUCCESS_PERCENTAGE)
result_file_path = f"/home/yizhou/Research/Data/{ROBOT_NAME}_exp_cliport824.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/custom_cliport824.pth"
clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json"
load_nucleus = True # nucleus loading
usd_path = "omniverse://localhost/Users/yizhou/scene4.usd" #grasp_profile["usd_path"]
SHOW_IMAGE = True
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# import transformers, installing it on first use if missing
try:
    import transformers
except ImportError:
    omni.kit.pipapi.install("transformers")
    import transformers
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from hand_env import HandEnv
from hand_common import HandBase
from render.utils import prim_random_color, LOOKS_PATH
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv(load_nucleus=load_nucleus)
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
# controller = HandEnv("/World/allegro*/allegro_mount", "/World/AnchorXform")
controller = HandBase(grasp_profile["articulation_root"], "/World/AnchorXform")
controller.grasp_profile = grasp_profile["offset"]
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath(grasp_profile["robot_path"])
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep learning model
from exp.model import load_vision_model
model = load_vision_model(
model_path = MODEL_PATH,
model_name = "custom_cliport",
clip_text_feature_path = clip_text_feature_path
)
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[:2]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
# randomize color
# reset look in scene
mat_look_prim = world.scene.stage.GetPrimAtPath(LOOKS_PATH)
if mat_look_prim:
omni.kit.commands.execute("DeletePrims", paths=[LOOKS_PATH])
world.step(render = False)
scene_instr = SceneInstructor()
scene_instr.analysis()
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
prim_random_color(handle_path_str)
world.scene.add(mobility_obj)
world.reset()
world.render()
world.render()
image_array = env.get_image(return_array=True)
    image = env.get_image()
if SHOW_IMAGE:
world.render()
env.get_image().show()
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
# handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
cabinet_type = scene_instr.valid_handle_list[handle_path_str]["cabinet_type"]
# add_update_semantics(prim, "handle")
text = f"{v_desc}_{h_desc}_{cabinet_type}"
text = text.replace("_"," ").replace("-"," ").replace(" ", " ").strip()
print("task text", text)
bbox_center, handle_direction = model.pred_box_pos_and_dir(image.convert('RGB'), text)
the_box = scene_instr.get_bbox_world_position([bbox_center[1], bbox_center[0], bbox_center[1], bbox_center[0]])
# Task
# print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", the_box, graps_pos, grasp_rot )
if SHOW_IMAGE:
world.render()
env.get_image().show()
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(500):
world.step(render=SHOW_IMAGE)
print("move to handle")
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=SHOW_IMAGE)
# close finger
print("close finger")
finger_pos = grasp_profile["finger_pos"].copy()
if ROBOT_NAME == "allegro":
for i in range(120):
controller.robots.set_joint_position_targets(finger_pos * i / 120) #
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for _ in range(100):
finger_pos -= 0.01
controller.robots.set_joint_position_targets(finger_pos)
                finger_pos = np.clip(finger_pos, 0, 4)  # clip the targets in place (the result was previously assigned to an unused variable)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
dof_pos = finger_pos
for i in range(80):
# thumb
step_gain = 0.01
dof_pos[6] += step_gain
dof_pos[11] += 2 * step_gain
# dof_pos[16] += 0.01
dof_pos[21] += - step_gain
dof_pos[7] += step_gain
dof_pos[8] += step_gain
dof_pos[9] += step_gain
# dof_pos[14] += 0.01
dof_pos[12] += step_gain
dof_pos[13] += step_gain
dof_pos[14] += step_gain
dof_pos[17] += step_gain
dof_pos[18] += step_gain
dof_pos[19] += step_gain
# pinky
dof_pos[15] += step_gain
dof_pos[20] += step_gain
dof_pos[22] += step_gain
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=True)
elif ROBOT_NAME == "skeletonhand":
# close finger
for i in range(120):
i = i / 4
dof_pos = np.array([
[ i * 0.03, i * 0.04,
i * 0.01, -i * 0.04,
i * 0.005, -i * 0.04,
-i * 0.02, -i * 0.04,
-i * 0.01, -i * 0.04,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
],
])
# pos = np.random.randn(2,25)
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=SHOW_IMAGE)
print("pull out")
# pull out
if ROBOT_NAME == "allegro":
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for i in range(300):
graps_pos[...,0] -= 0.001
finger_pos += np.sqrt(i) * 1e-4
# print(pos)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
finger_pos = np.clip(finger_pos, 0, 4)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
dof_pos *= 0.997
# print(dof_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "skeletonhand":
# pull out
for i in range(200):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
dof_pos /= 1.5
# pull out furthur
for i in range(100):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
        # check task success
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
        task_success = open_ratio > SUCCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{text}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 12,074 | Python | 34.101744 | 164 | 0.575617 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/model.py | import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from exp.learning.custom_cliport import CustomCliport
def load_vision_model(
model_path = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn.pth",
model_name = "fasterrcnn_resnet50_fpn",
clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json"
):
# load a model; pre-trained on COCO
if model_name == "fasterrcnn_resnet50_fpn":
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
        num_classes = 2 # 1 class (handle) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
model.load_state_dict(torch.load(model_path))
model.eval()
elif model_name == "custom_cliport":
model = CustomCliport(clip_text_feature_path = clip_text_feature_path)
model.load_state_dict(torch.load(model_path))
model = model.to(model.device)
model.set_prediction_mode()
model.eval()
return model
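# A hedged usage sketch (the checkpoint path below is a placeholder):
#   model = load_vision_model(
#       model_path="/path/to/fasterrcnn_resnet50_fpn.pth",
#       model_name="fasterrcnn_resnet50_fpn",
#   )
#   with torch.no_grad():
#       out = model([torch.rand(3, 256, 256)])  # list of CHW float tensors in [0, 1]
#   boxes, scores = out[0]["boxes"], out[0]["scores"]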
| 1,331 | Python | 34.052631 | 98 | 0.706236 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/render/utils.py |
import os
from typing import Union, Tuple, Dict, List, Callable
import omni.usd
import omni.kit.commands
from pxr import Sdf, UsdShade, Usd, Gf
import numpy as np
LOOKS_PATH = "/World/RLooks"
def material_omnipbr(
prim_path_str,
diffuse: Tuple[float] = None,
diffuse_texture: str = None,
roughness: float = None,
roughness_texture: str = None,
metallic: float = None,
metallic_texture: str = None,
specular: float = None,
emissive_color: Tuple[float] = None,
emissive_texture: str = None,
emissive_intensity: float = 0.0,
project_uvw: bool = False,
):
stage = omni.usd.get_context().get_stage()
mdl = "OmniPBR.mdl"
mtl_name, _ = os.path.splitext(mdl)
if not stage.GetPrimAtPath(LOOKS_PATH):
stage.DefinePrim(LOOKS_PATH, "Scope")
prim_path = omni.usd.get_stage_next_free_path(stage, f"{LOOKS_PATH}/{mdl.split('.')[0]}", False)
omni.kit.commands.execute(
"CreateMdlMaterialPrim", mtl_url=mdl, mtl_name=mtl_name, mtl_path=prim_path, select_new_prim=False
)
shader = UsdShade.Shader(omni.usd.get_shader_from_material(stage.GetPrimAtPath(prim_path), True))
shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f)
shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("reflection_roughness_constant", Sdf.ValueTypeNames.Float)
shader.CreateInput("reflectionroughness_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("reflection_roughness_texture_influence", Sdf.ValueTypeNames.Float)
shader.CreateInput("metallic_constant", Sdf.ValueTypeNames.Float)
shader.CreateInput("metallic_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("metallic_texture_influence", Sdf.ValueTypeNames.Float)
shader.CreateInput("specular_level", Sdf.ValueTypeNames.Float)
shader.CreateInput("enable_emission", Sdf.ValueTypeNames.Bool)
shader.CreateInput("emissive_color", Sdf.ValueTypeNames.Color3f)
shader.CreateInput("emissive_color_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("emissive_intensity", Sdf.ValueTypeNames.Float)
shader.CreateInput("project_uvw", Sdf.ValueTypeNames.Bool)
enable_emission = emissive_intensity != 0.0
roughness_texture_influence = float(roughness_texture is not None)
    metallic_texture_influence = float(metallic_texture is not None)
prim = stage.GetPrimAtPath(prim_path)
properties = {
"diffuse_color_constant": diffuse,
"diffuse_texture": diffuse_texture,
"reflection_roughness_constant": roughness,
"reflectionroughness_texture": roughness_texture,
"reflection_roughness_texture_influence": roughness_texture_influence,
"metallic_constant": metallic,
"metallic_texture": metallic_texture,
"metallic_texture_influence": metallic_texture_influence,
"specular_level": specular,
"enable_emission": enable_emission,
"emissive_color": emissive_color,
"emissive_color_texture": emissive_texture,
"emissive_intensity": emissive_intensity,
"project_uvw": project_uvw,
}
for attribute, attribute_value in properties.items():
if attribute_value is None:
continue
if UsdShade.Material(prim):
shader = UsdShade.Shader(omni.usd.get_shader_from_material(prim, True))
shader.GetInput(attribute).Set(attribute_value)
else:
prim.GetAttribute(attribute).Set(attribute_value)
omni.kit.commands.execute(
"BindMaterialCommand",
prim_path=prim_path_str,
material_path=prim.GetPath().pathString,
strength=UsdShade.Tokens.strongerThanDescendants,
)
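# Usage sketch for material_omnipbr (the prim path is hypothetical):
#   material_omnipbr("/World/Cube", diffuse=Gf.Vec3f(0.8, 0.1, 0.1), roughness=0.4)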
def prim_random_color(prim_path_str):
"""
Randomize color for prim at path
"""
diffuse = Gf.Vec3f(np.random.rand(), np.random.rand(), np.random.rand())
material_omnipbr(prim_path_str, diffuse = diffuse)
# # test
# prim_random_color("/World/Cube")
# print("test random shader") | 4,031 | Python | 35.654545 | 106 | 0.694617 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/render/offline_rendering.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# ~/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
"""Generate offline synthetic dataset
"""
from omni.isaac.kit import SimulationApp
import os
import carb
# Set rendering parameters and create an instance of kit
CONFIG = {"renderer": "RayTracedLighting", "headless": True,
"width": 256, "height": 256, "num_frames": 5}
kit = SimulationApp(launch_config=CONFIG)
from omni.isaac.core import World
world = World()
from omni.isaac.core.prims.xform_prim import XFormPrim
from open_env import OpenEnv
from task.instructor import SceneInstructor
from exp.params import OBJ_INDEX_LIST, ALL_SEMANTIC_TYPES
env = OpenEnv()
# we will be using the replicator library
import omni.replicator.core as rep
# This allows us to run replicator, which will update the random
# parameters and save out the data for as many frames as listed
def run_orchestrator():
rep.orchestrator.run()
# Wait until started
while not rep.orchestrator.get_is_started():
kit.update()
# Wait until stopped
while rep.orchestrator.get_is_started():
kit.update()
rep.BackendDispatch.wait_until_done()
for i in OBJ_INDEX_LIST[3:]:
print("rendering object id:", i)
i = int(i)
env.add_object(i, scale = 0.1)
game_obj = XFormPrim("/World/Game")
game_obj_name = game_obj.name
world.scene.add(game_obj)
scene_instr = SceneInstructor()
scene_instr.output_path = "/home/yizhou/Research/temp1"
scene_instr.analysis()
scene_instr.add_semantic_to_handle()
if scene_instr.is_obj_valid:
with rep.new_layer():
camera = rep.create.camera(position=(-10 * scene_instr.scale, 0, 5 * scene_instr.scale), rotation=(90, 0, -90))
render_product = rep.create.render_product(camera, (256, 256))
# Initialize and attach writer
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize( output_dir=os.path.join(scene_instr.output_path, f"{i}"), rgb=True, bounding_box_2d_tight=True)
writer.attach([render_product])
light_group = rep.create.group(["/World/defaultLight"])
shapes = rep.get.prims(semantics=[('class', i) for i in ALL_SEMANTIC_TYPES])
mats = rep.create.material_omnipbr(diffuse=rep.distribution.uniform((0,0,0), (1,1,1)), count=20)
with rep.trigger.on_frame(num_frames=CONFIG["num_frames"]):
with camera:
rep.modify.pose(
position=rep.distribution.uniform((-1.5, -0.2, 0.5), (-1, 0.2, 0.5)),
rotation=(90, 0, -90),
)
# # Randomize light colors
# with light_group:
# rep.modify.attribute("color", rep.distribution.uniform((0.1, 0.1, 0.1), (1.0, 1.0, 1.0)))
# rep.modify.pose(
# position=rep.distribution.uniform((0, -45, 90), (0, 0, 90))
# )
# randomize
with shapes:
rep.randomizer.materials(mats)
run_orchestrator()
world.scene.remove_object(game_obj_name)
kit.update()
kit.close()
| 3,621 | Python | 32.537037 | 126 | 0.632422 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/instructor.py | # instructions as language
import carb
import omni
import os
import torch
# try:
# import cv2
# except:
# omni.kit.pipapi.install("opencv-python")
# import cv2
import numpy as np
from pxr import UsdPhysics, Gf, UsdGeom
from task.utils import *
import omni.kit.viewport_widgets_manager as wm
from omni import ui
from omni.isaac.core.utils.semantics import add_update_semantics, remove_all_semantics
import omni.replicator.core as rep
CAMERA_WIDGET_STYLING = {
"Rectangle::background": {"background_color": 0x7F808080, "border_radius": 5}
}
class LabelWidget(wm.WidgetProvider):
def __init__(self, text_list:list):
self.text_list = text_list
def build_widget(self, window):
with ui.ZStack(width=0, height=0, style=CAMERA_WIDGET_STYLING):
ui.Rectangle(name="background")
with ui.VStack(width=0, height=0):
ui.Spacer(height=2)
for text in self.text_list:
ui.Label(text, width=0, height=0, name="", style={"color": "darkorange"})
class SceneInstructor():
def __init__(self) -> None:
# constant
        self.long_handle_ratio = 3 # ratio to determine a long handle
        self.short_handle_ratio = 1.5 # ratio to determine a short handle
self.spatial_desc_tolerance = 0.05 # spatial description
# output path
self.output_path = "/home/yizhou/Research/temp"
self.reset()
def reset(self):
# scene
self.stage = omni.usd.get_context().get_stage()
# knowledge
self.handle_knowledge = {}
self.joint_knowledge = {"PhysicsRevoluteJoint":[], "PhysicsPrismaticJoint":[], "PhysicsFixedJoint": []}
# constant
self.scale = 0.1 # object scale
self.is_obj_valid = True # valid object scene
# pred
self.pred_boxes = None
self.is_pred_valid = True # Prediction valid
####################################################################################
############################ analysis ###############################################
####################################################################################
def analysis(self):
self.analysis_game()
self.analysis_handle_primary()
self.analysis_cabinet_type()
self.analysis_spatial_rel()
def analysis_game(self):
"""
        Analyze global game information
"""
bboxes = get_bounding_box("/World/Game/mobility")
self.game_center = 0.5 * (bboxes[0] + bboxes[1])
def analysis_handle_primary(self):
"""
        Analyze handles to get their positions
"""
keyword = "handle_"
prim_list = list(self.stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# get basic information
for prim in prim_list:
prim_path_str = prim.GetPath().pathString
handle_num = prim_path_str.split("/")[-1].split("_")[-1]
# get bounding boxes
bboxes = get_bounding_box(prim_path_str)
center = 0.5 * (bboxes[0] + bboxes[1])
scale = (bboxes[1][0] - bboxes[0][0], bboxes[1][1] - bboxes[0][1], bboxes[1][2] - bboxes[0][2])
size = scale[0] * scale[1] * scale[2]
size_type = self.get_handle_type_from_scale(scale)
direction = "horizontal" if scale[1] > scale[2] else "vertical"
relative_to_game_center = "left" if center[1] >= self.game_center[1] else "right"
self.handle_knowledge[prim_path_str] = {
"num": handle_num,
"center": center,
"relative_to_game_center": relative_to_game_center,
"bboxes": bboxes,
"scale": scale,
"size": size,
"size_type": size_type,
"direction": direction,
"overlap_with": [],
"overlap_with_longer": False,
"joint_type": "",
}
# get intersection
for i in range(len(prim_list)):
path_str1 = prim_list[i].GetPath().pathString
bboxes1 = self.handle_knowledge[path_str1]["bboxes"]
for j in range(i + 1, len(prim_list)):
path_str2 = prim_list[j].GetPath().pathString
bboxes2 = self.handle_knowledge[path_str2]["bboxes"]
if bboxes_overlap(bboxes1, bboxes2):
overlap_with1 = self.handle_knowledge[path_str1]["overlap_with"]
overlap_with1.append(path_str2)
overlap_with2 = self.handle_knowledge[path_str2]["overlap_with"]
overlap_with2.append(path_str1)
if max(self.handle_knowledge[path_str1]["scale"]) > max(self.handle_knowledge[path_str2]["scale"]):
self.handle_knowledge[path_str2]["overlap_with_longer"] = True
else:
self.handle_knowledge[path_str1]["overlap_with_longer"] = True
def analysis_cabinet_type(self):
# get drawer/door from joint type
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if "joint_" in item.GetPath().pathString]
# get joint knowledge
for prim in prim_list:
# print("type", prim, prim.GetTypeName())
joint = UsdPhysics.Joint.Get(self.stage, prim.GetPath())
assert joint, f"Not a joint? Check model {prim.GetPath().pathString}"
b1paths = joint.GetBody1Rel().GetTargets()
# print("b1paths", prim.GetTypeName(), b1paths)
self.joint_knowledge[prim.GetTypeName()].append([b1paths[0].pathString, prim.GetPath().pathString])
# update joint type
for handle_path_str in self.handle_knowledge:
handle_know = self.handle_knowledge[handle_path_str]
for joint_type in self.joint_knowledge:
for joint_body_path_str, joint_prim_path_str in self.joint_knowledge[joint_type]:
if joint_body_path_str in handle_path_str:
handle_know["joint_type"] = joint_type
handle_know["joint_path_str"] = joint_prim_path_str
break
# get revolute/linear handles
self.valid_handle_list = {}
# if it doesn't overlap with any larger handle, it is a true handle
for handle_path_str in self.handle_knowledge:
if not self.handle_knowledge[handle_path_str]["overlap_with_longer"]:
if self.handle_knowledge[handle_path_str]["joint_type"] == "PhysicsRevoluteJoint":
self.valid_handle_list[handle_path_str] = {
"joint_type": "PhysicsRevoluteJoint",
"cabinet_type": "door",
"vertical_description": "",
"horizontal_description": "",
}
if self.handle_knowledge[handle_path_str]["joint_type"] == "PhysicsPrismaticJoint":
self.valid_handle_list[handle_path_str] = {
"joint_type": "PhysicsPrismaticJoint",
"cabinet_type": "drawer",
"vertical_description": "",
"horizontal_description": "",
}
# other import information
self.valid_handle_list[handle_path_str]["joint"] = self.handle_knowledge[handle_path_str]["joint_path_str"].split("/")[-1]
self.valid_handle_list[handle_path_str]["relative_to_game_center"] = self.handle_knowledge[handle_path_str]["relative_to_game_center"]
self.valid_handle_list[handle_path_str]["direction"] = self.handle_knowledge[handle_path_str]["direction"]
def analysis_spatial_rel(self):
"""
        Analyze the spatial relationships of handles
: joint_type -> vertical -> horizontal
"""
print("analysis_spatial_rel: ", self.valid_handle_list)
if len(self.valid_handle_list) == 0:
carb.log_warn("No handle in the scene")
self.is_obj_valid = False
return
# if only one joint, no need to describe from spatial layout
if len(self.valid_handle_list) == 1:
self.is_obj_valid = True
return
# get vertical and horizontal centers
v_centers = []
h_centers = []
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
center_z = handle_center[2]
center_y = handle_center[1]
is_v_center_list = any([abs(z - center_z) < self.spatial_desc_tolerance for z in v_centers])
is_h_center_list = any([abs(y - center_y) < self.spatial_desc_tolerance for y in h_centers])
if not is_v_center_list:
v_centers.append(center_z)
if not is_h_center_list:
h_centers.append(center_y)
v_centers = sorted(v_centers)
h_centers = sorted(h_centers)
# vertical
if len(v_centers) == 1:
pass
elif len(v_centers) == 2:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
elif len(v_centers) == 3:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
elif abs(handle_center[2] - v_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "middle"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
elif len(v_centers) == 4:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
elif abs(handle_center[2] - v_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "second-bottom"
elif abs(handle_center[2] - v_centers[2]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "second-top"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
else:
carb.log_warn("too many handles align vertically!")
self.is_obj_valid = False
# horizontal
if len(h_centers) == 1:
pass
elif len(h_centers) == 2:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
elif len(h_centers) == 3:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
elif abs(handle_center[1] - h_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "middle"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
elif len(h_centers) == 4:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
elif abs(handle_center[1] - h_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "second-right"
elif abs(handle_center[1] - h_centers[2]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "second-left"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
else:
carb.log_warn("too many handles align horizontally!")
self.is_obj_valid = False
# print("valid_handle_list: ", self.valid_handle_list)
# print("knowledge", self.handle_knowledge)
def get_handle_type_from_scale(self, scale):
"""
Get a general shape for the handle
"""
if max(scale) / min(scale) > self.long_handle_ratio:
return "long"
elif max(scale) / min(scale) < self.short_handle_ratio:
return "short"
else:
return "middle?"
####################################################################################
############################ UI ###############################################
####################################################################################
def build_ui(self, desc:list, gui_path:str, gui_location):
gui = self.stage.GetPrimAtPath(gui_path)
if not gui:
gui = UsdGeom.Xform.Define(self.stage, gui_path)
gui.AddTranslateOp().Set(gui_location)
self.wiget_id = wm.add_widget(gui_path, LabelWidget(desc), wm.WidgetAlignment.TOP)
def build_handle_desc_ui(self):
"""
        build HUD labels for each valid handle
"""
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
handle_num = self.handle_knowledge[handle_path_str]["num"]
gui_location = handle_center
gui_path = f"/World/GUI/handle_{handle_num}"
h_desc = self.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = self.valid_handle_list[handle_path_str]["vertical_description"]
cabinet_type = self.valid_handle_list[handle_path_str]["cabinet_type"]
self.build_ui([f"{cabinet_type}", "handle_" + handle_num, f"{v_desc}/{h_desc}"], gui_path, gui_location)
######################################## semantic #####################################################
def add_semantic_to_handle(self):
for handle_path_str in self.valid_handle_list:
prim = self.stage.GetPrimAtPath(handle_path_str)
h_desc = self.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = self.valid_handle_list[handle_path_str]["vertical_description"]
cabinet_type = self.valid_handle_list[handle_path_str]["cabinet_type"]
# add_update_semantics(prim, "handle")
add_update_semantics(prim, semantic_label = f"{v_desc}_{h_desc}_{cabinet_type}")
def export_data(self):
"""
Export RGB and Bounding box info to file
"""
with rep.new_layer():
camera = rep.create.camera(position=(-10 * self.scale, 0, 5 * self.scale), rotation=(90, 0, -90))
render_product = rep.create.render_product(camera, (256, 256))
# Initialize and attach writer
self.writer = rep.WriterRegistry.get("BasicWriter")
self.writer.initialize( output_dir=self.output_path, rgb=True, bounding_box_2d_tight=True)
self.writer.attach([render_product])
with rep.trigger.on_frame(num_frames=1):
pass
rep.orchestrator.run()
rep.BackendDispatch.wait_until_done()
# rep.orchestrator.preview()
# omni.kit.commands.execute("DeletePrims", paths=["/World/Game"])
def load_model(self):
"""
        Load the deep learning model
"""
from exp.model import load_vision_model
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = load_vision_model().to(self.device)
print("successfully loaded model")
def predict_bounding_boxes(self, image, detection_threshold = 0.5):
"""
Predict bounding boxes
::params:
        image: HxWx3 RGB array with values in [0, 255]
"""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
image_arr = image / 255.0
images = [torch.tensor(image_arr).to(torch.float).permute(2,0,1).to(self.device )] # .to("cuda")
outputs = self.model(images)
# print("outputs", outputs)
boxes = outputs[0]['boxes'].data.cpu().numpy()
scores = outputs[0]['scores'].data.cpu().numpy()
# sort from max to min
inds = scores.argsort()[::-1]
boxes = boxes[inds]
# if no boxes!
if len(boxes) == 0:
self.is_pred_valid = False
return
select_boxes = boxes[scores >= detection_threshold].astype(np.int32)
        # if no box clears the threshold, fall back to all predicted boxes
if len(select_boxes) == 0:
select_boxes = boxes
# get world box positions
        self.pred_boxes = [self.get_bbox_world_position(box) for box in select_boxes]
def get_bbox_world_position(self, box,
resolution = 256, D = -293, camera_pos = [-1, 0, 0.5], handle_x = 0.61857):
"""
Calculate the grasp location for the handle
        box: [x_min, y_min, x_max, y_max] 2D bounding box in camera
resolution: camera resolution
D: depth of field
camera_pos: camera_position
handle_x: object offset
"""
w_min = box[0] - resolution / 2
w_max = box[2] - resolution / 2
h_min = box[1] - resolution / 2
h_max = box[3] - resolution / 2
y_max = (handle_x - camera_pos[0]) * w_min / D + camera_pos[1]
y_min = (handle_x - camera_pos[0]) * w_max / D + camera_pos[1]
z_max = (handle_x - camera_pos[0]) * h_min / D + camera_pos[2]
z_min = (handle_x - camera_pos[0]) * h_max / D + camera_pos[2]
return [y_min, z_min, y_max, z_max]
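    # Mapping sketch (using the hypothetical defaults above): pixel columns map
    # to world y and pixel rows to world z, both scaled by
    # (handle_x - camera_pos[0]) / D, so the image center (128, 128) maps back
    # to y = camera_pos[1], z = camera_pos[2].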
def get_box_from_desc(self, v_desc, h_desc):
"""
Get box from description
"""
# if no description, get bbox of the highest score
if v_desc == "" and h_desc == "":
return self.pred_boxes[0]
# if just one box
if len(self.pred_boxes) == 1:
return self.pred_boxes[0]
v_boxes = sorted(self.pred_boxes, key = lambda box: 0.5 * (box[1] + box[3]))
h_boxes = sorted(self.pred_boxes, key = lambda box: 0.5 * (box[0] + box[2]))
# only vertical relation
if h_desc == "":
if v_desc == "top":
return v_boxes[-1]
elif v_desc == "second top" or v_desc == "middle":
return v_boxes[-2]
if v_desc == "bottom":
return v_boxes[0]
elif v_desc == "second bottom" or v_desc == "middle":
return v_boxes[1]
# only horizontal relation
elif v_desc == "":
if h_desc == "left":
return h_boxes[-1]
elif h_desc == "second left" or h_desc == "middle":
return h_boxes[-2]
if h_desc == "right":
return h_boxes[0]
elif h_desc == "second right" or h_desc == "middle":
return h_boxes[1]
else: # have both description
if v_desc == "bottom" and h_desc == "left":
if v_boxes[0][0] > v_boxes[1][0]:
return v_boxes[0]
else:
return v_boxes[1]
elif v_desc == "bottom" and h_desc == "right":
if v_boxes[0][0] > v_boxes[1][0]:
return v_boxes[1]
else:
return v_boxes[0]
elif v_desc == "top" and h_desc == "left":
if v_boxes[-1][0] > v_boxes[-2][0]:
return v_boxes[-1]
else:
return v_boxes[-2]
elif v_desc == "top" and h_desc == "right":
if v_boxes[-1][0] > v_boxes[-2][0]:
return v_boxes[-2]
else:
return v_boxes[-1]
# TODO: unhandled situation
else:
return self.pred_boxes[0]
return self.pred_boxes[0]
| 21,907 | Python | 40.335849 | 151 | 0.532752 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/utils.py | import omni
from pxr import UsdGeom, Usd
def get_bounding_box(prim_path: str):
"""
Get the bounding box of a prim
"""
stage = omni.usd.get_context().get_stage()
purposes = [UsdGeom.Tokens.default_]
bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
prim = stage.GetPrimAtPath(prim_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
    game_bboxes = [bboxes.ComputeAlignedRange().GetMin(), bboxes.ComputeAlignedRange().GetMax()]
return game_bboxes
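# Usage sketch (the prim path is hypothetical):
#   lo, hi = get_bounding_box("/World/Game/mobility")
#   size = (hi[0] - lo[0], hi[1] - lo[1], hi[2] - lo[2])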
def bboxes_overlap(bboxes1, bboxes2):
"""
To judge whether two bboxes overlap with each other
bboxes: [min (vec3), max (vec3)]
"""
return not ( bboxes1[0][0] > bboxes2[1][0] or # left
bboxes1[1][0] < bboxes2[0][0] or # right
bboxes1[0][1] > bboxes2[1][1] or # bottom
bboxes1[1][1] < bboxes2[0][1] or # up
bboxes1[0][2] > bboxes2[1][2] or # front
bboxes1[1][2] < bboxes2[0][2]) # back
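# Quick sanity check for bboxes_overlap (illustrative values):
#   a = [(0, 0, 0), (1, 1, 1)]
#   b = [(0.5, 0.5, 0.5), (2, 2, 2)]  # shares a corner region with a -> True
#   c = [(2, 2, 2), (3, 3, 3)]        # fully disjoint from a -> False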
def get_mesh_bboxes(keyword: str):
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
bboxes_list = []
for prim in prim_list:
bboxes = get_bounding_box(prim.GetPath().pathString)
bboxes_list.append(bboxes)
return bboxes_list | 1,461 | Python | 32.999999 | 119 | 0.607803 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/checker.py | # task check
import omni
from task.newJointCheck import JointCheck
class TaskChecker():
def __init__(self, target_obj, target_joint, joint_type, IS_RUNTIME = False) -> None:
        self.target_obj = target_obj
self.target_joint = target_joint
self.joint_type = joint_type
self.target_prim_path = "/World/Game/" + self.target_obj
self.joint_checker = JointCheck(self.target_prim_path, self.target_joint)
self.init_value = 0.0 # from start
self.target_value = 0.25 # to target
# reverse joint direction check if necessary
if self.joint_type == "PhysicsRevoluteJoint":
self.check_joint_direction()
# other constant
self.total_step = 0
self.print_every = 30
self.checking_interval = 30
# register events
if not IS_RUNTIME:
self.create_task_callback()
def check_joint_direction(self):
"""
        Check whether the joint's motion runs toward its upper limit (positive rotation) or its lower limit (negative rotation)
"""
is_upper = abs(self.joint_checker.upper) > abs(self.joint_checker.lower)
if not is_upper:
# if is lower, reverse init_value and target value
self.init_value = 1 - self.init_value if self.init_value != -1 else -1
self.target_value = 1 - self.target_value
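        # Worked example (hypothetical limits): with lower = -40 deg and
        # upper = 0 deg, abs(upper) > abs(lower) is False, so the values flip:
        # init_value becomes 1 - 0.0 = 1.0 and target_value becomes
        # 1 - 0.25 = 0.75.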
################################### UPDATE ###########################################
def create_task_callback(self):
self.timeline = omni.timeline.get_timeline_interface()
stream = self.timeline.get_timeline_event_stream()
self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
# subscribe to Physics updates:
self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
self._on_physics_step
)
def _on_timeline_event(self, e):
"""
set up timeline event
"""
if e.type == int(omni.timeline.TimelineEventType.STOP):
self.it = 0
self.time = 0
self.reset()
def reset(self):
"""
Reset event
"""
self._physics_update_subscription = None
self._timeline_subscription = None
# self._setup_callbacks()
def _on_physics_step(self, dt):
self.start_checking()
def start_checking(self):
self.total_step += 1
if self.total_step % self.checking_interval == 0:
percentage = self.joint_checker.compute_percentage()
# log
if self.total_step % self.print_every == 0:
print("current: {:.1f}; target: {:.1f}; delta percentage: {:.1f}:".format(percentage, self.target_value * 100, self.target_value * 100 - percentage) )
if percentage / 100.0 > self.target_value:
print("success")
# self.timeline.pause()
| 2,981 | Python | 33.275862 | 166 | 0.568266 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/newJointCheck.py | from omni.isaac.dynamic_control import _dynamic_control
import omni
import math
class JointCheck():
def __init__(self, joint_prim, joint_name) -> None:
self.joint_name = joint_name
self.stage = omni.usd.get_context().get_stage()
self.prim_list = list(self.stage.TraverseAll())
self.prim_list = [ item for item in self.prim_list if joint_name in
item.GetPath().pathString and item.GetPath().pathString.startswith(joint_prim) and item.GetPath().pathString.endswith(joint_name)]
assert len(self.prim_list) == 1, "len of " + str(len(self.prim_list))
self.prim = self.prim_list[0]
self.type = self.prim.GetTypeName()
self.full_name = self.prim.GetPath().pathString
self.joint = self.stage.GetPrimAtPath(self.full_name)
        # get joint upper and lower limits
self.upper = self.joint.GetAttribute("physics:upperLimit").Get()
self.lower = self.joint.GetAttribute("physics:lowerLimit").Get()
# need to compute this at the first step
self.initial_percentage = self.compute_percentage()
def compute_velocity(self):
# this function currently is not accurate, do not use it.
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
dof_vel = self.dc.get_dof_velocity(dof_ptr)
# dof_vel = self.dc.get_dof_velocity_target(dof_ptr)
if self.type == 'PhysicsPrismaticJoint':
from omni.isaac.core.utils.stage import get_stage_units
v = dof_vel * (get_stage_units() * 100) # in centimeters
print("units conversion: ", get_stage_units() * 100)
else:
v = math.degrees(dof_vel)
return v
def get_joint_link(self):
body0 = self.joint.GetRelationship("physics:body0").GetTargets()[0]
body1 = self.joint.GetRelationship("physics:body1").GetTargets()[0]
return body1
def set_velocity(self, velocity):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
if self.type == 'PhysicsPrismaticJoint':
from omni.isaac.core.utils.stage import get_stage_units
#velocity is in centimeters
v = velocity / (get_stage_units() * 100)
else:
v = math.radians(velocity)
self.dc.wake_up_articulation(self.art)
        self.dc.set_dof_velocity(dof_ptr, v)  # pass the unit-converted value computed above
def compute_percentage(self):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
dof_pos = self.dc.get_dof_position(dof_ptr)
if self.type == 'PhysicsPrismaticJoint':
tmp = dof_pos
else:
tmp = math.degrees(dof_pos)
        percentage = (tmp - self.lower)/(self.upper - self.lower) * 100
        # print("upper lower percentage", tmp, self.upper, self.lower, percentage)
        if percentage > 100:
            percentage = 100
        elif percentage < 0:
            percentage = 0
        return percentage
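    # Worked example (hypothetical revolute joint): with lower = 0 deg,
    # upper = 40 deg and a reported dof position of ~0.349 rad (20 deg),
    # compute_percentage() returns (20 - 0) / (40 - 0) * 100 = 50.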
def compute_distance(self):
return abs(self.compute_percentage() - self.initial_percentage)
def set_joint(self, percentage):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
upper = self.joint.GetAttribute("physics:upperLimit").Get()
lower = self.joint.GetAttribute("physics:lowerLimit").Get()
tmp = percentage / 100.0 *(upper-lower) + lower
if self.type == 'PhysicsPrismaticJoint':
dof_pos = tmp
else:
dof_pos = math.radians(tmp)
self.dc.wake_up_articulation(self.art)
self.dc.set_dof_position(dof_ptr, dof_pos)
#test cases
# check = JointCheck("/World/game/mobility_Door_8897","joint_1")
# check.set_velocity(0.001)
# check.compute_velocity()
# print(check.set_joint(50))
# check = JointCheck("/World/game/mobility_StorageFurniture_40417", "joint_3")
# print(check.compute_velocity()) | 4,466 | Python | 37.179487 | 142 | 0.62987 |
lydd8888/kit-exts-uv_viewer/README.md | # UV viewer Extension

Demo:https://www.youtube.com/watch?v=deavDepvdlc
# About
This extension can show object's UV in viewport
This Extension is heavily inspried by Camera Reticle Extension
I create this entension mainly to check if my UV is right since Omniverse does not natively support UV check
Since it is fully Python, for complex model, it takes couple seconds to calculate the UV
# Adding Extensions
To add this extension to your Omniverse app:
Clone the extension from: https://github.com/lydd8888/kit-exts-uv_viewer
Go into: Extension Manager -> Gear Icon -> Setting
Add Local Link to Extension Search Path:```../uv_viewer_extension/kit-exts-uv_viewer/exts```
# App Link Setup
If `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
| 1,416 | Markdown | 26.249999 | 258 | 0.753531 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/constants.py | """Constants used by the CameraReticleExtension"""
DEFAULT_UI_PERCENTAGE = 50
| 79 | Python | 18.999995 | 50 | 0.772152 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/extension.py | import omni.ext
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport_window
from .viewport_scene import ViewportSceneInfo
from .uv_viewer import UvModel
from omni.ui import scene as sc
import carb
class UV_Viewer(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self):
super().__init__()
self.viewport_scene = None
def on_startup(self, ext_id: str) -> None:
viewport_window = get_active_viewport_window()
if viewport_window is not None:
uv_model = UvModel()
self.viewport_scene = ViewportSceneInfo(uv_model, viewport_window, ext_id)
def on_shutdown(self):
"""Called when the extension is shutting down."""
if self.viewport_scene:
self.viewport_scene.destroy()
self.viewport_scene = None | 982 | Python | 31.766666 | 119 | 0.669043 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/viewport_scene.py | from functools import partial
from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjInfoManipulator
from .uv_viewer import UvModel
import carb
from omni.ui import color as cl
import omni.kit.app
import omni.client
import threading
import os
from . import constants
class ViewportSceneInfo():
"""The scene view overlay
    Build the UV overlay and the UV menu button on the given viewport window.
"""
def __init__(self, model: UvModel, viewport_window: ui.Window, ext_id: str) -> None:
"""
        Tracks the UV image file on disk to detect when the UV map has changed.
"""
self.filename = "D:/Amazon_Box_Stable_Diffusion/HoudiniUV/UV_Viewer_Extension/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/data/output.png"
self.previous_timestamp = None # Initialize the previous timestamp
"""
Overlay Constructor
Args:
viewport_window (Window): The viewport window to build the overlay on.
ext_id (str): The extension id.
"""
self.model = model
self.scene_view = None
self.viewport_window = viewport_window
self.ext_id = ext_id
self.on_window_changed()
self.previous_resolution = (None, None)
        # Rebuild the overlay whenever the model changes
self.model.add_model_changed_fn(self.build_uv_overlay)
        # Rebuild the overlay whenever the viewport resolution changes
self.check_resolution_periodically()
def check_resolution_periodically(self):
self.check_resolution_change()
        # Re-check the resolution every 0.1 seconds
threading.Timer(0.1, self.check_resolution_periodically).start()
def check_resolution_change(self):
current_resolution = self.viewport_window.viewport_api.get_texture_resolution()
if current_resolution != self.previous_resolution:
self.build_uv_overlay()
self.previous_resolution = current_resolution
def on_window_changed(self, *args):
"""Update aspect ratio and rebuild overlay when viewport window changes."""
if self.viewport_window is None:
return
settings = carb.settings.get_settings()
fill = self.viewport_window.viewport_api.fill_frame
if fill:
width = self.viewport_window.frame.computed_width + 8
height = self.viewport_window.height
else:
width, height = self.viewport_window.viewport_api.resolution
self._aspect_ratio = width / height
self.model = self.get_model()
carb.log_info("build_overlay")
self.build_uv_overlay()
def get_aspect_ratio_flip_threshold(self):
"""Get magic number for aspect ratio policy.
Aspect ratio policy doesn't seem to swap exactly when window_aspect_ratio == window_texture_aspect_ratio.
This is a hack that approximates where the policy changes.
"""
return self.get_aspect_ratio()*0.95
def build_uv_overlay(self, *args):
        # Create a unique frame for our SceneView
with self.viewport_window.get_frame(self.ext_id):
with ui.ZStack():
# Create a default SceneView (it has a default camera-model)
self.scene_view = sc.SceneView()
with self.scene_view.scene:
if self.model.uv_enabled.as_bool:
ObjInfoManipulator(viewport_window=self.viewport_window, model=self.get_model())
# Register the SceneView with the Viewport to get projection and view updates
# This is control
# self.viewport_window.viewport_api.add_scene_view(self.scene_view)
# Build UV Menu button
with ui.VStack():
ui.Spacer()
with ui.HStack(height=0):
ui.Spacer()
self.uv_menu = UvMenu(self.model)
def get_aspect_ratio(self):
return self._aspect_ratio
def get_model(self):
return self.model
def __del__(self):
self.destroy()
def destroy(self):
if self.scene_view:
# Empty the SceneView of any elements it may have
self.scene_view.scene.clear()
# un-register the SceneView from Viewport updates
if self.viewport_window:
self.viewport_window.viewport_api.remove_scene_view(self.scene_view)
# Remove our references to these objects
self.viewport_window = None
self.scene_view = None
class UvMenu():
"""The popup uv menu"""
def __init__(self, model: UvModel):
self.model = model
self.button = ui.Button("Show Uv", height = 0, width = 0, mouse_pressed_fn=self.show_uv_menu,
style={"margin": 10, "padding": 5, "color": cl.white})
self.uv_menu = None
def on_group_check_changed(self, safe_area_group, model):
"""Enables/disables safe area groups
When a safe area checkbox state changes, all the widgets of the respective
group should be enabled/disabled.
Args:
safe_area_group (HStack): The safe area group to enable/disable
model (SimpleBoolModel): The safe group checkbox model.
"""
safe_area_group.enabled = model.as_bool
def show_uv_menu(self, x, y, button, modifier):
self.uv_menu = ui.Menu("Uv Option", width=200, height=100)
self.uv_menu.clear()
with self.uv_menu:
with ui.Frame(width=0, height=100):
with ui.HStack():
with ui.VStack():
ui.Label("Uv Option", alignment=ui.Alignment.LEFT, height=30)
with ui.HStack(width=0):
ui.Spacer(width=20)
cb = ui.CheckBox(model=self.model.uv_enabled)
# if not action_safe_group, the floatslider will not work
action_safe_group = ui.HStack(enabled=self.model.uv_enabled.as_bool)
callback = partial(self.on_group_check_changed, action_safe_group)
cb.model.add_value_changed_fn(callback)
with action_safe_group:
ui.Spacer(width=10)
ui.Label("uv viewer", alignment=ui.Alignment.TOP)
ui.Spacer(width=14)
with ui.VStack():
ui.FloatSlider(self.model.uv_size, width=100,
format="%.0f%%", min=0, max=100, step=1)
ui.Rectangle(name="ActionSwatch", height=5)
ui.Spacer()
self.uv_menu.show_at(x - self.uv_menu.width, y - self.uv_menu.height)
| 6,987 | Python | 38.480226 | 155 | 0.574782 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/uv_viewer.py | from omni.ui import scene as sc
from omni.ui_scene._scene import AbstractManipulatorItem
import omni.usd
from pxr import Tf
from pxr import Usd
from pxr import UsdGeom
import omni.ui as ui
from . import constants
import carb
class UvModel(sc.AbstractManipulatorModel):
"""
    The model that tracks the mesh's UV
"""
# Position needed for when we call item changed
class PositionItem(sc.AbstractManipulatorItem):
def __init__(self) -> None:
super().__init__()
self.value = [0,0,0]
def __init__(self) -> None:
super().__init__()
# Current select prim
self.prim = None
# Set Current path
self.current_path = ""
# update to hold position object created
self.position = UvModel.PositionItem()
# Save the UsdContext name
usd_context = self._get_context()
# Get the Menu item
self.uv_enabled = ui.SimpleBoolModel(True)
self.uv_size = ui.SimpleFloatModel(constants.DEFAULT_UI_PERCENTAGE, min=0, max=100)
# Track selection changes
self.events = usd_context.get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self.on_stage_event, name="Object Info Selection Update"
)
self._register_submodel_callbacks()
self._callbacks = []
    def _get_context(self) -> omni.usd.UsdContext:
        # Get the UsdContext (not the stage) we are attached to
        return omni.usd.get_context()
def on_stage_event(self, event):
# if statement to only check when selection changed
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
usd_context = self._get_context()
stage = usd_context.get_stage()
if not stage:
return
prim_paths = usd_context.get_selection().get_selected_prim_paths()
if not prim_paths:
# This turns off the manipulator when everything is deselected
self._item_changed(self.position)
self.current_path = ""
return
prim = stage.GetPrimAtPath(prim_paths[0])
self.prim = prim
self.current_path = prim_paths[0]
# Position is changed because new selected object has a different position
self._item_changed(self.position)
    def get_item(self, identifier: str) -> AbstractManipulatorItem:
        if identifier == "name":
            return self.current_path
def _register_submodel_callbacks(self):
"""Register to listen to when any submodel values change."""
self.uv_enabled.add_value_changed_fn(self._model_changed)
self.uv_size.add_value_changed_fn(self._model_changed)
def _model_changed(self, model):
for callback in self._callbacks:
callback()
def add_model_changed_fn(self, callback):
self._callbacks.append(callback)
def destroy(self):
self.events = None
self.stage_event_delegate.unsubscribe() | 3,090 | Python | 33.730337 | 91 | 0.609385 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/object_info_manipulator.py | import omni.kit.pipapi
omni.kit.pipapi.install("matplotlib==3.8.2")
omni.kit.pipapi.install("numpy==1.26.2")
omni.kit.pipapi.install("pycairo==1.25.1")
from PIL import Image, ImageDraw
from pxr import Usd,Gf,UsdGeom,Vt
from omni.ui import scene as sc
import omni.ui as ui
from omni.ui import color as cl
import carb
from omni.ui_scene._scene import Color4
import omni.usd
import matplotlib.pyplot as plt
import numpy as np
import cairo
from .uv_viewer import UvModel
import time
import os
from pathlib import Path
class ObjInfoManipulator(sc.Manipulator):
"""
Manipulator that display the object uv right next to the object
"""
def __init__(self, viewport_window, model,**kwargs) -> None:
super().__init__(**kwargs)
# Build Cache for the UV data
self.cache = {}
self.vp_win = viewport_window
resolution = self.vp_win.viewport_api.get_texture_resolution()
self._aspect_ratio = resolution[0] / resolution[1]
self._width = resolution[0]
self._height = resolution[1]
if model is None:
self.model = UvModel() # Initialize with UvModel() if model is not provided
else:
self.model = model # Use the provided model if it's given
levels_to_go_up = 3
script_directory = Path(__file__).resolve()
script_directory = script_directory.parents[levels_to_go_up]
#script_directory = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
uv_path = "data/output.png"
uv_background = "data/uv_background.jpg"
self.file_path = os.path.join(script_directory,uv_path)
self.uv_background = os.path.join(script_directory,uv_background)
def on_build(self) -> None:
"""Called when the model is changed and rebuilds the whole manipulator"""
initial_mtime = os.path.getmtime(self.file_path)
aspect_ratio = self.get_aspect_ratio()
width = self.get_width()
height = self.get_height()
inverse_ratio = 1 / aspect_ratio
# Get uv_size, by default it is 50 from model file
uv_size = self.model.uv_size.as_float
if not self.model:
return
# If we don't have selection then just return
if self.model.get_item("name") == "":
return
# Track aspect ratio to determine where to place the uv graph
if width>height:
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.01*uv_size,0.01*uv_size,0.6)
transform = move*rotate*scale
with sc.Transform(transform):
with sc.Transform(sc.Matrix44.get_translation_matrix(0.5,0.5,0)):
self._build_safe_rect()
self._build_axis()
self._build_uv()
# Build Uv and save to disk
time.sleep(0.15)
current_mtime = os.path.getmtime(self.file_path)
# Compare current modification time with the initial one
if current_mtime != initial_mtime:
carb.log_warn(current_mtime)
carb.log_warn(initial_mtime)
# File has been updated, call _show_uv()
self._show_uv()
# Update the initial_mtime to the current modification time
initial_mtime = current_mtime
else :
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.5,0.5,0.5)
transform = move*rotate*scale
with sc.Transform(transform):
with sc.Transform(sc.Matrix44.get_translation_matrix(0.5,0.5,0)):
self._build_safe_rect()
self._build_axis()
self._build_uv()
# Build Uv and save to disk
time.sleep(0.15)
current_mtime = os.path.getmtime(self.file_path)
# Compare current modification time with the initial one
if current_mtime != initial_mtime:
carb.log_warn(current_mtime)
carb.log_warn(initial_mtime)
# File has been updated, call _show_uv()
self._show_uv()
# Update the initial_mtime to the current modification time
initial_mtime = current_mtime
# Check if uv png is new
    def is_file_updated(self, file_path, reference_time):
file_stat = os.stat(file_path)
file_modification_time = time.localtime(file_stat.st_mtime)
return file_modification_time > reference_time
def _build_uv(self):
# Get the object's path as a unique key
object_path = self.model.get_item('name')
# Check if object information is already in the cache
if object_path in self.cache:
object_info = self.cache[object_path]
carb.log_warn("uv info in cache")
else:
carb.log_warn("uv info not in cache")
# Gather information
stage = omni.usd.get_context().get_stage()
mesh = stage.GetPrimAtPath(self.model.get_item('name'))
if mesh == None:
return
if mesh.GetTypeName() != "Mesh":
carb.log_error("PLEASE SELECT A MESH")
return
else:
st = mesh.GetAttribute("primvars:st").Get()
st_indices = []
if mesh.GetAttribute("primvars:st:indices"):
st_indices = mesh.GetAttribute("primvars:st:indices").Get()
uv_coordinates = st
vertex_indices = list(range(len(uv_coordinates)))
vertex_counts = mesh.GetAttribute("faceVertexCounts").Get()
# Create a UV Mesh with UV Faces
if st_indices:
uv_mesh = np.array([uv_coordinates[i] for i in st_indices])
else:
uv_mesh = np.array([uv_coordinates[i] for i in vertex_indices])
# Initialize object_info dictionary
object_info = {
"uv_mesh": uv_mesh,
"vertex_counts": vertex_counts,
# Add any other information needed for self._draw_uv_line here
}
# Store the object information in the cache
self.cache[object_path] = object_info
# Retrieve the required information from the cached object_info dictionary
uv_mesh = object_info["uv_mesh"]
vertex_counts = object_info["vertex_counts"]
# initial count, will plus count number in vertex_counts
current_index = 0
# for debug only
loop_counter = 0
width, height = 512, 512
# image = Image.new("RGB", (width, height), (0, 0, 0))
# draw = ImageDraw.Draw(image)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
# Set background color
ctx.set_source_rgba(0, 0, 0, 0.8)
ctx.paint()
# Set the line width for thinner lines (adjust as needed)
line_width = 0.7 # You can change this value to make the lines thinner or thicker
ctx.set_line_width(line_width)
for count in vertex_counts:
uv_face = uv_mesh[current_index:current_index+count]
pixel_uv_face = [(uv[0] * width, uv[1] * height) for uv in uv_face]
ctx.set_source_rgb(0.6, 0.6, 0.6) # Light gray outline
ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD) # Filling rule
ctx.move_to(*pixel_uv_face[0])
for point in pixel_uv_face[1:]:
ctx.line_to(*point)
ctx.close_path()
ctx.stroke()
ctx.set_source_rgb(0.2, 0.2, 0.2) # Dark gray fill
ctx.fill()
current_index += count
# for debug only, test for loop counts
loop_counter += 1
if loop_counter >= 1000000:
break
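        # Example of the loop above (hypothetical data): with
        # vertex_counts = [4, 3], rows 0-3 of uv_mesh form the first (quad)
        # face and rows 4-6 the second (triangle) face; current_index
        # advances by each face's vertex count.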
# image.save("D:/Amazon_Box_Stable_Diffusion/HoudiniUV/UV_Viewer_Extension/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/data/output.png")
surface.write_to_png(self.file_path)
def _show_uv(self):
point_count = 4
# Form the mesh data
alpha = 0.9
points = [[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]]
vertex_indices = [0, 1, 2, 3]
colors = [[1, 1, 1, alpha], [1, 1, 1, alpha], [1, 1, 1, alpha], [1, 1, 1, alpha]]
uvs = [[0, 0], [0, 1], [1, 1], [1, 0]]
# Draw the mesh
uv_background = self.uv_background
filename = self.file_path
sc.TexturedMesh(uv_background, uvs, points, colors, [point_count], vertex_indices)
sc.TexturedMesh(filename, uvs, points, colors, [point_count], vertex_indices)
def display_previous_show_uv(self):
aspect_ratio = self.get_aspect_ratio()
width = self.get_width()
height = self.get_height()
inverse_ratio = 1 / aspect_ratio
if width>height:
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.6,0.6,0.6)
transform = move*rotate*scale
with sc.Transform(transform):
self._show_uv()
"""Main Function to draw UV directly in Omniverse"""
"""Depreciate due to performance issue"""
def _draw_uv_line(self, point_count, points):
# point_count = 3
# points = [[0,0,0],[0,0.5,0],[0.5,0.5,0]]
vertex_indices = []
colors = []
for i in range(point_count):
vertex_indices.append(i)
colors.append([0.5, 0.5, 0.5, 1])
# This will create a new list to append the first element to the list and form a closed line
line_points = points + [points[0]]
# Draw UV
sc.PolygonMesh(points, colors, [point_count], vertex_indices)
sc.Curve(
line_points,
thicknesses=[0.2],
colors=[0.0, 0.0, 0.0, 1],
curve_type=sc.Curve.CurveType.LINEAR,
)
# Draw a rect in 1:1 to show the UV block
def _build_safe_rect(self):
"""Build the scene ui graphics for the safe area rectangle
Args:
percentage (float): The 0-1 percentage the render target that the rectangle should fill.
color: The color to draw the rectangle wireframe with.
"""
transparent_black = (0, 0, 0, 0.1)
sc.Rectangle(1, 1, thickness=1, wireframe=False, color=transparent_black)
def _build_axis(self):
# grid represent 0-1
sc.Line([0,0,1], [0, 1, 1], thicknesses=[5.0], color=cl.red)
sc.Line([0,0,1], [1, 0, 1], thicknesses=[5.0], color=cl.red)
def get_aspect_ratio(self):
"""Get the aspect ratio of the viewport.
Returns:
float: The viewport aspect ratio.
"""
return self._aspect_ratio
def get_width(self):
"""Get the width of the viewport.
Returns:
            float: The viewport width.
"""
return self._width
def get_height(self):
"""Get the height of the viewport.
Returns:
            float: The viewport height.
"""
return self._height
def on_model_updated(self, item):
# Regenerate the manipulator
self.invalidate()
"""Test Function"""
def __example_draw_shape(self):
point_count = 6
points = [[0,0,0],[0,0.5,0],[0.5,0.5,0],[0.8,0,0],[0.8,0.5,0],[0.8,0.7,0]]
vertex_indices = []
sizes = []
colors = []
for i in range(point_count):
weight = i / point_count
vertex_indices.append(i)
colors.append([weight, 1 - weight, 1, 1])
print(vertex_indices)
sc.PolygonMesh(points, colors, [point_count], vertex_indices)
#pass | 12,430 | Python | 36.784194 | 153 | 0.556637 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/tests/Test.py | import matplotlib.pyplot as plt
import numpy as np
# Your provided data
st_values = [(-6.121573, -0.53302103), (-0.68017477, -0.53302103), (-4.7612233, -0.53302103),
(0.68017477, -0.53302103), (-0.68017477, 0.53302103), (0.68017477, 0.53302103),
(-2.0405242, -0.53302103), (-3.4008737, -0.53302103), (2.0405242, -0.53302103),
(-2.0405242, 0.53302103), (-3.4008737, 0.53302103), (2.0405242, 0.53302103)]
st_indices = [1, 3, 5, 4, 6, 9, 10, 7, 9, 4, 5, 11, 6, 7, 2, 0, 6, 1, 4, 9, 8, 11, 5, 3]
# Each entry of st_indices is a single index into st_values, so map directly
# (the original reshape-then-flatten round-trip was a no-op on the order)
mapped_coordinates = [st_values[i] for i in st_indices]
# Convert to NumPy array for easier manipulation
mapped_coordinates = np.array(mapped_coordinates)
# Extract u and v coordinates
u_coords, v_coords = mapped_coordinates[:, 0], mapped_coordinates[:, 1]
# Plot the UV mapping
plt.scatter(u_coords, v_coords, marker='o', label='UV Mapping')
plt.title('UV Mapping')
plt.xlabel('U Coordinate')
plt.ylabel('V Coordinate')
plt.legend()
plt.show()
| 1,142 | Python | 32.617646 | 93 | 0.658494 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/docs/README.md | # UV viewer Extension [com.soliptionpictures.hunter]
This is a simple extension to show the UV layout of a selected mesh.
This extension is heavily inspired by the Camera Reticle extension.
I created this extension mainly to check whether my UVs are correct, since Omniverse does not natively support UV inspection.
## Adding This Extension
To add this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add Link to the path: ../uv_viewer_extension/kit-exts-uv_viewer/exts
| 496 | Markdown | 40.416663 | 108 | 0.782258 |
barakooda/Nvidia-Test-Barak-Koren/README.md | Nvidia test for Senior Solution Engineer.
Omniverse Test
This test is split into 3 subtasks:
1. Rotating cube with texture:
Create an extension in Omniverse that opens a window, loads a texture, and displays it on a 3D rotating cube.
Useful links:
a. Quick example on how to create an extension: Build an Omniverse Extension in less than 10 Minutes | NVIDIA On-Demand
b. Scene and UI Manipulator Extensions - Samples & Tutorials on Github
c. Extensions: Extensions documentation -NVIDIA-
2. Inference Model
Add the MNIST model to your extension. Now you will load the texture (a handwritten letter/character) and display the character on the cube.
Useful links:
a. MNIST https://github.com/pytorch/examples/tree/main/mnist
b. Inference https://docs.omniverse.nvidia.com/extensions/latest/ext_inference.html
3. Draw with mouse:
Add to the extension window a widget that can draw with the mouse a letter/character and display it on the cube.
| 950 | Markdown | 46.549998 | 147 | 0.795789 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/common.py |
import os
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODEL_PATH = os.path.join(root_path, "data", "mnist_cnn.pt")
OUTPUT_PATH = os.path.join(root_path, "data", "predicted_label_image.png")
TEXTURE_SIZE = 256
TXT_SIZE = 180
BLACK_COLOR = [0, 0, 0, 255] | 284 | Python | 24.909089 | 74 | 0.683099 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/extension.py | import os
import omni.ext
import omni.ui as ui
import omni.kit.commands
from pxr import Sdf,UsdShade,UsdGeom
from .img2txt2img import img2txt2img
from .circle import draw_circle,draw_circle_optimized
from PIL import Image
import numpy as np
from .bug_fixes import fix_cube_uv
from .utils import wait_for_stage
from .common import OUTPUT_PATH,MODEL_PATH,TEXTURE_SIZE,root_path
import omni.timeline as timeline
#na_vi_da_test
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Na_vi_da_testExtension(omni.ext.IExt):
def set_rotation_for_cube(self)->None:
cube = self.stage.GetPrimAtPath(self.cube_path)
# Obtain xformable interface
xformable = UsdGeom.Xformable(cube)
# Create rotation attribute for keyframing
rotation_attr = xformable.AddRotateYOp().GetAttr()
# Set keyframes
self.stage.SetStartTimeCode(1)
time_start = self.stage.GetStartTimeCode()
time_end = self.stage.GetEndTimeCode()
num_frames = time_end - time_start
rotation_attr.Set(0, time_start)
rotation_attr.Set(360 - (num_frames / 360 ), 100)
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
    def click_spawn_cube(self):
        self.spawn_cube()
self.set_rotation_for_cube()
timeline.get_timeline_interface().play()
def click_load_image(self):
image_path = self.image_path_string_field_model.get_value_as_string()
img2txt2img(MODEL_PATH, image_path, OUTPUT_PATH)
shader = UsdShade.Shader.Define(self.stage, f'/World/Looks/OmniPBR/Shader')
shader.GetInput('diffuse_texture').Set(OUTPUT_PATH)
def click_reset(self):
self.clear_all()
def _on_mouse_pressed(self, x, y, key):
image_pos_x=self._image.screen_position_x
image_pos_y=self._image.screen_position_y
x = int(x - image_pos_x)
y = int(y - image_pos_y)
self.image_data = draw_circle_optimized(self.image_data, x, y, 8)
self.image_data_np = self.image_data.data
self.provider.set_data_array(self.image_data_np, self.image_data_size)
def clear_image(self):
self.image_data.fill(255)
self.image_data_size = self.image_data.shape[:2]
self.image_data_np = self.image_data.data
self.provider.set_data_array(self.image_data_np, self.image_data_size)
print("clear")
    def spawn_cube(self):
omni.kit.commands.execute('cl')
self.cube_path = omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',prim_type='Cube')[1]
print("##########",self.cube_path)
self.stage = wait_for_stage()
cube = self.stage.GetPrimAtPath(self.cube_path)
fix_cube_uv(cube)
self.mat = omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary',
mdl_name='OmniPBR.mdl',
mtl_name='OmniPBR',
mtl_created_list=['/World/Looks/OmniPBR'],
bind_selected_prims=[])
print("##########",self.mat)
omni.kit.commands.execute('BindMaterial',
material_path='/World/Looks/OmniPBR',
prim_path=['/World/Cube'],
strength=['weakerThanDescendants'])
self.material = UsdShade.Material.Get(self.stage, '/World/Looks/OmniPBR')
print("Cube Spwaned")
def clear_all(self)->None:
omni.kit.commands.execute('DeletePrims',
paths=[Sdf.Path('/World/Cube')],
destructive=False)
omni.kit.commands.execute('DeletePrims',
paths=[Sdf.Path('/World/Looks')],
destructive=False)
def on_shutdown(self):
print("[na_vi_da_test] na_vi_da_test shutdown")
    def submit_drawing(self):
image_path = OUTPUT_PATH
img = Image.fromarray(self.image_data,mode="RGBA")
img.save(image_path, "PNG")
img2txt2img(MODEL_PATH, image_path, OUTPUT_PATH,invert_image=True)
shader = UsdShade.Shader.Define(self.stage, f'/World/Looks/OmniPBR/Shader')
shader.GetInput('diffuse_texture').Set(OUTPUT_PATH)
def build_window(self):
with self._window.frame:
with ui.VStack():
with ui.HStack(height=ui.Percent(5)):
ui.Button("Spwan Cube", clicked_fn=self.click_spwan_cube,width=64,height=64)
ui.Button("Reset", clicked_fn=self.click_reset,width=64,height=64)
with ui.VStack(height=ui.Percent(20)):
with ui.CollapsableFrame("By Image Path"):
with ui.VStack():
ui.Button("Load Image From Path", clicked_fn=self.click_load_image)
ui.Label("Image Path:")
self.image_path_string_field_model = ui.StringField().model
with ui.VStack(height=ui.Percent(75)):
with ui.CollapsableFrame("By Drawing"):
with ui.VStack():
with ui.HStack(height=ui.Percent(5)):
ui.Button("Submit",width=32,height=16, clicked_fn=self.submit_drwaing)
ui.Button("Clear",width=32,height=16, clicked_fn=self.clear_image)
self._image = ui.ImageWithProvider(
self.provider,
width=TEXTURE_SIZE,
height=TEXTURE_SIZE,
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT)
self._image.set_mouse_moved_fn(lambda x, y, b, m: self._on_mouse_pressed(x,y,b))
self._image.set_mouse_pressed_fn(lambda x, y, b, m: self._on_mouse_pressed(x,y,b))
def on_startup(self, ext_id):
self.image_path_string_field_model = None
self.image_data = np.ones((TEXTURE_SIZE, TEXTURE_SIZE, 4), dtype=np.uint8) * 255
self.image_data_size = self.image_data.shape[:2]
self.image_data_np = self.image_data.data
self.provider = ui.ByteImageProvider()
self.provider.set_data_array(self.image_data_np, self.image_data_size)
self._window = ui.Window("Textured Cube", width=512, height=512)
self.build_window() | 6,971 | Python | 39.77193 | 119 | 0.585999 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/bug_fixes.py |
from pxr import UsdGeom, Vt
NEW_UVS = Vt.Vec2fArray([
(0, 0), (1, 0), (1, 1), (0, 1),
(1, 0), (1, 1), (0, 1), (0, 0),
(1, 1), (0, 1), (0, 0), (1, 0),
(0, 1), (0, 0), (1, 0), (1, 1),
(0, 0), (1, 0), (1, 1), (0, 1),
(1, 0), (1, 1), (0, 1), (0, 0)])
def fix_cube_uv(cube):
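    """Reset the cube's 'st' primvar so each of the six faces maps the full 0-1 UV square."""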
mesh = UsdGeom.Mesh(cube)
uv_attr = mesh.GetPrimvar('st')
uv_primvar = UsdGeom.Primvar(uv_attr)
uv_primvar.Set(NEW_UVS)
| 501 | Python | 22.904761 | 44 | 0.383234 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/img2txt2img.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import TEXTURE_SIZE,TXT_SIZE
from PIL import Image, ImageDraw, ImageFont, ImageOps
import torchvision.transforms as transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
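# Shape walkthrough for a 1x28x28 MNIST input: conv1 -> 32x26x26,
# conv2 -> 64x24x24, max_pool2d(2) -> 64x12x12, flatten -> 9216,
# which matches the fc1 input size above.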
def img2txt2img(model_path:str,image_path:str,output_path:str,invert_image:bool=False):
# Initialize the model
model = Net()
# Load the model
model.load_state_dict(torch.load(model_path))
# Set the model to evaluation mode
model.eval()
#input image
input_image = Image.open(image_path)
input_image = input_image.resize((28, 28))
input_image = input_image.split()[0]
if invert_image:
input_image = ImageOps.invert(input_image)
# Define the transformation
transform = transforms.Compose([
transforms.Resize((28, 28)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)) # Normalization values used during training
])
# Apply the transformations to the image
input_image = transform(input_image)
# Add a batch dimension
input_image = input_image.unsqueeze(0) # shape becomes [1, 1, 28, 28]
# Perform inference
with torch.no_grad(): # Deactivate gradients for the following block
output = model(input_image)
# Get the predicted label
_, predicted_label = torch.max(output, 1)
predicted_txt = predicted_label.item()
#print("Predicted label:", predicted_txt)
# Create an image with white background
width, height = TEXTURE_SIZE, TEXTURE_SIZE
image = Image.new("RGB", (width, height), "white")
draw = ImageDraw.Draw(image)
# Load a font
# You might have to download a specific font or use one that's available on your system
try:
font = ImageFont.truetype("arial.ttf", TXT_SIZE)
except IOError:
font = ImageFont.load_default()
# Calculate text size to center it
text = str(predicted_txt) # Assuming `predicted_label` is a PyTorch tensor containing the label
text_width, text_height = draw.textsize(text, font=font)
text_x = width / 2 - text_width / 2
text_y = height / 2 - text_height / 2
# Add text to image
draw.text((text_x, text_y), text, font=font, fill="black")
# Save or show image
image.save(output_path) | 2,991 | Python | 28.92 | 100 | 0.626881 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/utils.py | import time
from pxr import Sdf,Usd,UsdUtils
def wait_for_stage(timeout=10) -> Usd.Stage:
end_time = time.time() + timeout
while time.time() < end_time:
stages = UsdUtils.StageCache.Get().GetAllStages()
if stages:
return stages[0]
time.sleep(0.1) # Sleep for 100 milliseconds before checking again
return None | 361 | Python | 26.846152 | 75 | 0.648199 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/circle.py | import numpy as np
from .common import TEXTURE_SIZE,BLACK_COLOR
def draw_circle(image_data, coord_x, coord_y, radius)->np.ndarray:
# Create grids for x and y coordinates
y, x = np.ogrid[0:TEXTURE_SIZE, 0:TEXTURE_SIZE]
# Calculate the distance to the center for each point
distance_to_center = (x - coord_x)**2 + (y - coord_y)**2
# Identify the points within the circle
circle_points = distance_to_center <= radius**2
# Create the circle image with the same initial data as image_data
circle_image = np.copy(image_data)
# Draw the circle in black (setting it to [0, 0, 0, 255])
circle_image[circle_points] = [0, 0, 0, 255]
# Superimpose the circle onto the existing image data
image_data = np.where(circle_image == [0, 0, 0, 255], circle_image, image_data)
return image_data
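# Example usage (hypothetical values): stamp an 8-px-radius black dot onto a
# white RGBA canvas with either helper:
#   canvas = np.ones((TEXTURE_SIZE, TEXTURE_SIZE, 4), dtype=np.uint8) * 255
#   canvas = draw_circle_optimized(canvas, 20, 20, 8)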
def draw_circle_optimized(image_data, coord_x, coord_y, radius)->np.ndarray:
# Determine the bounding box of the circle.
box_left = max(0, coord_x - radius)
box_right = min(TEXTURE_SIZE, coord_x + radius)
box_top = max(0, coord_y - radius)
box_bottom = min(TEXTURE_SIZE, coord_y + radius)
# Create a coordinate grid for the bounding box.
y, x = np.ogrid[box_top:box_bottom, box_left:box_right]
# Use the circle equation to create a mask for that region.
circle_mask = (x - coord_x)**2 + (y - coord_y)**2 <= radius**2
# Update only the pixels in that region where the mask is True.
image_data[box_top:box_bottom, box_left:box_right][circle_mask] = BLACK_COLOR
return image_data | 1,578 | Python | 34.088888 | 83 | 0.664132 |
RPL-CS-UCL/IsaacSyntheticPerception/README.md |
# Isaac Synthetic Perception Data Generator
<img src="https://github.com/RPL-CS-UCL/IsaacSyntheticPerception/blob/main/docs/img/rpl_logo.png" width="250">
# Introduction
This data generator uses the photo-realistic NVIDIA Isaac Simulator to gather sensor data in procedurally created environments.
The system is built to use a SensorRig (that can be extended) that holds different types of sensors (RGB, RGB-D, LiDAR, IMU, US, and contact sensors). The Rig also captures semantic, object detection and semantic instances as ground truths with the data. This SensorRig can move through the environments through multiple methods (waypoints, velocity API). The Rig captures data based on the user's parameters. This allows the user to manually snapshot data, snapshot at points of interest, or at a set sample frequency.
The procedural generation uses different layers of noise to create heightmaps, biomes, and specific object placements. These parameters can be customised to produce varied, repeatable environments.
The major advantage of this system is that data can be generated for specific use cases saving space and ignoring obsolete data. Large datasets do not need to be downloaded from slow repositories. Data can be generated to have ideal class balances and can be optimised in a pipeline to generate data on the fly for poor-performing classes in ML models.
# Getting Started
## Installations
```
./python.sh -m pip install faiss-gpu opencv-python scikit-image timm fast_pytorch_keymeans pytorch_metric_learning kornia
```
### TODO:
Figure out how to install `pydensecrf` in Isaac's python
## Isaac Extension
Open IsaacSim, and enable the FrankaCopycat extension.
# Using the Sensor Rig
## What is it
The SensorRig is a custom class that creates, manages, moves, and samples an array of sensors automatically, based on user parameters. For example, you can place cameras at certain positions on the sensor rig. You can then have it follow user-defined waypoints and sample at certain intervals (all within the UI).
## How to use it
There are a few key things to note...
## How to expand it.
...
## SensorRig base options
### Sensors
Custom array of sensors, their positions and orientations
### Movement system
Movement type (velocity, waypoint, human control)
Move speed
#### waypoint
load from file
initialise from scene (build when in the scene)
save waypoints
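A minimal scripted sketch of driving the rig (hedged: the calls follow the `SensorRig` usage in `synthetic_perception.py`; the config path, output folder, and the `world` handle are placeholders you must supply):
```python
import omni.usd
# from <your extension module>.sensors import SensorRig  # adjust to your install

stage = omni.usd.get_context().get_stage()
rig = SensorRig('SensorRig', '/World')
rig.create_rig_from_file('/path/to/sensor_rig.json', stage, world)  # world: omni.isaac.core.World
rig.setup_sensor_output_path('/path/to/output')
world.add_physics_callback('sim_step', callback_fn=rig.move)  # waypoint following
world.add_physics_callback('sim_sample_step', callback_fn=rig.sample_sensors)
```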
| 2,415 | Markdown | 43.74074 | 519 | 0.785921 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
| 178 | Python | 16.899998 | 36 | 0.657303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/synthetic_perception.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
try:
import omni.isaac.version as v
VERSION = v.get_version()[0]
except:
VERSION = '2021'
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from pxr import Usd, Gf
from .PCG import AreaMaskGenerator
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.utils.semantics import get_semantics
import omni
import omni.kit.commands
import omni.timeline
from omni.isaac.core.utils.prims import get_prim_at_path # , get_prim_property
import omni.kit.viewport
from pxr import Usd, Gf, UsdGeom
import numpy as np
from .sensors import Lidar, DepthCamera, SensorRig
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.core import World
import omni.appwindow # Contains handle to keyboard
import carb
from omni.isaac.core.utils.stage import (
add_reference_to_stage,
is_stage_loading,
update_stage_async,
update_stage,
)
from pxr import UsdShade, Sdf
import omni.kit.commands
# from omni import usd._usd
from pxr import Sdf
import omni.physx
from omni.physx import get_physx_scene_query_interface
class SyntheticPerception(BaseSample):
"""
Main class
"""
# pylint: disable=too-many-instance-attributes
# Big class requires lots of attrs.
def __init__(self) -> None:
super().__init__()
self.__created_objs = []
self.save_count = 0
self.obstacles = []
self.__undefined_class_string = 'undef'
self.sr = SensorRig('SensorRig', '/World')
self._event_flag = False
self._o = "[SyntheticPerception] "
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
'NUMPAD_8': [1.5, 0.0, 0.0],
'UP': [1.5, 0.0, 0.0],
# back command
'NUMPAD_2': [-1.5, 0.0, 0.0],
'DOWN': [-1.5, 0.0, 0.0],
# left command
'NUMPAD_6': [0.0, -1.0, 0.0],
'RIGHT': [0.0, -1.0, 0.0],
# right command
'NUMPAD_4': [0.0, 1.0, 0.0],
'LEFT': [0.0, 1.0, 0.0],
# yaw command (positive)
'NUMPAD_7': [0.0, 0.0, 1.0],
'N': [0.0, 0.0, 1.0],
# yaw command (negative)
'NUMPAD_9': [0.0, 0.0, -1.0],
'M': [0.0, 0.0, -1.0],
}
def _sub_keyboard_event(self, event, *args, **kwargs):
self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t. the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._input_keyboard_mapping:
self.sr.apply_veloc(
self._input_keyboard_mapping[event.input.name]
)
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
self.sr.apply_veloc([0, 0, 0])
# print(self._input_keyboard_mapping[event.input.name])
return True
def force_reload(self):
self._world.initialize_physics()
self.setup_scene()
async def _on_load_world_async(self):
await omni.kit.app.get_app().next_update_async()
# print('[company.hello.world] company hello world startup')
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
# await self._world.reset_async()
async def load_sample(self) -> None:
"""Function called when clicking load buttton"""
if World.instance() is None:
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = World.instance()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load()
def setup_scene(self):
self.world = self.get_world()
async def init_world(self) -> None:
if World.instance() is None:
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = World.instance()
await self._world.reset_async()
await self._world.pause_async()
self.world_cleanup()
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.timeline = omni.timeline.get_timeline_interface()
self._world_settings = {
'physics_dt': 1.0 / 60.0,
'stage_units_in_meters': 1.0,
'rendering_dt': 1.0 / 60.0,
}
self._appwindow = omni.appwindow.get_default_app_window()
print('The world is initialized.')
async def setup_post_load(self):
self._world_settings = {
'physics_dt': 1.0 / 60.0,
'stage_units_in_meters': 1.0,
'rendering_dt': 1.0 / 60.0,
}
# self.init_sensor_and_semantics()
# self.init_sensor_rig()
# print('Aquiring keyboard interface')
# self._appwindow = omni.appwindow.get_default_app_window()
# self._input = carb.input.acquire_input_interface()
# self._keyboard = self._appwindow.get_keyboard()
# self._sub_keyboard = self._input.subscribe_to_keyboard_events(
# self._keyboard, self._sub_keyboard_event
# )
def remove_all_objects(self):
for i in reversed(range(len(self.__created_objs))):
try:
self._world.scene.remove_object(self.__created_objs[i])
            except Exception:
                pass  # already deleted from world
del self.__created_objs[i]
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists('sim_step'):
world.remove_physics_callback('sim_step')
if world.physics_callback_exists('sim_timestep'):
world.remove_physics_callback('sim_timestep')
stage = omni.usd.get_context().get_stage()
print('Pre rest setup over')
# self.sr.initialize_waypoints('', stage)
def world_cleanup(self):
self.remove_all_objects()
def init_semantics_in_scene(self):
self.stage = omni.usd.get_context().get_stage()
print(f"{self._o} Adding semantics to scene. Please wait until complete... ... ... ")
self.__add_semantics_to_all2(self.stage)
print(f"{self._o} All semantics added to scene. Complete.")
def init_sensor_and_semantics(self):
"""Initializes sensors and the replicator package"""
self.world_cleanup()
stage = omni.usd.get_context().get_stage()
# self.__sensor = Lidar()
        self.__add_semantics_to_all2(stage)  # the defined helper is __add_semantics_to_all2
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.timeline = omni.timeline.get_timeline_interface()
def add_semantic(self, p, prim_class):
"""Adds semantic to prim"""
sem_dict = get_semantics(p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(p)
if 'Semantics' not in sem_dict:
# print(
# 'adding semantics and collider to ',
# p.GetPrimPath(),
# ' of class ',
# prim_class,
# )
sem = Semantics.SemanticsAPI.Apply(p, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(prim_class)
def __add_semantics_to_all2(self, stage):
"""Add semantic information to all prims on stage based on parent xform"""
prim_class = self.__undefined_class_string
completed_classes = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
len_of_prim = len(prim_ref_name.split('/'))
for word in prim_ref_name.split('/'):
if 'class' in word and word not in completed_classes:
prim_class = word
# self.add_semantic(prim_ref, prim_class)
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
len_of_child = len(
str(prim_child.GetPrimPath()).split('/')
)
# print(len_of_prim, ' : ', len_of_child)
if abs(len_of_prim - len_of_child) == 1:
# print(prim_child)
self.add_semantic(prim_child, prim_class)
completed_classes.append(prim_class)
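    # Example of the convention above (hypothetical stage): a prim at
    # /World/class_tree/trunk picks up prim_class 'class_tree', and the direct
    # children of that xform are tagged with the 'class_tree' semantic label.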
def init_sensor_rig(self):
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
"""Initializes the sensor rig and adds individual sensors"""
self.sr.create_rig(
np.array([0, 5, 0]), np.asarray([1, 1, 1, 1]), self.stage
)
# self.sr.add_depth_camera_to_rig( (0, 0, 0), (0, 0, 0), (512, 512), True,"DepthCamera")
self.sr.add_sensor_to_rig(DepthCamera(name='depthcam2'))
self.sr.add_sensor_to_rig(Lidar(path='coolLidar'))
def init_sensor_rig_from_file(self, path,out_path):
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.sr.create_rig_from_file(path, self.stage, self._world)
self.sr.setup_sensor_output_path(out_path)
def sample_sensors(self):
self.sr.sample_sensors()
def attach_sensor_waypoint_callback(self, srx):
# print(self.get_world())
# print(self._world)
#
# self._world = World.instance()
# self.get_world().initialize()
        # uncomment to enable waypoint following
# self.get_world().add_physics_callback('sim_step', callback_fn=srx.move)
self._world.add_physics_callback('sim_step', callback_fn=srx.move)
def attach_sensor_sample_callback(self):
        # sample the sensors on every physics step
self.get_world().add_physics_callback('sim_sample_step', callback_fn=self.sr.sample_sensors)
# def spawn_asset(
# self,
# asset_path,
# class_name,
# prim_name,
# x,
# y,
# z,
# scale,
# object_scale_delta,
# allow_rot,
# orign_p_name = "",
# override=False,
# rot = (0,0,0),
# ):
#
# prim_path = '/World/' + 'class_' + class_name + '/' + prim_name
#
# # if not override:
# add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
#
# stage = omni.usd.get_context().get_stage()
# prim = stage.GetPrimAtPath(prim_path)
# # prim.GetReferences().AddReference(assetPath=asset_path, primPath=prim_path)
# prim.SetInstanceable(True)
#
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
# sem.CreateSemanticTypeAttr()
# sem.CreateSemanticDataAttr()
# sem.GetSemanticTypeAttr().Set('class')
# sem.GetSemanticDataAttr().Set(class_name)
#
#
# # omni.kit.commands.execute('CopyPrim',
# # path_from=orign_p_name,
# # path_to=prim_path,
# # duplicate_layers=False,
# # combine_layers=False,
# # exclusive_select=False,
# # flatten_references=False,
# # copy_to_introducing_layer=False)
# # here we want to modify the scale
# low_lim = scale - object_scale_delta
# high_lim = scale + object_scale_delta
# scale = random.uniform(low_lim, high_lim) #/ 100
#
# random_rotation = 0
# if allow_rot:
# random_rotation = random.uniform(0, 360)
#
#
# # omni.kit.commands.execute('CreatePayloadCommand',
# # usd_context=omni.usd.get_context(),
# # path_to=Sdf.Path(prim_path),
# # asset_path=asset_path,
# # instanceable=True)
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=prim_path, # f"/World/{p_name}",
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(0, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=prim_path, # f"/World/{p_name}",
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(0, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
#
# def spawn_loop(
# self,
# path,
# class_name,
# p_name,
# coll,
# height_map,
# scale=1,
# object_scale_delta=0,
# allow_rot=True,
# ):
#
# for i, n in enumerate(coll):
# override=False
# # if i == 1:
# #
# # prim_path = '/World/' + 'class_' + class_name + '/' + p_name
# #
# # add_reference_to_stage(usd_path=path, prim_path=prim_path)
# #
# # override=True
#
# x, y = n
# x = float(x)
# y = float(y)
# mesh_scale = 10
# x_ind = x * mesh_scale
# y_ind = y * mesh_scale
# mesh_height_modifier = 10
# # if x_ind >= 2560:
# # print('x, overfilled', x_ind)
# # x_ind = 2559
# # if y_ind >= 2560:
# #
# # print('y, overfilled', y_ind)
# # y_ind = 2559
# z = float(height_map[int(y_ind/10)][int(x_ind/10)])# / mesh_height_modifier # was abs
#
# cc =(int(y_ind/10),int(x_ind/10) )
# ind = np.ravel_multi_index(cc, (len(height_map), len(height_map)))
# # print(np.asarray(self.t_normals))
# poss_rot = np.asarray(self.t_normals)[ind]
# # print("triangle normals")
# # print(poss_rot)
# # second one is iterated fasted
# if self.occupancy[int(y_ind/10)][int(x_ind/10)] != 0:
# # print("skipping oj spawn")
# continue
#
# self.occupancy[int(y_ind/10)][int(x_ind/10)]= 1
# _p_name = f'{p_name}_{i}'
# self.spawn_asset(
# path,
# class_name,
# _p_name,
# x,
# y,
# z,
# scale,
# object_scale_delta,
# allow_rot,
# override = override,
# orign_p_name = p_name,
# rot = poss_rot
# )
#
# def create_terrains(self, terrain_info):
#
# # create the parent
#
# omni.kit.commands.execute(
# 'CreatePrimWithDefaultXform',
# prim_type='Xform',
# prim_path='/World/t',
# attributes={},
# select_new_prim=True,
# )
#
# for key in terrain_info:
# mesh_path = terrain_info[key].mesh_path
# scale = terrain_info[key].scale
# mat_path = terrain_info[key].material_path
# mat_name = mat_path.split('/')[-1]
# mat_name = mat_name.replace('.mdl', '')
# mesh_path = mesh_path.replace('.obj', '.usd')
# # spawn prim
#
# prim_p = f'/World/t/class_{mat_name}'
# # prim_p = f'/World/t/terrain{key}'
#
# stage = omni.usd.get_context().get_stage()
# scale = 1#0.01
# # X SCALE SHOULD BE NEGATIVE TO FLIP IT CORRECTLY
# random_rotation = 0.0
# x, y, z = 0, 0, 0
# add_reference_to_stage(usd_path=mesh_path, prim_path=prim_p)
# self.create_material_and_bind(
# mat_name, mat_path, prim_p, scale, stage
# )
# prim=stage.GetPrimAtPath(prim_p)
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
# sem.CreateSemanticTypeAttr()
# sem.CreateSemanticDataAttr()
# sem.GetSemanticTypeAttr().Set('class')
# sem.GetSemanticDataAttr().Set(mat_name)
#
# scale = 1#0.1
# random_rotation = 0.0
# x, y, z = 0, 0, 0
# # stage = self.usd_context.get_stage()
#
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=f'/World/t',
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# # old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# # old_rotation_order=Gf.Vec3i(0, 1, 2),
# # new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# # new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=f'/World/t',
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# # old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# # old_rotation_order=Gf.Vec3i(0, 1, 2),
# # new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# # new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
#
# async def spawn_all(self, obs_to_spawn, object_dict, height_map, normals):
# self.t_normals = normals
# length = len(obs_to_spawn)
# counter = 1
# for key in obs_to_spawn:
#
# obj = object_dict[key]
# path = object_dict[key].usd_path
# print(f"{self._o} Spawning {len(obs_to_spawn[key])} of {path}. {counter} / {length}")
# class_name = obj.class_name
# if class_name == '':
# class_name = obj.unique_id
# self.spawn_loop(
# path,
# class_name,
# f'{obj.unique_id}_',
# obs_to_spawn[key],
# height_map,
# scale=obj.object_scale,
# object_scale_delta=obj.object_scale_delta,
# allow_rot=obj.allow_y_rot,
# )
# print("spawned, now we wait till stage loads")
# await update_stage_async()
# # print("some time should have passed")
# # return
# counter += 1
#
# def generate_world_generator(self, obj_path, world_path):
#
#
# print("Tring to generator worldf rom file")
# (
# obs_to_spawn,
# object_dict,
# terrain_info,
# meshGen,
# ) = AreaMaskGenerator.generate_world_from_file(obj_path, world_path)
# height_map = meshGen._points2
# self.occupancy = np.zeros((len(height_map),len(height_map)))
# self.create_terrains(terrain_info)
# meshGen.clean_up_files()
#
# return obs_to_spawn, object_dict, height_map, meshGen.normals
#
#
# def create_material_and_bind(
# self, mat_name, mat_path, prim_path, scale, stage
# ):
#
# obj_prim = stage.GetPrimAtPath(prim_path)
# mtl_created_list = []
#
# omni.kit.commands.execute(
# 'CreateAndBindMdlMaterialFromLibrary',
# mdl_name=mat_path,
# mtl_name=mat_name,
# mtl_created_list=mtl_created_list,
# )
# mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
# omni.usd.create_material_input(
# mtl_prim,
# 'project_uvw',
# True,
# Sdf.ValueTypeNames.Bool,
# )
#
# omni.usd.create_material_input(
# mtl_prim,
# 'texture_scale',
# Gf.Vec2f(scale, scale),
# Sdf.ValueTypeNames.Float2,
# )
# cube_mat_shade = UsdShade.Material(mtl_prim)
#
# UsdShade.MaterialBindingAPI(obj_prim).Bind(
# cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
# )
# def generate_world(self, obj_path, world_path):
# print('Starting world gen')
#
# if World.instance() is None:
# self._world = World(**self._world_settings)
# self.setup_scene()
# else:
# self._world = World.instance()
# print('checking if world is activev')
# print(self._world)
# obs_to_spawn, object_dict = AreaMaskGenerator.generate_world_from_file(
# obj_path, world_path
# )
# length = len(obs_to_spawn)
# counter = 1
# for key in obs_to_spawn:
# obj = object_dict[key]
# path = object_dict[key].usd_path
#
# # print("checking if world is activev")
# # print(self._world)
# print('trying to spawn ', path, ' ', counter, ' / ', length)
# class_name = obj.class_name
# if class_name == '':
# class_name = obj.unique_id
# self.spawn_loop(
# path,
# class_name,
# f'{obj.unique_id}_',
# obs_to_spawn[key],
# scale=obj.object_scale,
# object_scale_delta=obj.object_scale_delta,
# allow_rot=obj.allow_y_rot,
# )
# counter += 1
# print('AREA GENERATION FINISHED')
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/World/t.xformOp:orient'),
# value=Gf.Quatd(0.7071067811865476, Gf.Vec3d(-0.7071067811865476, 0.0, 0.0)),
# prev=Gf.Quatd(1.0, Gf.Vec3d(0.0, 0.0, 0.0)),
# )
#
#
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/World/t.xformOp:orient'),
# value=Gf.Quatd(6.123233995736766e-17, Gf.Vec3d(-4.329780281177467e-17, -0.7071067811865476, -0.7071067811865476)),
# prev=Gf.Quatd(0.7071067811865476, Gf.Vec3d(-0.7071067811865476, 0.0, 0.0)),
# )
# self.spawn_asset(
# mesh_path,
# 'terrain',
# f'terrainmesh_{key}',
# 0,
# 0,
# 0,
# 1,
# 0,
# False,
# )
# self.create_material_and_bind(mat_name,mat_path,)
# def test_spawn1(self):
#
# # asyncio.ensure_future(self.init_world())
# mesh_path = 'C:\\Users\\jonem\\Documents\\Kit\\apps\\Isaac-Sim\\exts\\IsaacSyntheticPerception\\com\\SyntheticPerception\\app\\PCG\\mesh_0.usd'
#
# add_reference_to_stage(usd_path=mesh_path, prim_path='/World/terrain')
# # self.spawn_asset(
# # mesh_path,
# # 'terrain',
# # f'terrainmesh',
# # 0,
# # 0,
# # 0,
# # 1,
# # 0,
# # False,
# # )
# def add_asset_to_stage(
# self, asset_path, prim_name, prim_path, scene, **kwargs
# ):
# # print('adding asset to stage ', asset_path, prim_path)
#
# add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
| 24,894 | Python | 34.871758 | 153 | 0.524303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/sensors.py | """
This class represents the SensorRig and supporting functions.
The goal of this class is to provide an easy control method to sample an array of sensors
and control its movement within an environment.
The SensorRig primarily has a collection of sensors that is read in from a json file.
These sensors are created and stored depending on the parameters and are constructed within
their own classes. See the Sensors folder for all available implemented sensors.
The rig also handles sampling rates and timestamps.
"""
from omni.syntheticdata.scripts.sensors import enable_sensors
from omni.syntheticdata import helpers
from omni.isaac.core.utils.prims import define_prim, delete_prim
import pathlib
import json
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import omni
import omni.kit.commands
import omni.timeline
import omni.kit.viewport
from pxr import Usd
import omni.replicator.core as rep
import math
import numpy as np
import scipy.spatial.transform as tf
from dataclasses import dataclass
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.physx import get_physx_scene_query_interface
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
import carb
from pxr import Sdf
from .Sensors.LIDAR import Lidar
from .Sensors.IMU import IMUSensor
from .Sensors.Camera import DepthCamera
from omni.isaac.core.utils.rotations import (
    lookat_to_quatf,
    gf_quat_to_np_array,
)
from numpy.linalg import norm
import copy
import traceback
from scipy.spatial.transform import Rotation
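# Local implementation that operates on Gf quaternions (GetReal/GetImaginary);
# the Isaac Sim helper of the same name expects a numpy-array quaternion.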
def quat_to_euler_angles(q):
q_img = q.GetImaginary()
q_real = q.GetReal()
# roll (x-axis rotation)
sinr_cosp = 2 * (q_real * q_img[0] + q_img[1] * q_img[2])
cosr_cosp = 1 - 2 * (q_img[0] * q_img[0] + q_img[1] * q_img[1])
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2 * (q_real * q_img[1] - q_img[2] * q_img[0])
if abs(sinp) >= 1:
pitch = math.copysign(math.pi / 2, sinp) # use 90 degrees if out of range
else:
pitch = math.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2 * (q_real * q_img[2] + q_img[0] * q_img[1])
cosy_cosp = 1 - 2 * (q_img[1] * q_img[1] + q_img[2] * q_img[2])
yaw = math.atan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
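# Sanity check (illustrative): the identity quaternion maps to zero angles,
# e.g. quat_to_euler_angles(Gf.Quatd(1.0, Gf.Vec3d(0, 0, 0))) -> (0.0, 0.0, 0.0).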
def normalize(v):
    n = norm(v)
    if n == 0:
        # A zero-length vector cannot be normalized; report it and return
        # the vector unchanged instead of dividing by zero.
        traceback.print_stack()
        return v
    return v / n
def normalized(v):
if v is None:
return None
return normalize(copy.deepcopy(v))
def proj_orth(v1, v2, normalize_res=False, eps=1e-5):
v2_norm = norm(v2)
if v2_norm < eps:
return v1
v2n = v2 / v2_norm
v1 = v1 - np.dot(v1, v2n) * v2n
if normalize_res:
return normalized(v1)
else:
return v1
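# Illustrative: proj_orth(np.array([1.0, 1.0, 0.0]), np.array([0.0, 1.0, 0.0]))
# removes the component along the second vector, returning [1.0, 0.0, 0.0].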
def axes_to_mat(axis_x, axis_z, dominant_axis="z"):
if dominant_axis == "z":
axis_x = proj_orth(axis_x, axis_z)
elif dominant_axis == "x":
axis_z = proj_orth(axis_z, axis_x)
elif dominant_axis is None:
pass
else:
raise RuntimeError("Unrecognized dominant_axis: %s" % dominant_axis)
axis_x = axis_x / norm(axis_x)
axis_z = axis_z / norm(axis_z)
axis_y = np.cross(axis_z, axis_x)
R = np.zeros((3, 3))
R[0:3, 0] = axis_x
R[0:3, 1] = axis_y
R[0:3, 2] = axis_z
return R
# Projects R to align with the provided direction vector v.
def proj_to_align(R, v):
max_entry = max(
enumerate([np.abs(np.dot(R[0:3, i], v)) for i in range(3)]),
key=lambda entry: entry[1],
)
return axes_to_mat(R[0:3, (max_entry[0] + 1) % 3], v)
def get_world_translation(prim):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
)
return transform.GetTranslation()
def get_world_pose(prim):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
)
return transform.GetRotation()
class SensorRig:
def __init__(self, name, path) -> None:
self.__sensors = []
self.__waypoints = []
self.__curr_waypoint_id = 0
self._prim_path = path
self._prim_name = name
self._full_prim_path = f"{self._prim_path}/{self._prim_name}"
self._prim = None
self._dc = None
self._rb = None
self.start_time = 0
self.velocity = 10
self.sample_rate = 10
self._waypoints_parent = None
self.time = 0
self.sample_time_counter = 0
self._o = "[SensorRig] "
def reset(self):
self.time = 0
def ray_cast(self, origin):
# pos, _ = self.get_pos_rot()
# print(pos)
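        # Casts a ray straight down (-z) from `origin` for up to 100,000
        # stage units and returns the z position of the closest hit
        # (0 if nothing is hit).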
hit = get_physx_scene_query_interface().raycast_closest(
origin, [0, 0, -1], 100000.0
)
if hit["hit"]:
distance = hit["distance"]
print(hit["position"][2])
return hit["position"][2]
return 0
def create_rig_from_file(self, path, stage, world):
self._world = world
pos, ori = self.load_sensors_from_file(path, stage)
print(
f"{self._o} Creating sensor righ with initial position of: {pos} and rot of {ori}"
)
position = np.array([pos[0], pos[1], pos[2]])
orientation = np.array([ori[0], ori[1], ori[2], ori[3]])
self._prim = XFormPrim(
name=self._prim_name,
prim_path=self._full_prim_path,
position=position / get_stage_units(),
orientation=orientation,
)
omni.kit.commands.execute(
"AddPhysicsComponent",
usd_prim=stage.GetPrimAtPath(self._full_prim_path),
component="PhysicsRigidBodyAPI",
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path(f"{self._full_prim_path}.physxRigidBody:disableGravity"),
value=True,
prev=None,
)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
def hide_waypoints_an_rig(self):
pass
def create_rig(self, position, orientation, stage):
self._dc = _dynamic_control.acquire_dynamic_control_interface()
self._prim = XFormPrim(
name=self._prim_name,
prim_path=self._full_prim_path,
position=position / get_stage_units(),
orientation=orientation,
)
self.actual_prim = stage.GetPrimAtPath(self._full_prim_path)
self.orient_val = self.actual_prim.GetAttribute("xformOp:orient")
# collisionAPI = PhysicsRigidBodyAPI.Apply(self._prim)
omni.kit.commands.execute(
"AddPhysicsComponent",
usd_prim=stage.GetPrimAtPath(self._full_prim_path),
component="PhysicsRigidBodyAPI",
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path(f"{self._full_prim_path}.physxRigidBody:disableGravity"),
value=True,
prev=None,
)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
def apply_veloc(self, veloc, ang_veloc):
# print('applying ', veloc)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
self._dc.set_rigid_body_linear_velocity(self._rb, veloc)
x = ang_veloc
self._dc.set_rigid_body_angular_velocity(self._rb, x)
    def setup_sensor_output_path(self, path):
        print(path)
        # Create the output directory before writing anything into it.
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
        instance_mapping = helpers.get_instance_mappings()
        print(" ================== initiating mapping")
        print(instance_mapping)
        np.save(f"{path}/mapping.npy", instance_mapping, allow_pickle=True)
        self._time_stamp_file = open(path + "/timestamps.csv", "a")
for sensor in self.__sensors:
sensor.init_output_folder(path)
def add_sensor_to_rig(self, sensor):
self.__sensors.append(sensor)
self.__sensors[-1].init_sensor(self._full_prim_path)
def sample_sensors(self, n):
# print("sampling sensors")
self.time += n
self.sample_time_counter += n
        # Accumulate simulated time and sample every sensor once enough time
        # has passed to satisfy the configured sample rate (Hz).
        if self.sample_time_counter >= (1 / self.sample_rate):
# print("sampling at ", self.time)
for sensor in self.__sensors:
# print(sensor)
sensor.sample_sensor()
self._time_stamp_file.write(f"{str(self.time)}\n")
self.sample_time_counter = 0
def sample_sensors_return(self):
sensor_output = []
for sensor in self.__sensors:
sensor_output.append(sensor.sample_sensor())
return sensor_output
def sample_all_sensors(self):
for sensor in self.__sensors:
sensor.sample_sensor()
def get_pos_rot(self):
self._rb = self._dc.get_rigid_body(self._full_prim_path)
object_pose = self._dc.get_rigid_body_pose(self._rb)
return object_pose.p, object_pose.r
def initialize_waypoints(self, waypoint_parent_tag, stage):
# Reset the waypoints
self.__waypoints = []
# Get the current sensor rig position and orientation
# current_pos, current_rot = self.get_pos_rot()
# iter over the stage and get all the waypoints
# go through each child and save its tranform details to the waypoints list.
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
if "_waypoints_" in prim_ref_name:
self._waypoints_parent = prim_ref
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
self.__waypoints.append(get_world_translation(prim_child))
print(f"{self._o} SensorRig waypoints initialization complete:")
print(self.__waypoints)
def initialize_waypoints_preloaded(self, waypoints, parent_prim):
self.__waypoints = []
self.__waypoints = waypoints
self._waypoints_parent = parent_prim
print(f"{self._o} loaded waypoints from file ")
for i in range(len(self.__waypoints)):
origin = self.__waypoints[i]
z = self.ray_cast(origin)
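            # Offset the waypoint 0.7 stage units above the ray-cast ground
            # height before writing it back.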
z += 0.7
self.__waypoints[i][2] = z
print(f"{self._o} Synced waypoints to ground")
def _waypoint_update(self, pos):
print(f"{self._o} Waypoint {self.__curr_waypoint_id}/{len(self.__waypoints)}")
# Get the goal position and convert it into the correct type
# print("moving")
goal_pos = self.__waypoints[self.__curr_waypoint_id]
# goal_pos[2] = z_val
goal_pos = Gf.Vec3d(goal_pos)
ori_ = lookat_to_quatf(pos, goal_pos, Gf.Vec3d(0, 0, 1))
rot_vec = quat_to_euler_angles(ori_)
rot_float = 0.0
# Calculate the diff vector
move_vec = goal_pos - pos
distance = np.linalg.norm(goal_pos - pos)
move_vec = (move_vec / distance) * self.velocity
goal_pos_arr = np.array([[goal_pos[0], goal_pos[1], 0]])
pos_arr = np.array([[pos[0], pos[1], 0]])
ori_now = self.orient_val.Get()
rvg = rot_vec
rvc = quat_to_euler_angles(ori_now)
rot_ang = Gf.Vec3d(0, 0, rvg[2] - rvc[2])
calc = rvg[2] - rvc[2]
calc *= 57.2
x_ = rvg[0] - rvc[0]
y_ = rvg[1] - rvc[1]
rot_float = Gf.Vec3d(0, 0, calc / 5.73)
if distance < 0.5:
self.__curr_waypoint_id += 1
if self.__curr_waypoint_id >= len(self.__waypoints):
self.__curr_waypoint_id = 0
timeline = omni.timeline.get_timeline_interface()
timeline.pause()
return self._waypoint_update(pos)
return move_vec, rot_vec, rot_float
def move(self, time_step):
# timeline = omni.timeline.get_timeline_interface()
# timecode = (
# timeline.get_current_time() * timeline.get_time_codes_per_seconds()
# )
self.start_time += time_step
if len(self.__waypoints) == 0:
return
# Retrieve the current position and orientation of the sensor rig
current_pos, current_rot = self.get_pos_rot()
current_pos = Gf.Vec3d(current_pos[0], current_pos[1], current_pos[2])
# Load the correct waypoint, check if we should change to next one ..
# and then calculate the required move vector.
move_vec, rot_vec, rot_float = self._waypoint_update(current_pos)
# Apply the required veloc
self.apply_veloc(move_vec, rot_float)
def load_sensors_from_file(self, file_path, stage):
with open(file_path, "r+") as infile:
print(f"{self._o} Loading sensor rig from file at {file_path}.")
data = json.load(infile)
# print(data)
pos = data["POSITION"]
ori = data["ORIENTATION"]
self.velocity = data["VELOCITY"]
self.sample_rate = data["SAMPLE_RATE"]
self.create_rig(np.array(pos), np.asarray(ori), stage)
sensors = data["SENSORS"]
print(sensors)
for key in sensors:
if key == "LIDAR":
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
lidar = Lidar()
lidar.read_from_json(sensor_settings)
self.add_sensor_to_rig(lidar)
elif key == "CAMERA":
print("creating camera")
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
cam = DepthCamera()
cam.read_from_json(sensor_settings)
self.add_sensor_to_rig(cam)
elif key == "IMU":
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
imu = IMUSensor()
imu.read_from_json(sensor_settings)
self.add_sensor_to_rig(imu)
else:
print(" ERROR, tried adding sensor with type ", key)
return pos, ori
    def init_output_folder(self, path):
        # create any paths needed
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
        self._time_stamp_file = open(path + "/timestamps.csv", "a")
        # create any needed directories for the sensors
        for sensor in self.__sensors:
            sensor.init_output_folder(path)
"""
{"POSITION" : [0,0,0],
"ORIENTATION" : [0,0,0,0],
"SENSORS":{
"IMU":{
"instances":
{"1" :
{
"name" : "imu",
"position": [0.0, 0.0, 0.0],
"rotation" : [0.0,0.0,0.0]
}
}
},
"CAMERA" :
{"instances" :
{"1" :
{
"name" : "camera",
"focal_length": 24.0,
"focus_distance" : 400.0,
"f_stop": 0.0,
"horizontal_aperture": 20.955,
"horizontal_aperture_offset": 0,
"vertical_aperture_offset": 0,
"clipping_range": [1.0, 1000000.0],
"resolution": [1024,1024],
"position" : [0.0,0.0,0.0],
"rotation" : [0.0,0.0,0.0]
}
}
},
"LIDAR":
{"instances" :
{"1" :
{"name": 1,
"min_range": 0.4,
"max_range": 100.0,
"draw_points": false,
"draw_lines" : false,
"horizontal_fov": 360,
"vertical_fov": 60.0,
"rotation_rate": 0.0,
"horizontal_resolution": 0.4,
"vertical_resolution" : 0.4,
"high_lod":true,
"yaw_offset": 0.0,
"enable_semantics":true,
"origin_pos": [0,0,0],
"rotation" : [0.0,0.0,0.0]
}
}
}
}
}
def add_depth_camera_to_rig(
self,
position=(0, 0, 0),
rotation=(0, 0, 0),
image_size=(512, 512),
attach=True,
name='/DepthCamera',
):
self.__sensors.append(
DepthCamera(
position,
rotation,
image_size,
attach,
self._full_prim_path,
name,
)
)
def add_lidar_to_rig(self, name, origin_pos):
self.__sensors.append(
Lidar(
path=name, parent=self._full_prim_path, origin_pos=origin_pos
)
)
"""
| 17,079 | Python | 30.571164 | 94 | 0.569471 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
from .synthetic_perception import SyntheticPerception
from .synthetic_perception_extension import SyntheticPerceptionExtension
# from .sensors import Lidar
| 765 | Python | 33.81818 | 76 | 0.783007 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/setup.py | from setuptools import setup, find_packages
setup(
name = 'your_package_name',
packages = find_packages(),
) | 116 | Python | 22.399996 | 43 | 0.698276 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/synthetic_perception_extension.py | """
This class and its functions handle the UI/UX side of the extension.
All UI elements are collected and managed here and call their respective functions in the main
SyntheticPerception sample.
"""
import time
import random
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.physx import acquire_physx_interface
import os
from pxr import Usd, Gf, Ar, Pcp, Sdf, UsdRi, UsdGeom, UsdPhysics
from pxr import UsdShade, Sdf
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.kit.window.popup_dialog import FormDialog
import asyncio
import omni.ui as ui
from omni.isaac.ui.ui_utils import (
btn_builder,
int_builder,
float_builder,
dropdown_builder,
combo_floatfield_slider_builder,
str_builder,
xyz_builder,
) # , str_builder
from omni.isaac.ui import (
FloatField,
CheckBox,
StateButton,
DropDown,
StringField,
Button,
CheckBox,
)
from omni.isaac.core import SimulationContext
from .PCG.WorldGenerator import WorldManager
from .synthetic_perception import SyntheticPerception
import omni
import json
from omni.isaac.core.utils.stage import (
update_stage,
add_reference_to_stage,
is_stage_loading,
update_stage_async,
)
from omni.isaac.core.utils.prims import define_prim, delete_prim
from .PCG.MeshGenerator import MeshGen
# from .Utils.EnvDataTool.EnvCreator import EnvTool
import open3d as o3d
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import omni.kit.asset_converter
import carb
from omni.kit.window.popup_dialog.dialog import PopupDialog
class SelectedPrim:
def __init__(self) -> None:
self.prim = None
self.prim_path = None
self.object_scale = 1
self.object_scale_delta = 0
self.allow_y_rot = False
self.unique_id = ''
self.usd_path = ''
self.class_name = ''
self.posson_size = 1
def get_y_rot_state(self):
if self.allow_y_rot:
return 'Enabled'
return 'Disabled'
def __str__(self) -> str:
return f'prim: {self.prim} \n prim_path: {self.prim_path}\n Object Scale: {self.object_scale}\n \
object scale delta: {self.object_scale_delta}\n allow y rot: {self.allow_y_rot}\n usdpath: {self.usd_path}\n unique_id: {self.unique_id}'
class SyntheticPerceptionExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name='ExtensionName',
submenu_name='',
name='Synthetic perception',
title='Synthetic Perception',
doc_link='https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html',
overview="This extension provides tools to both generate procedural environments within Isaac as well as capturing and saving sensor data. This also includes waypoint movement and other such movement types.",
sample=SyntheticPerception(),
file_path=os.path.abspath(__file__),
number_of_extra_frames=12,
window_width=700,
)
self.object_data_save_path = ''
self.task_ui_elements = {}
self.world_gen_ui_elements = {}
self.usd_context = omni.usd.get_context()
self.selected_prim = SelectedPrim()
self.selected_prim_dict = {}
self._object_selector = False
self.prim = None
self._object_path = ''
self._world_path = ''
self.mm = False
self.OBJECT_EDITING_ALLOWED = False
# frame = self.get_frame(index=0)
# self.build_task_controls_ui(frame)
# frame = self.get_frame(index=0)
# self.build_sensor_ui(frame)
# frame = self.get_frame(index=1)
# self.build_worldgen_ui(frame)
self._window.visible = True
frame = self.get_frame(index=0)
self.build_pcg_env_ui(frame)
self._window.visible = True
frame = self.get_frame(index=1)
self.build_sensor_rig_ui(frame)
frame = self.get_frame(index=2)
self.setup_worldgen_ui(frame)
self.events = self.usd_context.get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self._get_obj_details, name='Object Info Selection Update'
)
def on_stage_event(self, event):
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
prim_path = (
self.usd_context.get_selection().get_selected_prim_paths()
)
if not prim_path:
return
def shutdown_cleanup(self):
self.sample.remove_all_objects()
def build_popup(self, errors):
        message_out = ''.join([err + '\n' for err in errors])
dialog = FormDialog(
title='ERROR',
message=message_out,
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
def build_sensor_rig_ui(self, frame):
self.build_sensor_rig_ui_values = {}
self.build_sensor_rig_ui_values['RigPath'] = ''
self.build_sensor_rig_ui_values['WaypointPath'] = ''
self.build_sensor_rig_ui_values['MovementType'] = None
self.build_sensor_rig_ui_values['OutputSavePath'] = ''
self.sample.setup_scene()
def has_missing_inputs_init_rig():
errors = []
if self.build_sensor_rig_ui_values['RigPath'] == '':
errors.append('No Sensor Rig path supplied')
if self.build_sensor_rig_ui_values['OutputSavePath'] == '':
errors.append('No output path supplied')
# Check if both files exist
if not self._check_file_exists(
self.build_sensor_rig_ui_values['RigPath']
):
errors.append(
'Sensor rig parameter file invalid or does not exist.'
)
if len(errors) != 0:
self.build_popup(errors)
return True
return False
def has_missing_inputs_wp():
errors = []
if self.build_sensor_rig_ui_values['WaypointPath'] == '':
errors.append('No waypoint path supplied')
# Check if both files exist
if not self._check_file_exists(
self.build_sensor_rig_ui_values['WaypointPath']
):
errors.append('Waypoint file is not valid or does not exist.')
if len(errors) != 0:
self.build_popup(errors)
return True
return False
async def init_rig_and_waypoints():
# await asyncio.ensure_future(self.sample.init_world())
self.sample.init_sensor_rig_from_file(
self.build_sensor_rig_ui_values['RigPath'],
self.build_sensor_rig_ui_values['OutputSavePath'],
)
def load_sensor_rig_from_path():
if has_missing_inputs_init_rig():
return
asyncio.ensure_future(init_rig_and_waypoints())
stage = omni.usd.get_context().get_stage()
parent = stage.GetPrimAtPath('/_WAYPOINTS_')
if not parent:
# parent = XFormPrim(
# name="_WAYPOINTS_",
# prim_path = "/"
# )
parent = define_prim('/_WAYPOINTS_', 'Xform')
cube_prim = stage.GetPrimAtPath('/_WAYPOINTS_/w_01')
if not cube_prim:
cube_prim = stage.DefinePrim('/_WAYPOINTS_/w_01', 'Cube')
UsdGeom.Xformable(cube_prim).AddTranslateOp().Set((0.0, 0.0, 0.0))
def update_sensor_rig_path(val):
self.build_sensor_rig_ui_values['RigPath'] = val
def update_rig_movement_type(val):
self.build_sensor_rig_ui_values['MovementType'] = val
def update_waypoint_path(val):
self.build_sensor_rig_ui_values['WaypointPath'] = val
def load_waypoints_intermediate():
asyncio.ensure_future(load_waypoints())
async def load_waypoints():
if has_missing_inputs_wp():
return
# self.sample.force_reload()
await asyncio.ensure_future(self.sample._on_load_world_async())
# await asyncio.ensure_future(self.sample.init_world())
# print(self.sample._world.GetAttributes())
# print(self.sample._world.__dir__())
stage = omni.usd.get_context().get_stage()
# Add a physics scene prim to stage
scene = UsdPhysics.Scene.Define(
stage, Sdf.Path('/World/physicsScene')
)
stage = omni.usd.get_context().get_stage()
if not self.build_sensor_rig_ui_values['WaypointPath']:
dialog = FormDialog(
title='ERROR No path',
message='No waypoint file was given. Not saving - please input a save path.',
)
return
with open(
self.build_sensor_rig_ui_values['WaypointPath'], 'r'
) as fh:
json_data = json.load(fh)
# print('Trying to load waypoints')
# print(json_data)
initial_prim_path = '/_WAYPOINTS_'
prim_check= stage.GetPrimAtPath(initial_prim_path)
parent = prim_check
if not prim_check:
parent = define_prim('/_WAYPOINTS_', 'Xform')
initial_prim_wp = '/_WAYPOINTS_/w_01'
prim_check = stage.GetPrimAtPath(initial_prim_wp)
if prim_check:
delete_prim(initial_prim_path)
for i, c in enumerate(json_data):
# parent = define_prim('/_WAYPOINTS_', 'Xform')
cube_prim = stage.DefinePrim(
'/_WAYPOINTS_/w_{:02d}'.format(i + 1), 'Cube'
)
UsdGeom.Xformable(cube_prim).AddTranslateOp().Set(
Gf.Vec3d(c)
)
self.sample.sr.initialize_waypoints_preloaded(json_data,stage.GetPrimAtPath("/_WAYPOINTS_"))
self.sample._world.add_physics_callback(
'sim_step', callback_fn=self.sample.sr.move
)
self.sample.attach_sensor_sample_callback()
def update_output_save_path(val):
self.build_sensor_rig_ui_values['OutputSavePath'] = val
def save_waypoints():
if not self.build_sensor_rig_ui_values['WaypointPath']:
dialog = FormDialog(
title='ERROR No path',
message='No waypoint file was given. Not saving - please input a save path.',
)
return
stage = omni.usd.get_context().get_stage()
waypoints = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
if '_WAYPOINTS_' in prim_ref_name:
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
translate = prim_child.GetAttribute(
'xformOp:translate'
).Get()
waypoints.append(
[translate[0], translate[1], translate[2]]
)
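            # The waypoint file is a plain JSON list of [x, y, z] positions,
            # e.g. [[0.0, 0.0, 0.7], [5.0, 0.0, 0.7]] (illustrative values).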
with open(
self.build_sensor_rig_ui_values['WaypointPath'], 'w'
) as fh:
json.dump(waypoints, fh, indent=1)
def run():
def run_step_temp(n):
curr_time = time.time() - self.minhan_timer_start
# print(curr_time)
if curr_time > 2:
self.minhan_timer_start = time.time()
scale = random.uniform(1.0,10.0)
self.plane_prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d(scale,scale,scale))
# self.sample.sr.hide_waypoints_an_rig()
self.minhan_timer_start = 0
stage = omni.usd.get_context().get_stage()
self.plane_prim = stage.GetPrimAtPath("/World/Plane")
# print(plane_prim)
# print(plane_prim.GetAttributes())
self.plane_prim.GetAttribute('physics:angularVelocity').Set(Gf.Vec3d(5.0,5.0,5.0))
self.sample._world.add_physics_callback(
'demo_step', callback_fn=run_step_temp
)
def sample():
print("trying to sample")
self.sample.sr.sample_all_sensors()
self._sensor_rig_ui_inputs = {}
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Sensor Rig'
frame.visible = True
self._sensor_rig_ui_inputs['RigPath'] = StringField(
'Sensor Rig settings path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_sensor_rig_path,
)
self._sensor_rig_ui_inputs['OutputSavePath'] = StringField(
'Output path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_output_save_path,
)
self._sensor_rig_ui_inputs['LoadRig'] = Button(
'Load sensor rig',
'Load',
on_click_fn=load_sensor_rig_from_path,
)
self._sensor_rig_ui_inputs['MovementType'] = DropDown(
'Movement Type: ', on_selection_fn=update_rig_movement_type
)
self._sensor_rig_ui_inputs['MovementType'].set_items(
['WAYPOINT', 'KEYBOARD']
)
self._sensor_rig_ui_inputs['WaypointPath'] = StringField(
'Waypoints path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_waypoint_path,
)
self._sensor_rig_ui_inputs['LoadWaypoints'] = Button(
'Load & attach waypoints',
'Load',
on_click_fn=load_waypoints_intermediate,
)
self._sensor_rig_ui_inputs['SaveWaypoints'] = Button(
'Save waypoints',
'Save',
on_click_fn=save_waypoints,
)
# self._sensor_rig_ui_inputs['Run Simulation'] = Button(
# 'Run Simulation',
# 'Run',
# on_click_fn=run_sim,
# )
self._sensor_rig_ui_inputs['run'] = Button(
'Minghan run',
'run',
on_click_fn=run,
)
self._sensor_rig_ui_inputs['sample'] = Button(
'Sample sensors',
                'sample',
on_click_fn=sample,
)
def init_semantics_in_scene(self):
self.sample.init_semantics_in_scene()
def _rebuild_update(self, e):
if str(e) == 'Manual':
self.mm = True
if str(e) == 'Waypoints':
self.mm = False
print(self.mm)
return e
def update_scale(self, val):
if self.prim and val > 0:
_ = self.prim.GetAttribute('xformOp:scale').Set(
Gf.Vec3d([val, val, val])
)
self.selected_prim.object_scale = val
self.selected_prim_dict[self.current_path].object_scale = val
def update_scale_delta(self, val):
if self.prim and val > 0:
# update the local info
# _ = self.prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d([val,val,val]))
self.selected_prim.object_scale_delta = val
self.selected_prim_dict[self.current_path].object_scale_delta = val
def update_poisson_size(self, val):
if self.prim and val > 0:
# update the local info
# _ = self.prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d([val,val,val]))
self.selected_prim_dict[self.current_path].posson_size = val
def update_yrot(self, val):
if self.prim and val != 'Not Selected':
enable_y_rot = False
if (
val == 'Enabled'
): # self.world_gen_ui_elements["AllowYRot"].get_selected() == "Enabled":
enable_y_rot = True
self.selected_prim.allow_y_rot = enable_y_rot
self.selected_prim_dict[
self.current_path
].allow_y_rot = enable_y_rot
def prim_name_update(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].unique_id = val
def class_name_update(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].class_name = val
def update_usd_path(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].usd_path = val
def save_path_update(self, val):
self.object_data_save_path = val
def _true(self, val):
return True
def save_object_data_to_file(self):
def where_json(file_name):
return os.path.exists(file_name)
# Guard statement to catch no save path
if self.object_data_save_path == '':
dialog = FormDialog(
title='ERROR No path',
message='No save file was given. Not saving - please input a save path.',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
if '.json' not in self.object_data_save_path:
dialog = FormDialog(
title='ERROR no specific file',
message='No save file was given. Not saving - please input a save path with a filename and the .json extension.',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
if self.selected_prim_dict[self.current_path].usd_path == '':
dialog = FormDialog(
title='ERROR no usd path',
message='No USD path was specified. This is required and must exist!',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
# Check if file exists at path
# print('Attempting to edit or create the save obj file')
        data = {}
        try:
            with open(self.object_data_save_path, 'r') as infile:
                data = json.load(infile)
        except (OSError, json.JSONDecodeError):
            # Start from an empty dictionary if the file is missing or does
            # not yet contain valid JSON.
            pass
selected = self.selected_prim_dict[self.current_path]
with open(self.object_data_save_path, 'w+') as outfile:
specific_data = {
'object_scale': selected.object_scale,
'object_scale_delta': selected.object_scale_delta,
'poisson_size': selected.posson_size,
'allow_y_rot': selected.allow_y_rot,
'class_name': selected.class_name,
'usd_path': selected.usd_path,
}
data[selected.unique_id] = specific_data
# data[local_selected.unique_id]=
json.dump(data, outfile)
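            # The resulting file maps each unique_id to its spawn parameters,
            # for example (illustrative values only):
            # {"rock_01": {"object_scale": 1.0, "object_scale_delta": 0.2,
            #              "poisson_size": 1.0, "allow_y_rot": true,
            #              "class_name": "rock", "usd_path": ".../rock.usd"}}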
def setup_worldgen_ui(self, frame):
def test_check(val):
self.OBJECT_EDITING_ALLOWED = val
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Object set up'
frame.visible = True
self.world_gen_ui_elements['toggle'] = CheckBox(
'Object setup mode', on_click_fn=test_check
)
# print(self.world_gen_ui_elements["toggle"].__dir__())
self.world_gen_ui_elements['SavePath'] = StringField(
'SavePath',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self.save_path_update,
)
self.world_gen_ui_elements['PrimName'] = StringField(
'Unique Name',
'None',
read_only=False,
on_value_changed_fn=self.prim_name_update,
)
self.world_gen_ui_elements['PrimName'].set_value('None')
self.world_gen_ui_elements['ClassName'] = StringField(
'Class Name',
'None',
read_only=False,
on_value_changed_fn=self.class_name_update,
)
self.world_gen_ui_elements['SelectedObjScale'] = FloatField(
'Object Scale',
default_value=1.0,
on_value_changed_fn=self.update_scale,
)
self.world_gen_ui_elements[
'SelectedObjScaleDelta'
] = FloatField(
'Object Scale Delta +/-',
on_value_changed_fn=self.update_scale_delta,
)
self.world_gen_ui_elements['PoissonSize'] = FloatField(
'Poisson Point Size',
default_value=1.0,
on_value_changed_fn=self.update_poisson_size,
)
# self.world_gen_ui_elements["AllowYRot"] = CheckBox("Allow Y-axis rotation", default_value = False, on_click_fn=self.update_yrot)
self.world_gen_ui_elements['AllowYRot'] = DropDown(
'Allow Y-axis rotation', on_selection_fn=self.update_yrot
)
self.world_gen_ui_elements['AllowYRot'].set_items(
['Not Selected', 'Enabled', 'Disabled']
)
self.world_gen_ui_elements['USDPath'] = StringField(
'USD Path',
use_folder_picker=True,
on_value_changed_fn=self.update_usd_path,
)
self.world_gen_ui_elements['SAVE'] = Button(
'Save this object to file',
'SAVE',
on_click_fn=self.save_object_data_to_file,
)
self.prim = None
self.position = [0, 0, 0]
def _get_obj_details(self, event):
# if not self._object_selector:
# return
if not self.OBJECT_EDITING_ALLOWED:
return
prim_path = self.usd_context.get_selection().get_selected_prim_paths()
        self.world_gen_ui_elements['SavePath'].set_value(self.object_data_save_path)
if not prim_path:
for key in self.world_gen_ui_elements:
if type(self.world_gen_ui_elements[key]) == FloatField:
self.world_gen_ui_elements[key].set_value(0)
if type(self.world_gen_ui_elements[key]) == DropDown:
self.world_gen_ui_elements[key].set_selection(
'Not Selected'
)
if type(self.world_gen_ui_elements[key]) == StringField:
self.world_gen_ui_elements[key].set_value('')
return
stage = self.usd_context.get_stage()
prim = stage.GetPrimAtPath(prim_path[0])
self.prim = prim
self.current_path = prim_path[0]
# Check if the prim exists in our current dictionary
if self.current_path not in self.selected_prim_dict:
# Create the entry
self.selected_prim_dict[self.current_path] = SelectedPrim()
# This entry should now exist so we can use it.
self.selected_prim.prim = prim
self.selected_prim.prim_path = prim_path[0]
# print('prim: ' + str(prim), " ", self.prim.GetAttributes())
obj_scale = self.prim.GetAttribute('xformOp:scale').Get()
# self.selected_prim.object_scale = sum(obj_scale) / len(obj_scale)
self.selected_prim_dict[self.current_path].object_scale = sum(
obj_scale
) / len(obj_scale)
if (
self.selected_prim_dict[self.current_path].unique_id == ''
or self.selected_prim_dict[self.current_path].unique_id == 'None'
):
self.selected_prim_dict[
self.current_path
].unique_id = self.current_path.split('/')[-1]
self.world_gen_ui_elements['PrimName'].set_value(
self.selected_prim_dict[self.current_path].unique_id
)
self.world_gen_ui_elements['ClassName'].set_value(
self.selected_prim_dict[self.current_path].class_name
)
self.world_gen_ui_elements['SelectedObjScale'].set_value(
self.selected_prim_dict[self.current_path].object_scale
)
self.world_gen_ui_elements['SelectedObjScaleDelta'].set_value(
self.selected_prim_dict[self.current_path].object_scale_delta
)
self.world_gen_ui_elements['PoissonSize'].set_value(
self.selected_prim_dict[self.current_path].posson_size
)
self.world_gen_ui_elements['AllowYRot'].set_selection(
self.selected_prim_dict[self.current_path].get_y_rot_state()
)
self.world_gen_ui_elements['USDPath'].set_value(
self.selected_prim_dict[self.current_path].usd_path
)
def _update_object_path(self, val):
if val != '':
self._object_path = val
def _update_world_path(self, val):
if val != '':
self._world_path = val
def _check_file_exists(self, path):
try:
with open(path, 'r+') as infile:
return True
except:
return False
def _run_world_creation(self):
# (
# obs_to_spawn,
# object_dict,
# height_map,
# ) = self.sample.generate_world_generator(
# 'C:\\Users\\jonem\\Desktop\\worlddata2.json',
# 'C:\\Users\\jonem\\Desktop\\new_objects_save.json',
# )
#
# asyncio.ensure_future(self.sample._on_load_world_async())
# asyncio.ensure_future(
# self.sample.spawn_all(obs_to_spawn, object_dict, height_map)
# )
# return
errors = []
if self._object_path == '':
errors.append('No Object path specified.')
if '.json' not in self._object_path:
errors.append('Object path does not contain .json extension.')
if self._world_path == '':
errors.append('No world path environment file was specified.')
if '.json' not in self._world_path:
            errors.append('World path does not contain .json extension.')
# Check if both files exist
if not self._check_file_exists(self._object_path):
errors.append('Object path file specified does not exist.')
if not self._check_file_exists(self._world_path):
errors.append('World path file specified does not exist.')
if len(errors) != 0:
            message_out = ''.join([err + '\n' for err in errors])
dialog = FormDialog(
title='ERROR',
message=message_out,
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
print("Starting world gen")
WG = WorldManager()
WG.create_world(self._world_path, self._object_path)
print("world creation finished")
return
# (
# obs_to_spawn,
# object_dict,
# height_map,
# normals
# ) = self.sample.generate_world_generator(
# self._world_path, self._object_path
# )
# print("Starting obj spawn")
# asyncio.ensure_future(self.sample._on_load_world_async())
# asyncio.ensure_future(
# self.sample.spawn_all(obs_to_spawn, object_dict, height_map, normals)
# )
def build_pcg_env_ui(self, frame):
def open_world_creator():
pass
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Generate World Set up'
frame.visible = True
self.world_gen_ui_elements['RunCreateTool'] = Button(
'Open the world creator tool',
'Open',
on_click_fn=open_world_creator,
)
self.world_gen_ui_elements['ObjectsPath'] = StringField(
'Objects Path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self._update_object_path,
)
self.world_gen_ui_elements['WorldPath'] = StringField(
'World Path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self._update_world_path,
)
self.world_gen_ui_elements['SAVE'] = Button(
'Initialize world generation',
'Create World',
on_click_fn=self._run_world_creation,
)
self.world_gen_ui_elements['InitSemantics'] = Button(
'Initialize semantics and physics (must do)',
'Initialize',
on_click_fn=self.init_semantics_in_scene,
)
# async def material_test(self):
#
# shape = (256, 256)
# threshold = 0.5
# region_value = 1
# # Convert to pymeshlab mesh
# l = shape[0] * 10 # 2560
# data = generate_perlin_noise_2d(shape, (8, 8))
# data = (data - np.min(data)) / (np.max(data) - np.min(data))
# data[data < threshold] = 0
# data[data >= threshold] = region_value
# mGen = MeshGen(
# 256,
# 10,
# data,
# 'C:/Users/jonem/Documents/Kit/apps/Isaac-Sim/exts/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG',
# )
# mGen.generate_terrain_mesh()
# return
# # asyncio.ensure_future(self.sample.init_world())
# print(' =========================== ')
# mat_path = 'http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Natural/Dirt.mdl'
# prim_path = '/World/mesh_1'
# mat = '/World/Looks/Dirt'
#
# stage = omni.usd.get_context().get_stage()
# obj_prim = stage.GetPrimAtPath(prim_path)
# mat_name = 'Dirt'
# # omni.kit.commands.execute(
# # 'CreateMdlMaterialPrimCommand',
# # mtl_url=mat_path,
# # mtl_name=f'{mat_name}',
# # mtl_path=f'/World/Looks/{mat_name}',
# # )
#
# # omni.kit.commands.execute(
# # 'CreateMdlMaterialPrimCommand',
# # mtl_url=mat_path,
# # mtl_name=f'{mat_name}',
# # mtl_path=f'/World/Looks/{mat_name}',
# # )
# #
# # # update_stage()
# # _ = omni.kit.commands.execute(
# # 'BindMaterialCommand',
# # prim_path=prim_path,
# # material_path=f'/World/Looks/{mat_name}',
# # )
# mtl_created_list = []
#
# omni.kit.commands.execute(
# 'CreateAndBindMdlMaterialFromLibrary',
# mdl_name=mat_path,
# mtl_name=mat_name,
# mtl_created_list=mtl_created_list,
# )
#
# mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
#
# omni.usd.create_material_input(
# mtl_prim,
# 'project_uvw',
# True,
# Sdf.ValueTypeNames.Bool,
# )
#
# omni.usd.create_material_input(
# mtl_prim,
# 'texture_scale',
# Gf.Vec2f(0.001, 0.001),
# Sdf.ValueTypeNames.Float2,
# )
# cube_mat_shade = UsdShade.Material(mtl_prim)
#
# UsdShade.MaterialBindingAPI(obj_prim).Bind(
# cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
# )
# return
#
# # Set material inputs, these can be determined by looking at the .mdl file
#
# # or by selecting the Shader attached to the Material in the stage window and looking at the details panel
#
# print('wait')
# await update_stage_async()
# print('continue')
# update_stage()
# while is_stage_loading():
# await update_stage_async()
#
# stage = omni.usd.get_context().get_stage()
# p = stage.GetPrimAtPath(f'{mat}/Shader')
# not_set = False
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# print('wait')
# await update_stage_async()
# print('continue')
# update_stage()
# while is_stage_loading():
# await update_stage_async()
# # while not not_set:
# # try:
# # material_attributes = p.GetAttributes()
# # p.GetAttribute('inputs:project_uvw').Set(True)
# # not_set = True
# # print("success: ", _)
# # except:
# #
# # print("failure: ", _)
# # await update_stage_async()
# #
#
# material_attributes = p.GetAttributes()
# p.GetAttribute('inputs:project_uvw').Set(True)
# p.GetAttribute('inputs:texture_scale').Set((0.001, 0.001))
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
# def build_task_controls_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'Sensor Controls'
# frame.visible = True
#
# self.add_button_title(
# 'Attach Sys To Scene', 'Attach', self._loadtest
# )
# self.add_button_title(
# 'Init waypoints & attach', 'Attach', self._testRigWaypoint
# )
#
# # self.add_button('veloc', self._save_lidar_info_event)
# # self.task_ui_elements['veloc'].enabled = True
#
# self.add_button('sample sensors', self._on_sample_sensors)
# self.task_ui_elements['sample sensors'].enabled = True
# # self.add_string_field("test", self._empty_func)
#
# self.add_button('init_world', self.ui_init_world)
# self.task_ui_elements['init_world'].enabled = True
#
# self.add_button(
# 'load_sensors', self.test_load_sensors_from_file
# )
# self.task_ui_elements['load_sensors'].enabled = True
# # OTHER UI NEEDED
# # load sensor rig
# # ^ let the above handle waypoints and everything
#
# # self.add_button('init_semantics', self.ui_init_semantics)
# # self.task_ui_elements['init_semantics'].enabled = True
# # self.add_button('area gen test', self._empty_func)
# # self.task_ui_elements['area gen test'].enabled = True
# def build_sensor_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'Sensors'
# frame.visible = True
# self.task_ui_elements['movement_mode'] = dropdown_builder(
# items=['Waypoints', 'Manual', 'Linear'],
# on_clicked_fn=self._rebuild_update,
# )
# self.task_ui_elements['movement_speed'] = int_builder(
# 'move speed'
# )
# def add_button(self, label, on_clicked_fn):
# """Adds a button to the task frame"""
# dict = {
# 'label': label,
# 'type': 'button',
# 'text': label,
# 'tooltip': label,
# 'on_clicked_fn': on_clicked_fn,
# }
#
# self.task_ui_elements[label] = btn_builder(**dict)
# self.task_ui_elements[label].enabled = False
# async def ini(self):
# await asyncio.ensure_future(self.sample.init_world())
# self.sample.init_sensor_rig_from_file()
#
# stage = omni.usd.get_context().get_stage()
# self.sample.sr.initialize_waypoints('', stage)
# print('Attach move to callback')
# self.sample.attach_sensor_waypoint_callback(self.sample.sr)
# def _add_to_scene_event(self):
# self.sample.init_sensor_and_semantics()
#
# def _on_load_scene_button_event(self):
# self._add_to_scene_event()
# def build_worldgen_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'World Gen'
# frame.visible = True
# self.add_button('init_world', self.ui_init_world)
# self.task_ui_elements['init_world'].enabled = True
#
# self.add_button('init_semantics', self.ui_init_semantics)
# self.task_ui_elements['init_semantics'].enabled = True
# # self.add_button('area gen test', self._empty_func)
# # self.task_ui_elements['area gen test'].enabled = True
| 39,562 | Python | 35.430018 | 220 | 0.518048 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/worldUtils.py |
import open3d as o3d
import numpy as np
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import colorsys
import json
import asyncio
import numpy.typing as npt
class ObjectPrim:
def __init__(
self,
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
) -> None:
self.object_scale = scale
self.object_scale_delta = scale_delta
self.allow_y_rot = y_rot
self.unique_id = u_id
self.usd_path = usd_path
self.class_name = class_name
self.poisson_size = poisson_size
def __str__(self) -> str:
return f"""
{self.unique_id}
scale: {self.object_scale} +/- {self.object_scale_delta}
allow y rot: {self.allow_y_rot}
poisson size: {self.poisson_size}
class name: {self.class_name}
usd path: {self.usd_path}
"""
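# Illustrative construction (all values are assumptions, not taken from any
# shipped object file):
#   rock = ObjectPrim(scale=1.0, scale_delta=0.2, y_rot=True, u_id='rock_01',
#                     usd_path='omniverse://localhost/Assets/rock.usd',
#                     class_name='rock', poisson_size=1.0)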
class TerrainPrim:
def __init__(self, mesh_path, mat_path, scale=0.001) -> None:
self.mesh_path = mesh_path
self.material_path = mat_path
self.scale = scale
class WorldHandler:
def __init__(self, world_path, object_path) -> None:
# self.objects = []
self.objects_dict = {}
self._object_path = object_path
self._world_path = world_path
self.objects_to_spawn = {}
self._WORLD_TO_POISSON_SCALE = 1.6
def _read_objects(self):
with open(self._object_path, 'r+') as infile:
data = json.load(infile)
# print(data)
for key in data:
scale = data[key]['object_scale']
scale_delta = data[key]['object_scale_delta']
y_rot = data[key]['allow_y_rot']
u_id = key
usd_path = data[key]['usd_path']
class_name = data[key]['class_name']
poisson_size = data[key]['poisson_size']
tmp = ObjectPrim(
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
)
# self.objects.append(tmp)
self.objects_dict[u_id] = tmp
# for i in self.objects:
# print(i)
def _read_world(self):
# print("here")
self.objects_to_spawn = {}
data = None
objs_per_region = {}
with open(self._world_path, 'r+') as infile:
data = json.load(infile)
        if data is not None:
n = data['size']
arr = np.zeros((n, n))
total_arr = np.zeros((n, n))
regions = data['regions']
terrain_info = {}
# print( " == ", np.unique(total_arr))
for region_id in regions:
region_id = str(region_id)
terrain_info[region_id] = TerrainPrim(
'',
regions[region_id]['material_path'],
regions[region_id]['material_scale'],
)
# print("terrrain info key type ", type(region_id))
new_arr = PerlinNoise.generate_region2(
seed=int(region_id),
shape=(n, n),
threshold=float(regions[region_id]['threshold']),
show_plot=False,
region_value=int(region_id),
)
arr = append_to_area(arr, new_arr, int(region_id))
total_arr = arr
# handle objects in the zone
objs = regions[region_id]['objects']
objs_per_region[region_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[str(obj_uid)]
objs_per_region[region_id].append(object_prim)
# now we need to deal with sub zones
zones = regions[region_id]['zones']
for zone_id in zones:
terrain_info[str(zone_id)] = TerrainPrim(
'',
zones[zone_id]['material_path'],
zones[zone_id]['material_scale'],
)
new_arr = PerlinNoise.generate_region2(
seed=int(zone_id),
shape=(n, n),
threshold=float(zones[zone_id]['threshold']),
show_plot=False,
region_value=int(zone_id),
)
zone_to_save = append_inside_area(
arr, new_arr, int(zone_id)
)
# print("zone == ", zone_id, " ", zone_id)
total_arr = zone_to_save
objs = zones[zone_id]['objects']
objs_per_region[zone_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[obj_uid]
objs_per_region[zone_id].append(object_prim)
for key in objs_per_region:
obs = objs_per_region[key]
if len(obs) > 0:
for obj in obs:
area, coords = fill_area(
total_arr,
obj.poisson_size / self._WORLD_TO_POISSON_SCALE,
int(key),
999,
)
self.objects_to_spawn[obj.unique_id] = coords
return total_arr, n, terrain_info
| 6,051 | Python | 33.582857 | 76 | 0.455627 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/WorldGenerator.py | import asyncio
import random
import omni
import numpy as np
from .AreaMaskGenerator import generate_world_from_file
from omni.isaac.core.utils.semantics import get_semantics
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from pxr import Usd, Gf
from omni.isaac.core.utils.stage import (
add_reference_to_stage,
is_stage_loading,
update_stage_async,
update_stage,
)
from pxr import UsdShade, Sdf
class WorldManager:
def __init__(self) -> None:
self.__undefined_class_string = "NAN"
self.occupancy = []
self._o = "[World generator] "
def add_semantic(self, p, prim_class):
"""Adds semantic to prim"""
sem_dict = get_semantics(p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(p)
if 'Semantics' not in sem_dict:
# print(
# 'adding semantics and collider to ',
# p.GetPrimPath(),
# ' of class ',
# prim_class,
# )
sem = Semantics.SemanticsAPI.Apply(p, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(prim_class)
def __add_semantics_to_all2(self, stage):
"""Add semantic information to all prims on stage based on parent xform"""
prim_class = self.__undefined_class_string
completed_classes = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
len_of_prim = len(prim_ref_name.split('/'))
for word in prim_ref_name.split('/'):
if 'class' in word and word not in completed_classes:
prim_class = word
# self.add_semantic(prim_ref, prim_class)
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
len_of_child = len(
str(prim_child.GetPrimPath()).split('/')
)
# print(len_of_prim, ' : ', len_of_child)
if abs(len_of_prim - len_of_child) == 1:
# print(prim_child)
self.add_semantic(prim_child, prim_class)
completed_classes.append(prim_class)
def spawn_asset(
self,
asset_path,
class_name,
prim_name,
x,
y,
z,
scale,
object_scale_delta,
allow_rot,
orign_p_name = "",
override=False,
rot = (0,0,0),
):
prim_path = '/World/' + 'class_' + class_name + '/' + prim_name
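        # Assets are grouped under '/World/class_<class_name>' so the
        # semantic labeller can derive each child's class from its parent
        # xform name.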
# if not override:
add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(prim_path)
# prim.GetReferences().AddReference(assetPath=asset_path, primPath=prim_path)
prim.SetInstanceable(True)
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(class_name)
# omni.kit.commands.execute('CopyPrim',
# path_from=orign_p_name,
# path_to=prim_path,
# duplicate_layers=False,
# combine_layers=False,
# exclusive_select=False,
# flatten_references=False,
# copy_to_introducing_layer=False)
# here we want to modify the scale
low_lim = scale - object_scale_delta
high_lim = scale + object_scale_delta
scale = random.uniform(low_lim, high_lim) #/ 100
random_rotation = 0
if allow_rot:
random_rotation = random.uniform(0, 360)
# omni.kit.commands.execute('CreatePayloadCommand',
# usd_context=omni.usd.get_context(),
# path_to=Sdf.Path(prim_path),
# asset_path=asset_path,
# instanceable=True)
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=prim_path, # f"/World/{p_name}",
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
old_rotation_euler=Gf.Vec3f(0, 0, 0),
old_rotation_order=Gf.Vec3i(0, 1, 2),
new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=prim_path, # f"/World/{p_name}",
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
old_rotation_euler=Gf.Vec3f(0, 0, 0),
old_rotation_order=Gf.Vec3i(0, 1, 2),
new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
def spawn_loop(
self,
path,
class_name,
p_name,
coll,
height_map,
scale=1,
object_scale_delta=0,
allow_rot=True,
):
for i, n in enumerate(coll):
override=False
# if i == 1:
#
# prim_path = '/World/' + 'class_' + class_name + '/' + p_name
#
# add_reference_to_stage(usd_path=path, prim_path=prim_path)
#
# override=True
x, y = n
x = float(x)
y = float(y)
mesh_scale = 10
x_ind = x * mesh_scale
y_ind = y * mesh_scale
mesh_height_modifier = 10
# if x_ind >= 2560:
# print('x, overfilled', x_ind)
# x_ind = 2559
# if y_ind >= 2560:
#
# print('y, overfilled', y_ind)
# y_ind = 2559
z = float(height_map[int(y_ind/10)][int(x_ind/10)])  # / mesh_height_modifier  # was abs
cc = (int(y_ind/10), int(x_ind/10))
ind = np.ravel_multi_index(cc, (len(height_map), len(height_map)))
# print(np.asarray(self.t_normals))
poss_rot = np.asarray(self.t_normals)[ind]
# print("triangle normals")
# print(poss_rot)
# the second index is iterated fastest
if self.occupancy[int(y_ind/10)][int(x_ind/10)] != 0:
# print("skipping oj spawn")
continue
self.occupancy[int(y_ind/10)][int(x_ind/10)] = 1
_p_name = f'{p_name}_{i}'
self.spawn_asset(
path,
class_name,
_p_name,
x,
y,
z,
scale,
object_scale_delta,
allow_rot,
override = override,
origin_p_name = p_name,
rot = poss_rot
)
def create_terrains(self, terrain_info):
# create the parent
omni.kit.commands.execute(
'CreatePrimWithDefaultXform',
prim_type='Xform',
prim_path='/World/t',
attributes={},
select_new_prim=True,
)
for key in terrain_info:
mesh_path = terrain_info[key].mesh_path
scale = terrain_info[key].scale
mat_path = terrain_info[key].material_path
mat_name = mat_path.split('/')[-1]
mat_name = mat_name.replace('.mdl', '')
mesh_path = mesh_path.replace('.obj', '.usd')
# spawn prim
prim_p = f'/World/t/class_{mat_name}'
# prim_p = f'/World/t/terrain{key}'
stage = omni.usd.get_context().get_stage()
scale = 1#0.01
# X SCALE SHOULD BE NEGATIVE TO FLIP IT CORRECTLY
random_rotation = 0.0
x, y, z = 0, 0, 0
add_reference_to_stage(usd_path=mesh_path, prim_path=prim_p)
self.create_material_and_bind(
mat_name, mat_path, prim_p, scale, stage
)
prim=stage.GetPrimAtPath(prim_p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(mat_name)
scale = 1#0.1
random_rotation = 0.0
x, y, z = 0, 0, 0
# stage = self.usd_context.get_stage()
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=f'/World/t',
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
# The terrain transform is likewise applied a second time.
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=f'/World/t',
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
def spawn_all(self, obs_to_spawn, object_dict, height_map, normals):
self.t_normals = normals
length = len(obs_to_spawn)
counter = 1
for key in obs_to_spawn:
obj = object_dict[key]
path = object_dict[key].usd_path
print(f"{self._o} Spawning {len(obs_to_spawn[key])} of {path}. {counter} / {length}")
class_name = obj.class_name
if class_name == '':
class_name = obj.unique_id
self.spawn_loop(
path,
class_name,
f'{obj.unique_id}_',
obs_to_spawn[key],
height_map,
scale=obj.object_scale,
object_scale_delta=obj.object_scale_delta,
allow_rot=obj.allow_y_rot,
)
print("spawned, now we wait till stage loads")
update_stage()
# print("some time should have passed")
# return
counter += 1
def generate_world_generator(self, obj_path, world_path):
# NOTE: callers pass (world_path, obj_path), so the swapped parameter
# names cancel out and generate_world_from_file still receives
# (world_path, object_path) in the order it expects.
print(f'{self._o} Trying to generate the world from file')
(
obs_to_spawn,
object_dict,
terrain_info,
meshGen,
) = generate_world_from_file(obj_path, world_path)
height_map = meshGen._points2
self.occupancy = np.zeros((len(height_map),len(height_map)))
self.create_terrains(terrain_info)
meshGen.clean_up_files()
return obs_to_spawn, object_dict, height_map, meshGen.normals
def create_material_and_bind(
self, mat_name, mat_path, prim_path, scale, stage
):
obj_prim = stage.GetPrimAtPath(prim_path)
mtl_created_list = []
omni.kit.commands.execute(
'CreateAndBindMdlMaterialFromLibrary',
mdl_name=mat_path,
mtl_name=mat_name,
mtl_created_list=mtl_created_list,
)
mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
omni.usd.create_material_input(
mtl_prim,
'project_uvw',
True,
Sdf.ValueTypeNames.Bool,
)
omni.usd.create_material_input(
mtl_prim,
'texture_scale',
Gf.Vec2f(scale, scale),
Sdf.ValueTypeNames.Float2,
)
cube_mat_shade = UsdShade.Material(mtl_prim)
UsdShade.MaterialBindingAPI(obj_prim).Bind(
cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
)
def create_world(self, world_path, obj_path):
(
obs_to_spawn,
object_dict,
height_map,
normals,
) = self.generate_world_generator(
world_path, obj_path
)
self.spawn_all(obs_to_spawn, object_dict, height_map, normals)
update_stage()
stage = omni.usd.get_context().get_stage()
self.__add_semantics_to_all2(stage)
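# Usage sketch (hypothetical paths; `gen` stands for an instance of this
# generator class inside a running Isaac Sim session):
#
#   gen.create_world('/data/worlddata.json', '/data/objects.json')
#
# This reads the world and object descriptions, builds and loads the terrain
# meshes, spawns every object at its Poisson-disk location, waits for the
# stage to load, and finally tags all prims with semantic classes.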
| 13,308 | Python | 32.523929 | 99 | 0.514953 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/PerlinNoise.py | import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from perlin_numpy import (
generate_perlin_noise_2d, generate_fractal_noise_2d
)
def generate_region(threshold=0.5, shape=(256,256), region_value=1, show_plot=False) -> npt.NDArray[np.float64]:
# np.random.seed(0)
data = generate_perlin_noise_2d(shape, (8, 8))
data = (data-np.min(data))/(np.max(data)-np.min(data))
data[data < threshold] = 0
data[data >= threshold] = region_value
if show_plot:
plt.imshow(data, cmap='gray', interpolation='lanczos')
plt.colorbar()
plt.show()
return data
def generate_region2(seed = 1, threshold=0.5, shape=(256,256), region_value=1, show_plot=False) -> npt.NDArray[np.float64]:
"""Seeded variant of generate_region: identical apart from fixing the RNG seed."""
np.random.seed(seed)
data = generate_perlin_noise_2d(shape, (8, 8))
data = (data-np.min(data))/(np.max(data)-np.min(data))
data[data < threshold] = 0
data[data >= threshold] = region_value
if show_plot:
plt.imshow(data, cmap='gray', interpolation='lanczos')
plt.colorbar()
plt.show()
return data
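# A small worked example (hand-picked values): with threshold=0.6 and
# region_value=3, roughly the top 40% of the normalised noise becomes 3 and
# the rest becomes 0, so the result is a two-valued region mask:
#
#   mask = generate_region2(seed=3, threshold=0.6, shape=(256, 256), region_value=3)
#   assert set(np.unique(mask)) <= {0.0, 3.0}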
if __name__ == "__main__":
np.random.seed(0)
generate_region(show_plot=True)
| 1,189 | Python | 28.02439 | 123 | 0.640875 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/PoissonDisk.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def Bridson_sampling(width=1.0, height=1.0, radius=0.025, k=30):
# References: Fast Poisson Disk Sampling in Arbitrary Dimensions
# Robert Bridson, SIGGRAPH, 2007
def squared_distance(p0, p1):
return (p0[0]-p1[0])**2 + (p0[1]-p1[1])**2
def random_point_around(p, k=1):
# WARNING: This is not uniform around p but we can live with it
R = np.random.uniform(radius, 2*radius, k)
T = np.random.uniform(0, 2*np.pi, k)
P = np.empty((k, 2))
P[:, 0] = p[0]+R*np.sin(T)
P[:, 1] = p[1]+R*np.cos(T)
return P
def in_limits(p):
return 0 <= p[0] < width and 0 <= p[1] < height
def neighborhood(shape, index, n=2):
row, col = index
row0, row1 = max(row-n, 0), min(row+n+1, shape[0])
col0, col1 = max(col-n, 0), min(col+n+1, shape[1])
I = np.dstack(np.mgrid[row0:row1, col0:col1])
I = I.reshape(I.size//2, 2).tolist()
I.remove([row, col])
return I
def in_neighborhood(p):
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
if M[i, j]:
return True
for (i, j) in N[(i, j)]:
if M[i, j] and squared_distance(p, P[i, j]) < squared_radius:
return True
return False
def add_point(p):
points.append(p)
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
P[i, j], M[i, j] = p, True
# Here `2` corresponds to the number of dimension
cellsize = radius/np.sqrt(2)
rows = int(np.ceil(width/cellsize))
cols = int(np.ceil(height/cellsize))
# Squared radius because we'll compare squared distance
squared_radius = radius*radius
# Positions cells
P = np.zeros((rows, cols, 2), dtype=np.float32)
M = np.zeros((rows, cols), dtype=bool)
# Cache generation for neighborhood
N = {}
for i in range(rows):
for j in range(cols):
N[(i, j)] = neighborhood(M.shape, (i, j), 2)
points = []
add_point((np.random.uniform(width), np.random.uniform(height)))
while len(points):
i = np.random.randint(len(points))
p = points[i]
del points[i]
Q = random_point_around(p, k)
for q in Q:
if in_limits(q) and not in_neighborhood(q):
add_point(q)
points = P[M]
return points
if __name__ == '__main__':
plt.figure()
plt.subplot(1, 1, 1, aspect=1)
points = Bridson_sampling()
X = [x for (x, y) in points]
Y = [y for (x, y) in points]
plt.scatter(X, Y, s=10)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()
| 2,673 | Python | 29.044943 | 73 | 0.546951 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/MeshGenerator.py | import open3d as o3d
import numpy as np
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import colorsys
import asyncio
import omni.kit.asset_converter
import carb
# 0.001
# enable project uvw coordinates
class MeshGen:
def __init__(self, map_size, map_scale, regions_map, save_path) -> None:
self._size = map_size
# NOTE: the requested map_scale is currently overridden to 1, so the
# mesh is always built at unit scale.
self._scale = map_scale
self._scale = 1
l = self._size * self._scale
self._map_shape = (self._size * self._scale, self._size * self._scale)
self._points = np.zeros(shape=(l * l, 3))
self._points2 = np.zeros(shape=(l , l))
self._noise_map_xy = None
self._faces = []
self._mesh = None
self._regions_map = cv2.resize(
regions_map,
dsize=(self._size * self._scale, self._size * self._scale),
interpolation=cv2.INTER_NEAREST,
)
self._save_path = save_path
self.meshes = []
self._o = '[MeshGenerator] '
self._files_to_clean = []
self.final_mesh_paths = []
self.final_mesh_paths_dict = {}
self.region_to_path = {}
async def convert(self, in_file, out_file, load_materials=False):
# This import causes conflicts when global
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.ignore_materials = not load_materials
converter_context.ignore_animation = True
converter_context.ignore_cameras = True
converter_context.single_mesh = True
converter_context.smooth_normals = True
# converter_context.preview_surface = False
# converter_context.support_point_instancer = False
# converter_context.embed_mdl_in_usd = False
converter_context.use_meter_as_world_unit = True
# converter_context.create_world_as_default_root_prim = False
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(
in_file, out_file, progress_callback, converter_context
)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
def cnv(self):
print(f'{self._o} Converting .obj files to .usd')
for file_path in self._files_to_clean:
new_path = file_path.replace('.obj', '.usd')
self.final_mesh_paths.append(new_path)
print(f'{self._o} Trying to convert {file_path} to {new_path}')
status = asyncio.get_event_loop().run_until_complete(
self.convert(file_path, new_path)
)
def generate_terrain_mesh(self):
self._create_noise_map()
self._compute_base_mesh()
self._save_meshes()
self.cnv()
def clean_up_files(self):
def file_exists(file_path):
return os.path.exists(file_path)
for file_path in self._files_to_clean:
if file_exists(file_path):
os.remove(file_path)
def _save_meshes(self):
print(f'{self._o} Saving meshes to folder {self._save_path}.')
for i, key in enumerate(list(self.meshes_dict.keys())):
self._files_to_clean.append(f'{self._save_path}/mesh_{i}.obj')
self._files_to_clean.append(f'{self._save_path}/mesh_{i}.usd')
self.final_mesh_paths_dict[key] = f'{self._save_path}/mesh_{i}.obj'
o3d.io.write_triangle_mesh(
filename=f'{self._save_path}/mesh_{i}.obj',
mesh=self.meshes_dict[int(key)],
compressed=False,
write_vertex_normals=True,
# write_vertex_colors=True,
# write_triangle_uvs=True,
print_progress=False,
)
def _create_noise_map(self):
scale = 5  # height scale applied to the noise map (was 250.0)
print(f'{self._o} Creating Noise Map for terrain heights.')
# self._noise_map_xy = generate_fractal_noise_2d(
# self._map_shape, (8, 8), 5
# )
self._noise_map_xy = generate_perlin_noise_2d(
self._map_shape, (8, 8)
)
x = np.linspace(
0,
self._size * self._scale,
self._size * self._scale,
dtype=np.int32,
)
y = np.linspace(
0,
self._size * self._scale,
self._size * self._scale,
dtype=np.int32,
)
self._noise_map_xy *= scale
noise_flat = self._noise_map_xy.flatten()
X, Y = np.meshgrid(x, y)
self._points = np.column_stack(
(X.ravel(), Y.ravel(), noise_flat)  # was abs
)
def _compute_base_mesh(self):
subdivisions = (self._size * self._scale) - 1
materials = list(np.unique(self._regions_map))
print(f"There are {len(materials)}, {materials}")
self.meshes_dict = {}
for key in materials:
self.meshes_dict[int(key)] = o3d.geometry.TriangleMesh()
print(f'{self._o} Computing the base mesh.')
self._faces = []
for j in range(subdivisions):
for i in range(subdivisions):
index = j * (subdivisions + 1) + i
face1 = [index, index + 1, index + subdivisions + 2]
face2 = [
index,
index + subdivisions + 2,
index + subdivisions + 1,
]
self._faces.append(face1)
self._faces.append(face2)
res_ind = int(self._regions_map[j,i])
self.meshes_dict[res_ind].triangles.append(face1)
self.meshes_dict[res_ind].triangles.append(face2)
self._mesh = o3d.geometry.TriangleMesh()
self._mesh.vertices = o3d.utility.Vector3dVector(self._points)
self._mesh.triangles = o3d.utility.Vector3iVector(
np.array(self._faces)
)
self._mesh.paint_uniform_color([1, 0.706, 0])
self._mesh = self._mesh.compute_vertex_normals()
self._mesh = self._mesh.remove_unreferenced_vertices()
self._mesh = self._mesh.remove_duplicated_vertices()
self.normals = self._mesh.triangle_normals
l = self._scale * self._size
for i in range(len(self._mesh.vertices)):
ind = np.unravel_index(i, (l, l))
self._points2[ind] = self._mesh.vertices[i][2]
N = len(materials)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
for i, key in enumerate(list(self.meshes_dict.keys())):
self.meshes_dict[key].vertices = self._mesh.vertices
self.meshes_dict[key].vertex_normals = self._mesh.vertex_normals
self.meshes_dict[key] = self.meshes_dict[
key
].remove_unreferenced_vertices()
self.meshes_dict[key].paint_uniform_color(RGB_tuples[i])
self.meshes_dict[key] = self.meshes_dict[
key
].compute_vertex_normals()
self.meshes_dict[key] = self.meshes_dict[
key
].compute_triangle_normals()
print(np.array(self.meshes_dict[key].triangle_normals))
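# Usage sketch (hypothetical paths; must run inside Isaac Sim because the
# .obj -> .usd conversion goes through omni.kit.asset_converter):
#
#   regions = np.random.randint(0, 3, size=(256, 256)).astype(float)
#   gen = MeshGen(map_size=256, map_scale=10, regions_map=regions, save_path='/tmp')
#   gen.generate_terrain_mesh()        # noise map -> per-region meshes -> .usd
#   print(gen.final_mesh_paths_dict)   # region id -> generated mesh path
#   gen.clean_up_files()               # delete the on-disk files once loaded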
| 7,745 | Python | 33.735426 | 79 | 0.559458 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/AreaMaskGenerator.py | """
This module handles area and point generation.
"""
from .MeshGenerator import MeshGen
# import omni.kit.commands
import json
import numpy as np
import numpy.typing as npt
import tempfile
from . import PoissonDisk
import matplotlib.colors
from . import PerlinNoise
import matplotlib.pyplot as plt
from typing import Tuple
from pxr import Usd, Sdf, Gf
def append_inside_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a new mask that is only within the first mask
"""
mask_indices = np.where((area_to_add >= area_value) & (area != 0))
area2 = np.copy(area)
area2[mask_indices] = area_value # area_value
return area2
def append_to_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a mask appended to another one
"""
mask_indices = np.where(area_to_add >= area_value)
area[mask_indices] = area_value
return area
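# A tiny worked example of the two helpers (values chosen by hand):
#
#   base = np.array([[0., 2.], [2., 0.]])
#   new  = np.array([[5., 5.], [0., 0.]])
#   append_to_area(base.copy(), new, 5.)      # -> [[5., 5.], [2., 0.]]
#   append_inside_area(base.copy(), new, 5.)  # -> [[0., 5.], [2., 0.]]
#
# append_to_area writes the value everywhere the new mask fires, while
# append_inside_area only writes where the existing area is already nonzero.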
def show_plot(area):
cvals = [0, 1, 2, 3, 4]
colors = ['lightgreen', 'green', 'yellow', 'brown', 'red']
norm = plt.Normalize(min(cvals), max(cvals))
tuples = list(zip(map(norm, cvals), colors))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', tuples)
plt.imshow(area, cmap=cmap, norm=norm)
plt.colorbar()
plt.show()
def fill_area(
area: npt.NDArray[np.float64],
size: int,
region_value: int,
object_value: int,
) -> Tuple[npt.NDArray[np.float64], list]:
# Generate points and fill the area with objects using Poisson
points = PoissonDisk.Bridson_sampling(
width=area.shape[0], height=area.shape[1], radius=size, k=30
)
new_points = []
for p in points:
x_int = int(p[0])
y_int = int(p[1])
if area[y_int][x_int] == region_value:
# area[y_int][x_int] = object_value
new_points.append(p)
return area, new_points
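# Usage sketch: collect candidate spawn points that fall inside region 2 of a
# mask, with a minimum spacing of 4 cells (values are hypothetical):
#
#   _, spawn_points = fill_area(region_mask, size=4, region_value=2, object_value=999)
#
# Note that the returned area is unchanged (the in-place write above is
# commented out); callers only consume the filtered Poisson-disk points.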
class ObjectPrim:
def __init__(
self,
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
) -> None:
self.object_scale = scale
self.object_scale_delta = scale_delta
self.allow_y_rot = y_rot
self.unique_id = u_id
self.usd_path = usd_path
self.class_name = class_name
self.poisson_size = poisson_size
def __str__(self) -> str:
return f"""
{self.unique_id}
scale: {self.object_scale} +/- {self.object_scale_delta}
allow y rot: {self.allow_y_rot}
poisson size: {self.poisson_size}
class name: {self.class_name}
usd path: {self.usd_path}
"""
class TerrainPrim:
def __init__(self, mesh_path, mat_path, scale=0.001) -> None:
self.mesh_path = mesh_path
self.material_path = mat_path
self.scale = scale
class WorldHandler:
def __init__(self, world_path, object_path) -> None:
# self.objects = []
self.objects_dict = {}
self._object_path = object_path
self._world_path = world_path
self.objects_to_spawn = {}
self._WORLD_TO_POISSON_SCALE = 1.6
def _read_objects(self):
with open(self._object_path, 'r+') as infile:
data = json.load(infile)
# print(data)
for key in data:
scale = data[key]['object_scale']
scale_delta = data[key]['object_scale_delta']
y_rot = data[key]['allow_y_rot']
u_id = key
usd_path = data[key]['usd_path']
class_name = data[key]['class_name']
poisson_size = data[key]['poisson_size']
tmp = ObjectPrim(
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
)
# self.objects.append(tmp)
self.objects_dict[u_id] = tmp
# for i in self.objects:
# print(i)
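# The object file read above is a JSON dict keyed by unique id; a minimal
# hand-written example (all values hypothetical) looks like:
#
#   {
#     "tree_0": {
#       "object_scale": 1.0,
#       "object_scale_delta": 0.1,
#       "allow_y_rot": true,
#       "usd_path": "/assets/tree.usd",
#       "class_name": "tree",
#       "poisson_size": 10
#     }
#   }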
def _read_world(self):
# print("here")
self.objects_to_spawn = {}
data = None
objs_per_region = {}
with open(self._world_path, 'r+') as infile:
data = json.load(infile)
if data is not None:
n = data['size']
arr = np.zeros((n, n))
total_arr = np.zeros((n, n))
regions = data['regions']
terrain_info = {}
# print( " == ", np.unique(total_arr))
for region_id in regions:
region_id = str(region_id)
terrain_info[region_id] = TerrainPrim(
'',
regions[region_id]['material_path'],
regions[region_id]['material_scale'],
)
# print("terrrain info key type ", type(region_id))
new_arr = PerlinNoise.generate_region2(
seed=int(region_id),
shape=(n, n),
threshold=float(regions[region_id]['threshold']),
show_plot=False,
region_value=int(region_id),
)
arr = append_to_area(arr, new_arr, int(region_id))
total_arr = arr
# handle objects in the zone
objs = regions[region_id]['objects']
objs_per_region[region_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[str(obj_uid)]
objs_per_region[region_id].append(object_prim)
# now we need to deal with sub zones
zones = regions[region_id]['zones']
for zone_id in zones:
terrain_info[str(zone_id)] = TerrainPrim(
'',
zones[zone_id]['material_path'],
zones[zone_id]['material_scale'],
)
new_arr = PerlinNoise.generate_region2(
seed=int(zone_id),
shape=(n, n),
threshold=float(zones[zone_id]['threshold']),
show_plot=False,
region_value=int(zone_id),
)
zone_to_save = append_inside_area(
arr, new_arr, int(zone_id)
)
# print("zone == ", zone_id, " ", zone_id)
total_arr = zone_to_save
objs = zones[zone_id]['objects']
objs_per_region[zone_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[obj_uid]
objs_per_region[zone_id].append(object_prim)
for key in objs_per_region:
obs = objs_per_region[key]
if len(obs) > 0:
for obj in obs:
print(f"{key} has poisson of size {obj.poisson_size} which ends up being {obj.poisson_size / self._WORLD_TO_POISSON_SCALE}")
area, coords = fill_area(
total_arr,
obj.poisson_size / self._WORLD_TO_POISSON_SCALE,
int(key),
999,
)
self.objects_to_spawn[obj.unique_id] = coords
return total_arr, n, terrain_info
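# The world file mirrors what the environment-creation tool writes out: a
# JSON dict with "seed", "size" and "regions", where each region carries a
# noise "threshold", a "material_path"/"material_scale" pair, an "objects"
# list of object ids, and a nested "zones" dict of the same shape. A minimal
# hypothetical example:
#
#   {"seed": 0, "size": 256,
#    "regions": {"1": {"threshold": 0.5,
#                      "material_path": "/materials/Grass.mdl",
#                      "material_scale": 0.1,
#                      "objects": ["tree_0"],
#                      "zones": {}}}}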
def generate_world_from_file(world_path, object_path):
print("creating world handler")
world = WorldHandler(world_path, object_path)
print("reading objects")
world._read_objects()
print("reading world")
res = world._read_world()
mesh_scale = 10
terrain_mesh_paths = []
if res:
region_map, map_size, terrain_info = res
# print(" ------- ")
# print(map_size, 10, region_map.shape)
# print(set(region_map.flatten()))
# unique, counts = np.unique(region_map, return_counts=True)
# print(dict(zip(unique, counts)))
# return None
m_path = tempfile.gettempdir()  # 'C:/Users/jonem/Documents/Kit/apps/Isaac-Sim/exts/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG'
meshGen = MeshGen(map_size, mesh_scale, region_map, m_path)
meshGen.generate_terrain_mesh()
regs = list(np.unique(region_map))
for key in terrain_info:
print(key)
if float(key) in regs:
terrain_info[
str(key)
].mesh_path = meshGen.final_mesh_paths_dict[int(key)]
print(
f'[AreaMaskGenerator] All terrain infos updated. Passing data back to the main sample to generate objects and load the terrain in.'
)
return (
world.objects_to_spawn,
world.objects_dict,
terrain_info,
meshGen,
) # ._points2#_noise_map_xy
return world.objects_to_spawn, world.objects_dict, None, None
| 9,469 | Python | 31.655172 | 148 | 0.50829 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/DataFormatter.py | import numpy as np
import open3d as o3d
import glob
import random
from tqdm import tqdm
id = 100
from pathlib import Path
import sys
def vis_pc(pc):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
o3d.visualization.draw_geometries([pcd])
distance = 200
data_path = "/home/jon/Desktop/SparseNewDS"
folder_path = f"{data_path}/velodyne/"
core_path="/home/jon/Documents/Datasets/SparseFinal/sequences/"
def fix_orb_around_pc(data , distance):
# print(" ================= ")
start = np.array([0.0,0.0,0.0])
points =np.concatenate(data,axis=0)
# print(f"Points before: {len(points)}")
new_arr = []
indexes_to_remove = []
for i,point in enumerate(points):
dist = np.linalg.norm(start - point)
# print(dist)
if dist<distance:
new_arr.append(point)
else:
indexes_to_remove.append(i)
# print(f"Points after: {len(new_arr)}")
limit = 4096*5
if len(new_arr) < limit:
print("array too small")
return np.array(new_arr), indexes_to_remove
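# The loop above can also be written with vectorised numpy, which is much
# faster on large clouds (a sketch, equivalent up to point ordering):
#
#   points = np.concatenate(data, axis=0)
#   dists = np.linalg.norm(points, axis=1)   # distance from the origin
#   keep = dists < distance
#   new_arr, indexes_to_remove = points[keep], np.flatnonzero(~keep)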
# load mappings
# map_dict = {}
# print(map_dict)
# sys.exit()
#
# {1: 'Asphalt', 3: 'Carpet_Beige', 2: 'Carpet_Pattern_Squares_Multi', 4: 'ladder', 5: 'sofa', 6: 'table', 7: 'tree'}
class_names = {
0:"unlabled",
1:"ground",
2:"tree",
3:"vegetation",
4:"ladder",
5:"sofa",
6:"table",
7:"bicycle",
8:"pole",
9:"fence",
}
class_to_id_remap= {
"Grass_Countryside": 1,
"Leaves" : 2,
"Carpet_Pattern_Squares_Multi":1,
"tree":2,
"vegetation":3,
"Asphalt": 1,
"Carpet_Beige":1,
"ladder":4,
"sofa":5,
"table":6,
"bicycle":7,
"fence":9,
"pole":8,
"sign":8,
}
computed_remap = {}
mappings = np.load(f"{data_path}/mapping.npy", allow_pickle=True)
print(mappings)
# print(np.unique(mappings,axis=2))
unique_dict = {}
for row in mappings:
unique_dict[row[3]] = row[2]
print(unique_dict)
# sys.exit()
for tup in mappings:
current_val = tup[2]
class_name = tup[3]
real_class_val = class_to_id_remap[class_name]
computed_remap[current_val] = real_class_val
print("Computed remap")
print(computed_remap)
mapping = computed_remap
# {3: 'Grass_Countryside', 1: 'Leaves', 2: 'Carpet_Pattern_Squares_Multi', 4: 'tree', 5: 'vegetation'}
Path(core_path+"00/velodyne").mkdir(parents=True, exist_ok=True)
Path(core_path+"00/labels").mkdir(parents=True, exist_ok=True)
txtfiles = glob.glob(f"{folder_path}/*.npy")
txtfiles = sorted(txtfiles)
num_files = len(txtfiles)
num_seq = 8
num_files_per_seq = int(num_files/num_seq)
seq_id = 0
seq_id_addresses = []
count = 0
pcs_removed = 0
for seq_id in range(num_seq):
Path(core_path+f"{seq_id:02d}/velodyne/").mkdir(parents=True, exist_ok=True)
Path(core_path+f"{seq_id:02d}/labels/").mkdir(parents=True, exist_ok=True)
seq_id_addresses.append(0)
for file in tqdm(txtfiles):
id_name = file.split("/")[-1]
data = np.load(file)
# print(data, len(data))
if len(data) == 0:
# print("data too small")
pcs_removed +=1
continue
# now handle the labels
labels = np.load(f"{data_path}/velodyneLabels/{id_name}")
labels = np.concatenate(labels,axis=0)
if len(labels) == 0:
continue
k = np.array(list(mapping.keys()))
v = np.array(list(mapping.values()))
out= np.zeros_like(labels)
for key,val in zip(k,v):
out[labels==key] = val
labels = out
original_pc, inds_to_remove = fix_orb_around_pc(data, distance)
print(original_pc)
print(original_pc.shape)
# vis_pc(original_pc)
labels = np.delete(labels, inds_to_remove)
mu, sigma = 0, 0.1
noise = np.random.normal(mu, sigma, [original_pc.shape[0],original_pc.shape[1]])
noisified_pc = original_pc + noise
# vis_pc(noisified_pc)
limit = 4096*5
if noisified_pc.shape[0] <= limit:
pcs_removed += 1
continue
# print(noisified_pc)
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(noisified_pc)
# o3d.visualization.draw_geometries([pcd])
seq_id +=1
if seq_id >= num_seq:
seq_id = 0
# sys.exit()
"""
if count >= num_files_per_seq:
seq_id+=1
count = 0
Path(core_path+f"{seq_id:02d}/velodyne/").mkdir(parents=True, exist_ok=True)
Path(core_path+f"{seq_id:02d}/labels/").mkdir(parents=True, exist_ok=True)
"""
id_name = seq_id_addresses[seq_id]
seq_id_addresses[seq_id] += 1
np.save(f"{core_path}{seq_id:02d}/velodyne/{id_name}",noisified_pc)
np.save(f"{core_path}{seq_id:02d}/labels/{id_name}",labels)
count +=1
print(f"removed {pcs_removed} files")
print(f"kept {count}")
"""
re_map_natural = {0:0,
1:2, # leaves to tree
2:1, #carpet to ground
3:1, #grass ground to ground
4:2, #tree to tree
5:3 # veg to veg
}
re_map_manmade = {
1:1, # asphalt to ground
2:1, # carpet to ground
3:1,#carpet to ground
4:4,# ladder to ladder
5:5,# sofa to sofa
6:6,#Table to table
7:2,# tree to tree
}
mapping= re_map_natural
mapping = re_map_manmade
"""
| 5,345 | Python | 25.73 | 117 | 0.592891 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/vis_results.py |
import pptk
from os.path import join
import numpy as np
import os, argparse, pickle
import open3d as o3d
import yaml
from os.path import exists, join, isfile, dirname, abspath
from PIL import Image
import matplotlib.pyplot as plt
import sys
np.set_printoptions(threshold=sys.maxsize)
# from main_SemanticKITTI import SemanticKITTI
def rgba2rgb( rgba, background=(255,255,255) ):
row, col, ch = rgba.shape
if ch == 3:
return rgba
assert ch == 4, 'RGBA image has 4 channels.'
rgb = np.zeros( (row, col, 3), dtype='float32' )
r, g, b, a = rgba[:,:,0], rgba[:,:,1], rgba[:,:,2], rgba[:,:,3]
a = np.asarray( a, dtype='float32' ) / 255.0
R, G, B = background
rgb[:,:,0] = r * a + (1.0 - a) * R
rgb[:,:,1] = g * a + (1.0 - a) * G
rgb[:,:,2] = b * a + (1.0 - a) * B
return np.asarray( rgb, dtype='uint8' )
def process_clouds(pc, cols):
print("begin proc")
pc_out = []
cols_out = []
for x in range(len(pc)):
for y in range(len(pc[x])):
pc_out.append(pc[x][y])
cols_out.append((cols[x][y][0]/255,cols[x][y][1]/255,cols[x][y][2]/255))
return np.array(pc_out), np.array(cols_out)
def process_clouds2(pc, cols):
print("begin proc")
pc_out = []
cols_out = []
for x in range(len(pc)):
for y in range(len(pc[x])):
pc_out.append(pc[x][y])
cols_out.append(cols[x][y])
return np.array(pc_out), np.array(cols_out)
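# If the per-frame blocks are regular numpy arrays, the same flattening can
# be done without Python loops (a sketch; assumes pc has shape (n, m, 3) and
# cols holds one scalar label per point):
#
#   pc_out = np.asarray(pc).reshape(-1, 3)
#   cols_out = np.asarray(cols).reshape(-1)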
def lin_col_to_norm(cols):
print("linear: ", cols.shape[0])
new_cols = []
for i in range(0,cols.shape[0],4):
new_cols.append((cols[i]/255,cols[i+1]/255,cols[i+2]/255))
return np.array(new_cols)
if __name__ == '__main__':
COLOR_MAP = {
0: (0, 0, 0),
1: (245, 150, 100),
2: (245, 230, 100),
3: (150, 60, 30),
4: (180, 30, 80),
5: (255, 0., 0),
6: (30, 30, 255),
7: (200, 40, 255),
8: (90, 30, 150),
9: (255, 0, 255),
10: (255, 150, 255),
11: (75, 0, 75),
12: (75, 0., 175),
13: (0, 200, 255),
14: (50, 120, 255),
15: (0, 175, 0),
16: (0, 60, 135),
17: (80, 240, 150),
18: (150, 240, 255),
19: (0, 0, 255),
}
for label in COLOR_MAP:
COLOR_MAP[label] = tuple(val/255 for val in COLOR_MAP[label])
seq_id = sys.argv[1:][0]
file_id = sys.argv[1:][1]
full_pc_path = f"{seq_id}/full_pointcloud/{file_id}.npy"
colour_path = f"{seq_id}/rgb/{file_id}.png"
full_pc_path = "_pc.npy"
colour_path = "_sem.npy"
im = np.load(colour_path, allow_pickle=True)
full_pc = np.load(full_pc_path,allow_pickle=True)
full_pc, im = process_clouds2(full_pc, im)
pcd = o3d.geometry.PointCloud()
colors = [COLOR_MAP[clr] for clr in im]
pcd.points = o3d.utility.Vector3dVector(full_pc)
pcd.colors = o3d.utility.Vector3dVector(colors)
o3d.visualization.draw_geometries([pcd])
sys.exit()
| 3,018 | Python | 26.953703 | 84 | 0.541087 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/main.py | from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
print('starting')
# plot() draws the Perlin-noise region preview inside the tkinter window
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from PCG import AreaMaskGenerator
from PCG import PerlinNoise
def plot():
# the figure that will contain the plot
fig = Figure(figsize=(5, 5), dpi=100)
# adding the subplot
plot1 = fig.add_subplot(111)
n = 256
forest_region = PerlinNoise.generate_region(
shape=(n, n), threshold=0.5, show_plot=False
)
# plotting the graph
plot1.imshow(forest_region)
# creating the Tkinter canvas
# containing the Matplotlib figure
canvas = FigureCanvasTkAgg(fig, master=right_frame)
canvas.draw()
# placing the canvas on the Tkinter window
canvas.get_tk_widget().pack() # grid(row=0,column=0, padx=5, pady=5)
# placing the toolbar on the Tkinter window
# canvas.get_tk_widget().pack() # grid(row=0,column=0, padx=5, pady=5)
# Creating Toolbar using Matplotlib
toolbar = NavigationToolbar2Tk(canvas, right_frame)
toolbar.update()
canvas.get_tk_widget().pack()
# button that displays the plot
# plot_button = Button(master = window,
# command = plot,
# height = 2,
# width = 10,
# text = "Plot")
# place the button
# in main window
# plot_button.pack()
def draw_main_menu(m_window):
load_create_base_window_button = Button(
master=m_window,
command=plot,
height=2,
width=18,
text='Create Base Grid',
)
load_create_base_window_button.grid(row=0, column=0, padx=5, pady=5)
# the main Tkinter window
window = Tk()
root = window
# setting the title
window.title('Main Menu')
# dimensions of the main window
window.geometry('800x800')
left_frame = Frame(root, width=200, height=400, bg='grey')
left_frame.grid(row=0, column=0, padx=10, pady=5)
right_frame = Frame(root, width=650, height=400, bg='grey')
right_frame.grid(row=0, column=1, padx=10, pady=5)
draw_main_menu(window)
# run the gui
window.mainloop()
| 2,416 | Python | 22.930693 | 75 | 0.66846 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/test.py | import tkinter as tk
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
print('starting')
# plot() draws the Perlin-noise region preview inside the tkinter window
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from PCG import AreaMaskGenerator
from PCG import PerlinNoise
from PCG.AreaMaskGenerator import ObjectPrim, WorldHandler
# def load_objects():
# # Code for the "Load Objects" page
# print('Load Objects page')
#
import matplotlib.pyplot as plt
import random
import numpy as np
from matplotlib import colors
cbar = None
n = 256   # default world size; overridden by the "set size" button in main_page
import json
import tkinter.ttk as ttk
from tkinter.filedialog import askopenfilename, askdirectory
class EntryInfo:
def __init__(self, name, threshold):
self.name = name
self.threshold = threshold
self.identifier = None
self.color = None
self.in_region = None
self.objects_in_zone = []
self.is_region = True
self.material_path = None
self.material_scale = None
def get_objs_as_str(self):
return ''.join(self.objects_in_zone)
worldHandler = WorldHandler(',', '')
def load_objects():
# Code for the "Load Objects" page
print('Load Objects page')
filename = askopenfilename()
worldHandler._object_path = filename
worldHandler._read_objects()
def create_regions():
# Code for the "Create Regions" page
print('Create Regions page')
# Create a new window for the "Create Regions" page
regions_window = tk.Toplevel()
regions_window.title('Create Regions')
# First column: List of entries with delete buttons
entries_frame = tk.Frame(regions_window)
entries_frame.grid(row=0, column=0, padx=10, pady=10, sticky='nsew')
yscrollbar = Scrollbar(entries_frame)
yscrollbar.pack(side=RIGHT, fill=Y)
entries_label = tk.Label(entries_frame, text='Entries:')
entries_label.pack()
options = [str(i) for i in range(100)]
selected_items = []
# List to store entry objects
entry_list = []
def write_data():
global n
data = {}
for entry in entry_list:
if entry.is_region:
data[entry.identifier] = {}
data[entry.identifier]['objects'] = entry.objects_in_zone
data[entry.identifier]['zones'] = {}
data[entry.identifier]['threshold'] = entry.threshold
data[entry.identifier]['material_path'] = entry.material_path
data[entry.identifier]['material_scale'] =entry.material_scale
else:
# we are in a zone - get the region we are in
id = int(entry.in_zone)
print(id)
# if not data[id]["zones"][entry.identifier]:
if not id in data.keys():
data[id]['zones'][entry.identifier] = {}
if not entry.identifier in data[id]['zones'].keys():
data[id]['zones'][entry.identifier] = {}
data[id]['zones'][entry.identifier][
'objects'
] = entry.objects_in_zone
data[id]['zones'][entry.identifier][
'threshold'
] = entry.threshold
data[id]['zones'][entry.identifier]['material_path'] = entry.material_path
data[id]['zones'][entry.identifier]['material_scale'] =entry.material_scale
# json.dump(data)
full_data = {}
full_data['seed'] = 0
full_data['regions'] = data
full_data['size'] = n
folder_path = askdirectory()
with open(f'{folder_path}/worlddata2.json', 'w') as f:
json.dump(full_data, f)
print(full_data)
# Function to delete an entry from the list
def delete_entry(entry, index):
entry.destroy()
entry_list.pop(index)
update_identifiers()
update_plot()
def update_identifiers():
for i, entry_info in enumerate(entry_list):
entry_info.identifier = i + 1
def add_entry():
name = input_entry1.get()
threshold = input_entry2.get()
parent_zone = input_entry3.get()
mat_path = input_entry_mat_path.get()
mat_scale = input_entry_mat_scale.get()
if name and threshold:
entry_frame = tk.Frame(entries_frame)
entry_frame.pack(anchor='w')
entry_info = EntryInfo(name, threshold)
entry_info.material_path = mat_path
entry_info.material_scale = mat_scale
for i in listbx.curselection():
entry_info.objects_in_zone.append(listbx.get(i))
entry_info.identifier = len(entry_list) #+ 1
id = entry_info.identifier
entry_info.color = generate_random_color()
if parent_zone != '':
entry_info.in_zone = parent_zone
entry_info.is_region = False
else:
entry_info.in_zone = 0
parent_zone = 0
entry_list.append(entry_info)
entry_label = tk.Label(
entry_frame,
text=f'ID: {id}, Name: {name}, Threshold: {threshold}, parent zone: {parent_zone}, objects: {entry_info.get_objs_as_str()}',
fg=entry_info.color,
)
entry_label.pack(side='left')
delete_button = tk.Button(
entry_frame,
text='Delete',
command=lambda entry=entry_frame, index=len(
entry_list
) - 1: delete_entry(entry, index),
)
delete_button.pack(side='left')
# entries_listbox.insert(
# tk.END, f'Name: {name}, Threshold: {threshold}'
# )
input_entry1.delete(0, tk.END)
input_entry2.delete(0, tk.END)
input_entry3.delete(0, tk.END)
update_plot()
def update_plot():
# fig.clear()
global cbar
global n
cbar.remove()
ax.clear()
arr = np.zeros((n, n))
past_id = 0
for entry in entry_list:
print(f'identifier {entry.identifier} (as int: {int(entry.identifier)})')
print('base array')
print(arr)
# check the parent zone. if it is not 0 we need to generate it inside this zone
# we want to keep both tho.
# the inside zone one must not completely overwite the parent REGION
# in this case we dont add it to the main array we just perfrom the calculation and save it
new_arr = PerlinNoise.generate_region2(
seed=int(entry.identifier),
shape=(n, n),
threshold=float(entry.threshold),
show_plot=False,
region_value=int(entry.identifier),
)
print('new array')
print(new_arr)
# This zone will be saved and used later
if entry.in_zone != 0:
zone_to_save = AreaMaskGenerator.append_inside_area(
arr, new_arr, int(entry.identifier)
)
arr = zone_to_save
else:
print('Adding region to general area')
arr = AreaMaskGenerator.append_to_area(
arr, new_arr, int(entry.identifier)
)
i = ax.imshow(arr)
cbar = fig.colorbar(i)
cbar_ticks = [
int(e.identifier) for e in entry_list
] # np.linspace(0.0, 1.0, num=6, endpoint=True)
cbar.set_ticks(cbar_ticks)
cbar.draw_all()
# ax.bar(x, y, color=colors)
# ax.set_xlabel('Entry')
# ax.set_ylabel('Threshold')
canvas.draw()
def extract_regions(arr):
regions = []
for entry in entry_list:
if int(entry.in_zone) == 0:
# remove all the non identifier values in the array and save it
mask_indices = np.where(arr != int(entry.identifier))
area2 = np.copy(arr)
area2[mask_indices] = 0  # area_value
regions.append(area2)
return regions
def generate_random_color():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
color = f'#{r:02x}{g:02x}{b:02x}'
return color
# Second column: Text entries and a button
inputs_frame = tk.Frame(regions_window)
inputs_frame.grid(row=0, column=1, padx=10, pady=10, sticky='nsew')
input_label1 = tk.Label(inputs_frame, text='Name:')
input_label1.pack()
input_entry1 = tk.Entry(inputs_frame)
input_entry1.pack()
input_label2 = tk.Label(inputs_frame, text='Threshold:')
input_label2.pack()
input_entry2 = tk.Entry(inputs_frame)
input_entry2.pack()
input_label3 = tk.Label(inputs_frame, text='In zone ID:')
input_label3.pack()
input_entry3 = tk.Entry(inputs_frame)
input_entry3.pack()
input_label_mat_path = tk.Label(inputs_frame, text='Material Path')
input_label_mat_path.pack()
input_entry_mat_path = tk.Entry(inputs_frame)
input_entry_mat_path.pack()
input_label_mat_scale = tk.Label(inputs_frame, text='Material Scale')
input_label_mat_scale.pack()
input_entry_mat_scale = tk.Entry(inputs_frame)
input_entry_mat_scale.pack()
# process_button = tk.Button(
# inputs_frame, text='Add Entry', command=add_entry
# )
# process_button.pack()
# separator = ttk.Separator(inputs_frame, orient='horizontal')
# separator.pack(fill='x')
ttk.Label(inputs_frame, text='Add objects to zone').pack()
input_label4 = tk.Label(inputs_frame, text='Add to zone with the ID of:')
input_label4.pack()
# input_entry4 = tk.Entry(inputs_frame)
# input_entry4.pack()
# combobox = ttk.Combobox(
# inputs_frame,
# values=options,
# width=25,
# state='readonly',
# justify='left',
# selectmode="multiple",
# )
# combobox.set('Select Options')
# combobox.pack(padx=10, pady=10)
yscrollbar = Scrollbar(inputs_frame)
yscrollbar.pack(side=RIGHT, fill=Y)
listbx = Listbox(
inputs_frame, selectmode='multiple', yscrollcommand=yscrollbar.set
)
listbx.pack(padx=10, pady=10, expand=YES, fill='both')
x = []
# WorldHandler stores loaded objects in objects_dict keyed by unique id.
for key in worldHandler.objects_dict:
x.append(key)
for each_item in range(len(x)):
listbx.insert(END, x[each_item])
listbx.itemconfig(each_item, bg='white')
yscrollbar.config(command=listbx.yview)
process_button = tk.Button(
inputs_frame, text='Add entry', command=add_entry
)
process_button.pack()
# Third column: Empty column
third_column_frame = tk.Frame(regions_window)
third_column_frame.grid(row=0, column=2, padx=10, pady=10, sticky='nsew')
save_all_button = tk.Button(
inputs_frame, text='save all', command=write_data
)
save_all_button.pack()
# Example Matplotlib plot
fig, ax = plt.subplots()
canvas = FigureCanvasTkAgg(fig, master=third_column_frame)
canvas.get_tk_widget().pack()
global n
global cbar
arr = np.zeros((n, n))
i = ax.imshow(arr)
cbar = plt.colorbar(i)
cbar.ax.set_autoscale_on(True)
def create_zones():
# Code for the "Create Zones" page
print('Create Zones page')
def main_page():
main_window = tk.Tk()
main_window.title('Main Window')
load_objects_button = tk.Button(
main_window, text='Load Objects', command=load_objects
)
load_objects_button.pack()
input_sizelabel= tk.Label(main_window, text='World Size:')
input_sizelabel.pack()
input_sizeentry = tk.Entry(main_window)
input_sizeentry.pack()
def size():
global n
try:
n = int(input_sizeentry.get())
except ValueError:
n = 256
if n <= 0:
n = 256
set_size_button = tk.Button(
main_window, text='set size', command=size
)
set_size_button.pack()
create_regions_button = tk.Button(
main_window, text='Create Regions', command=create_regions
)
create_regions_button.pack()
# create_zones_button = tk.Button(
# main_window, text='Create Zones', command=create_zones
# )
# create_zones_button.pack()
main_window.mainloop()
if __name__ == '__main__':
main_page()
| 12,927 | Python | 29.92823 | 140 | 0.580104 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/EnvCreator.py | import tkinter as tk
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
from PCG import PoissonDisk
import matplotlib.colors
from PCG import PerlinNoise
import matplotlib.pyplot as plt
from typing import Tuple
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
# from PCG import AreaMaskGenerator
# from PCG.AreaMaskGenerator import ObjectPrim, WorldHandler
from PCG.worldUtils import WorldHandler, ObjectPrim
import numpy.typing as npt
# def load_objects():
# # Code for the "Load Objects" page
# print('Load Objects page')
#
import random
import numpy as np
from matplotlib import colors
import json
import tkinter.ttk as ttk
from tkinter.filedialog import askopenfilename, askdirectory, asksaveasfile
class EntryInfo:
def __init__(self, name, threshold):
self.name = name
self.threshold = threshold
self.identifier = None
self.color = None
self.in_region = None
self.objects_in_zone = []
self.is_region = True
self.material_path = None
self.material_scale = None
def get_objs_as_str(self):
return ''.join(self.objects_in_zone)
def append_inside_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a new mask that is only within the first mask
"""
mask_indices = np.where((area_to_add >= area_value) & (area != 0))
area2 = np.copy(area)
area2[mask_indices] = area_value # area_value
return area2
def append_to_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a mask appended to another one
"""
mask_indices = np.where(area_to_add >= area_value)
area[mask_indices] = area_value
return area
def show_plot(area):
cvals = [0, 1, 2, 3, 4]
colors = ['lightgreen', 'green', 'yellow', 'brown', 'red']
norm = plt.Normalize(min(cvals), max(cvals))
tuples = list(zip(map(norm, cvals), colors))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', tuples)
plt.imshow(area, cmap=cmap, norm=norm)
plt.colorbar()
plt.show()
def fill_area(
area: npt.NDArray[np.float64],
size: int,
region_value: int,
object_value: int,
) -> Tuple[npt.NDArray[np.float64], list]:
# Generate points and fill the area with objects using Poisson
points = PoissonDisk.Bridson_sampling(
width=area.shape[0], height=area.shape[1], radius=size, k=30
)
new_points = []
for p in points:
x_int = int(p[0])
y_int = int(p[1])
if area[y_int][x_int] == region_value:
# area[y_int][x_int] = object_value
new_points.append(p)
return area, new_points
class EnvTool:
def __init__(self) -> None:
self.worldHandler = WorldHandler(',', '')
self.size = 256
self.seed = 0
self.cbar = None
def load_objects(self):
# Code for the "Load Objects" page
print('Load Objects page')
filename = askopenfilename()
self.worldHandler._object_path = filename
self.worldHandler._read_objects()
def write_data(self):
data = {}
entry_list = self.entry_list
for entry in entry_list:
if entry.is_region:
data[entry.identifier] = {}
data[entry.identifier]['objects'] = entry.objects_in_zone
data[entry.identifier]['zones'] = {}
data[entry.identifier]['threshold'] = entry.threshold
data[entry.identifier]['material_path'] = entry.material_path
data[entry.identifier]['material_scale'] =entry.material_scale
else:
# we are in a zone - get the region we are in
id = int(entry.in_zone)
print(id)
# if not data[id]["zones"][entry.identifier]:
if not id in data.keys():
data[id]['zones'][entry.identifier] = {}
if not entry.identifier in data[id]['zones'].keys():
data[id]['zones'][entry.identifier] = {}
data[id]['zones'][entry.identifier][
'objects'
] = entry.objects_in_zone
data[id]['zones'][entry.identifier][
'threshold'
] = entry.threshold
data[id]['zones'][entry.identifier]['material_path'] = entry.material_path
data[id]['zones'][entry.identifier]['material_scale'] =entry.material_scale
# json.dump(data)
full_data = {}
full_data['seed'] = self.seed
full_data['regions'] = data
full_data['size'] = self.size
# folder_path = askdirectory()
files = [('json', '*.json')]
save_file = asksaveasfile(filetypes=files, defaultextension=files)
if save_file is None:
# The user cancelled the save dialog.
return
folder_path = save_file.name
with open(f'{folder_path}', 'w') as f:
json.dump(full_data, f)
print(full_data)
# Function to delete an entry from the list
def delete_entry(self, entry, index):
entry.destroy()
self.entry_list.pop(index)
self.update_identifiers()
self.update_plot()
def update_identifiers(self):
for i, entry_info in enumerate(self.entry_list):
entry_info.identifier = i + 1
def add_entry(self):
name = self.input_entry1.get()
threshold = self.input_entry2.get()
parent_zone = self.input_entry3.get()
mat_path = self.input_entry_mat_path.get()
mat_scale = self.input_entry_mat_scale.get()
if name and threshold:
self.entry_frame = tk.Frame(self.entries_frame)
self.entry_frame.pack(anchor='w')
self.entry_info = EntryInfo(name, threshold)
self.entry_info.material_path = mat_path
self.entry_info.material_scale = mat_scale
for i in self.listbx.curselection():
self.entry_info.objects_in_zone.append(self.listbx.get(i))
self.entry_info.identifier = len(self.entry_list) + 1
id = self.entry_info.identifier
self.entry_info.color = "BLACK"#generate_random_color()
if parent_zone != '':
self.entry_info.in_zone = parent_zone
self.entry_info.is_region = False
else:
self.entry_info.in_zone = 0
parent_zone = 0
self.entry_list.append(self.entry_info)
self.entry_label = tk.Label(
self.entry_frame,
text=f'ID: {id}, Name: {name}, Threshold: {threshold}, parent zone: {parent_zone}, objects: {self.entry_info.get_objs_as_str()}',
fg=self.entry_info.color,
)
self.entry_label.pack(side='left')
self.delete_button = tk.Button(
self.entry_frame,
text='Delete',
command=lambda entry=self.entry_frame, index=len(
self.entry_list
) - 1: self.delete_entry(entry, index),
)
self.delete_button.pack(side='left')
# entries_listbox.insert(
# tk.END, f'Name: {name}, Threshold: {threshold}'
# )
self.input_entry1.delete(0, tk.END)
self.input_entry2.delete(0, tk.END)
self.input_entry3.delete(0, tk.END)
self.update_plot()
def update_plot(self):
# fig.clear()
self.cbar.remove()
self.ax.clear()
self.arr = np.zeros((self.size, self.size))
self.past_id = 0
for entry in self.entry_list:
print(f'identifier {entry.identifier} (as int: {int(entry.identifier)})')
# check the parent zone. if it is not 0 we need to generate it inside this zone
# we want to keep both tho.
# the inside zone one must not completely overwite the parent REGION
# in this case we dont add it to the main array we just perfrom the calculation and save it
print("here")
print(self.size, entry.threshold)
self.new_arr = PerlinNoise.generate_region2(
seed=int(entry.identifier),
shape=(self.size, self.size),
threshold=float(entry.threshold),
show_plot=False,
region_value=int(entry.identifier),
)
# This zone will be saved and used later
if entry.in_zone != 0:
self.zone_to_save = append_inside_area(
self.arr, self.new_arr, int(entry.identifier)
)
self.arr = self.zone_to_save
else:
print('Adding region to general area')
self.arr =append_to_area(
self.arr, self.new_arr, int(entry.identifier)
)
self.i = self.ax.imshow(self.arr)
self.cbar = self.fig.colorbar(self.i)
cbar_ticks = [
int(e.identifier) for e in self.entry_list
] # np.linspace(0.0, 1.0, num=6, endpoint=True)
self.cbar.set_ticks(cbar_ticks)
self.cbar.draw_all()
# ax.bar(x, y, color=colors)
# ax.set_xlabel('Entry')
# ax.set_ylabel('Threshold')
self.canvas.draw()
def create_regions(self):
# Code for the "Create Regions" page
print('Create Regions page')
# Create a new window for the "Create Regions" page
self.regions_window = tk.Toplevel()
self.regions_window.title('Create Regions')
# First column: List of entries with delete buttons
self.entries_frame = tk.Frame(self.regions_window)
self.entries_frame.grid(row=0, column=0, padx=10, pady=10, sticky='nsew')
self.yscrollbar = Scrollbar(self.entries_frame)
self.yscrollbar.pack(side=RIGHT, fill=Y)
self.entries_label = tk.Label(self.entries_frame, text='Entries:')
self.entries_label.pack()
options = [str(i) for i in range(100)]
self.selected_items = []
# List to store entry objects
self.entry_list = []
# Second column: Text entries and a button
self.inputs_frame = tk.Frame(self.regions_window)
self.inputs_frame.grid(row=0, column=1, padx=10, pady=10, sticky='nsew')
self.input_label1 = tk.Label(self.inputs_frame, text='Name:')
self.input_label1.pack()
self.input_entry1 = tk.Entry(self.inputs_frame)
self.input_entry1.pack()
self.input_label2 = tk.Label(self.inputs_frame, text='Threshold:')
self.input_label2.pack()
self.input_entry2 = tk.Entry(self.inputs_frame)
self.input_entry2.pack()
self.input_label3 = tk.Label(self.inputs_frame, text='In zone ID:')
self.input_label3.pack()
self.input_entry3 = tk.Entry(self.inputs_frame)
self.input_entry3.pack()
self.input_label_mat_path = tk.Label(self.inputs_frame, text='Material Path')
self.input_label_mat_path.pack()
self.input_entry_mat_path = tk.Entry(self.inputs_frame)
self.input_entry_mat_path.pack()
self.input_label_mat_scale = tk.Label(self.inputs_frame, text='Material Scale')
self.input_label_mat_scale.pack()
self.input_entry_mat_scale = tk.Entry(self.inputs_frame)
self.input_entry_mat_scale.pack()
ttk.Label(self.inputs_frame, text='Add objects to zone').pack()
self.input_label4 = tk.Label(self.inputs_frame, text='Add to zone with the ID of:')
self.input_label4.pack()
self.yscrollbar = Scrollbar(self.inputs_frame)
self.yscrollbar.pack(side=RIGHT, fill=Y)
self.listbx = Listbox(
self.inputs_frame, selectmode='multiple', yscrollcommand=self.yscrollbar.set
)
self.listbx.pack(padx=10, pady=10, expand=YES, fill='both')
x = []
for key in self.worldHandler.objects_dict:
x.append(key)
for each_item in range(len(x)):
self.listbx.insert(END, x[each_item])
self.listbx.itemconfig(each_item, bg='white')
self.yscrollbar.config(command=self.listbx.yview)
self.process_button = tk.Button(
self.inputs_frame, text='Add entry', command=self.add_entry
)
self.process_button.pack()
# Third column: Empty column
self.third_column_frame = tk.Frame(self.regions_window)
self.third_column_frame.grid(row=0, column=2, padx=10, pady=10, sticky='nsew')
self.save_all_button = tk.Button(
self.inputs_frame, text='save all', command=self.write_data
)
self.save_all_button.pack()
# Example Matplotlib plot
self.fig, self.ax = plt.subplots()
self.canvas = FigureCanvasTkAgg(self.fig, master=self.third_column_frame)
self.canvas.get_tk_widget().pack()
self.arr = np.zeros((self.size, self.size))
self.i = self.ax.imshow(self.arr)
self.cbar = plt.colorbar(self.i)
self.cbar.ax.set_autoscale_on(True)
def set_size(self):
try:
self.size = int(self.input_sizeentry.get())
except ValueError:
self.size = 256
if self.size <= 0:
self.size = 256
print(f'World size set to {self.size}')
def set_seed(self):
try:
self.seed = int(self.input_seed_entry.get())
except ValueError:
self.seed = 0
def main_page(self):
self.main_window = tk.Tk()
self.main_window.geometry("500x500")
self.main_window.title('Main Window')
self.load_objects_button = tk.Button(
self.main_window, text='Load Objects', command=self.load_objects
)
self.load_objects_button.pack()
self.input_sizelabel= tk.Label(self.main_window, text='World Size:')
self.input_sizelabel.pack()
self.input_sizeentry = tk.Entry(self.main_window)
self.input_sizeentry.pack()
self.set_size_button = tk.Button(
self.main_window, text='set size', command=self.set_size
)
self.set_size_button.pack()
self.input_seed_label= tk.Label(self.main_window, text='seed:')
self.input_seed_label.pack()
self.input_seed_entry = tk.Entry(self.main_window)
self.input_seed_entry.pack()
self.set_seed_button = tk.Button(
self.main_window, text='set seed', command=self.set_seed
)
self.set_seed_button.pack()
self.create_regions_button = tk.Button(
self.main_window, text='Open map creator', command=self.create_regions
)
self.create_regions_button.pack()
# create_zones_button = tk.Button(
# main_window, text='Create Zones', command=create_zones
# )
# create_zones_button.pack()
self.main_window.mainloop()
if __name__ == '__main__':
# main_page()
tool = EnvTool()
tool.main_page()
| 15,702 | Python | 32.842672 | 145 | 0.587441 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/Camera.py | import pathlib
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import numpy as np
import omni.replicator.core as rep
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from PIL import Image
class DepthCamera:
def __init__(
self,
position=(0, 0, 0),
rotation=(0, 0, 0),
image_size=(512, 512),
attach=True,
parent='/World/DepthCamera',
name='DepthCamera',
) -> None:
self.__rgb_annot: Annotator
self.__save_path = ''
self.__pos = position
self.__rot = rotation
self.__image_size = image_size
self.__attach = attach
self.__name = name
self.__focal_length = 24.0
self.__focus_distance = 400.0
self.__f_stop = 0.0
self.__horizontal_aperture = 20.955
self.__horizontal_aperture_offset = 0.0
self.__vertical_aperture_offset = 0.0
self.__clipping_range = (1.0, 10000000.0)
self.__resolution = (512, 512)
self.sample_count = 0
self.save_path = None
self._o = "[DepthCamera] "
def init_output_folder(self, path):
self.save_path = path
print(f"{self._o} Initializing output folders")
pathlib.Path(path +"/camera").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraDepth").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraLabels").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraPC").mkdir(parents=True, exist_ok=True)
def init_sensor(self, parent):
print(f'{self._o} Clipping range: {self.__clipping_range}')
self.__cam = rep.create.camera(
position=self.__pos,
parent=parent,
name=self.__name,
rotation=self.__rot,
focal_length=self.__focal_length,
focus_distance=self.__focus_distance,
f_stop=self.__f_stop,
horizontal_aperture=self.__horizontal_aperture,
horizontal_aperture_offset=self.__horizontal_aperture_offset,
vertical_aperture_offset=self.__vertical_aperture_offset,
clipping_range=self.__clipping_range,
)
print("resolution ", self.__resolution)
self.__rp: og.Node = rep.create.render_product(
self.__cam, self.__resolution
)
print(f"{self._o} Attaching annotaors to camera.")
if self.__attach:
self.__init_annotators()
self.__attach_annotoators()
def read_from_json(self, data):
# We have been given data["LIDAR"]
# for instance_ids in data:
camera_settings = data
self.__name = camera_settings['name']
self.__focal_length = camera_settings['focal_length']
self.__focus_distance = camera_settings['focus_distance']
self.__f_stop = camera_settings['f_stop']
self.__horizontal_aperture = camera_settings['horizontal_aperture']
self.__horizontal_aperture_offset = camera_settings[
'horizontal_aperture_offset'
]
self.__vertical_aperture_offset = camera_settings[
'vertical_aperture_offset'
]
self.__clipping_range = (camera_settings['clipping_range'][0],camera_settings["clipping_range"][1])
self.__resolution = camera_settings['resolution']
self.__pos = camera_settings["position"]
self.__rot = camera_settings["rotation"]
def construct_pc(self, rgb_image, depth_image):
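        # Stub: intended to back-project the depth image (colored by the RGB
        # image) into a point cloud; not implemented yet.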
pass
def __init_annotators(self):
self.rgb_annot = rep.AnnotatorRegistry.get_annotator('rgb')
self.depth_annot = rep.AnnotatorRegistry.get_annotator(
'distance_to_camera'
)
# self.pc_annot = rep.AnnotatorRegistry.get_annotator("pointcloud")
self.sem_annot = rep.AnnotatorRegistry.get_annotator(
'semantic_segmentation'
)
    def __attach_annotators(self):
self.depth_annot.attach(self.__rp)
self.rgb_annot.attach(self.__rp)
self.sem_annot.attach(self.__rp)
# self.pc_annot.attach(self.__rp)
    def __detach_annotators(self):
        self.depth_annot.detach(self.__rp)
        self.rgb_annot.detach(self.__rp)
        self.sem_annot.detach(self.__rp)
        # self.pc_annot.detach(self.__rp)
def sample_sensor(self):
# return
# await rep.orchestrator.step_async()
        rgb_data = self.rgb_annot.get_data()
        # Paths use an explicit "/" so save_path does not need a trailing separator.
        np.save(f"{self.save_path}/camera/{self.sample_count}.npy", rgb_data)
        # print(rgb_data)
        im = Image.fromarray(rgb_data, "RGBA")
        path = f"{self.save_path}/camera/{self.sample_count}_img.png"
        im.save(path)
        depth_data = self.depth_annot.get_data()
        np.save(f"{self.save_path}/cameraDepth/{self.sample_count}.npy", depth_data)
        # np.save('/home/jon/Documents/temp/depth.npy', depth_data)
        sem_data = self.sem_annot.get_data()
        np.save(f"{self.save_path}/cameraLabels/{self.sample_count}.npy", sem_data)
# pc_data = self.pc_annot.get_data()
# np.save(f"{self.save_path}cameraPC/{self.sample_count}.npy",pc_data)
self.sample_count += 1
# np.save('/home/jon/Documents/temp/sem.npy', sem_data)
return
def sample_sensor_return(self):
# return
# await rep.orchestrator.step_async()
rgb_data = self.rgb_annot.get_data()
return rgb_data
| 5,705 | Python | 35.113924 | 107 | 0.605434 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/IMU.py | import pathlib
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import numpy as np
import omni.replicator.core as rep
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
import omni
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from pxr import Usd, Gf, UsdGeom
from omni.isaac.sensor import _sensor
class IMUSensor:
def __init__(
self,
position=(0, 0, 0),
        rotation=(1.0, 0.0, 0.0, 0.0),  # identity quaternion; (w, x, y, z) order assumed
orientation=(1, 1, 1, 1),
parent='/World',
name='/DepthCamera',
) -> None:
self.__pos = position
self.__ori = orientation
self.__rot = rotation
self.__name = name
# self.__imu_prim
self._is = _sensor.acquire_imu_sensor_interface()
self.__path = ''
self.save_path = ""
self.sample_count = 0
# self.__attach_annotoators()
self._o = "[IMUSensor] "
def init_output_folder(self, path):
print(f"{self._o} Initializing output folders")
        self.save_path = path + "/posesIMU"
pathlib.Path(self.save_path).mkdir(parents=True, exist_ok=True)
def init_sensor(self, parent):
        x, y, z = self.__pos
        qw, qx, qy, qz = self.__rot  # quaternion components; (w, x, y, z) order assumed
self.__path = parent + "/" + self.__name
result, self.__imu_prim = omni.kit.commands.execute(
'IsaacSensorCreateImuSensor',
path='/' + self.__name,
parent=parent,
sensor_period=-1.0,
            translation=Gf.Vec3d(x, y, z),
            orientation=Gf.Quatd(qw, qx, qy, qz),
visualize=True,
)
def read_from_json(self, data):
# We have been given data["LIDAR"]
self.__name = data['name']
self.__pos = data['position']
        self.__rot = data['rotation']
def sample_sensor(self):
# print(self.__path)
# return
# await rep.orchestrator.step_async()
reading = self._is.get_sensor_readings(self.__path)
np.save(f"{self.save_path}/{self.sample_count}.npy",reading)
self.sample_count += 1
| 2,316 | Python | 26.915662 | 71 | 0.585924 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/LIDAR.py | import pathlib
from omni.syntheticdata.scripts.sensors import enable_sensors
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from omni.isaac.range_sensor import _range_sensor
import omni
import omni.kit.commands
import omni.timeline
import omni.kit.viewport
from pxr import Usd, Gf, UsdGeom
import numpy as np
import omni.replicator.core as rep
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from pxr import Sdf
class Lidar:
def __init__(
self,
path='/Lidar1',
parent='/World',
min_range=0.4,
max_range=100.0,
draw_points=False,
draw_lines=False,
horizontal_fov=360.0,
vertical_fov=60.0,
horizontal_resolution=0.4,
vertical_resolution=0.4,
rotation_rate=0,
high_lod=True,
yaw_offset=0.0,
enable_semantics=False,
origin_pos=(2.0, 0.0, 4.0),
):
self.__path = '/' + path
self.__min_range = min_range
self.__max_range = max_range
self.__draw_points = draw_points
self.__draw_lines = draw_lines
self.__horizontal_fov = horizontal_fov
self.__vertical_fov = vertical_fov
self.__horizontal_resolution = horizontal_resolution
self.__vertical_resolution = vertical_resolution
self.__rotation_rate = rotation_rate
self.__high_lod = high_lod
self.__yaw_offset = yaw_offset
self.__enable_semantics = enable_semantics
self.__origin_pos = origin_pos
self.__rotation = [0.0,0.0,0.0]
self.sample_count = 0
self.save_path = None
self._o = "[LiDAR] "
def init_output_folder(self, path):
print(f"{self._o} Initializing output folders")
self.save_path = path
pathlib.Path(path +"/velodyne").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/velodyneLabels").mkdir(parents=True, exist_ok=True)
def read_from_json(self, data):
# We have been given data["LIDAR"]
# for instance_ids in data:
lidar_settings = data
print(lidar_settings["name"])
self.__path = '/' + lidar_settings['name']
self.__min_range = lidar_settings['min_range']
self.__max_range = lidar_settings['max_range']
self.__draw_points = lidar_settings['draw_points']
self.__draw_lines = lidar_settings['draw_lines']
self.__horizontal_fov = lidar_settings['horizontal_fov']
self.__vertical_fov = lidar_settings['vertical_fov']
self.__horizontal_resolution = lidar_settings[
'horizontal_resolution'
]
self.__vertical_resolution = lidar_settings['vertical_resolution']
self.__rotation_rate = lidar_settings['rotation_rate']
self.__high_lod = lidar_settings['high_lod']
self.__yaw_offset = lidar_settings['yaw_offset']
self.__enable_semantics = lidar_settings['enable_semantics']
self.__origin_pos = lidar_settings['origin_pos']
self.__rotation = lidar_settings['rotation']
def init_sensor(self, parent):
print(f'init the lidar {parent}')
# self.__lidarInterface = _range_sensor.acquire_lidar_sensor_interface()
_, self.__lidar_prim = omni.kit.commands.execute(
'RangeSensorCreateLidar',
path=self.__path,
parent=parent,
min_range=self.__min_range,
max_range=self.__max_range,
draw_points=self.__draw_points,
draw_lines=self.__draw_lines,
horizontal_fov=self.__horizontal_fov,
vertical_fov=self.__vertical_fov,
horizontal_resolution=self.__horizontal_resolution,
vertical_resolution=self.__vertical_resolution,
rotation_rate=self.__rotation_rate,
high_lod=self.__high_lod,
yaw_offset=self.__yaw_offset,
enable_semantics=self.__enable_semantics,
)
UsdGeom.XformCommonAPI(self.__lidar_prim).SetTranslate(
self.__origin_pos
)
self.__lidar_path = parent + self.__path
print(f'lidar path should be {self.__lidar_path}')
self.__lidarInterface = _range_sensor.acquire_lidar_sensor_interface()
# def sample_sensor(self):
# self.get_pc_and_semantic()
def sample_sensor(self):
# return
self.get_pc_and_semantic()
self.sample_count += 1
    def get_pc_and_semantic(self):
        pointcloud = self.__lidarInterface.get_point_cloud_data(
            self.__lidar_path
        )
        semantics = self.__lidarInterface.get_semantic_data(self.__lidar_path)
        # print(semantics)
        np.save(f"{self.save_path}/velodyne/{self.sample_count}.npy", pointcloud)
        np.save(f"{self.save_path}/velodyneLabels/{self.sample_count}.npy", semantics, allow_pickle=True)
return pointcloud, semantics
def __get_position(self):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(self.__lidar_prim).ComputeLocalToWorldTransform(
Usd.TimeCode.Default()
)
)
return transform.GetTranslation()
def __clear_max_lidar_points(self, pc, sem, lidar_pos, max_dist):
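        # Keep only returns that landed well inside the sensor's range; points
        # at or beyond max_dist - 10 are dropped as likely no-hit/sky rays
        # (the 10-unit margin is a heuristic).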
new_points = []
new_sems = []
for seq_id in range(len(pc)):
for point_id in range(len(pc[seq_id])):
point = pc[seq_id][point_id]
dist = np.linalg.norm(point - lidar_pos)
if dist < max_dist - 10:
new_points.append(pc[seq_id][point_id])
new_sems.append(sem[seq_id][point_id])
return np.array(new_points), np.array(new_sems)
| 6,074 | Python | 35.377245 | 103 | 0.609318 |
RPL-CS-UCL/IsaacSyntheticPerception/config/extension.toml | [core]
reloadable = true
display_name = "Synthetic Perception"
[package]
title="Synthetic Perception"
description="Synthetic Perception desc"
category=""
authors=['Jon']
keywords=['custom']
[dependencies]
"omni.isaac.dynamic_control" = {}
"omni.isaac.range_sensor" = {}
"omni.syntheticdata" = {}
[[python.module]]
name = "com.SyntheticPerception.app"
[[native.plugin]]
recursive = false
| 393 | TOML | 15.416666 | 39 | 0.722646 |
RPL-CS-UCL/IsaacSyntheticPerception/docs/source/index.rst | .. SyntheticPerception documentation master file, created by
sphinx-quickstart on Tue Mar 14 13:24:15 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to SyntheticPerception's documentation!
===============================================
.. toctree::
:maxdepth: 2
:caption: Contents:
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 473 | reStructuredText | 21.571428 | 76 | 0.625793 |
RPL-CS-UCL/IsaacSyntheticPerception/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'SyntheticPerception'
copyright = '2023, Jonathan Edward Embley-Riches'
author = 'Jonathan Edward Embley-Riches'
release = '0.1'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = []
templates_path = ['_templates']
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| 978 | Python | 32.75862 | 87 | 0.635992 |
profK/Worldwizards-Export-Tools/exts/worldwizards.export.tools/worldwizards/export/tools/extension.py | from asyncio import Future, Task
import traceback
import carb
import omni.ext
import omni.ui as ui
from .ww_omniverse_utils import *
import os
import shutil
from omni import usd
from pxr import Usd,UsdShade
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[worldwizards.export.tools] some_public_function was called with x: ", x)
return x ** x
def get_kind(prim:Usd.Prim):
kindAPI = Usd.ModelAPI(prim)
return kindAPI.GetKind()
def recurse_list_components(prim:Usd.Prim, components:list):
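    # Depth-first walk collecting the paths of all prims whose USD Kind is
    # "component"; the descent stops at each component root.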
if (get_kind(prim)=="component"):
print("Found component "+str(prim.GetPath()))
components.append(prim.GetPath())
else:
for child in prim.GetChildren():
recurse_list_components(child,components)
def recurse_list_material_paths(prim:Usd.Prim, materials:list):
print("recurse_list_material_paths "+str(prim.GetPath())+
" type "+prim.GetTypeName())
if (prim.GetTypeName()=="Mesh"):
materialAPI:UsdShade.MaterialBindingAPI = \
UsdShade.MaterialBindingAPI(prim)
material: UsdShade.Material = materialAPI.ComputeBoundMaterial()[0]
        if material:
#print("material "+str(material.GetPath()))
materials.append(material)
for child in prim.GetChildren():
recurse_list_material_paths(child,materials)
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class WorldwizardsExportToolsExtension(ExtensionFramework):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
# add to menu
self._menu_path = f"Tileset/Export All Components"
self._window = None
self._menu = omni.kit.ui.get_editor_menu().add_item(self._menu_path, self._export_components, True)
return super().on_startup(ext_id)
def on_stage_opened(self, event: carb.events.IEvent):
return super().on_stage_opened(event)
def _export_components(self, event, *args):
print("INFO:Export components called")
task:Task = \
asyncio.create_task(self._export_components_async(event, *args))
async def _export_components_async(self, event, *args):
try:
            filepath: str = get_current_stage().GetRootLayer().realPath
            if not filepath:
                print("Could not find root layer path")
                return
            materials_path: str = os.path.join(os.path.dirname(filepath), "Materials")
            if not os.path.exists(materials_path):
                print("Could not find materials folder " + materials_path)
                return
            # copy materials to output root
            new_root = await get_directory_async("/")
            if new_root is None:
                print("User canceled output")
                return
new_materials_path = os.path.join(new_root,"Materials")
if os.path.exists(new_materials_path):
shutil.rmtree(new_materials_path)
shutil.copytree(materials_path,new_materials_path)
root:Usd.Prim = get_current_stage().GetPseudoRoot()
component_paths:list = []
recurse_list_components(root,component_paths)
print("INFO: Components in tree:"+str(component_paths))
for path in component_paths:
component:Usd.Prim = get_current_stage().GetPrimAtPath(path)
self.export_component(component,new_root)
print("INFO: Exported "+str(len(component_paths))+" components to "+new_root)
except Exception:
print(traceback.format_exc())
return
    def export_component(self, prim: Usd.Prim, outDir: str):
        print("INFO: Exporting component " + str(prim.GetPath()))
        if get_kind(prim) != "component":
            print("Not a component " + str(prim.GetPath()))
            return
#localize materials
material_list = []
recurse_list_material_paths(prim,material_list)
print(str(prim.GetPath())+" has materials "+str(material_list))
for material_prim in material_list:
self._localize_material(prim,material_prim)
#create directory
componentPath:str = prim.GetPath()
componentDir:str = os.path.join(outDir,str(componentPath))
print("component dir "+componentDir)
if not os.path.exists(componentDir):
os.makedirs(componentDir)
'''#export prim
self.export_prim(componentPath)
#export materials
self.export_materials(componentPath,componentDir)
#export textures
self.export_textures(componentPath,componentDir)
#export meshes
self.export_meshes(componentPath,componentDir)
'''
def _localize_material(self,prim:Usd.Prim, material_prim:UsdShade.Material):
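        # Duplicate the bound material under the component's own /Looks scope
        # and rebind it, so the exported component carries its materials along.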
material_path:str = str(material_prim.GetPath())
prim_path:str = str(prim.GetPath())
print("copying material from"+prim_path+" to "+material_path)
material_name:str = material_path.split("/")[-1]
new_material_path: str = prim_path +"/Looks/"+material_name
if not material_path == new_material_path:
stage = get_current_stage()
usd.duplicate_prim(stage,material_path,
new_material_path)
new_material_prim:UsdShade.Material = \
UsdShade.Material(stage.GetPrimAtPath(new_material_path))
materialApi:UsdShade.MaterialBindingAPI = \
UsdShade.MaterialBindingAPI(prim)
materialApi.Bind(new_material_prim)
def export_prim(self, path):
prim:Usd.Prim = get_current_stage().GetPrimAtPath(path)
| 6,211 | Python | 41.258503 | 119 | 0.631782 |
profK/Worldwizards-Export-Tools/exts/worldwizards.export.tools/worldwizards/export/tools/ww_omniverse_utils.py | from pxr import Usd, Sdf, UsdGeom, Tf
from omni import usd
import os
import omni
import omni.ext
import omni.kit.app
import carb
from omni.kit.window.file_exporter import get_file_exporter
import asyncio
def get_ext_root_path(extname:str):
#print("Get root of ext "+extname)
manager = omni.kit.app.get_app().get_extension_manager()
ext_id = manager.get_extension_id_by_module(extname)
path = manager.get_extension_path(ext_id)
#print("path is "+path)
return path
def get_current_stage() -> Usd.Stage:
return usd.get_context().get_stage()
def add_layer_reference(ref_path: str, file_path: str, visible: bool = True) -> Usd.Prim:
stage:Usd.Stage
stage = get_current_stage()
    # Define (or fetch) the prim at ref_path and add a reference to the external file
refPrim:Usd.Prim = stage.DefinePrim(ref_path)
references: Usd.References = refPrim.GetReferences()
references.AddReference(
assetPath=file_path
)
#print("visible= "+str(visible))
set_prim_visibility(refPrim,visible)
return refPrim
def set_prim_visibility(prim:Usd.Prim,visible:bool = True):
imageable = UsdGeom.Imageable(prim)
#print("Setting visibility of "+prim.GetName()+" to "+str(visible))
if not visible:
imageable.MakeInvisible()
else:
imageable.MakeVisible()
async def get_directory_async(root: str) -> str:
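    # Show the Kit file-exporter dialog, then poll until its export callback
    # reports the chosen directory (note: this never returns if the dialog is
    # cancelled, since only the apply handler sets dir_name).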
file_exporter = get_file_exporter()
dir_name=None
def cb(filename, dirname, extension, selections):
nonlocal dir_name
dir_name= dirname
    file_exporter.show_window(
        title="Save components to ...",
        export_button_label="Choose",
        filename_url=root,
        # The callback function called after the user has selected an export location.
        export_handler=cb,
    )
while dir_name is None:
await asyncio.sleep(0.1)
print("selected dir "+dir_name)
return dir_name
class ExtensionFramework(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[extension.framework] extension framework startup")
self._usd_context = omni.usd.get_context()
self._selection = self._usd_context.get_selection()
self._stage = get_current_stage() # none first time
self._events = self._usd_context.get_stage_event_stream()
self._ext_Id = ext_id
self._stage_event_sub = \
omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(
self._on_stage_event, name="WWStageEventSub")
#register selection listener
def on_shutdown(self):
print("[extension.framework] extension framework shutdown")
def _on_stage_event(self, event:carb.events.IEvent):
        # No switch statement in Python 3.7 :frown:
self._stage = get_current_stage()
if event.type == int(omni.usd.StageEventType.OPENED) :
Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_notice_changed, self._stage)
self.on_stage_opened(event)
elif event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
self._on_selection_changed()
def on_stage_opened(self, event:carb.events.IEvent):
print("Stage opened")
def _on_selection_changed(self):
print("Selection Changed")
selections = self._selection.get_selected_prim_paths()
return self.on_selection_changed(selections)
def on_selection_changed(self,currently_selected: list):
print("current selections: "+str(currently_selected))
def _on_notice_changed(self, notice, stage):
print("Notice changed") | 3,793 | Python | 34.129629 | 119 | 0.663854 |
terryaic/omniverse_text3d/README.md | # omniverse_text3d is an Omniverse extension that generates 3D text with Blender.
How to use?
Just copy the directory to the extension folder of your Omniverse app, for example, $CreateAppPath$\kit\exts. Then, in your Omniverse app, go to Windows->Extensions, search for "text3d", find it, and enable it.
requirements:
omniverse
blender
NOTES:
Before you generate text, set the Blender installation path in the extension window.
If you want to use your own fonts, simply copy your font files to this location: $omniverse_text3d\cn\appincloud\text3d\scripts\fonts, and then re-enable the extension.
| 623 | Markdown | 46.999996 | 212 | 0.791332 |
terryaic/omniverse_text3d/cn/appincloud/text3d/scripts/extension.py | import os
import asyncio
import carb
import carb.settings
from omni.kit.widget.settings import create_setting_widget, SettingType
import omni.kit.app
import omni.kit.window.filepicker
import omni.usd
import omni.ext
import omni.ui
from pxr import UsdGeom, UsdShade, Vt, Gf, Sdf, Usd
WINDOW_NAME = "Make 3D Text"
EXTENSION_NAME = "Make 3D Text"
PY_PATH = os.path.dirname(os.path.realpath(__file__))
BLENDER_PATH = "cn.appincloud.text3d.blender_path"
class Extension(omni.ext.IExt):
def __init__(self):
self.num = 0
self.enabled = True
self.filepath = "tmptext.usd"
self.extrude = 1.5
self.fontsize = 20
self.bevelDepth = 0
self.text = "hello"
self.singleMesh = True
self._settings = carb.settings.get_settings()
self._settings.set_default_string(BLENDER_PATH, "")
self.load_fonts()
def load_fonts(self):
#self.fontfamily = "SourceHanSansCN.otf"
#self.fonts = ["SourceHanSansCN.otf", "SourceHanSerifCN.otf"]
self.fonts = []
fontpath = os.path.join(PY_PATH, "fonts")
for root, dir, files in os.walk(fontpath):
for file in files:
self.fonts.append(file)
self.fontfamily = self.fonts[0]
def on_startup(self, ext_id):
self._window = omni.ui.Window(EXTENSION_NAME, width=600, height=800, menu_path=f"{EXTENSION_NAME}")
self._window.deferred_dock_in("Property")
self._window.frame.set_build_fn(self._ui_rebuild)
self._ui_rebuild()
def on_shutdown(self):
pass
async def generate_text(self):
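        # Run Blender headless to build the 3D text as a temporary USD file,
        # then copy the resulting meshes into the currently open stage.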
blender_path = self.get_blender_path()
import subprocess
try:
cmd = "%s -b -P %s/make3d.py %s %s %d %f %f %s %s" % (blender_path, PY_PATH, self.text, PY_PATH + "/fonts/" + self.fontfamily, self.fontsize, self.extrude, self.bevelDepth, self.singleMesh, self.filepath)
carb.log_info(f"cmd:{cmd}")
#p = subprocess.Popen(cmd, shell=False)
args = [blender_path, "-b", "-P", os.path.join(PY_PATH, "make3d.py"), self.text, \
os.path.join(PY_PATH, "fonts", self.fontfamily), str(self.fontsize), str(self.extrude), str(self.bevelDepth), str(self.singleMesh), self.filepath]
p = subprocess.Popen(args, shell=False)
p.wait()
except Exception as e:
print(e)
stage1 = omni.usd.get_context().get_stage()
selected_paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
defaultPrimPath = str(stage1.GetDefaultPrim().GetPath())
if len(selected_paths) > 0:
path = selected_paths[0]
else:
path = defaultPrimPath
stage2 = Usd.Stage.Open(self.filepath)
selecteds = stage2.Traverse()
carb.log_info(f"{selecteds}")
for obj in selecteds:
if obj.GetTypeName() == 'Xform':
pass
elif obj.GetTypeName() == "Mesh":
newObj = stage1.DefinePrim(f"{path}/Text_{self.num}", "Mesh")
self.copy_mesh(obj, newObj)
self.num += 1
def copy_mesh(self, obj, newObj):
attributes = obj.GetAttributes()
for attribute in attributes:
attributeValue = attribute.Get()
if attributeValue is not None:
newAttribute = newObj.CreateAttribute(attribute.GetName(),attribute.GetTypeName(), False)
newAttribute.Set(attributeValue)
mesh = UsdGeom.Mesh(obj)
newMesh = UsdGeom.Mesh(newObj)
newMesh.SetNormalsInterpolation(mesh.GetNormalsInterpolation())
def fontsize_changed(self, text_model):
self.fontsize = text_model.get_value_as_int()
carb.log_info(f"fontsize changed:{self.fontsize}")
def extrude_changed(self, text_model):
self.extrude = text_model.get_value_as_float()
carb.log_info(f"extrude changed:{self.extrude}")
def beveldepth_changed(self, text_model):
self.bevelDepth = text_model.get_value_as_float()
carb.log_info(f"extrude changed:{self.bevelDepth}")
def text_changed(self, text_model):
self.text = text_model.get_value_as_string()
carb.log_info(f"text changed:{self.text}")
def combo_changed(self, combo_model, item):
all_options = [
combo_model.get_item_value_model(child).as_string
for child in combo_model.get_item_children()
]
current_index = combo_model.get_item_value_model().as_int
self.fontfamily = all_options[current_index]
carb.log_info(f"font changed to: {self.fontfamily}")
def singleMesh_changed(self, model):
self.singleMesh = model.get_value_as_bool()
carb.log_info(f"singleMesh changed:{self.singleMesh}")
def _ui_rebuild(self):
self._scroll_frame = omni.ui.ScrollingFrame()
with self._window.frame:
with self._scroll_frame:
with omni.ui.VStack(spacing=2):
# intro
with omni.ui.CollapsableFrame(title="Description", height=10):
with omni.ui.VStack(style={"margin": 5}):
omni.ui.Label(
"This extension will generate 3d text with blender, please change the following path to your blender installed path",
word_wrap=True,
)
with omni.ui.HStack(height=20):
omni.ui.Label("blender installed path", word_wrap=True, width=omni.ui.Percent(35))
create_setting_widget(BLENDER_PATH, SettingType.STRING, width=omni.ui.Percent(55))
blender_button = omni.ui.Button("...", height=5, style={"padding": 12, "font_size": 20})
blender_button.set_clicked_fn(self._on_file_select_click)
with omni.ui.HStack(height=20):
omni.ui.Label("text", word_wrap=True, width=omni.ui.Percent(35))
text = omni.ui.StringField(height=10, style={"padding": 5, "font_size": 20}).model
text.add_value_changed_fn(self.text_changed)
text.set_value(self.text)
with omni.ui.HStack(height=20):
omni.ui.Label("font", word_wrap=True, width=omni.ui.Percent(35))
fontFamily = omni.ui.ComboBox(0, *self.fonts, height=10, name="font family").model
fontFamily.add_item_changed_fn(self.combo_changed)
with omni.ui.HStack(height=20):
omni.ui.Label("font-size", word_wrap=True, width=omni.ui.Percent(35))
fontsize = omni.ui.IntField(height=10, style={"padding": 5, "font_size": 20}).model
fontsize.add_value_changed_fn(self.fontsize_changed)
fontsize.set_value(self.fontsize)
with omni.ui.HStack(height=20):
omni.ui.Label("extrude", word_wrap=True, width=omni.ui.Percent(35))
extrude = omni.ui.FloatField(height=10, style={"padding": 5, "font_size": 20}).model
extrude.add_value_changed_fn(self.extrude_changed)
extrude.set_value(self.extrude)
with omni.ui.HStack(height=20):
omni.ui.Label("bevel depth", word_wrap=True, width=omni.ui.Percent(35))
bevel = omni.ui.FloatField(height=10, style={"padding": 5, "font_size": 20}).model
bevel.add_value_changed_fn(self.beveldepth_changed)
bevel.set_value(self.bevelDepth)
with omni.ui.HStack(height=20):
omni.ui.Label("as a single mesh", word_wrap=True, width=omni.ui.Percent(35))
singleMesh = omni.ui.CheckBox(height=10, style={"padding": 5, "font_size": 20}).model
singleMesh.add_value_changed_fn(self.singleMesh_changed)
singleMesh.set_value(self.singleMesh)
with omni.ui.HStack(height=20):
button = omni.ui.Button("Generate 3D Text", height=5, style={"padding": 12, "font_size": 20})
button.set_clicked_fn(lambda: asyncio.ensure_future(self.generate_text()))
def get_blender_path(self):
s = self._settings.get(BLENDER_PATH)
return s
def _on_filepicker_cancel(self, *args):
self._filepicker.hide()
def _on_filter_item(self, item):
return True
async def _on_selection(self, filename, dirname):
path = os.path.join(dirname,filename)
if os.path.isfile(path):
pass
else:
path = os.path.join(path, "blender")
self._settings.set(BLENDER_PATH, path)
self._filepicker.hide()
self._window.frame.rebuild()
def _on_file_select_click(self):
self._filepicker = omni.kit.window.filepicker.FilePickerDialog(
f"{EXTENSION_NAME}/Select Blender installed path",
click_apply_handler=lambda f, d: asyncio.ensure_future(self._on_selection(f, d)),
click_cancel_handler= self._on_filepicker_cancel,
item_filter_options= ["*"],
item_filter_fn=self._on_filter_item,
) | 9,427 | Python | 46.857868 | 216 | 0.57452 |
terryaic/omniverse_text3d/cn/appincloud/text3d/scripts/make3d.py | # import argparse  # only needed by the unused parse_args() below
import bpy
import math
from math import pi
import sys
def str2bool(value):
    return value.lower() == 'true'
def parse_args():
"""parsing and configuration"""
desc = "3dtexts..."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-b', "--background", help="run at background", action="store_true")
parser.add_argument('-P', type=str, default='')
parser.add_argument('--text', type=str, default='hello', help='')
parser.add_argument('--fontFamily', type=str, default='FreeSerif.ttf', help='')
parser.add_argument('--extrude', type=float, default=0.2, help='')
parser.add_argument('--fontSize', type=int, default=3, help='font size')
parser.add_argument('--asSingleMesh', type=str2bool, default=True, help='as single mesh')
return parser.parse_args()
#config=parse_args()
print("runing make3d")
print(sys.argv)
def removeObjects( scn ):
for ob in scn.objects:
if (ob.type == 'FONT') or (ob.type == 'MESH'):
bpy.context.collection.objects.unlink( ob )
scn = bpy.context.scene
removeObjects( scn )
#fnt = bpy.data.fonts.load('/home/terry/auto/fontfiles/GenJyuuGothic-Bold.ttf')
DEFAULT_FONT = "/usr/share/fonts/truetype/freefont/FreeSerif.ttf"
#fnt = bpy.data.fonts.load(DEFAULT_FONT)
def text3d(text, fntFamily, fntSize, extrude, bevelDepth, asSingleMesh=True):
fnt = bpy.data.fonts.load(fntFamily)
if asSingleMesh:
makeobj(text, fnt, 'Text1', 0, fntSize, extrude, bevelDepth)
else:
i = 0
for t in text:
name = "Text%d" % i
makeobj(t, fnt, name, i, fntSize, extrude, bevelDepth)
i+=1
def makeobj(text, fnt, name = "Text1", offset = 0, size = 3, extrude = 0.2, bevelDepth = 0):
# Create and name TextCurve object
bpy.ops.object.text_add(
location=(offset,0,0),
rotation=(0,0,0))
ob = bpy.context.object
ob.name = name
# TextCurve attributes
ob.data.body = text
ob.data.font = fnt
ob.data.size = size
# Inherited Curve attributes
ob.data.extrude = extrude
ob.data.bevel_depth = bevelDepth
bpy.ops.object.convert(target='MESH', keep_original=False)
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.smart_project()
bpy.ops.object.mode_set(mode='OBJECT')
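# The extension invokes Blender as `blender -b -P make3d.py <args...>`, so the
# script's positional arguments start at sys.argv[4]: text, font file, font
# size, extrude, bevel depth, single-mesh flag, output USD path.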
n = 4
text = sys.argv[n]
fontFamily = sys.argv[n+1]
fontSize = int(sys.argv[n+2])
extrude = float(sys.argv[n+3])
bevelDepth = float(sys.argv[n+4])
asSingleMesh = str2bool(sys.argv[n+5])
filepath = sys.argv[n+6]
text3d(text, fontFamily, fontSize, extrude, bevelDepth, asSingleMesh)
#text3d(config.text, config.fontFamily, config.fontSize, config.extrude, config.asSingleMesh)
#bpy.ops.export_scene.fbx(filepath="text.fbx")
bpy.ops.wm.usd_export(filepath=filepath)
| 2,680 | Python | 31.301204 | 93 | 0.696269 |
terryaic/omniverse_text3d/config/extension.toml | [package]
version = "0.1.2"
title = "Make 3D Text"
[dependencies]
"omni.kit.mainwindow" = {}
[[python.module]]
name = "cn.appincloud.text3d"
| 143 | TOML | 13.399999 | 29 | 0.664336 |
dariengit/kit-app-template/NOTES.md | ## Build
```bash
cd ~/omniverse/kit-app-template
./build.sh
```
## Run App
```bash
cd ~/omniverse/kit-app-template
./_build/linux-x86_64/release/my_company.my_app.sh
```
- user.config.json path
```bash
ls ~/.local/share/ov/data/Kit/my_company.my_app/2023.0/user.config.json
```
## Run headless
```bash
cd ~/omniverse/kit-app-template/_build/linux-x86_64/release
./kit/kit ./apps/my_company.my_app.kit \
--enable omni.services.streaming.manager \
--enable omni.kit.livestream.native \
--no-window \
--allow-root
./kit/kit ./apps/my_company.my_app.kit \
--enable omni.services.streaming.manager \
--enable omni.kit.livestream.native \
--enable omni.kit.streamsdk.plugins \
--no-window \
--allow-root \
--/app/livestream/logLevel=debug
./kit/kit ./apps/omni.isaac.sim.headless.native.kit \
--no-window \
--allow-root \
--/app/livestream/logLevel=debug
```
## Run headless (working command)
```bash
cd ~/omniverse/kit-app-template/_build/linux-x86_64/release
./kit/kit ./apps/my_company.my_app.kit \
--enable omni.services.streaming.manager \
--enable omni.kit.livestream.native \
--no-window
```
## How to create extension template
```bash
cd ~/omniverse/kit-app-template
./repo.sh template new
```
| 1,236 | Markdown | 17.742424 | 71 | 0.669903 |
dariengit/kit-app-template/README.md | ## Build
```bash
cd ~/omniverse/kit-app-template
./build.sh
```
## Run as Desktop App
```bash
cd ~/omniverse/kit-app-template
./_build/linux-x86_64/release/my_company.my_app.sh
```
## Run as Headless App
```bash
cd ~/omniverse/kit-app-template/_build/linux-x86_64/release
./kit/kit ./apps/my_company.my_app.kit \
--enable omni.services.streaming.manager \
--enable omni.kit.livestream.native \
--no-window \
--allow-root \
--/app/livestream/logLevel=debug
```
| 482 | Markdown | 16.249999 | 59 | 0.670124 |
dariengit/kit-app-template/source/extensions/my_company.my_app.resources/my_company/my_app/resources/python_ext.py | import omni.ext
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print(f"[my_company.my_app.resources] some_public_function was called with {x}")
return x**x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class HelloPythonExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[my_company.my_app.resources] HelloPythonExtension startup")
def on_shutdown(self):
print("[my_company.my_app.resources] HelloPythonExtension shutdown")
| 964 | Python | 44.952379 | 119 | 0.741701 |
dariengit/kit-app-template/source/extensions/my_company.my_app.resources/my_company/my_app/resources/__init__.py | from .python_ext import *
| 26 | Python | 12.499994 | 25 | 0.730769 |
dariengit/kit-app-template/source/extensions/my_company.my_app.resources/my_company/my_app/resources/tests/__init__.py | from .test_hello import *
| 26 | Python | 12.499994 | 25 | 0.730769 |
dariengit/kit-app-template/source/extensions/my_company.my_app.resources/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
# [0.1.0]
### Added
- Initial release
| 133 | Markdown | 15.749998 | 80 | 0.676692 |
Innoactive/Innoactive-Portal-Omniverse-Extension/README.md | # Innoactive Portal Omniverse Extension
This extension for NVIDIA Omniverse lets you create a sharing link to Innoactive Portal through which users can launch the USD file with Omniverse Enterprise in the cloud and stream it to their browser and/or standalone VR headset.
## How it works:
1. Install the extension
2. Copy the sharing link and send it to the user
3. The user clicks the sharing link and can open the USD file with Omniverse Enterprise on Innoactive Portal cloud.
## Benefits:
- Users can contribute to and review USD files without needing their own workstation
- XR cloud streaming supported: stream not only to the browser but even to a Standalone VR headset
- Compliant with your IT: Both SaaS and Self-hosted options available

## Requirements:
- Innoactive Portal Account (get one at https://innoactive.io/)
- NVIDIA Omniverse Enterprise license
- USD file needs to be hosted on your Nucleus Server
- Users need to have access permissions to the USD file on the Nucleus Server
- Users need to have an Innoactive Portal user account and access permissions to the Omniverse runtime you want to use
## Installation:
1. In Omniverse, go to "Window / Extensions / Options / Settings"
2. Add this to the Extension Search Paths: git://github.com/Innoactive/Innoactive-Portal-Omniverse-Extension?branch=main&dir=exts
3. Search for "Innoactive" in "Third Party" and enable the Innoactive extension (innoactive.omniverse)
4. Enable "autoload" if desired
## Usage:
1. Load a USD file from Nucleus Server
2. Open the Innoactive Extension
3. Click "From Stage" to load the current USD URL
4. Select the OV runtime to use for the stream
5. Select the streaming mode: browser, VR (CloudXR), local (no streaming)
6. Configure the Base Url to match your Innoactive Portal cloud domain
7. Click "Test" to start a cloud streaming session yourself
8. Click "Copy" to copy the sharing URL to the clipboard (an example link is shown below)
9. Send the sharing link to the user you want to view the USD file via cloud streaming
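
For reference, the extension builds links of the form `<base URL>/apps/<app id>/launch/<mode>?args=<URL-encoded --usd argument>`. With the default USD Composer runtime and browser streaming, a generated link looks roughly like this (the domain and file path below are placeholders):

```
https://yourcompany.innoactive.io/apps/3757/launch/cloud/browser?args=--usd%20omniverse%3A%2F%2Fnucleus.yourcompany.com%2FProjects%2Fscene.usd
```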
Hints:
- Ensure that the user has an Innoactive Portal account (click the "Invite user" button if needed)
- Ensure that the user has access permissions for the selected Omniverse runtime
Please contact [Innoactive Support](https://www.innoactive.io/support) for any questions
| 2,406 | Markdown | 51.326086 | 239 | 0.788861 |
Innoactive/Innoactive-Portal-Omniverse-Extension/exts/innoactive.omniverse/innoactive/omniverse/extension.py | import omni.ext
import omni.ui as ui
from omni.ui import color as cl
import omni.kit
import carb
import subprocess
import os
import webbrowser
import threading
from pxr import Usd
import omni.usd
from urllib.parse import quote
LABEL_WIDTH = 120
HEIGHT = 24
VSPACING = 8
HSPACING = 5
MODES = ("browser", "VR", "local")
MODES_TECHNICAL = ("cloud/browser", "cloud/standalone", "local/windows")
APPS = ("Omniverse USD Explorer 2023.2.1", "Omniverse USD Composer 2023.2.3", "Omniverse USD Composer 2023.2.3 with Cesium Extension")
APP_IDS = (4006, 3757, 4339)
DEFAULT_BASE_URL = "https://[yourcompany].innoactive.io"
DEFAULT_APP_ID = 3757
DEFAULT_MODE = "cloud/browser"
settings = carb.settings.get_settings()
# Public functions
def get_sharing_link(extension):
    # Takes the extension instance, since this module-level helper has no `self`.
    print("[innoactive.omniverse] get_sharing_link")
    return extension._sharing_url_model.as_string
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class DeInnoactiveExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def copy_to_clipboard(self, text):
try:
if os.name == 'nt': # For Windows
subprocess.run('clip', universal_newlines=True, input=text, shell=True)
elif os.name == 'posix':
if os.system("which pbcopy") == 0: # macOS has pbcopy
subprocess.run('pbcopy', universal_newlines=True, input=text)
elif os.system("which xclip") == 0 or os.system("which xsel") == 0: # Linux has xclip or xsel
subprocess.run('xclip -selection clipboard', universal_newlines=True, input=text, shell=True)
else:
print("Clipboard utilities pbcopy, xclip, or xsel not found.")
return False
else:
print("Unsupported OS.")
return False
print("Text copied successfully.")
return True
except Exception as e:
print(f"Failed to copy text: {e}")
return False
def set_notification(self, value, label):
self._notification_label.text = ""
self._notification_label.visible = False
self._warning_label.text = ""
self._warning_label.visible = False
label.text = value
label.visible = True
def delete_notification():
label.text = ""
label.visible = False
timer = threading.Timer(5, delete_notification)
timer.start()
def is_sharable_usd(self, file_path):
return file_path.startswith("omniverse://") or file_path.startswith("http://") or file_path.startswith("https://") # and not file_path.startswith("omniverse://localhost")
def update_sharing_link(self):
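        # Link shape: <base>/apps/<app_id>/launch/<mode>?args=<encoded "--usd <url>">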
args = quote("--usd "+self._usd_url_model.as_string, safe='')
self._sharing_url_model.as_string = self._base_url_model.as_string + "/apps/" + self._app_id_model.as_string + "/launch/" + self._mode_str_model.as_string + "?args=" + args
self._sharing_url_model_label.text = self._sharing_url_model.as_string
def on_value_changed(self, item_model):
self.update_sharing_link()
self.save_settings()
def on_usd_value_changed(self, item_model):
self.update_sharing_link()
self.save_settings()
def on_mode_changed(self, item_model, item):
value_model = item_model.get_item_value_model(item)
current_index = value_model.as_int
self._mode_str_model.as_string = MODES_TECHNICAL[current_index]
self.update_sharing_link()
self.save_settings()
def on_app_changed(self, item_model, item):
value_model = item_model.get_item_value_model(item)
current_index = value_model.as_int
self._app_id_model.as_int = APP_IDS[current_index]
self.update_sharing_link()
self.save_settings()
def save_settings(self):
settings.set("/persistent/exts/de/innoactive/baseUrl", self._base_url_model.as_string)
settings.set("/persistent/exts/de/innoactive/renderMode", self._mode_str_model.as_string)
settings.set("/persistent/exts/de/innoactive/appId", self._app_id_model.as_int)
def load_settings(self):
try:
self._base_url_model.as_string = settings.get("/persistent/exts/de/innoactive/baseUrl")
self._mode_str_model.as_string = settings.get("/persistent/exts/de/innoactive/renderMode")
self._app_id_model.as_int = settings.get("/persistent/exts/de/innoactive/appId")
except Exception as e:
self._base_url_model.as_string = DEFAULT_BASE_URL
self._mode_str_model.as_string = DEFAULT_MODE
self._app_id_model.as_int = DEFAULT_APP_ID
def clear_usd(self):
# Clear USD file from field
self._usd_url_model.as_string = ""
def set_stage_usd(self, at_autoload=False):
        # Fetch the currently opened USD file path from the stage's root layer
try:
stage = omni.usd.get_context().get_stage()
rootLayer = stage.GetRootLayer()
file_path = rootLayer.realPath if rootLayer else ""
if self.is_sharable_usd(file_path):
self._usd_url_model.as_string = file_path
else:
if not at_autoload:
self.set_notification("Please load a valid omniverse:// or http(s):// USD file URL to your stage.", self._warning_label)
self._usd_url_model.as_string = ""
except Exception as e:
if not at_autoload:
self.set_notification("Please load a valid omniverse:// or http(s):// USD file URL to your stage.", self._warning_label)
self._usd_url_model.as_string = ""
def validate_form(self):
if not self._usd_url_model.as_string:
self.set_notification("No USD file selected. Please select a valid omniverse:// or http(s):// USD file URL", self._warning_label)
elif not self.is_sharable_usd(self._usd_url_model.as_string):
self.set_notification("USD file is not shareable. Use omniverse:// or http(s):// format.", self._warning_label)
elif self._base_url_model.as_string == DEFAULT_BASE_URL:
self.set_notification("Configure Base URL to match your organization's Innoactive Portal domain name.", self._warning_label)
else:
return True
return False
def copy_url(self):
# Copy the generated link to clipboard
if self.validate_form():
self.copy_to_clipboard(self._sharing_url_model.as_string)
self.set_notification("Sharing link copied to clipboard.", self._notification_label)
def open_url(self):
if self.validate_form():
webbrowser.open_new_tab(self._sharing_url_model.as_string)
self.set_notification("Sharing link opened in browser.", self._notification_label)
def open_invite_url(self):
if self.validate_form():
invite_url = self._base_url_model.as_string + "/control-panel/v2/users"
webbrowser.open_new_tab(invite_url)
def on_shutdown(self):
print("[innoactive.omniverse] shutdown")
def on_startup(self, ext_id):
print("Innoactive startup")
manager = omni.kit.app.get_app().get_extension_manager()
ext_path = manager.get_extension_path_by_module("innoactive.omniverse")
self._window = ui.Window("Innoactive Portal", width=600, height=400)
with self._window.frame:
with ui.VStack(spacing=VSPACING, height=0):
with ui.HStack(spacing=HSPACING):
img = ui.Image(height=80, alignment=ui.Alignment.RIGHT)
img.source_url = ext_path + "/data/innoactive_logo.png"
with ui.HStack(spacing=HSPACING):
ui.Label("USD file", name="usd_url", width=LABEL_WIDTH, height=HEIGHT, tooltip="Ensure the USD file is hosted on Nucleus and the user with whom you want to share access has permissions to access that file on Nucleus Server.")
self._usd_url_model = ui.SimpleStringModel()
self._usd_url_model.as_string = ""
ui.StringField(model=self._usd_url_model, height=HEIGHT, word_wrap=True)
self._usd_url_model_changed = self._usd_url_model.subscribe_value_changed_fn(self.on_usd_value_changed)
ui.Button("From Stage", clicked_fn=self.set_stage_usd, width=90, height=HEIGHT, tooltip="Use the currently loaded USD file from Stage")
with ui.HStack(spacing=HSPACING):
ui.Label("Runtime", name="app", width=LABEL_WIDTH, height=HEIGHT, tooltip="Select the OV Kit runtime you want to use. You can upload your own runtimes, please contact Innoactive support.")
self._app_id_model = ui.SimpleStringModel()
try:
self._app_id_model.as_int = settings.get("/persistent/exts/de/innoactive/appId")
except Exception as e:
self._app_id_model.as_int = DEFAULT_APP_ID
self._app_model = ui.ComboBox(APP_IDS.index(self._app_id_model.as_int), *APPS).model
self._app_model_changed = self._app_model.subscribe_item_changed_fn(self.on_app_changed)
with ui.HStack(spacing=HSPACING):
ui.Label("Streaming Mode", name="mode", width=LABEL_WIDTH, height=HEIGHT, tooltip="Select weather the link shall start a browser stream, VR stream or a locally rendered session")
self._mode_str_model = ui.SimpleStringModel()
try:
self._mode_str_model.as_string = settings.get("/persistent/exts/de/innoactive/renderMode")
except Exception as e:
self._mode_str_model.as_string = DEFAULT_MODE
print("renderMode: " + self._mode_str_model.as_string)
self._mode_model = ui.ComboBox(MODES_TECHNICAL.index(self._mode_str_model.as_string), *MODES).model
self._mode_model_changed = self._mode_model.subscribe_item_changed_fn(self.on_mode_changed)
with ui.HStack(spacing=HSPACING):
ui.Label("Base Url", name="base_url", width=LABEL_WIDTH, height=HEIGHT, tooltip="Set this to your match your Innoactive Portal cloud domain URL")
self._base_url_model = ui.SimpleStringModel()
try:
self._base_url_model.as_string = settings.get("/persistent/exts/de/innoactive/baseUrl")
except Exception as e:
self._base_url_model.as_string = DEFAULT_BASE_URL
ui.StringField(model=self._base_url_model, height=HEIGHT, word_wrap=True)
self._base_url_model_changed = self._base_url_model.subscribe_value_changed_fn(self.on_value_changed)
ui.Line()
with ui.HStack(spacing=HSPACING):
ui.Label("Sharing URL", name="sharing_url", width=LABEL_WIDTH, height=HEIGHT, tooltip="Copy and share this link with a user. You need to invite the user to Innoactive Portal as well.")
self._sharing_url_model = ui.SimpleStringModel()
self._sharing_url_model_label = ui.Label("", word_wrap=True, alignment=ui.Alignment.TOP)
with ui.HStack(spacing=HSPACING):
ui.Spacer( width=LABEL_WIDTH)
self.button_copy = ui.Button("Copy", clicked_fn=self.copy_url, width=60, height=HEIGHT, tooltip="Copy the sharing link to the clipboard")
self.button_test = ui.Button("Test", clicked_fn=self.open_url, width=60, height=HEIGHT, tooltip="Test the sharink link on your PC")
self.button_invite = ui.Button("Invite user", clicked_fn=self.open_invite_url, width=90, height=HEIGHT, tooltip="Invite a user to Innoactive Portal")
with ui.HStack(spacing=HSPACING, style={"Notification": {"color": cl("#76b900")}, "Error": {"color": cl("#d48f09")}}):
ui.Spacer( width=LABEL_WIDTH)
with ui.VStack(spacing=0, height=0):
self._notification_label = ui.Label("", word_wrap=True, name="notification", height=HEIGHT, visible=False, style_type_name_override="Notification")
self._warning_label = ui.Label("", word_wrap=True, name="notification", height=HEIGHT, visible=False, style_type_name_override="Error")
self.load_settings()
self.update_sharing_link()
self.set_stage_usd(at_autoload=True)
| 13,222 | Python | 50.451362 | 245 | 0.608153 |
Innoactive/Innoactive-Portal-Omniverse-Extension/exts/innoactive.omniverse/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.1] - 2024-02-13
- Added support to select USD Composer with Cesium
## [1.0.0] - 2024-02-08
- Initial version of Innoactive Omniverse Extension
| 249 | Markdown | 19.833332 | 80 | 0.702811 |
jasonsaini/OmniverseCubeClickExtension/README.md | # 🌌 Omniverse Cube Click Extension

## 📖 Introduction
Welcome to `OmniverseCubeSpawner`! This repository contains a tutorial-based NVIDIA Omniverse extension that allows users to spawn a 3D cube in the scene with a simple button click.
Check out the tutorial I followed [here](https://www.youtube.com/watch?v=eGxV_PGNpOg&t=20s).
| 451 | Markdown | 55.499993 | 181 | 0.800443 |
jasonsaini/OmniverseCubeClickExtension/spawn_cube/exts/spawn.cube/spawn/cube/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[spawn.cube] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class SpawnCubeExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[spawn.cube] spawn cube startup")
self._count = 0
self._window = ui.Window("Spawn a cube", width=300, height=300)
with self._window.frame:
with ui.VStack():
label = ui.Label("Cube Spawner")
def on_click():
omni.kit.commands.execute("CreatePrimWithDefaultXform", prim_type="Cube", attributes={'size:':100, 'extent': [(-50,-50,-50), (50,50,50)]})
print("Cube spawned!")
                def on_reset():
                    self._count = 0
                    label.text = "Cube Spawner"
on_reset()
with ui.HStack():
ui.Button("Spawn Cube", clicked_fn=on_click)
#ui.Button("Reset", clicked_fn=on_reset)
def on_shutdown(self):
print("[spawn.cube] spawn cube shutdown")
| 1,661 | Python | 36.772726 | 158 | 0.608067 |
jasonsaini/OmniverseCubeClickExtension/spawn_cube/exts/spawn.cube/docs/README.md | # Python Extension Example [spawn.cube]
This is an example of pure python Kit extension. It is intended to be copied and serve as a template to create new extensions.
| 169 | Markdown | 32.999993 | 126 | 0.781065 |
Mariuxtheone/kit-extension-sample-camerastudio/README.md | # Camera Studio - NVIDIA Omniverse Extension
<img src="https://github.com/Mariuxtheone/kit-extension-sample-camerastudio/blob/main/exts/omni.example.camerastudio/data/icon.png" width="128">
This extension lets you open a CSV file containing camera settings and generates in-scene cameras accordingly.

Usage:
The extension generates cameras with the following settings:
- Shot Name
- Focal Length (in mm)
- Horizontal Aperture (in mm)
- Distance from the subject at which the camera should be placed in the scene (in meters)
1) Create your .csv file with the following header:
```
shot_name,focal_length,aperture,distance
```
e.g.
```
shot_name,focal_length,aperture,distance
establishing_shot,24,2.8,4
wide_shot,14,2.0,4
over_the_shoulder_shot,50,2.8,0.5
point_of_view_shot,85,2.8,0.5
low_angle_shot,24,1.8,0.5
high_angle_shot,100,2.8,1.5
```
2) Open the .csv file via the Extension.
3) The extension will generate the cameras in your scene with the desired shots configured. A minimal sketch of the underlying logic is shown below.
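
Since the bundled `CSVReader` helper is not shown in this listing, here is a minimal, hypothetical sketch of what reading the CSV and spawning the cameras could look like (the prim paths, the centimeter stage-unit assumption, and the function name are illustrative, not the extension's actual implementation):

```
import csv

import omni.usd
from pxr import Gf, UsdGeom


def create_cameras_from_csv(csv_path: str):
    stage = omni.usd.get_context().get_stage()
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            # One camera prim per CSV row, named after the shot.
            cam = UsdGeom.Camera.Define(stage, f"/World/Cameras/{row['shot_name']}")
            cam.CreateFocalLengthAttr(float(row["focal_length"]))
            cam.CreateHorizontalApertureAttr(float(row["aperture"]))
            # Place the camera `distance` meters back from the subject,
            # assuming a stage authored in centimeters (hence * 100).
            offset_cm = float(row["distance"]) * 100.0
            UsdGeom.XformCommonAPI(cam.GetPrim()).SetTranslate(Gf.Vec3d(0, 0, offset_cm))
```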
# chatGPT Prompt (also works with GPT-3)
This is the prompt that I perfected to generate shots, you might have to run it a few times to get the exact desired results, but this seems to do the trick:
```
list a series of 10 camera shots for an interior video shoot, specifying the focal length of the camera in mm, the horizontal aperture (as number), and the distance the camera should be put at (in meters)
put those settings in a CSV file using this header: shot_name, focal_length, aperture, distance
horizontal aperture should be indicated as number (for example, 2.8) and distance should be indicated as number (for example, for 1 meter, put 1). shot_name has to be represented with underscore format (for example, extreme_close_up_shot)
remove mm and m from the CSV
```
# Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest installing a few extensions that will improve the Python experience.
Look for "omni.example.camerastudio" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable company.hello.world
```
# App Link Setup
If `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice `exts` is repo subfolder with extensions. More information can be found in "Git URL as Extension Search Paths" section of developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
| 4,001 | Markdown | 36.401869 | 258 | 0.763809 |
Mariuxtheone/kit-extension-sample-camerastudio/exts/omni.example.camerastudio/omni/example/camerastudio/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands
from .csvreader import CSVReader
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[omni.example.camerastudio] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class CamerastudioExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.camerastudio] omni example camerastudio startup")
self._count = 0
self.csvreader = CSVReader()
self._window = ui.Window("Camera Studio", width=300, height=250)
with self._window.frame:
with ui.VStack():
label = ui.Label("Click the button to import a CSV file\nwith the details to generate multiple cameras.")
with ui.HStack():
ui.Button("Open File...", clicked_fn=self.csvreader.on_open_file)
def on_shutdown(self):
print("[omni.example.camerastudio] omni example camerastudio shutdown")
| 1,538 | Python | 39.499999 | 121 | 0.675553 |