file_path
stringlengths 20
207
| content
stringlengths 5
3.85M
| size
int64 5
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/frankahand_experiment_learning.py | import numpy as np
from PIL import Image
## Get object indexes
# import os
# OBJ_INDEX_LIST = []
# for i in os.listdir("/home/yizhou/Research/temp/"):
# if str(i).isdigit():
# OBJ_INDEX_LIST.append(i)
# print(sorted(OBJ_INDEX_LIST, key = lambda x: int(x)))
from exp.params import OBJ_INDEX_LIST
SUCESS_PERCENTAGE = 20
result_file_path = "/home/yizhou/Research/Data/frankahand_exp_learning.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn.pth"
SHOW_IMAGE = False
import getpass
user = getpass.getuser()
from omni.isaac.kit import SimulationApp
# "/home/yizhou/Research/OpenAnyDrawer/scene0.usd" #
usd_path = f"omniverse://localhost/Users/{user}/scene4.usd"
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from franka.gripper import GripperHandEnv
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv()
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
controller = GripperHandEnv("/World/Franka/panda_link8", "/World/AnchorXform")
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath("/World/Franka")
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep leanrning model
from exp.model import load_vision_model
model = load_vision_model(model_path = MODEL_PATH, model_name = "fasterrcnn_resnet50_fpn")
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[:1]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
world.scene.add(mobility_obj)
world.reset()
world.render()
scene_instr = SceneInstructor()
scene_instr.analysis()
# export data and load model
# scene_instr.output_path = "/home/yizhou/Research/temp0/"
# scene_instr.export_data()
# omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
world.render()
world.render()
world.render()
image_array =env.get_image(return_array=True)
scene_instr.model = model
scene_instr.predict_bounding_boxes(image_array[:,:,:3])
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# if no valid predicted boundbox
if not scene_instr.is_pred_valid:
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX}, invalid prediction\n")
world.scene.remove_object(mobility_obj_name)
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
continue
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
# handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
the_box = scene_instr.get_box_from_desc(v_desc, h_desc)
handle_direction = "horizontal" if (the_box[2] - the_box[0]) > (the_box[3] - the_box[1]) else "vertical"
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", graps_pos, grasp_rot )
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(200):
world.step(render=False)
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=False)
# close
pos = np.array([[0.0, 0.0]])
for _ in range(100):
pos -= 0.01
controller.robots.set_joint_position_targets(pos)
world.step(render=False)
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(pos)
pos += 0.015
world.step(render=False)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{v_desc}|{h_desc}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 7,458 | Python | 34.018779 | 175 | 0.633414 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/experiment_learning_common_cliport.py | import numpy as np
from PIL import Image
from exp.params import OBJ_INDEX_LIST, GRASP_PROFILES
import getpass
user = getpass.getuser()
ROBOT_NAME = "frankahand" #"skeletonhand" # "shadowhand" # "allegro"
grasp_profile = GRASP_PROFILES[ROBOT_NAME]
SUCESS_PERCENTAGE = 20
print("SUCESS_PERCENTAGE: ", SUCESS_PERCENTAGE)
result_file_path = f"/home/yizhou/Research/Data/{ROBOT_NAME}_exp_cliport824.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/custom_cliport824.pth"
clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json"
load_nucleus = True # nucleus loading
usd_path = "omniverse://localhost/Users/yizhou/scene4.usd" #grasp_profile["usd_path"]
SHOW_IMAGE = True
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# import
try:
import transformers
except:
omni.kit.pipapi.install("transformers")
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from hand_env import HandEnv
from hand_common import HandBase
from render.utils import prim_random_color, LOOKS_PATH
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv(load_nucleus=load_nucleus)
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
# controller = HandEnv("/World/allegro*/allegro_mount", "/World/AnchorXform")
controller = HandBase(grasp_profile["articulation_root"], "/World/AnchorXform")
controller.grasp_profile = grasp_profile["offset"]
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath(grasp_profile["robot_path"])
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep leanrning model
from exp.model import load_vision_model
model = load_vision_model(
model_path = MODEL_PATH,
model_name = "custom_cliport",
clip_text_feature_path = clip_text_feature_path
)
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[:2]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
# randomize color
# reset look in scene
mat_look_prim = world.scene.stage.GetPrimAtPath(LOOKS_PATH)
if mat_look_prim:
omni.kit.commands.execute("DeletePrims", paths=[LOOKS_PATH])
world.step(render = False)
scene_instr = SceneInstructor()
scene_instr.analysis()
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
prim_random_color(handle_path_str)
world.scene.add(mobility_obj)
world.reset()
world.render()
world.render()
image_array = env.get_image(return_array=True)
image =env.get_image()
if SHOW_IMAGE:
world.render()
env.get_image().show()
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
# handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
cabinet_type = scene_instr.valid_handle_list[handle_path_str]["cabinet_type"]
# add_update_semantics(prim, "handle")
text = f"{v_desc}_{h_desc}_{cabinet_type}"
text = text.replace("_"," ").replace("-"," ").replace(" ", " ").strip()
print("task text", text)
bbox_center, handle_direction = model.pred_box_pos_and_dir(image.convert('RGB'), text)
the_box = scene_instr.get_bbox_world_position([bbox_center[1], bbox_center[0], bbox_center[1], bbox_center[0]])
# Task
# print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", the_box, graps_pos, grasp_rot )
if SHOW_IMAGE:
world.render()
env.get_image().show()
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(500):
world.step(render=SHOW_IMAGE)
print("move to handle")
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=SHOW_IMAGE)
# close finger
print("close finger")
finger_pos = grasp_profile["finger_pos"].copy()
if ROBOT_NAME == "allegro":
for i in range(120):
controller.robots.set_joint_position_targets(finger_pos * i / 120) #
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for _ in range(100):
finger_pos -= 0.01
controller.robots.set_joint_position_targets(finger_pos)
pos = np.clip(finger_pos, 0, 4)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
dof_pos = finger_pos
for i in range(80):
# thumb
step_gain = 0.01
dof_pos[6] += step_gain
dof_pos[11] += 2 * step_gain
# dof_pos[16] += 0.01
dof_pos[21] += - step_gain
dof_pos[7] += step_gain
dof_pos[8] += step_gain
dof_pos[9] += step_gain
# dof_pos[14] += 0.01
dof_pos[12] += step_gain
dof_pos[13] += step_gain
dof_pos[14] += step_gain
dof_pos[17] += step_gain
dof_pos[18] += step_gain
dof_pos[19] += step_gain
# pinky
dof_pos[15] += step_gain
dof_pos[20] += step_gain
dof_pos[22] += step_gain
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=True)
elif ROBOT_NAME == "skeletonhand":
# close finger
for i in range(120):
i = i / 4
dof_pos = np.array([
[ i * 0.03, i * 0.04,
i * 0.01, -i * 0.04,
i * 0.005, -i * 0.04,
-i * 0.02, -i * 0.04,
-i * 0.01, -i * 0.04,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
],
])
# pos = np.random.randn(2,25)
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=SHOW_IMAGE)
print("pull out")
# pull out
if ROBOT_NAME == "allegro":
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for i in range(300):
graps_pos[...,0] -= 0.001
finger_pos += np.sqrt(i) * 1e-4
# print(pos)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
finger_pos = np.clip(finger_pos, 0, 4)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
dof_pos *= 0.997
# print(dof_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "skeletonhand":
# pull out
for i in range(200):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
dof_pos /= 1.5
# pull out furthur
for i in range(100):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{text}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 12,074 | Python | 34.101744 | 164 | 0.575617 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/experiment_learning_common.py | import numpy as np
from PIL import Image
from exp.params import OBJ_INDEX_LIST, GRASP_PROFILES
ROBOT_NAME = "frankahand" #"skeletonhand" # "shadowhand" # "allegro"
grasp_profile = GRASP_PROFILES[ROBOT_NAME]
SUCESS_PERCENTAGE = 20
print("SUCESS_PERCENTAGE: ", SUCESS_PERCENTAGE)
result_file_path = f"/home/yizhou/Research/Data/{ROBOT_NAME}_exp_learning823.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn823.pth"
load_nucleus = True # nucleus loading
usd_path = "omniverse://localhost/Users/yizhou/scene4.usd" #grasp_profile["usd_path"]
SHOW_IMAGE = True
import getpass
user = getpass.getuser()
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from hand_env import HandEnv
from hand_common import HandBase
from render.utils import prim_random_color, LOOKS_PATH
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv(load_nucleus = load_nucleus)
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
# controller = HandEnv("/World/allegro*/allegro_mount", "/World/AnchorXform")
controller = HandBase(grasp_profile["articulation_root"], "/World/AnchorXform")
controller.grasp_profile = grasp_profile["offset"]
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath(grasp_profile["robot_path"])
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep leanrning model
from exp.model import load_vision_model
model = load_vision_model(model_path = MODEL_PATH, model_name = "fasterrcnn_resnet50_fpn")
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[4:]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
# randomize color
# reset look in scene
mat_look_prim = world.scene.stage.GetPrimAtPath(LOOKS_PATH)
if mat_look_prim:
omni.kit.commands.execute("DeletePrims", paths=[LOOKS_PATH])
world.step(render = False)
scene_instr = SceneInstructor()
scene_instr.analysis()
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
prim_random_color(handle_path_str)
world.scene.add(mobility_obj)
world.reset()
world.render()
world.render()
image_array =env.get_image(return_array=True)
if SHOW_IMAGE:
world.render()
env.get_image().show()
scene_instr.model = model
scene_instr.predict_bounding_boxes(image_array[:,:,:3])
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# if no valid predicted boundbox
if not scene_instr.is_pred_valid:
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX}, invalid prediction\n")
world.scene.remove_object(mobility_obj_name)
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
continue
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
# handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
# print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
the_box = scene_instr.get_box_from_desc(v_desc, h_desc)
handle_direction = "horizontal" if (the_box[2] - the_box[0]) > (the_box[3] - the_box[1]) else "vertical"
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", graps_pos, grasp_rot )
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(500):
world.step(render=SHOW_IMAGE)
print("move to handle")
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=SHOW_IMAGE)
# close finger
print("close finger")
finger_pos = grasp_profile["finger_pos"]
if ROBOT_NAME == "allegro":
for i in range(120):
controller.robots.set_joint_position_targets(finger_pos * i / 120) #
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for _ in range(100):
finger_pos -= 0.01
controller.robots.set_joint_position_targets(finger_pos)
pos = np.clip(finger_pos, 0, 4)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
dof_pos = finger_pos
for i in range(60):
# thumb
dof_pos[6] += 0.01
dof_pos[11] += 0.02
# dof_pos[16] += 0.01
dof_pos[21] += -0.01
dof_pos[7] += 0.01
dof_pos[8] += 0.01
dof_pos[9] += 0.01
# dof_pos[14] += 0.01
dof_pos[12] += 0.01
dof_pos[13] += 0.01
dof_pos[14] += 0.01
dof_pos[17] += 0.01
dof_pos[18] += 0.01
dof_pos[19] += 0.01
# pinky
dof_pos[15] += 0.01
dof_pos[20] += 0.01
dof_pos[22] += 0.01
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "skeletonhand":
# close finger
for i in range(120):
i = i / 4
dof_pos = np.array([
[ i * 0.03, i * 0.04,
i * 0.01, -i * 0.04,
i * 0.005, -i * 0.04,
-i * 0.02, -i * 0.04,
-i * 0.01, -i * 0.04,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
],
])
# pos = np.random.randn(2,25)
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=SHOW_IMAGE)
print("pull out")
# pull out
if ROBOT_NAME == "allegro":
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "frankahand":
for i in range(300):
graps_pos[...,0] -= 0.001
finger_pos += np.sqrt(i) * 1e-4
# print(pos)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
finger_pos = np.clip(finger_pos, 0, 4)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "shadowhand":
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
dof_pos *= 0.997
# print(dof_pos)
world.step(render=SHOW_IMAGE)
elif ROBOT_NAME == "skeletonhand":
# pull out
for i in range(200):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
dof_pos /= 1.5
# pull out furthur
for i in range(100):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{v_desc}|{h_desc}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 11,996 | Python | 34.81194 | 175 | 0.569273 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/allegro_experiment_learning copy.py | import numpy as np
from PIL import Image
## Get object indexes
# import os
# OBJ_INDEX_LIST = []
# for i in os.listdir("/home/yizhou/Research/temp/"):
# if str(i).isdigit():
# OBJ_INDEX_LIST.append(i)
# print(sorted(OBJ_INDEX_LIST, key = lambda x: int(x)))
from exp.params import OBJ_INDEX_LIST
SUCESS_PERCENTAGE = 20
result_file_path = "/home/yizhou/Research/Data/allegro_exp_learning823.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn823.pth"
SHOW_IMAGE = False
import getpass
user = getpass.getuser()
usd_path = f"omniverse://localhost/Users/yizhou/scene1.usd"
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from hand_env import HandEnv
from hand_common import HandBase
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv()
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
# controller = HandEnv("/World/allegro*/allegro_mount", "/World/AnchorXform")
controller = HandBase("/World/allegro*/allegro_mount", "/World/AnchorXform")
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath("/World/allegro")
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep leanrning model
from exp.model import load_vision_model
model = load_vision_model(model_path = MODEL_PATH, model_name = "fasterrcnn_resnet50_fpn")
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[:1]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
world.scene.add(mobility_obj)
world.reset()
world.render()
scene_instr = SceneInstructor()
scene_instr.analysis()
# export data and load model
# scene_instr.output_path = "/home/yizhou/Research/temp0/"
# scene_instr.export_data()
# omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
world.render()
image_array =env.get_image(return_array=True)
scene_instr.model = model
scene_instr.predict_bounding_boxes(image_array[:,:,:3])
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# if no valid predicted boundbox
if not scene_instr.is_pred_valid:
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX}, invalid prediction\n")
world.scene.remove_object(mobility_obj_name)
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
continue
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
# handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
the_box = scene_instr.get_box_from_desc(v_desc, h_desc)
handle_direction = "horizontal" if (the_box[2] - the_box[0]) > (the_box[3] - the_box[1]) else "vertical"
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", graps_pos, grasp_rot )
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(500):
world.step(render=SHOW_IMAGE)
print("move to handle")
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=SHOW_IMAGE)
print("close finger")
# close finger
finger_pos = np.array([
[
0, 0, 0, np.pi/2 + np.pi/18,
np.pi/5, np.pi/5, np.pi/5, 0,
np.pi/5, np.pi/5, np.pi/5, np.pi/6,
np.pi/5, np.pi/5, np.pi/5, np.pi/6,
],
])
for i in range(120):
controller.robots.set_joint_position_targets(finger_pos * i / 120) #
world.step(render=SHOW_IMAGE)
print("pull out")
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(finger_pos)
world.step(render=SHOW_IMAGE)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{v_desc}|{h_desc}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 7,820 | Python | 34.071749 | 175 | 0.6289 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/model.py | import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from exp.learning.custom_cliport import CustomCliport
def load_vision_model(
model_path = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn.pth",
model_name = "fasterrcnn_resnet50_fpn",
clip_text_feature_path = "/home/yizhou/Research/OpenAnyDrawer/learning/text2clip_feature.json"
):
# load a model; pre-trained on COCO
if model_name == "fasterrcnn_resnet50_fpn":
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
num_classes = 2 # 1 class (wheat) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
model.load_state_dict(torch.load(model_path))
model.eval()
elif model_name == "custom_cliport":
model = CustomCliport(clip_text_feature_path = clip_text_feature_path)
model.load_state_dict(torch.load(model_path))
model = model.to(model.device)
model.set_prediction_mode()
model.eval()
return model
| 1,331 | Python | 34.052631 | 98 | 0.706236 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/frankahand_experiment.py | import numpy as np
from PIL import Image
## Get object indexes
# import os
# OBJ_INDEX_LIST = []
# for i in os.listdir("/home/yizhou/Research/temp/"):
# if str(i).isdigit():
# OBJ_INDEX_LIST.append(i)
# print(sorted(OBJ_INDEX_LIST, key = lambda x: int(x)))
from exp.params import OBJ_INDEX_LIST
SUCESS_PERCENTAGE = 20
result_file_path = "/home/yizhou/Research/Data/frankahand_exp.txt"
SHOW_IMAGE = False
import getpass
user = getpass.getuser()
from omni.isaac.kit import SimulationApp
# "/home/yizhou/Research/OpenAnyDrawer/scene0.usd" #
usd_path = f"omniverse://localhost/Users/{user}/scene4.usd"
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# custom import
from open_env import OpenEnv
from franka.gripper import GripperHandEnv
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
controller = GripperHandEnv("/World/Franka/panda_link8", "/World/AnchorXform")
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
if SHOW_IMAGE:
world.render()
env.get_image()
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST[1:]:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
world.scene.add(mobility_obj)
world.reset()
world.render()
scene_instr = SceneInstructor()
scene_instr.analysis()
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## SOLUTION ##############################
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
verticle = handle_direction == "horizontal")
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(200):
world.step(render=False)
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=False)
# close
pos = np.array([[0.0, 0.0]])
for _ in range(100):
pos -= 0.01
controller.robots.set_joint_position_targets(pos)
world.step(render=False)
# pull out
for i in range(300):
graps_pos[...,0] -= 0.001
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(pos)
pos += 0.015
world.step(render=False)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 5,338 | Python | 32.36875 | 152 | 0.628138 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/exp/humanhand_experiment_learning.py | import numpy as np
from PIL import Image
## Get object indexes
# import os
# OBJ_INDEX_LIST = []
# for i in os.listdir("/home/yizhou/Research/temp/"):
# if str(i).isdigit():
# OBJ_INDEX_LIST.append(i)
# print(sorted(OBJ_INDEX_LIST, key = lambda x: int(x)))
from exp.params import OBJ_INDEX_LIST
SUCESS_PERCENTAGE = 20
result_file_path = "/home/yizhou/Research/Data/humanhand_exp_learning.txt"
MODEL_PATH = "/home/yizhou/Research/temp0/fasterrcnn_resnet50_fpn.pth"
SHOW_IMAGE = False
import getpass
user = getpass.getuser()
from omni.isaac.kit import SimulationApp
# "/home/yizhou/Research/OpenAnyDrawer/scene0.usd" #
usd_path = f"omniverse://localhost/Users/{user}/scene3.usd"
simulation_app = SimulationApp({"headless": True, "open_usd": usd_path, "livesync_usd": usd_path})
# world
import omni
from omni.isaac.core import World
world = World()
# reset scene
mobility_prim = world.scene.stage.GetPrimAtPath("/World/Game/mobility")
if mobility_prim:
omni.kit.commands.execute("DeletePrims", paths=["/World/Game/mobility"])
# reset scene
replicator_prim = world.scene.stage.GetPrimAtPath("/Replicator")
if replicator_prim:
omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
# custom import
from open_env import OpenEnv
from hand.hand_env import HumanHandEnv
from task.checker import TaskChecker
from task.instructor import SceneInstructor
from omni.isaac.core.prims.xform_prim import XFormPrim
env = OpenEnv()
env.add_camera()
env.setup_viewport()
# env = HandEnv("/World/Hand/Bones/l_carpal_mid", "/World/Hand*/Bones/l_thumbSkeleton_grp/l_distalThumb_mid")
controller = HumanHandEnv("/World/Hand/Bones/l_carpal_mid", "/World/AnchorXform")
# init
world.reset()
controller.start()
world.scene.add(controller.robots)
# hide robot
hand_prim = world.scene.stage.GetPrimAtPath("/World/Hand")
hand_prim.GetAttribute('visibility').Set('invisible')
if SHOW_IMAGE:
world.render()
env.get_image()
# load deep leanrning model
from exp.model import load_vision_model
model = load_vision_model(model_path = MODEL_PATH, model_name = "fasterrcnn_resnet50_fpn")
# iterate object index
for OBJ_INDEX in OBJ_INDEX_LIST:
OBJ_INDEX = int(OBJ_INDEX)
env.add_object(OBJ_INDEX, scale = 0.1)
mobility_obj = XFormPrim("/World/Game/mobility")
mobility_obj_name = mobility_obj.name
world.scene.add(mobility_obj)
world.reset()
world.render()
scene_instr = SceneInstructor()
scene_instr.analysis()
# export data and load model
# scene_instr.output_path = "/home/yizhou/Research/temp0/"
# scene_instr.export_data()
# omni.kit.commands.execute("DeletePrims", paths=["/Replicator"])
world.render()
image_array =env.get_image(return_array=True)
scene_instr.model = model
scene_instr.predict_bounding_boxes(image_array[:,:,:3])
# if not valid
if not scene_instr.is_obj_valid:
print("object not valid: ", OBJ_INDEX)
simulation_app.close()
exit()
# if no valid predicted boundbox
if not scene_instr.is_pred_valid:
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX}, invalid prediction\n")
world.scene.remove_object(mobility_obj_name)
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
continue
# iterate handle index
handle_num = len(list(scene_instr.valid_handle_list.keys()))
for HANDLE_INDEX in range(handle_num):
handle_path_str = list(scene_instr.valid_handle_list.keys())[HANDLE_INDEX]
handle_joint_type = scene_instr.valid_handle_list[handle_path_str]["joint_type"]
handle_joint = scene_instr.valid_handle_list[handle_path_str]["joint"]
handle_rel_direciton = scene_instr.valid_handle_list[handle_path_str]["relative_to_game_center"]
# handle_direction = scene_instr.valid_handle_list[handle_path_str]["direction"]
# Task
print("handle_path_str, handle_joint_type, handle_joint, rel_direction", handle_path_str, handle_joint_type, handle_joint, handle_rel_direciton)
task_checker = TaskChecker("mobility", handle_joint, handle_joint_type, IS_RUNTIME=True)
################################################## LEARNING SOLUTION ##############################
v_desc = scene_instr.valid_handle_list[handle_path_str]["vertical_description"]
h_desc = scene_instr.valid_handle_list[handle_path_str]["horizontal_description"]
the_box = scene_instr.get_box_from_desc(v_desc, h_desc)
handle_direction = "horizontal" if (the_box[2] - the_box[0]) > (the_box[3] - the_box[1]) else "vertical"
# init
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(60):
world.step() # wait some time
# get grasp location, if handle is horizontal, gripper should be vertical
# graps_pos, grasp_rot = controller.calculate_grasp_location(keyword = handle_path_str,
# verticle = handle_direction == "horizontal")
graps_pos, grasp_rot = controller.calculate_grasp_location_from_pred_box(the_box, verticle= handle_direction == "horizontal")
print("graps_pos, grasp_rot ", graps_pos, grasp_rot )
# move close to handle
graps_pos[...,0] -= 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(500):
world.step(render=SHOW_IMAGE)
print("move to handle")
# move to handle
graps_pos[...,0] += 0.1
controller.xforms.set_world_poses(graps_pos, grasp_rot)
for _ in range(100):
world.step(render=SHOW_IMAGE)
print("close finger")
# close finger
for i in range(100):
i = i / 5
dof_pos = np.array([
[ i * 0.03, i * 0.04,
i * 0.01, -i * 0.04,
i * 0.005, -i * 0.04,
-i * 0.02, -i * 0.04,
-i * 0.01, -i * 0.04,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
-i * 0.02, -i * 0.03, -i * 0.03, -i * 0.03, -i * 0.03,
],
])
# pos = np.random.randn(2,25)
controller.robots.set_joint_position_targets(dof_pos) #
world.step(render=SHOW_IMAGE)
# pull out
for i in range(200):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
dof_pos /= 1.5
# pull out furthur
for i in range(100):
graps_pos[...,0] -= 0.001
# env.robots.set_world_poses(graps_pos, grasp_rot)
controller.xforms.set_world_poses(graps_pos, grasp_rot)
controller.robots.set_joint_position_targets(dof_pos)
world.step(render=SHOW_IMAGE)
# check task sucess
open_ratio = task_checker.joint_checker.compute_percentage()
if handle_joint_type == "PhysicsRevoluteJoint": # open a door the upper limit may reach 180 degree
open_ratio *= 2
task_success = open_ratio > SUCESS_PERCENTAGE
print("open_ratio, task_success", open_ratio, task_success)
with open(result_file_path, "a") as f:
f.write(f"{OBJ_INDEX},{HANDLE_INDEX},{handle_path_str},{handle_joint_type},{handle_joint},{task_success},{open_ratio},{graps_pos},{grasp_rot},{v_desc}|{h_desc}\n")
if SHOW_IMAGE:
world.render()
env.get_image().show()
world.reset()
controller.xforms.set_world_poses(positions=np.array([[0,0,0]]), orientations = np.array([[1, 0, 0, 0]])) # WXYZ
for _ in range(30):
world.step()
# close object
world.scene.remove_object(mobility_obj_name)
world.render()
simulation_app.close()
| 8,296 | Python | 34.609442 | 175 | 0.612946 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/render/utils.py |
import os
from typing import Union, Tuple, Dict, List, Callable
import omni.usd
import omni.kit.commands
from pxr import Sdf, UsdShade, Usd, Gf
import numpy as np
LOOKS_PATH = "/World/RLooks"
def material_omnipbr(
prim_path_str,
diffuse: Tuple[float] = None,
diffuse_texture: str = None,
roughness: float = None,
roughness_texture: str = None,
metallic: float = None,
metallic_texture: str = None,
specular: float = None,
emissive_color: Tuple[float] = None,
emissive_texture: str = None,
emissive_intensity: float = 0.0,
project_uvw: bool = False,
):
stage = omni.usd.get_context().get_stage()
mdl = "OmniPBR.mdl"
mtl_name, _ = os.path.splitext(mdl)
if not stage.GetPrimAtPath(LOOKS_PATH):
stage.DefinePrim(LOOKS_PATH, "Scope")
prim_path = omni.usd.get_stage_next_free_path(stage, f"{LOOKS_PATH}/{mdl.split('.')[0]}", False)
omni.kit.commands.execute(
"CreateMdlMaterialPrim", mtl_url=mdl, mtl_name=mtl_name, mtl_path=prim_path, select_new_prim=False
)
shader = UsdShade.Shader(omni.usd.get_shader_from_material(stage.GetPrimAtPath(prim_path), True))
shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f)
shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("reflection_roughness_constant", Sdf.ValueTypeNames.Float)
shader.CreateInput("reflectionroughness_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("reflection_roughness_texture_influence", Sdf.ValueTypeNames.Float)
shader.CreateInput("metallic_constant", Sdf.ValueTypeNames.Float)
shader.CreateInput("metallic_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("metallic_texture_influence", Sdf.ValueTypeNames.Float)
shader.CreateInput("specular_level", Sdf.ValueTypeNames.Float)
shader.CreateInput("enable_emission", Sdf.ValueTypeNames.Bool)
shader.CreateInput("emissive_color", Sdf.ValueTypeNames.Color3f)
shader.CreateInput("emissive_color_texture", Sdf.ValueTypeNames.Asset)
shader.CreateInput("emissive_intensity", Sdf.ValueTypeNames.Float)
shader.CreateInput("project_uvw", Sdf.ValueTypeNames.Bool)
enable_emission = emissive_intensity != 0.0
roughness_texture_influence = float(roughness_texture is not None)
metallic_texture_influence = float(roughness_texture is not None)
prim = stage.GetPrimAtPath(prim_path)
properties = {
"diffuse_color_constant": diffuse,
"diffuse_texture": diffuse_texture,
"reflection_roughness_constant": roughness,
"reflectionroughness_texture": roughness_texture,
"reflection_roughness_texture_influence": roughness_texture_influence,
"metallic_constant": metallic,
"metallic_texture": metallic_texture,
"metallic_texture_influence": metallic_texture_influence,
"specular_level": specular,
"enable_emission": enable_emission,
"emissive_color": emissive_color,
"emissive_color_texture": emissive_texture,
"emissive_intensity": emissive_intensity,
"project_uvw": project_uvw,
}
for attribute, attribute_value in properties.items():
if attribute_value is None:
continue
if UsdShade.Material(prim):
shader = UsdShade.Shader(omni.usd.get_shader_from_material(prim, True))
shader.GetInput(attribute).Set(attribute_value)
else:
prim.GetAttribute(attribute).Set(attribute_value)
omni.kit.commands.execute(
"BindMaterialCommand",
prim_path=prim_path_str,
material_path=prim.GetPath().pathString,
strength=UsdShade.Tokens.strongerThanDescendants,
)
def prim_random_color(prim_path_str):
"""
Randomize color for prim at path
"""
diffuse = Gf.Vec3f(np.random.rand(), np.random.rand(), np.random.rand())
material_omnipbr(prim_path_str, diffuse = diffuse)
# # test
# prim_random_color("/World/Cube")
# print("test random shader") | 4,031 | Python | 35.654545 | 106 | 0.694617 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/render/offline_rendering.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# ~/.local/share/ov/pkg/isaac_sim-2022.1.0/python.sh
"""Generate offline synthetic dataset
"""
from omni.isaac.kit import SimulationApp
import os
import carb
# Set rendering parameters and create an instance of kit
CONFIG = {"renderer": "RayTracedLighting", "headless": True,
"width": 256, "height": 256, "num_frames": 5}
kit = SimulationApp(launch_config=CONFIG)
from omni.isaac.core import World
world = World()
from omni.isaac.core.prims.xform_prim import XFormPrim
from open_env import OpenEnv
from task.instructor import SceneInstructor
from exp.params import OBJ_INDEX_LIST, ALL_SEMANTIC_TYPES
env = OpenEnv()
# we will be using the replicator library
import omni.replicator.core as rep
# This allows us to run replicator, which will update the random
# parameters and save out the data for as many frames as listed
def run_orchestrator():
rep.orchestrator.run()
# Wait until started
while not rep.orchestrator.get_is_started():
kit.update()
# Wait until stopped
while rep.orchestrator.get_is_started():
kit.update()
rep.BackendDispatch.wait_until_done()
for i in OBJ_INDEX_LIST[3:]:
print("rendering object id:", i)
i = int(i)
env.add_object(i, scale = 0.1)
game_obj = XFormPrim("/World/Game")
game_obj_name = game_obj.name
world.scene.add(game_obj)
scene_instr = SceneInstructor()
scene_instr.output_path = "/home/yizhou/Research/temp1"
scene_instr.analysis()
scene_instr.add_semantic_to_handle()
if scene_instr.is_obj_valid:
with rep.new_layer():
camera = rep.create.camera(position=(-10 * scene_instr.scale, 0, 5 * scene_instr.scale), rotation=(90, 0, -90))
render_product = rep.create.render_product(camera, (256, 256))
# Initialize and attach writer
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize( output_dir=os.path.join(scene_instr.output_path, f"{i}"), rgb=True, bounding_box_2d_tight=True)
writer.attach([render_product])
light_group = rep.create.group(["/World/defaultLight"])
shapes = rep.get.prims(semantics=[('class', i) for i in ALL_SEMANTIC_TYPES])
mats = rep.create.material_omnipbr(diffuse=rep.distribution.uniform((0,0,0), (1,1,1)), count=20)
with rep.trigger.on_frame(num_frames=CONFIG["num_frames"]):
with camera:
rep.modify.pose(
position=rep.distribution.uniform((-1.5, -0.2, 0.5), (-1, 0.2, 0.5)),
rotation=(90, 0, -90),
)
# # Randomize light colors
# with light_group:
# rep.modify.attribute("color", rep.distribution.uniform((0.1, 0.1, 0.1), (1.0, 1.0, 1.0)))
# rep.modify.pose(
# position=rep.distribution.uniform((0, -45, 90), (0, 0, 90))
# )
# randomize
with shapes:
rep.randomizer.materials(mats)
run_orchestrator()
world.scene.remove_object(game_obj_name)
kit.update()
kit.close()
| 3,621 | Python | 32.537037 | 126 | 0.632422 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/render/param.py | NECLEUS_MATERIALS = | 20 | Python | 19.99998 | 20 | 0.8 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/instructor.py | # instructions as language
import carb
import omni
import os
import torch
# try:
# import cv2
# except:
# omni.kit.pipapi.install("opencv-python")
# import cv2
import numpy as np
from pxr import UsdPhysics, Gf, UsdGeom
from task.utils import *
import omni.kit.viewport_widgets_manager as wm
from omni import ui
from omni.isaac.core.utils.semantics import add_update_semantics, remove_all_semantics
import omni.replicator.core as rep
CAMERA_WIDGET_STYLING = {
"Rectangle::background": {"background_color": 0x7F808080, "border_radius": 5}
}
class LabelWidget(wm.WidgetProvider):
def __init__(self, text_list:list):
self.text_list = text_list
def build_widget(self, window):
with ui.ZStack(width=0, height=0, style=CAMERA_WIDGET_STYLING):
ui.Rectangle(name="background")
with ui.VStack(width=0, height=0):
ui.Spacer(height=2)
for text in self.text_list:
ui.Label(text, width=0, height=0, name="", style={"color": "darkorange"})
class SceneInstructor():
def __init__(self) -> None:
# constant
self.long_handle_ratio = 3 # ratio to determin the long handle
self.short_handle_ratio = 1.5 # ratio to determin the small handle
self.spatial_desc_tolerance = 0.05 # spatial description
# output path
self.output_path = "/home/yizhou/Research/temp"
self.reset()
def reset(self):
# scene
self.stage = omni.usd.get_context().get_stage()
# knowledge
self.handle_knowledge = {}
self.joint_knowledge = {"PhysicsRevoluteJoint":[], "PhysicsPrismaticJoint":[], "PhysicsFixedJoint": []}
# constant
self.scale = 0.1 # object scale
self.is_obj_valid = True # valid object scene
# pred
self.pred_boxes = None
self.is_pred_valid = True # Prediction valid
####################################################################################
############################ analysis ###############################################
####################################################################################
def analysis(self):
self.analysis_game()
self.analysis_handle_primary()
self.analysis_cabinet_type()
self.analysis_spatial_rel()
def analysis_game(self):
"""
Analysis global game information
"""
bboxes = get_bounding_box("/World/Game/mobility")
self.game_center = 0.5 * (bboxes[0] + bboxes[1])
def analysis_handle_primary(self):
"""
Analysis handle to get the positions
"""
keyword = "handle_"
prim_list = list(self.stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# get basic information
for prim in prim_list:
prim_path_str = prim.GetPath().pathString
handle_num = prim_path_str.split("/")[-1].split("_")[-1]
# get bounding boxes
bboxes = get_bounding_box(prim_path_str)
center = 0.5 * (bboxes[0] + bboxes[1])
scale = (bboxes[1][0] - bboxes[0][0], bboxes[1][1] - bboxes[0][1], bboxes[1][2] - bboxes[0][2])
size = scale[0] * scale[1] * scale[2]
size_type = self.get_handle_type_from_scale(scale)
direction = "horizontal" if scale[1] > scale[2] else "vertical"
relative_to_game_center = "left" if center[1] >= self.game_center[1] else "right"
self.handle_knowledge[prim_path_str] = {
"num": handle_num,
"center": center,
"relative_to_game_center": relative_to_game_center,
"bboxes": bboxes,
"scale": scale,
"size": size,
"size_type": size_type,
"direction": direction,
"overlap_with": [],
"overlap_with_longer": False,
"joint_type": "",
}
# get intersection
for i in range(len(prim_list)):
path_str1 = prim_list[i].GetPath().pathString
bboxes1 = self.handle_knowledge[path_str1]["bboxes"]
for j in range(i + 1, len(prim_list)):
path_str2 = prim_list[j].GetPath().pathString
bboxes2 = self.handle_knowledge[path_str2]["bboxes"]
if bboxes_overlap(bboxes1, bboxes2):
overlap_with1 = self.handle_knowledge[path_str1]["overlap_with"]
overlap_with1.append(path_str2)
overlap_with2 = self.handle_knowledge[path_str2]["overlap_with"]
overlap_with2.append(path_str1)
if max(self.handle_knowledge[path_str1]["scale"]) > max(self.handle_knowledge[path_str2]["scale"]):
self.handle_knowledge[path_str2]["overlap_with_longer"] = True
else:
self.handle_knowledge[path_str1]["overlap_with_longer"] = True
def analysis_cabinet_type(self):
# get drawer/door from joint type
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if "joint_" in item.GetPath().pathString]
# get joint knowledge
for prim in prim_list:
# print("type", prim, prim.GetTypeName())
joint = UsdPhysics.Joint.Get(self.stage, prim.GetPath())
assert joint, f"Not a joint? Check model {prim.GetPath().pathString}"
b1paths = joint.GetBody1Rel().GetTargets()
# print("b1paths", prim.GetTypeName(), b1paths)
self.joint_knowledge[prim.GetTypeName()].append([b1paths[0].pathString, prim.GetPath().pathString])
# update joint type
for handle_path_str in self.handle_knowledge:
handle_know = self.handle_knowledge[handle_path_str]
for joint_type in self.joint_knowledge:
for joint_body_path_str, joint_prim_path_str in self.joint_knowledge[joint_type]:
if joint_body_path_str in handle_path_str:
handle_know["joint_type"] = joint_type
handle_know["joint_path_str"] = joint_prim_path_str
break
# get revolute/linear handles
self.valid_handle_list = {}
# if it doesn't overlap with any larger handle, it is a true handle
for handle_path_str in self.handle_knowledge:
if not self.handle_knowledge[handle_path_str]["overlap_with_longer"]:
if self.handle_knowledge[handle_path_str]["joint_type"] == "PhysicsRevoluteJoint":
self.valid_handle_list[handle_path_str] = {
"joint_type": "PhysicsRevoluteJoint",
"cabinet_type": "door",
"vertical_description": "",
"horizontal_description": "",
}
if self.handle_knowledge[handle_path_str]["joint_type"] == "PhysicsPrismaticJoint":
self.valid_handle_list[handle_path_str] = {
"joint_type": "PhysicsPrismaticJoint",
"cabinet_type": "drawer",
"vertical_description": "",
"horizontal_description": "",
}
# other import information
self.valid_handle_list[handle_path_str]["joint"] = self.handle_knowledge[handle_path_str]["joint_path_str"].split("/")[-1]
self.valid_handle_list[handle_path_str]["relative_to_game_center"] = self.handle_knowledge[handle_path_str]["relative_to_game_center"]
self.valid_handle_list[handle_path_str]["direction"] = self.handle_knowledge[handle_path_str]["direction"]
def analysis_spatial_rel(self):
"""
Analysis the spatial relationship of handle
: joint_type -> vertical -> horizontal
"""
print("analysis_spatial_rel: ", self.valid_handle_list)
if len(self.valid_handle_list) == 0:
carb.log_warn("No handle in the scene")
self.is_obj_valid = False
return
# if only one joint, no need to describe from spatial layout
if len(self.valid_handle_list) == 1:
self.is_obj_valid = True
return
# get vertical and horizontal centers
v_centers = []
h_centers = []
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
center_z = handle_center[2]
center_y = handle_center[1]
is_v_center_list = any([abs(z - center_z) < self.spatial_desc_tolerance for z in v_centers])
is_h_center_list = any([abs(y - center_y) < self.spatial_desc_tolerance for y in h_centers])
if not is_v_center_list:
v_centers.append(center_z)
if not is_h_center_list:
h_centers.append(center_y)
v_centers = sorted(v_centers)
h_centers = sorted(h_centers)
# vertical
if len(v_centers) == 1:
pass
elif len(v_centers) == 2:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
elif len(v_centers) == 3:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
elif abs(handle_center[2] - v_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "middle"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
elif len(v_centers) == 4:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[2] - v_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "bottom"
elif abs(handle_center[2] - v_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "second-bottom"
elif abs(handle_center[2] - v_centers[2]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["vertical_description"] = "second-top"
else:
self.valid_handle_list[handle_path_str]["vertical_description"] = "top"
else:
carb.log_warn("too many handles align vertically!")
self.is_obj_valid = False
# horizontal
if len(h_centers) == 1:
pass
elif len(h_centers) == 2:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
elif len(h_centers) == 3:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
elif abs(handle_center[1] - h_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "middle"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
elif len(h_centers) == 4:
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
if abs(handle_center[1] - h_centers[0]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "right"
elif abs(handle_center[1] - h_centers[1]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "second-right"
elif abs(handle_center[1] - h_centers[2]) < self.spatial_desc_tolerance:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "second-left"
else:
self.valid_handle_list[handle_path_str]["horizontal_description"] = "left"
else:
carb.log_warn("too many handles align horizontally!")
self.is_obj_valid = False
# print("valid_handle_list: ", self.valid_handle_list)
# print("knowledge", self.handle_knowledge)
def get_handle_type_from_scale(self, scale):
"""
Get a general shape for the handle
"""
if max(scale) / min(scale) > self.long_handle_ratio:
return "long"
elif max(scale) / min(scale) < self.short_handle_ratio:
return "short"
else:
return "middle?"
####################################################################################
############################ UI ###############################################
####################################################################################
def build_ui(self, desc:list, gui_path:str, gui_location):
gui = self.stage.GetPrimAtPath(gui_path)
if not gui:
gui = UsdGeom.Xform.Define(self.stage, gui_path)
gui.AddTranslateOp().Set(gui_location)
self.wiget_id = wm.add_widget(gui_path, LabelWidget(desc), wm.WidgetAlignment.TOP)
def build_handle_desc_ui(self):
"""
build hud for handle
"""
for handle_path_str in self.valid_handle_list:
handle_center = self.handle_knowledge[handle_path_str]["center"]
handle_num = self.handle_knowledge[handle_path_str]["num"]
gui_location = handle_center
gui_path = f"/World/GUI/handle_{handle_num}"
h_desc = self.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = self.valid_handle_list[handle_path_str]["vertical_description"]
cabinet_type = self.valid_handle_list[handle_path_str]["cabinet_type"]
self.build_ui([f"{cabinet_type}", "handle_" + handle_num, f"{v_desc}/{h_desc}"], gui_path, gui_location)
######################################## semantic #####################################################
def add_semantic_to_handle(self):
for handle_path_str in self.valid_handle_list:
prim = self.stage.GetPrimAtPath(handle_path_str)
h_desc = self.valid_handle_list[handle_path_str]["horizontal_description"]
v_desc = self.valid_handle_list[handle_path_str]["vertical_description"]
cabinet_type = self.valid_handle_list[handle_path_str]["cabinet_type"]
# add_update_semantics(prim, "handle")
add_update_semantics(prim, semantic_label = f"{v_desc}_{h_desc}_{cabinet_type}")
def export_data(self):
"""
Export RGB and Bounding box info to file
"""
with rep.new_layer():
camera = rep.create.camera(position=(-10 * self.scale, 0, 5 * self.scale), rotation=(90, 0, -90))
render_product = rep.create.render_product(camera, (256, 256))
# Initialize and attach writer
self.writer = rep.WriterRegistry.get("BasicWriter")
self.writer.initialize( output_dir=self.output_path, rgb=True, bounding_box_2d_tight=True)
self.writer.attach([render_product])
with rep.trigger.on_frame(num_frames=1):
pass
rep.orchestrator.run()
rep.BackendDispatch.wait_until_done()
# rep.orchestrator.preview()
# omni.kit.commands.execute("DeletePrims", paths=["/World/Game"])
def load_model(self):
"""
Load deep leanring model
"""
from exp.model import load_vision_model
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = load_vision_model().to(self.device)
print("successfully loaded model")
def predict_bounding_boxes(self, image, detection_threshold = 0.5):
"""
Predict bounding boxes
::params:
image: 255 rgb
"""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
image_arr = image / 255.0
images = [torch.tensor(image_arr).to(torch.float).permute(2,0,1).to(self.device )] # .to("cuda")
outputs = self.model(images)
# print("outputs", outputs)
boxes = outputs[0]['boxes'].data.cpu().numpy()
scores = outputs[0]['scores'].data.cpu().numpy()
# sort from max to min
inds = scores.argsort()[::-1]
boxes = boxes[inds]
# if no boxes!
if len(boxes) == 0:
self.is_pred_valid = False
return
select_boxes = boxes[scores >= detection_threshold].astype(np.int32)
# if no boxes?
if len(select_boxes) == 0:
select_boxes = boxes
# get world box positions
self.pred_boxes= [self.get_bbox_world_position(box) for box in select_boxes]
def get_bbox_world_position(self, box,
resolution = 256, D = -293, camera_pos = [-1, 0, 0.5], handle_x = 0.61857):
"""
Calculate the grasp location for the handle
box: [x_min, y_min, x_max, y_max] 2D boudning box in camera
resolution: camera resolution
D: depth of field
camera_pos: camera_position
handle_x: object offset
"""
w_min = box[0] - resolution / 2
w_max = box[2] - resolution / 2
h_min = box[1] - resolution / 2
h_max = box[3] - resolution / 2
y_max = (handle_x - camera_pos[0]) * w_min / D + camera_pos[1]
y_min = (handle_x - camera_pos[0]) * w_max / D + camera_pos[1]
z_max = (handle_x - camera_pos[0]) * h_min / D + camera_pos[2]
z_min = (handle_x - camera_pos[0]) * h_max / D + camera_pos[2]
return [y_min, z_min, y_max, z_max]
def get_box_from_desc(self, v_desc, h_desc):
"""
Get box from description
"""
# if no description, get bbox of the highest score
if v_desc == "" and h_desc == "":
return self.pred_boxes[0]
# if just one box
if len(self.pred_boxes) == 1:
return self.pred_boxes[0]
v_boxes = sorted(self.pred_boxes, key = lambda box: 0.5 * (box[1] + box[3]))
h_boxes = sorted(self.pred_boxes, key = lambda box: 0.5 * (box[0] + box[2]))
# only vertical relation
if h_desc == "":
if v_desc == "top":
return v_boxes[-1]
elif v_desc == "second top" or v_desc == "middle":
return v_boxes[-2]
if v_desc == "bottom":
return v_boxes[0]
elif v_desc == "second bottom" or v_desc == "middle":
return v_boxes[1]
# only horizontal relation
elif v_desc == "":
if h_desc == "left":
return h_boxes[-1]
elif h_desc == "second left" or h_desc == "middle":
return h_boxes[-2]
if h_desc == "right":
return h_boxes[0]
elif h_desc == "second right" or h_desc == "middle":
return h_boxes[1]
else: # have both description
if v_desc == "bottom" and h_desc == "left":
if v_boxes[0][0] > v_boxes[1][0]:
return v_boxes[0]
else:
return v_boxes[1]
elif v_desc == "bottom" and h_desc == "right":
if v_boxes[0][0] > v_boxes[1][0]:
return v_boxes[1]
else:
return v_boxes[0]
elif v_desc == "top" and h_desc == "left":
if v_boxes[-1][0] > v_boxes[-2][0]:
return v_boxes[-1]
else:
return v_boxes[-2]
elif v_desc == "top" and h_desc == "right":
if v_boxes[-1][0] > v_boxes[-2][0]:
return v_boxes[-2]
else:
return v_boxes[-1]
# TODO: unhandled situation
else:
return self.pred_boxes[0]
return self.pred_boxes[0]
| 21,907 | Python | 40.335849 | 151 | 0.532752 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/utils.py | import omni
from pxr import UsdGeom, Usd
def get_bounding_box(prim_path: str):
"""
Get the bounding box of a prim
"""
stage = omni.usd.get_context().get_stage()
purposes = [UsdGeom.Tokens.default_]
bboxcache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), purposes)
prim = stage.GetPrimAtPath(prim_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()]
return game_bboxes
def bboxes_overlap(bboxes1, bboxes2):
"""
To judge whether two bboxes overlap with each other
bboxes: [min (vec3), max (vec3)]
"""
return not ( bboxes1[0][0] > bboxes2[1][0] or # left
bboxes1[1][0] < bboxes2[0][0] or # right
bboxes1[0][1] > bboxes2[1][1] or # bottom
bboxes1[1][1] < bboxes2[0][1] or # up
bboxes1[0][2] > bboxes2[1][2] or # front
bboxes1[1][2] < bboxes2[0][2]) # back
def get_mesh_bboxes(self, keyword: str):
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
bboxes_list = []
for prim in prim_list:
bboxes = get_bounding_box(prim.GetPath().pathString)
bboxes_list.append(bboxes)
return bboxes_list | 1,461 | Python | 32.999999 | 119 | 0.607803 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/checker.py | # task check
import omni
from task.newJointCheck import JointCheck
class TaskChecker():
def __init__(self, target_obj, target_joint, joint_type, IS_RUNTIME = False) -> None:
self.target_obj =target_obj
self.target_joint = target_joint
self.joint_type = joint_type
self.target_prim_path = "/World/Game/" + self.target_obj
self.joint_checker = JointCheck(self.target_prim_path, self.target_joint)
self.init_value = 0.0 # from start
self.target_value = 0.25 # to target
# reverse joint direction check if necessary
if self.joint_type == "PhysicsRevoluteJoint":
self.check_joint_direction()
# other constant
self.total_step = 0
self.print_every = 30
self.checking_interval = 30
# register events
if not IS_RUNTIME:
self.create_task_callback()
def check_joint_direction(self):
"""
Check joint positive rotation to upper or negative rotation to lower
"""
is_upper = abs(self.joint_checker.upper) > abs(self.joint_checker.lower)
if not is_upper:
# if is lower, reverse init_value and target value
self.init_value = 1 - self.init_value if self.init_value != -1 else -1
self.target_value = 1 - self.target_value
################################### UPDATE ###########################################
def create_task_callback(self):
self.timeline = omni.timeline.get_timeline_interface()
stream = self.timeline.get_timeline_event_stream()
self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
# subscribe to Physics updates:
self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
self._on_physics_step
)
def _on_timeline_event(self, e):
"""
set up timeline event
"""
if e.type == int(omni.timeline.TimelineEventType.STOP):
self.it = 0
self.time = 0
self.reset()
def reset(self):
"""
Reset event
"""
self._physics_update_subscription = None
self._timeline_subscription = None
# self._setup_callbacks()
def _on_physics_step(self, dt):
self.start_checking()
def start_checking(self):
self.total_step += 1
if self.total_step % self.checking_interval == 0:
percentage = self.joint_checker.compute_percentage()
# log
if self.total_step % self.print_every == 0:
print("current: {:.1f}; target: {:.1f}; delta percentage: {:.1f}:".format(percentage, self.target_value * 100, self.target_value * 100 - percentage) )
if percentage / 100.0 > self.target_value:
print("success")
# self.timeline.pause()
| 2,981 | Python | 33.275862 | 166 | 0.568266 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/task/newJointCheck.py | from omni.isaac.dynamic_control import _dynamic_control
import omni
import math
class JointCheck():
def __init__(self, joint_prim, joint_name) -> None:
self.joint_name = joint_name
self.stage = omni.usd.get_context().get_stage()
self.prim_list = list(self.stage.TraverseAll())
self.prim_list = [ item for item in self.prim_list if joint_name in
item.GetPath().pathString and item.GetPath().pathString.startswith(joint_prim) and item.GetPath().pathString.endswith(joint_name)]
assert len(self.prim_list) == 1, "len of " + str(len(self.prim_list))
self.prim = self.prim_list[0]
self.type = self.prim.GetTypeName()
self.full_name = self.prim.GetPath().pathString
self.joint = self.stage.GetPrimAtPath(self.full_name)
# get joint upper and
self.upper = self.joint.GetAttribute("physics:upperLimit").Get()
self.lower = self.joint.GetAttribute("physics:lowerLimit").Get()
# need to compute this at the first step
self.initial_percentage = self.compute_percentage()
def compute_velocity(self):
# this function currently is not accurate, do not use it.
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
dof_vel = self.dc.get_dof_velocity(dof_ptr)
# dof_vel = self.dc.get_dof_velocity_target(dof_ptr)
if self.type == 'PhysicsPrismaticJoint':
from omni.isaac.core.utils.stage import get_stage_units
v = dof_vel * (get_stage_units() * 100) # in centimeters
print("units conversion: ", get_stage_units() * 100)
else:
v = math.degrees(dof_vel)
return v
def get_joint_link(self):
body0 = self.joint.GetRelationship("physics:body0").GetTargets()[0]
body1 = self.joint.GetRelationship("physics:body1").GetTargets()[0]
return body1
def set_velocity(self, velocity):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
if self.type == 'PhysicsPrismaticJoint':
from omni.isaac.core.utils.stage import get_stage_units
#velocity is in centimeters
v = velocity / (get_stage_units() * 100)
else:
v = math.radians(velocity)
self.dc.wake_up_articulation(self.art)
self.dc.set_dof_velocity(dof_ptr, velocity)
def compute_percentage(self):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
dof_pos = self.dc.get_dof_position(dof_ptr)
if self.type == 'PhysicsPrismaticJoint':
tmp = dof_pos
else:
tmp = math.degrees(dof_pos)
pertentage = (tmp - self.lower)/(self.upper - self.lower) * 100
# print("upper lower percentage", tmp, self.upper, self.lower, pertentage)
if pertentage > 100:
pertentage = 100
elif pertentage < 0:
pertentage = 0
return pertentage
def compute_distance(self):
return abs(self.compute_percentage() - self.initial_percentage)
def set_joint(self, percentage):
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.art = self.dc.get_articulation(self.full_name)
dof_ptr = self.dc.find_articulation_dof(self.art, self.joint_name)
upper = self.joint.GetAttribute("physics:upperLimit").Get()
lower = self.joint.GetAttribute("physics:lowerLimit").Get()
tmp = percentage / 100.0 *(upper-lower) + lower
if self.type == 'PhysicsPrismaticJoint':
dof_pos = tmp
else:
dof_pos = math.radians(tmp)
self.dc.wake_up_articulation(self.art)
self.dc.set_dof_position(dof_ptr, dof_pos)
#test cases
# check = JointCheck("/World/game/mobility_Door_8897","joint_1")
# check.set_velocity(0.001)
# check.compute_velocity()
# print(check.set_joint(50))
# check = JointCheck("/World/game/mobility_StorageFurniture_40417", "joint_3")
# print(check.compute_velocity()) | 4,466 | Python | 37.179487 | 142 | 0.62987 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/open/any/drawer/shadowhand/hand_env.py | import numpy as np
from numpy_utils import *
from utils import get_mesh_bboxes
from omni.isaac.core import World, SimulationContext
from omni.isaac.core.prims.xform_prim_view import XFormPrimView
from omni.isaac.core.robots.robot_view import RobotView
class ShadowHandEnv():
def __init__(self,
prim_paths_expr="",
xform_paths_expr="",
backend = "numpy",
device = None
) -> None:
# init hand helper
# self.hander_helper = HandHelper()
self.xform_paths_expr = xform_paths_expr
self.prim_paths_expr = prim_paths_expr
self.backend = backend
self.device = device
def start(self):
# simulation context
self.simlation_context = SimulationContext(backend=self.backend, device=self.device)
print("simlation context", SimulationContext.instance().backend, SimulationContext.instance().device)
# articulation
self.robots = RobotView(self.prim_paths_expr) # sim.create_articulation_view("/World/envs/*/humanoid/torso") #
self.robot_indices = self.robots._backend_utils.convert(np.arange(self.robots.count, dtype=np.int32), self.device)
self.num_envs = len(self.robot_indices)
print("num_envs", self.num_envs)
# initialize
self.robots.initialize()
self.robot_states = self.robots.get_world_poses()
self.dof_pos = self.robots.get_joint_positions()
self.initial_dof_pos = self.dof_pos
self.dof_vel = self.robots.get_joint_velocities()
self.initial_dof_vel = self.dof_vel
self.xforms = XFormPrimView(self.xform_paths_expr)
def calculate_grasp_location(self, keyword = "handle_", verticle = True, x_offset = 0.04):
"""
Calculate the grasp location for the handle
"""
bboxes_list = get_mesh_bboxes(keyword)
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = bboxes_list[0][0][0] #
center_list = [(e[1] + e[0]) / 2 for e in bboxes_list] # box center
if verticle:
grasp_list = [[min_x - x_offset, c[1], c[2] - 0.42] for c in center_list]
else:
grasp_list = [[min_x - x_offset, c[1] + 0.42, c[2]] for c in center_list]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [-0.5, 0.5, -0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
# rotation: 0, 0.70711, 0, 0.70711; 0, 90, 0
# rotation:[0.5, 0.5, 0.5, 0.5]
return graps_pos, grasp_rot
def move_to_target(self, goal_pos, goal_rot, finger = "thumb"):
"""
Move hand to target points
"""
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ
# get franka DOF states
dof_pos = self.robots.get_joint_positions()
# compute position and orientation error
pos_err = goal_pos - finger_pos
orn_err = orientation_error(goal_rot, finger_rot)
dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)
jacobians = self.robots._physics_view.get_jacobians()
# jacobian entries corresponding to correct finger
if finger == "thumb":
finger_index = 14
elif finger == "index":
finger_index = 15
elif finger == "middle":
finger_index = 16
elif finger == "pinky":
finger_index = 17
else: # ring
finger_index = 18
j_eef = jacobians[:, finger_index, :]
# solve damped least squares
j_eef_T = np.transpose(j_eef, (0, 2, 1))
d = 0.05 # damping term
lmbda = np.eye(6) * (d ** 2)
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(self.num_envs, -1)
# update position targets
pos_targets = dof_pos + u # * 0.3
return pos_targets
##################################################################################################
# -------------------------------------- Control ------------------------------------------------#
##################################################################################################
def move_finger_to_fast(self, target_pos, target_rot, world, finger = "thumb", max_step = 100):
"""
Quickly move the robot hands to the target position and rotation
"""
for i in range(max_step):
world.step(render=True)
# get end effector transforms
finger_pos, finger_rot = self.xforms.get_world_poses()
finger_rot = finger_rot[:,[1,2,3,0]] # WXYZ -> XYZW
print("finger_pos", finger_pos)
orient_error = quat_mul(target_rot[0], quat_conjugate(finger_rot[0]))
# print("orient_error", orient_error)
# if abs(orient_error[3] - 1) < 0.02 and \
# np.sqrt(orient_error[0]**2 + orient_error[1]**2 + orient_error[2]**2) < 0.02 and \
# np.sqrt(np.sum((target_pos[0] - finger_pos[0])**2)) < 0.01:
# print("Done rotation, position", finger_pos, finger_rot)
# return
u = self.move_to_target(target_pos, target_rot)
# u[:,[-2, -1]] = 0.05 if open_gripper else 0
self.robots.set_joint_position_targets(u)
print("Not Done rotation, position", finger_pos, finger_rot)
def calculate_grasp_location_from_pred_box(self, box, verticle = True, x_offset = 0.04):
"""
Calculate the grasp location for the handle
"""
# assert len(bboxes_list) == self.num_envs, "more than one handle!"
# get center and min x axis
min_x = 0.618
handle_y = 0.5 * (box[0] + box[2])
handle_z = 0.5 * (box[1] + box[3])
if verticle:
grasp_list = [[min_x - x_offset, handle_y, handle_z - 0.42]]
else:
grasp_list = [[min_x - x_offset, handle_y + 0.42, handle_z]]
graps_pos = np.array(grasp_list, dtype=np.float32)
base_rotation = [-0.5, 0.5, -0.5, 0.5] if verticle else [0, 0.70711, 0, 0.70711]
grasp_rot = np.array([base_rotation], dtype=np.float32)# XYZW
return graps_pos, grasp_rot | 6,489 | Python | 36.732558 | 122 | 0.542919 |
yizhouzhao/OpenAnyDrawer/open-any-drawer/exts/open.any.drawer/config/extension.toml | [package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Simple UI Extension Template"
description="The simplest python extension example. Use it as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "open.any.drawer"
| 798 | TOML | 26.551723 | 105 | 0.740602 |
lydd8888/kit-exts-uv_viewer/README.md | # UV viewer Extension

Demo:https://www.youtube.com/watch?v=deavDepvdlc
# About
This extension can show object's UV in viewport
This Extension is heavily inspried by Camera Reticle Extension
I create this entension mainly to check if my UV is right since Omniverse does not natively support UV check
Since it is fully Python, for complex model, it takes couple seconds to calculate the UV
# Adding Extensions
To add this extension to your Omniverse app:
Clone the extension from: https://github.com/lydd8888/kit-exts-uv_viewer
Go into: Extension Manager -> Gear Icon -> Setting
Add Local Link to Extension Search Path:```../uv_viewer_extension/kit-exts-uv_viewer/exts```
# App Link Setup
If `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps is installed script will select recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
| 1,416 | Markdown | 26.249999 | 258 | 0.753531 |
lydd8888/kit-exts-uv_viewer/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
lydd8888/kit-exts-uv_viewer/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
lydd8888/kit-exts-uv_viewer/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/constants.py | """Constants used by the CameraReticleExtension"""
DEFAULT_UI_PERCENTAGE = 50
| 79 | Python | 18.999995 | 50 | 0.772152 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/extension.py | import omni.ext
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport_window
from .viewport_scene import ViewportSceneInfo
from .uv_viewer import UvModel
from omni.ui import scene as sc
import carb
class UV_Viewer(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self):
super().__init__()
self.viewport_scene = None
def on_startup(self, ext_id: str) -> None:
viewport_window = get_active_viewport_window()
if viewport_window is not None:
uv_model = UvModel()
self.viewport_scene = ViewportSceneInfo(uv_model, viewport_window, ext_id)
def on_shutdown(self):
"""Called when the extension is shutting down."""
if self.viewport_scene:
self.viewport_scene.destroy()
self.viewport_scene = None | 982 | Python | 31.766666 | 119 | 0.669043 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/viewport_scene.py | from functools import partial
from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjInfoManipulator
from .uv_viewer import UvModel
import carb
from omni.ui import color as cl
import omni.kit.app
import omni.client
import threading
import os
from . import constants
class ViewportSceneInfo():
"""The scene view overlay
Build the Uv and Uv button on the given viewport window.
"""
def __init__(self, model: UvModel, viewport_window: ui.Window, ext_id: str) -> None:
"""
for check UV map changed
"""
self.filename = "D:/Amazon_Box_Stable_Diffusion/HoudiniUV/UV_Viewer_Extension/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/data/output.png"
self.previous_timestamp = None # Initialize the previous timestamp
"""
Overlay Constructor
Args:
viewport_window (Window): The viewport window to build the overlay on.
ext_id (str): The extension id.
"""
self.model = model
self.scene_view = None
self.viewport_window = viewport_window
self.ext_id = ext_id
self.on_window_changed()
self.previous_resolution = (None, None)
# Rebuild the overlay whenever the model change
self.model.add_model_changed_fn(self.build_uv_overlay)
# Rebuild the overlay whenever the viewport resolution changed
self.check_resolution_periodically()
def check_resolution_periodically(self):
self.check_resolution_change()
# Call this method every 1 second, for example
threading.Timer(0.1, self.check_resolution_periodically).start()
def check_resolution_change(self):
current_resolution = self.viewport_window.viewport_api.get_texture_resolution()
if current_resolution != self.previous_resolution:
self.build_uv_overlay()
self.previous_resolution = current_resolution
def on_window_changed(self, *args):
"""Update aspect ratio and rebuild overlay when viewport window changes."""
if self.viewport_window is None:
return
settings = carb.settings.get_settings()
fill = self.viewport_window.viewport_api.fill_frame
if fill:
width = self.viewport_window.frame.computed_width + 8
height = self.viewport_window.height
else:
width, height = self.viewport_window.viewport_api.resolution
self._aspect_ratio = width / height
self.model = self.get_model()
carb.log_info("build_overlay")
self.build_uv_overlay()
def get_aspect_ratio_flip_threshold(self):
"""Get magic number for aspect ratio policy.
Aspect ratio policy doesn't seem to swap exactly when window_aspect_ratio == window_texture_aspect_ratio.
This is a hack that approximates where the policy changes.
"""
return self.get_aspect_ratio()*0.95
def build_uv_overlay(self, *args):
# Create a unique franme for our SceneView
with self.viewport_window.get_frame(self.ext_id):
with ui.ZStack():
# Create a default SceneView (it has a default camera-model)
self.scene_view = sc.SceneView()
with self.scene_view.scene:
if self.model.uv_enabled.as_bool:
ObjInfoManipulator(viewport_window=self.viewport_window, model=self.get_model())
# Register the SceneView with the Viewport to get projection and view updates
# This is control
# self.viewport_window.viewport_api.add_scene_view(self.scene_view)
# Build UV Menu button
with ui.VStack():
ui.Spacer()
with ui.HStack(height=0):
ui.Spacer()
self.uv_menu = UvMenu(self.model)
def get_aspect_ratio(self):
return self._aspect_ratio
def get_model(self):
return self.model
def __del__(self):
self.destroy()
def destroy(self):
if self.scene_view:
# Empty the SceneView of any elements it may have
self.scene_view.scene.clear()
# un-register the SceneView from Viewport updates
if self.viewport_window:
self.viewport_window.viewport_api.remove_scene_view(self.scene_view)
# Remove our references to these objects
self.viewport_window = None
self.scene_view = None
class UvMenu():
"""The popup uv menu"""
def __init__(self, model: UvModel):
self.model = model
self.button = ui.Button("Show Uv", height = 0, width = 0, mouse_pressed_fn=self.show_uv_menu,
style={"margin": 10, "padding": 5, "color": cl.white})
self.uv_menu = None
def on_group_check_changed(self, safe_area_group, model):
"""Enables/disables safe area groups
When a safe area checkbox state changes, all the widgets of the respective
group should be enabled/disabled.
Args:
safe_area_group (HStack): The safe area group to enable/disable
model (SimpleBoolModel): The safe group checkbox model.
"""
safe_area_group.enabled = model.as_bool
def show_uv_menu(self, x, y, button, modifier):
self.uv_menu = ui.Menu("Uv Option", width=200, height=100)
self.uv_menu.clear()
with self.uv_menu:
with ui.Frame(width=0, height=100):
with ui.HStack():
with ui.VStack():
ui.Label("Uv Option", alignment=ui.Alignment.LEFT, height=30)
with ui.HStack(width=0):
ui.Spacer(width=20)
cb = ui.CheckBox(model=self.model.uv_enabled)
# if not action_safe_group, the floatslider will not work
action_safe_group = ui.HStack(enabled=self.model.uv_enabled.as_bool)
callback = partial(self.on_group_check_changed, action_safe_group)
cb.model.add_value_changed_fn(callback)
with action_safe_group:
ui.Spacer(width=10)
ui.Label("uv viewer", alignment=ui.Alignment.TOP)
ui.Spacer(width=14)
with ui.VStack():
ui.FloatSlider(self.model.uv_size, width=100,
format="%.0f%%", min=0, max=100, step=1)
ui.Rectangle(name="ActionSwatch", height=5)
ui.Spacer()
self.uv_menu.show_at(x - self.uv_menu.width, y - self.uv_menu.height)
| 6,987 | Python | 38.480226 | 155 | 0.574782 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/uv_viewer.py | from omni.ui import scene as sc
from omni.ui_scene._scene import AbstractManipulatorItem
import omni.usd
from pxr import Tf
from pxr import Usd
from pxr import UsdGeom
import omni.ui as ui
from . import constants
import carb
class UvModel(sc.AbstractManipulatorModel):
"""
The model that track mesh's uv
"""
# Position needed for when we call item changed
class PositionItem(sc.AbstractManipulatorItem):
def __init__(self) -> None:
super().__init__()
self.value = [0,0,0]
def __init__(self) -> None:
super().__init__()
# Current select prim
self.prim = None
# Set Current path
self.current_path = ""
# update to hold position object created
self.position = UvModel.PositionItem()
# Save the UsdContext name
usd_context = self._get_context()
# Get the Menu item
self.uv_enabled = ui.SimpleBoolModel(True)
self.uv_size = ui.SimpleFloatModel(constants.DEFAULT_UI_PERCENTAGE, min=0, max=100)
# Track selection changes
self.events = usd_context.get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self.on_stage_event, name="Object Info Selection Update"
)
self._register_submodel_callbacks()
self._callbacks = []
def _get_context(self) -> Usd.Stage:
# Get the UsdContext we are attached to
return omni.usd.get_context()
def on_stage_event(self, event):
# if statement to only check when selection changed
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
usd_context = self._get_context()
stage = usd_context.get_stage()
if not stage:
return
prim_paths = usd_context.get_selection().get_selected_prim_paths()
if not prim_paths:
# This turns off the manipulator when everything is deselected
self._item_changed(self.position)
self.current_path = ""
return
prim = stage.GetPrimAtPath(prim_paths[0])
self.prim = prim
self.current_path = prim_paths[0]
# Position is changed because new selected object has a different position
self._item_changed(self.position)
def get_item(self, indentifier: str) -> AbstractManipulatorItem:
if indentifier == "name":
return self.current_path
def _register_submodel_callbacks(self):
"""Register to listen to when any submodel values change."""
self.uv_enabled.add_value_changed_fn(self._model_changed)
self.uv_size.add_value_changed_fn(self._model_changed)
def _model_changed(self, model):
for callback in self._callbacks:
callback()
def add_model_changed_fn(self, callback):
self._callbacks.append(callback)
def destroy(self):
self.events = None
self.stage_event_delegate.unsubscribe() | 3,090 | Python | 33.730337 | 91 | 0.609385 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/object_info_manipulator.py | import omni.kit.pipapi
omni.kit.pipapi.install("matplotlib==3.8.2")
omni.kit.pipapi.install("numpy==1.26.2")
omni.kit.pipapi.install("pycairo==1.25.1")
from PIL import Image, ImageDraw
from pxr import Usd,Gf,UsdGeom,Vt
from omni.ui import scene as sc
import omni.ui as ui
from omni.ui import color as cl
import carb
from omni.ui_scene._scene import Color4
import omni.usd
import matplotlib.pyplot as plt
import numpy as np
import cairo
from .uv_viewer import UvModel
import time
import os
from pathlib import Path
class ObjInfoManipulator(sc.Manipulator):
"""
Manipulator that display the object uv right next to the object
"""
def __init__(self, viewport_window, model,**kwargs) -> None:
super().__init__(**kwargs)
# Build Cache for the UV data
self.cache = {}
self.vp_win = viewport_window
resolution = self.vp_win.viewport_api.get_texture_resolution()
self._aspect_ratio = resolution[0] / resolution[1]
self._width = resolution[0]
self._height = resolution[1]
if model is None:
self.model = UvModel() # Initialize with UvModel() if model is not provided
else:
self.model = model # Use the provided model if it's given
levels_to_go_up = 3
script_directory = Path(__file__).resolve()
script_directory = script_directory.parents[levels_to_go_up]
#script_directory = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
uv_path = "data/output.png"
uv_background = "data/uv_background.jpg"
self.file_path = os.path.join(script_directory,uv_path)
self.uv_background = os.path.join(script_directory,uv_background)
def on_build(self) -> None:
"""Called when the model is changed and rebuilds the whole manipulator"""
initial_mtime = os.path.getmtime(self.file_path)
aspect_ratio = self.get_aspect_ratio()
width = self.get_width()
height = self.get_height()
inverse_ratio = 1 / aspect_ratio
# Get uv_size, by default it is 50 from model file
uv_size = self.model.uv_size.as_float
if not self.model:
return
# If we don't have selection then just return
if self.model.get_item("name") == "":
return
# Track aspect ratio to determine where to place the uv graph
if width>height:
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.01*uv_size,0.01*uv_size,0.6)
transform = move*rotate*scale
with sc.Transform(transform):
with sc.Transform(sc.Matrix44.get_translation_matrix(0.5,0.5,0)):
self._build_safe_rect()
self._build_axis()
self._build_uv()
# Build Uv and save to disk
time.sleep(0.15)
current_mtime = os.path.getmtime(self.file_path)
# Compare current modification time with the initial one
if current_mtime != initial_mtime:
carb.log_warn(current_mtime)
carb.log_warn(initial_mtime)
# File has been updated, call _show_uv()
self._show_uv()
# Update the initial_mtime to the current modification time
initial_mtime = current_mtime
else :
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.5,0.5,0.5)
transform = move*rotate*scale
with sc.Transform(transform):
with sc.Transform(sc.Matrix44.get_translation_matrix(0.5,0.5,0)):
self._build_safe_rect()
self._build_axis()
self._build_uv()
# Build Uv and save to disk
time.sleep(0.15)
current_mtime = os.path.getmtime(self.file_path)
# Compare current modification time with the initial one
if current_mtime != initial_mtime:
carb.log_warn(current_mtime)
carb.log_warn(initial_mtime)
# File has been updated, call _show_uv()
self._show_uv()
# Update the initial_mtime to the current modification time
initial_mtime = current_mtime
# Check if uv png is new
def is_file_updated(file_path, reference_time):
file_stat = os.stat(file_path)
file_modification_time = time.localtime(file_stat.st_mtime)
return file_modification_time > reference_time
def _build_uv(self):
# Get the object's path as a unique key
object_path = self.model.get_item('name')
# Check if object information is already in the cache
if object_path in self.cache:
object_info = self.cache[object_path]
carb.log_warn("uv info in cache")
else:
carb.log_warn("uv info not in cache")
# Gather information
stage = omni.usd.get_context().get_stage()
mesh = stage.GetPrimAtPath(self.model.get_item('name'))
if mesh == None:
return
if mesh.GetTypeName() != "Mesh":
carb.log_error("PLEASE SELECT A MESH")
return
else:
st = mesh.GetAttribute("primvars:st").Get()
st_indices = []
if mesh.GetAttribute("primvars:st:indices"):
st_indices = mesh.GetAttribute("primvars:st:indices").Get()
uv_coordinates = st
vertex_indices = list(range(len(uv_coordinates)))
vertex_counts = mesh.GetAttribute("faceVertexCounts").Get()
# Create a UV Mesh with UV Faces
if st_indices:
uv_mesh = np.array([uv_coordinates[i] for i in st_indices])
else:
uv_mesh = np.array([uv_coordinates[i] for i in vertex_indices])
# Initialize object_info dictionary
object_info = {
"uv_mesh": uv_mesh,
"vertex_counts": vertex_counts,
# Add any other information needed for self._draw_uv_line here
}
# Store the object information in the cache
self.cache[object_path] = object_info
# Retrieve the required information from the cached object_info dictionary
uv_mesh = object_info["uv_mesh"]
vertex_counts = object_info["vertex_counts"]
# initial count, will plus count number in vertex_counts
current_index = 0
# for debug only
loop_counter = 0
width, height = 512, 512
# image = Image.new("RGB", (width, height), (0, 0, 0))
# draw = ImageDraw.Draw(image)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
# Set background color
ctx.set_source_rgba(0, 0, 0, 0.8)
ctx.paint()
# Set the line width for thinner lines (adjust as needed)
line_width = 0.7 # You can change this value to make the lines thinner or thicker
ctx.set_line_width(line_width)
for count in vertex_counts:
uv_face = uv_mesh[current_index:current_index+count]
pixel_uv_face = [(uv[0] * width, uv[1] * height) for uv in uv_face]
ctx.set_source_rgb(0.6, 0.6, 0.6) # Light gray outline
ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD) # Filling rule
ctx.move_to(*pixel_uv_face[0])
for point in pixel_uv_face[1:]:
ctx.line_to(*point)
ctx.close_path()
ctx.stroke()
ctx.set_source_rgb(0.2, 0.2, 0.2) # Dark gray fill
ctx.fill()
current_index += count
# for debug only, test for loop counts
loop_counter += 1
if loop_counter >= 1000000:
break
# image.save("D:/Amazon_Box_Stable_Diffusion/HoudiniUV/UV_Viewer_Extension/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/data/output.png")
surface.write_to_png(self.file_path)
def _show_uv(self):
point_count = 4
# Form the mesh data
alpha = 0.9
points = [[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]]
vertex_indices = [0, 1, 2, 3]
colors = [[1, 1, 1, alpha], [1, 1, 1, alpha], [1, 1, 1, alpha], [1, 1, 1, alpha]]
uvs = [[0, 0], [0, 1], [1, 1], [1, 0]]
# Draw the mesh
uv_background = self.uv_background
filename = self.file_path
sc.TexturedMesh(uv_background, uvs, points, colors, [point_count], vertex_indices)
sc.TexturedMesh(filename, uvs, points, colors, [point_count], vertex_indices)
def display_previous_show_uv(self):
aspect_ratio = self.get_aspect_ratio()
width = self.get_width()
height = self.get_height()
inverse_ratio = 1 / aspect_ratio
if width>height:
move = sc.Matrix44.get_translation_matrix(-0.9,-0.9*inverse_ratio,0)
rotate = sc.Matrix44.get_rotation_matrix(0,0,0)
scale = sc.Matrix44.get_scale_matrix(0.6,0.6,0.6)
transform = move*rotate*scale
with sc.Transform(transform):
self._show_uv()
"""Main Function to draw UV directly in Omniverse"""
"""Depreciate due to performance issue"""
def _draw_uv_line(self, point_count, points):
# point_count = 3
# points = [[0,0,0],[0,0.5,0],[0.5,0.5,0]]
vertex_indices = []
colors = []
for i in range(point_count):
vertex_indices.append(i)
colors.append([0.5, 0.5, 0.5, 1])
# This will create a new list to append the first element to the list and form a closed line
line_points = points + [points[0]]
# Draw UV
sc.PolygonMesh(points, colors, [point_count], vertex_indices)
sc.Curve(
line_points,
thicknesses=[0.2],
colors=[0.0, 0.0, 0.0, 1],
curve_type=sc.Curve.CurveType.LINEAR,
)
# Draw a rect in 1:1 to show the UV block
def _build_safe_rect(self):
"""Build the scene ui graphics for the safe area rectangle
Args:
percentage (float): The 0-1 percentage the render target that the rectangle should fill.
color: The color to draw the rectangle wireframe with.
"""
transparent_black = (0, 0, 0, 0.1)
sc.Rectangle(1, 1, thickness=1, wireframe=False, color=transparent_black)
def _build_axis(self):
# grid represent 0-1
sc.Line([0,0,1], [0, 1, 1], thicknesses=[5.0], color=cl.red)
sc.Line([0,0,1], [1, 0, 1], thicknesses=[5.0], color=cl.red)
def get_aspect_ratio(self):
"""Get the aspect ratio of the viewport.
Returns:
float: The viewport aspect ratio.
"""
return self._aspect_ratio
def get_width(self):
"""Get the width of the viewport.
Returns:
float: The viewport aspect ratio.
"""
return self._width
def get_height(self):
"""Get the height of the viewport.
Returns:
float: The viewport aspect ratio.
"""
return self._height
def on_model_updated(self, item):
# Regenerate the manipulator
self.invalidate()
"""Test Function"""
def __example_draw_shape(self):
point_count = 6
points = [[0,0,0],[0,0.5,0],[0.5,0.5,0],[0.8,0,0],[0.8,0.5,0],[0.8,0.7,0]]
vertex_indices = []
sizes = []
colors = []
for i in range(point_count):
weight = i / point_count
vertex_indices.append(i)
colors.append([weight, 1 - weight, 1, 1])
print(vertex_indices)
sc.PolygonMesh(points, colors, [point_count], vertex_indices)
#pass | 12,430 | Python | 36.784194 | 153 | 0.556637 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/tests/Test.py | import matplotlib.pyplot as plt
import numpy as np
# Your provided data
st_values = [(-6.121573, -0.53302103), (-0.68017477, -0.53302103), (-4.7612233, -0.53302103),
(0.68017477, -0.53302103), (-0.68017477, 0.53302103), (0.68017477, 0.53302103),
(-2.0405242, -0.53302103), (-3.4008737, -0.53302103), (2.0405242, -0.53302103),
(-2.0405242, 0.53302103), (-3.4008737, 0.53302103), (2.0405242, 0.53302103)]
st_indices = [1, 3, 5, 4, 6, 9, 10, 7, 9, 4, 5, 11, 6, 7, 2, 0, 6, 1, 4, 9, 8, 11, 5, 3]
# Reshape the indices into pairs of (u, v) coordinates
uv_indices = np.array(st_indices).reshape(-1, 2)
# Extract coordinates based on indices
mapped_coordinates = [st_values[i] for i in uv_indices.flatten()]
# Convert to NumPy array for easier manipulation
mapped_coordinates = np.array(mapped_coordinates)
# Extract u and v coordinates
u_coords, v_coords = mapped_coordinates[:, 0], mapped_coordinates[:, 1]
# Plot the UV mapping
plt.scatter(u_coords, v_coords, marker='o', label='UV Mapping')
plt.title('UV Mapping')
plt.xlabel('U Coordinate')
plt.ylabel('V Coordinate')
plt.legend()
plt.show()
| 1,142 | Python | 32.617646 | 93 | 0.658494 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extnsion for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import com.soliptionpictures.hunter
# Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = com.soliptionpictures.hunter.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,692 | Python | 35.021276 | 142 | 0.685579 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/com/soliptionpictures/hunter/tests/Uv_test.py | import matplotlib.pyplot as plt
import numpy as np
# Your provided data
st_values = [(-6.121573, -0.53302103), (-0.68017477, -0.53302103), (-4.7612233, -0.53302103),
(0.68017477, -0.53302103), (-0.68017477, 0.53302103), (0.68017477, 0.53302103),
(-2.0405242, -0.53302103), (-3.4008737, -0.53302103), (2.0405242, -0.53302103),
(-2.0405242, 0.53302103), (-3.4008737, 0.53302103), (2.0405242, 0.53302103)]
st_indices = [1, 3, 5, 4, 6, 9, 10, 7, 9, 4, 5, 11, 6, 7, 2, 0, 6, 1, 4, 9, 8, 11, 5, 3]
# Reshape the indices into pairs of (u, v) coordinates
uv_indices = np.array(st_indices).reshape(-1, 2)
# Extract coordinates based on indices
mapped_coordinates = [st_values[i] for i in uv_indices.flatten()]
# Convert to NumPy array for easier manipulation
mapped_coordinates = np.array(mapped_coordinates)
# Extract u and v coordinates
u_coords, v_coords = mapped_coordinates[:, 0], mapped_coordinates[:, 1]
# Plot the UV mapping
plt.scatter(u_coords, v_coords, marker='o', label='UV Mapping')
plt.title('UV Mapping')
plt.xlabel('U Coordinate')
plt.ylabel('V Coordinate')
plt.legend()
plt.show()
| 1,139 | Python | 35.774192 | 93 | 0.660228 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["HUNTER"]
# The title and description fields are primarily for displaying extension info in UI
title = "UV Viewer"
description="An extension that can view the UV of mesh"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "uv"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.jpg"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import com.soliptionpictures.hunter".
[[python.module]]
name = "com.soliptionpictures.hunter"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,546 | TOML | 31.229166 | 118 | 0.743855 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/docs/README.md | # UV viwer Extension [com.soliptionpictures.hunter]
This is a simple extension to show UV of selected mesh
This Extension is heavily inspried by Camera Reticle Extension
I create this entension mainly to check if my UV is right since Omniverse does not natively support UV check
## Adding This Extension
To add a this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add Link to the path: ../uv_viewer_extension/kit-exts-uv_viewer/exts
| 496 | Markdown | 40.416663 | 108 | 0.782258 |
lydd8888/kit-exts-uv_viewer/exts/com.soliptionpictures.hunter/docs/index.rst | com.soliptionpictures.hunter
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"com.soliptionpictures.hunter"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 357 | reStructuredText | 16.047618 | 45 | 0.638655 |
barakooda/Nvidia-Test-Barak-Koren/README.md | Nvidia test for Senior Solution Engineer.
Omniverse Test
This test is split into 3 subtasks:
1. Rotating cube with texture:
Create an extension in omniverse that opens a window loads a texture and displays it on a 3d rotating cube.
Useful links:
a. Quick example on how to create an extension: Build an Omniverse Extension in less than 10 Minutes | NVIDIA On-Demand
b. Scene and UI Manipulator Extensions - Samples & Tutorials on Github
c. Extensions: Extensions documentation -NVIDIA-
2. Inference Model
Add the MNIST model to your extension now you will load the texture (a handwritten letter/character) and you will display the character on the cube
Useful links:
a. MNIST https://github.com/pytorch/examples/tree/main/mnist
b. Inference https://docs.omniverse.nvidia.com/extensions/latest/ext_inference.html
3. Draw with mouse:
Add to the extension window a widget that can draw with the mouse a letter/character and display it on the cube.
| 950 | Markdown | 46.549998 | 147 | 0.795789 |
barakooda/Nvidia-Test-Barak-Koren/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
barakooda/Nvidia-Test-Barak-Koren/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
barakooda/Nvidia-Test-Barak-Koren/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/common.py |
import os
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODEL_PATH = os.path.join(root_path, "data", "mnist_cnn.pt")
OUTPUT_PATH = os.path.join(root_path, "data", "predicted_label_image.png")
TEXTURE_SIZE = 256
TXT_SIZE = 180
BLACK_COLOR = [0, 0, 0, 255] | 284 | Python | 24.909089 | 74 | 0.683099 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/extension.py | import os
import omni.ext
import omni.ui as ui
import omni.kit.commands
from pxr import Sdf,UsdShade,UsdGeom
from .img2txt2img import img2txt2img
from .circle import draw_circle,draw_circle_optimized
from PIL import Image
import numpy as np
from .bug_fixes import fix_cube_uv
from .utils import wait_for_stage
from .common import OUTPUT_PATH,MODEL_PATH,TEXTURE_SIZE,root_path
import omni.timeline as timeline
#na_vi_da_test
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Na_vi_da_testExtension(omni.ext.IExt):
def set_rotation_for_cube(self)->None:
cube = self.stage.GetPrimAtPath(self.cube_path)
# Obtain xformable interface
xformable = UsdGeom.Xformable(cube)
# Create rotation attribute for keyframing
rotation_attr = xformable.AddRotateYOp().GetAttr()
# Set keyframes
self.stage.SetStartTimeCode(1)
time_start = self.stage.GetStartTimeCode()
time_end = self.stage.GetEndTimeCode()
num_frames = time_end - time_start
rotation_attr.Set(0, time_start)
rotation_attr.Set(360 - (num_frames / 360 ), 100)
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def click_spwan_cube(self):
self.spwan_cube()
self.set_rotation_for_cube()
timeline.get_timeline_interface().play()
def click_load_image(self):
image_path = self.image_path_string_field_model.get_value_as_string()
img2txt2img(MODEL_PATH, image_path, OUTPUT_PATH)
shader = UsdShade.Shader.Define(self.stage, f'/World/Looks/OmniPBR/Shader')
shader.GetInput('diffuse_texture').Set(OUTPUT_PATH)
def click_reset(self):
self.clear_all()
def _on_mouse_pressed(self, x, y, key):
image_pos_x=self._image.screen_position_x
image_pos_y=self._image.screen_position_y
x = int(x - image_pos_x)
y = int(y - image_pos_y)
self.image_data = draw_circle_optimized(self.image_data, x, y, 8)
self.image_data_np = self.image_data.data
self.provider.set_data_array(self.image_data_np, self.image_data_size)
def clear_image(self):
self.image_data.fill(255)
self.image_data_size = self.image_data.shape[:2]
self.image_data_np = self.image_data.data
self.provider.set_data_array(self.image_data_np, self.image_data_size)
print("clear")
def spwan_cube(self):
omni.kit.commands.execute('cl')
self.cube_path = omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',prim_type='Cube')[1]
print("##########",self.cube_path)
self.stage = wait_for_stage()
cube = self.stage.GetPrimAtPath(self.cube_path)
fix_cube_uv(cube)
self.mat = omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary',
mdl_name='OmniPBR.mdl',
mtl_name='OmniPBR',
mtl_created_list=['/World/Looks/OmniPBR'],
bind_selected_prims=[])
print("##########",self.mat)
omni.kit.commands.execute('BindMaterial',
material_path='/World/Looks/OmniPBR',
prim_path=['/World/Cube'],
strength=['weakerThanDescendants'])
self.material = UsdShade.Material.Get(self.stage, '/World/Looks/OmniPBR')
print("Cube Spwaned")
def clear_all(self)->None:
omni.kit.commands.execute('DeletePrims',
paths=[Sdf.Path('/World/Cube')],
destructive=False)
omni.kit.commands.execute('DeletePrims',
paths=[Sdf.Path('/World/Looks')],
destructive=False)
def on_shutdown(self):
print("[na_vi_da_test] na_vi_da_test shutdown")
def submit_drwaing(self):
image_path = OUTPUT_PATH
img = Image.fromarray(self.image_data,mode="RGBA")
img.save(image_path, "PNG")
img2txt2img(MODEL_PATH, image_path, OUTPUT_PATH,invert_image=True)
shader = UsdShade.Shader.Define(self.stage, f'/World/Looks/OmniPBR/Shader')
shader.GetInput('diffuse_texture').Set(OUTPUT_PATH)
def build_window(self):
with self._window.frame:
with ui.VStack():
with ui.HStack(height=ui.Percent(5)):
ui.Button("Spwan Cube", clicked_fn=self.click_spwan_cube,width=64,height=64)
ui.Button("Reset", clicked_fn=self.click_reset,width=64,height=64)
with ui.VStack(height=ui.Percent(20)):
with ui.CollapsableFrame("By Image Path"):
with ui.VStack():
ui.Button("Load Image From Path", clicked_fn=self.click_load_image)
ui.Label("Image Path:")
self.image_path_string_field_model = ui.StringField().model
with ui.VStack(height=ui.Percent(75)):
with ui.CollapsableFrame("By Drawing"):
with ui.VStack():
with ui.HStack(height=ui.Percent(5)):
ui.Button("Submit",width=32,height=16, clicked_fn=self.submit_drwaing)
ui.Button("Clear",width=32,height=16, clicked_fn=self.clear_image)
self._image = ui.ImageWithProvider(
self.provider,
width=TEXTURE_SIZE,
height=TEXTURE_SIZE,
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT)
self._image.set_mouse_moved_fn(lambda x, y, b, m: self._on_mouse_pressed(x,y,b))
self._image.set_mouse_pressed_fn(lambda x, y, b, m: self._on_mouse_pressed(x,y,b))
def on_startup(self, ext_id):
self.image_path_string_field_model = None
self.image_data = np.ones((TEXTURE_SIZE, TEXTURE_SIZE, 4), dtype=np.uint8) * 255
self.image_data_size = self.image_data.shape[:2]
self.image_data_np = self.image_data.data
self.provider = ui.ByteImageProvider()
self.provider.set_data_array(self.image_data_np, self.image_data_size)
self._window = ui.Window("Textured Cube", width=512, height=512)
self.build_window() | 6,971 | Python | 39.77193 | 119 | 0.585999 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/bug_fixes.py |
from pxr import UsdGeom, Vt
NEW_UVS = Vt.Vec2fArray([
(0, 0), (1, 0), (1, 1), (0, 1),
(1, 0), (1, 1), (0, 1), (0, 0),
(1, 1), (0, 1), (0, 0), (1, 0),
(0, 1), (0, 0), (1, 0), (1, 1),
(0, 0), (1, 0), (1, 1), (0, 1),
(1, 0), (1, 1), (0, 1), (0, 0)])
def fix_cube_uv(cube):
mesh = UsdGeom.Mesh(cube)
uv_attr = mesh.GetPrimvar('st')
uv_primvar = UsdGeom.Primvar(uv_attr)
uv_primvar.Set(NEW_UVS)
| 501 | Python | 22.904761 | 44 | 0.383234 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/img2txt2img.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import TEXTURE_SIZE,TXT_SIZE
from PIL import Image, ImageDraw, ImageFont, ImageOps
import torchvision.transforms as transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def img2txt2img(model_path:str,image_path:str,output_path:str,invert_image:bool=False):
# Initialize the model
model = Net()
# Load the model
model.load_state_dict(torch.load(model_path))
# Set the model to evaluation mode
model.eval()
#input image
input_image = Image.open(image_path)
input_image = input_image.resize((28, 28))
input_image = input_image.split()[0]
if invert_image:
input_image = ImageOps.invert(input_image)
# Define the transformation
transform = transforms.Compose([
transforms.Resize((28, 28)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)) # Normalization values used during training
])
# Apply the transformations to the image
input_image = transform(input_image)
# Add a batch dimension
input_image = input_image.unsqueeze(0) # shape becomes [1, 1, 28, 28]
# Perform inference
with torch.no_grad(): # Deactivate gradients for the following block
output = model(input_image)
# Get the predicted label
_, predicted_label = torch.max(output, 1)
predicted_txt = predicted_label.item()
#print("Predicted label:", predicted_txt)
# Create an image with white background
width, height = TEXTURE_SIZE, TEXTURE_SIZE
image = Image.new("RGB", (width, height), "white")
draw = ImageDraw.Draw(image)
# Load a font
# You might have to download a specific font or use one that's available on your system
try:
font = ImageFont.truetype("arial.ttf", TXT_SIZE)
except IOError:
font = ImageFont.load_default()
# Calculate text size to center it
text = str(predicted_txt) # Assuming `predicted_label` is a PyTorch tensor containing the label
text_width, text_height = draw.textsize(text, font=font)
text_x = width / 2 - text_width / 2
text_y = height / 2 - text_height / 2
# Add text to image
draw.text((text_x, text_y), text, font=font, fill="black")
# Save or show image
image.save(output_path) | 2,991 | Python | 28.92 | 100 | 0.626881 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/utils.py | import time
from pxr import Sdf,Usd,UsdUtils
def wait_for_stage(timeout=10) -> Usd.Stage:
end_time = time.time() + timeout
while time.time() < end_time:
stages = UsdUtils.StageCache.Get().GetAllStages()
if stages:
return stages[0]
time.sleep(0.1) # Sleep for 100 milliseconds before checking again
return None | 361 | Python | 26.846152 | 75 | 0.648199 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/circle.py | import numpy as np
from .common import TEXTURE_SIZE,BLACK_COLOR
def draw_circle(image_data, coord_x, coord_y, radius)->np.ndarray:
# Create grids for x and y coordinates
y, x = np.ogrid[0:TEXTURE_SIZE, 0:TEXTURE_SIZE]
# Calculate the distance to the center for each point
distance_to_center = (x - coord_x)**2 + (y - coord_y)**2
# Identify the points within the circle
circle_points = distance_to_center <= radius**2
# Create the circle image with the same initial data as image_data
circle_image = np.copy(image_data)
# Draw the circle in black (setting it to [0, 0, 0, 255])
circle_image[circle_points] = [0, 0, 0, 255]
# Superimpose the circle onto the existing image data
image_data = np.where(circle_image == [0, 0, 0, 255], circle_image, image_data)
return image_data
def draw_circle_optimized(image_data, coord_x, coord_y, radius)->np.ndarray:
# Determine the bounding box of the circle.
box_left = max(0, coord_x - radius)
box_right = min(TEXTURE_SIZE, coord_x + radius)
box_top = max(0, coord_y - radius)
box_bottom = min(TEXTURE_SIZE, coord_y + radius)
# Create a coordinate grid for the bounding box.
y, x = np.ogrid[box_top:box_bottom, box_left:box_right]
# Use the circle equation to create a mask for that region.
circle_mask = (x - coord_x)**2 + (y - coord_y)**2 <= radius**2
# Update only the pixels in that region where the mask is True.
image_data[box_top:box_bottom, box_left:box_right][circle_mask] = BLACK_COLOR
return image_data | 1,578 | Python | 34.088888 | 83 | 0.664132 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/nvidia_test/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extnsion for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import nvidia_test
# Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = nvidia_test.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,658 | Python | 34.297872 | 142 | 0.680338 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "Nvidia test for solution engineer position"
description=""
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/barakooda/Nvidia-Test-Barak-Koren"
# One of categories for UI.
category = "Test"
# Keywords for the extension
keywords = ["kit", "example","Test"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
ai_model = "data/mnist_cnn.pt"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import na_vi_da_test".
[[python.module]]
name = "nvidia_test"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,600 | TOML | 31.019999 | 118 | 0.74 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/docs/README.md | # Python Extension Example [na_vi_da_test]
This is an example of pure python Kit extension. It is intended to be copied and serve as a template to create new extensions.
| 172 | Markdown | 33.599993 | 126 | 0.773256 |
barakooda/Nvidia-Test-Barak-Koren/exts/nvidia_test/docs/index.rst | na_vi_da_test
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"na_vi_da_test"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 327 | reStructuredText | 14.619047 | 43 | 0.599388 |
RPL-CS-UCL/IsaacSyntheticPerception/README.md |
# Isaac Synthetic Perception Data Generator
<img src="https://github.com/RPL-CS-UCL/IsaacSyntheticPerception/blob/main/docs/img/rpl_logo.png" width="250">
# Introduction
This data generator uses the photo-realistic NVIDIA Isaac Simulator to gather sensor data in procedurally created environments.
The system is built to use a SensorRig (that can be extended) that holds different types of sensors (RGB, RGB-D, LiDAR, IMU, US, and contact sensors). The Rig also captures semantic, object detection and semantic instances as ground truths with the data. This SensorRig can move through the environments through multiple methods (waypoints, velocity API). The Rig captures data based on the user's parameters. This allows the user to manually snapshot data, snapshot at points of interest, or at a set sample frequency.
The procedural generations use different layers of noise to create heightmaps, biomes, and specific object placement. These parameters can be customised to produce different and repeatable environments.
The major advantage of this system is that data can be generated for specific use cases saving space and ignoring obsolete data. Large datasets do not need to be downloaded from slow repositories. Data can be generated to have ideal class balances and can be optimised in a pipeline to generate data on the fly for poor-performing classes in ML models.
# Getting Started
## Installations
```
./python.sh -m pip install faiss-gpu, opencv-python, scikit-image, timm, fast_pytorch_keymeans, pytorch_metric_learning, kornia
```
### ToDO:
Figure out how to install `pydensecrf` in Isaac's python
## Isaac Extension
Open IsaacSim, and enable the FrankaCopycat extension.
# Using the Sensor Rig
## What is it
The SensorRig is a custom class that creates, manages, moves, and samples an array of sensors automatically and based on user parameters. For example; you can place cameras at certain positions on the sensor rig. You can then have it then follow user defined waypoints and sample at certain intervals (all within the UI).
## How to use it
There are a few key things to note...
## How to expand it.
...
## SensorRig base options
### Sensors
Custom array of sensors, their positions and orientations
### Movement system
Movement type (velocity, waypoint, human control)
Move speed
#### waypoint
load from file
initialise from scene (build when in the scene)
save waypoints
| 2,415 | Markdown | 43.74074 | 519 | 0.785921 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
| 178 | Python | 16.899998 | 36 | 0.657303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/standalone.py | import sys
import os
# sys.argv.insert(1, f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
#
# # Add paths to extensions
# sys.argv.append(f'--ext-folder')
# sys.argv.append(f'{os.path.abspath(os.environ["ISAAC_PATH"])}/exts')
# # Run headless
# sys.argv.append('--no-window')
#
# # Set some settings
# sys.argv.append('--/app/asyncRendering=False')
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({'headless': False})
from omni.isaac.core.utils.extensions import enable_extension
enable_extension('omni.kit.asset_converter')
simulation_app.update()
from omni.isaac.core import World
from omni.isaac.quadruped.robots import Anymal
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from pxr import Gf, UsdGeom
import omni.appwindow # Contains handle to keyboard
import numpy as np
import carb
from sensors import SensorRig
import PCG.WorldGenerator as WG
class Anymal_runner(object):
def __init__(self, physics_dt, render_dt) -> None:
"""
Summary
creates the simulation world with preset physics_dt and render_dt and creates an anymal robot inside the warehouse
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
"""
self._world = World(
stage_units_in_meters=1.0,
physics_dt=physics_dt,
rendering_dt=render_dt,
)
self.step_total = 0
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error('Could not find Isaac Sim assets folder')
# spawn warehouse scene
prim = get_prim_at_path('/World/GroundPlane')
if not prim.IsValid():
prim = define_prim('/World/GroundPlane', 'Xform')
asset_path = (
assets_root_path
+ '/Isaac/Environments/Simple_Warehouse/warehouse.usd'
)
prim.GetReferences().AddReference(asset_path)
self._anymal = self._world.scene.add(
Anymal(
prim_path='/World/Anymal',
name='Anymal',
usd_path=assets_root_path
+ '/Isaac/Robots/ANYbotics/anymal_c.usd',
position=np.array([0, 0, 0.70]),
)
)
self.sensor_rig = SensorRig('SensorRig', '/World/Anymal')
self.path = '/home/jon/Downloads/sensors.json'
self.out_path = '/home/jon/Downloads/out/'
self._world.reset()
self._enter_toggled = 0
self._base_command = np.zeros(3)
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
'NUMPAD_8': [1.0, 0.0, 0.0],
'UP': [1.0, 0.0, 0.0],
# back command
'NUMPAD_2': [-1.0, 0.0, 0.0],
'DOWN': [-1.0, 0.0, 0.0],
# left command
'NUMPAD_6': [0.0, -1.0, 0.0],
'RIGHT': [0.0, -1.0, 0.0],
# right command
'NUMPAD_4': [0.0, 1.0, 0.0],
'LEFT': [0.0, 1.0, 0.0],
# yaw command (positive)
'NUMPAD_7': [0.0, 0.0, 1.0],
'N': [0.0, 0.0, 1.0],
# yaw command (negative)
'NUMPAD_9': [0.0, 0.0, -1.0],
'M': [0.0, 0.0, -1.0],
}
self.needs_reset = False
def setup(self) -> None:
"""
[Summary]
Set up keyboard listener and add physics callback
"""
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(
self._keyboard, self._sub_keyboard_event
)
self._world.add_physics_callback(
'anymal_advance', callback_fn=self.on_physics_step
)
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.sensor_rig.create_rig_from_file(self.path, self.stage)
self.sensor_rig.setup_sensor_output_path(self.out_path)
def on_physics_step(self, step_size) -> None:
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
self.step_total += step_size
if self.needs_reset:
self._world.reset(True)
self.needs_reset = False
self._anymal.advance(step_size, self._base_command)
self.sensor_rig.sample_sensors(step_size)
def run(self) -> None:
"""
[Summary]
Step simulation based on rendering downtime
"""
# change to sim running
while simulation_app.is_running():
render = False
if self.step_total > 10:
self.step_total = 0
render = True
self._world.step(render=render)
if not self._world.is_simulating():
self.needs_reset = True
return
def _sub_keyboard_event(self, event, *args, **kwargs) -> bool:
"""
[Summary]
Keyboard subscriber callback to when kit is updated.
"""
# reset event
self._event_flag = False
self._base_command[0:3] += np.array(self._input_keyboard_mapping['UP'])
# when a key is pressed for released the command is adjusted w.r.t the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(
self._input_keyboard_mapping[event.input.name]
)
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(
self._input_keyboard_mapping[event.input.name]
)
return True
def main():
"""
[Summary]
Parse arguments and instantiate the ANYmal runner
"""
physics_dt = 1 / 200.0
render_dt = 1 / 60.0
runner = Anymal_runner(physics_dt=physics_dt, render_dt=render_dt)
simulation_app.update()
runner.setup()
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
# simulation_app.pause()
objpath = "/home/jon/Downloads/new_objects_save.json"
wrldpath = "/home/jon/Downloads/worlddata3.json"
WG.create_world(objpath,wrldpath)
# simulation_app.pause()
runner.run()
simulation_app.close()
if __name__ == '__main__':
main()
| 6,907 | Python | 30.257918 | 122 | 0.573476 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/synthetic_perception.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
try:
import omni.isaac.version as v
VERSION = v.get_version()[0]
except:
VERSION = '2021'
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from pxr import Usd, Gf
from .PCG import AreaMaskGenerator
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.utils.semantics import get_semantics
import omni
import omni.kit.commands
import omni.timeline
from omni.isaac.core.utils.prims import get_prim_at_path # , get_prim_property
import omni.kit.viewport
from pxr import Usd, Gf, UsdGeom
import numpy as np
from .sensors import Lidar, DepthCamera, SensorRig
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.core import World
import omni.appwindow # Contains handle to keyboard
import carb
from omni.isaac.core.utils.stage import (
add_reference_to_stage,
is_stage_loading,
update_stage_async,
update_stage,
)
from pxr import UsdShade, Sdf
import omni.kit.commands
# from omni import usd._usd
from pxr import Sdf
import omni.physx
from omni.physx import get_physx_scene_query_interface
class SyntheticPerception(BaseSample):
"""
Main class
"""
# pylint: disable=too-many-instance-attributes
# Big class requires lots of attrs.
def __init__(self) -> None:
super().__init__()
self.__created_objs = []
self.save_count = 0
self.obstacles = []
self.__undefined_class_string = 'undef'
self.sr = SensorRig('SensorRig', '/World')
self._event_flag = False
self._o = "[SyntheticPerception] "
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
'NUMPAD_8': [1.5, 0.0, 0.0],
'UP': [1.5, 0.0, 0.0],
# back command
'NUMPAD_2': [-1.5, 0.0, 0.0],
'DOWN': [-1.5, 0.0, 0.0],
# left command
'NUMPAD_6': [0.0, -1.0, 0.0],
'RIGHT': [0.0, -1.0, 0.0],
# right command
'NUMPAD_4': [0.0, 1.0, 0.0],
'LEFT': [0.0, 1.0, 0.0],
# yaw command (positive)
'NUMPAD_7': [0.0, 0.0, 1.0],
'N': [0.0, 0.0, 1.0],
# yaw command (negative)
'NUMPAD_9': [0.0, 0.0, -1.0],
'M': [0.0, 0.0, -1.0],
}
def _sub_keyboard_event(self, event, *args, **kwargs):
self._event_flag = False
# when a key is pressedor released the command is adjusted w.r.t the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._input_keyboard_mapping:
self.sr.apply_veloc(
self._input_keyboard_mapping[event.input.name]
)
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
self.sr.apply_veloc([0, 0, 0])
# print(self._input_keyboard_mapping[event.input.name])
return True
def force_reload(self):
self._world.initialize_physics()
self.setup_scene()
async def _on_load_world_async(self):
await omni.kit.app.get_app().next_update_async()
# print('[company.hello.world] company hello world startup')
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
# await self._world.reset_async()
async def load_sample(self) -> None:
"""Function called when clicking load buttton"""
if World.instance() is None:
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = World.instance()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load()
def setup_scene(self):
self.world = self.get_world()
async def init_world(self) -> None:
if World.instance() is None:
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = World.instance()
await self._world.reset_async()
await self._world.pause_async()
self.world_cleanup()
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.timeline = omni.timeline.get_timeline_interface()
self._world_settings = {
'physics_dt': 1.0 / 60.0,
'stage_units_in_meters': 1.0,
'rendering_dt': 1.0 / 60.0,
}
self._appwindow = omni.appwindow.get_default_app_window()
print('The world is initialized.')
async def setup_post_load(self):
self._world_settings = {
'physics_dt': 1.0 / 60.0,
'stage_units_in_meters': 1.0,
'rendering_dt': 1.0 / 60.0,
}
# self.init_sensor_and_semantics()
# self.init_sensor_rig()
# print('Aquiring keyboard interface')
# self._appwindow = omni.appwindow.get_default_app_window()
# self._input = carb.input.acquire_input_interface()
# self._keyboard = self._appwindow.get_keyboard()
# self._sub_keyboard = self._input.subscribe_to_keyboard_events(
# self._keyboard, self._sub_keyboard_event
# )
def remove_all_objects(self):
for i in reversed(range(len(self.__created_objs))):
try:
self._world.scene.remove_object(self.__created_objs[i])
except:
pass # already deleted from world
del self.__created_objs[i]
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists('sim_step'):
world.remove_physics_callback('sim_step')
if world.physics_callback_exists('sim_timestep'):
world.remove_physics_callback('sim_timestep')
stage = omni.usd.get_context().get_stage()
print('Pre rest setup over')
# self.sr.initialize_waypoints('', stage)
def world_cleanup(self):
self.remove_all_objects()
def init_semantics_in_scene(self):
self.stage = omni.usd.get_context().get_stage()
print(f"{self._o} Adding semantics to scene. Please wait until complete... ... ... ")
self.__add_semantics_to_all2(self.stage)
print(f"{self._o} All semantics added to scene. Complete.")
def init_sensor_and_semantics(self):
"""Initializes sensors and the replicator package"""
self.world_cleanup()
stage = omni.usd.get_context().get_stage()
# self.__sensor = Lidar()
self.__add_semantics_to_all(stage)
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.timeline = omni.timeline.get_timeline_interface()
def add_semantic(self, p, prim_class):
"""Adds semantic to prim"""
sem_dict = get_semantics(p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(p)
if 'Semantics' not in sem_dict:
# print(
# 'adding semantics and collider to ',
# p.GetPrimPath(),
# ' of class ',
# prim_class,
# )
sem = Semantics.SemanticsAPI.Apply(p, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(prim_class)
def __add_semantics_to_all2(self, stage):
"""Add semantic information to all prims on stage based on parent xform"""
prim_class = self.__undefined_class_string
completed_classes = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
len_of_prim = len(prim_ref_name.split('/'))
for word in prim_ref_name.split('/'):
if 'class' in word and word not in completed_classes:
prim_class = word
# self.add_semantic(prim_ref, prim_class)
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
len_of_child = len(
str(prim_child.GetPrimPath()).split('/')
)
# print(len_of_prim, ' : ', len_of_child)
if abs(len_of_prim - len_of_child) == 1:
# print(prim_child)
self.add_semantic(prim_child, prim_class)
completed_classes.append(prim_class)
def init_sensor_rig(self):
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
"""Initializes the sensor rig and adds individual sensors"""
self.sr.create_rig(
np.array([0, 5, 0]), np.asarray([1, 1, 1, 1]), self.stage
)
# self.sr.add_depth_camera_to_rig( (0, 0, 0), (0, 0, 0), (512, 512), True,"DepthCamera")
self.sr.add_sensor_to_rig(DepthCamera(name='depthcam2'))
self.sr.add_sensor_to_rig(Lidar(path='coolLidar'))
def init_sensor_rig_from_file(self, path,out_path):
self.stage = (
omni.usd.get_context().get_stage()
) # Used to access Geometry
self.sr.create_rig_from_file(path, self.stage, self._world)
self.sr.setup_sensor_output_path(out_path)
def sample_sensors(self):
self.sr.sample_sensors()
def attach_sensor_waypoint_callback(self, srx):
# print(self.get_world())
# print(self._world)
#
# self._world = World.instance()
# self.get_world().initialize()
# un comment to enalbe wAYPOINT
# self.get_world().add_physics_callback('sim_step', callback_fn=srx.move)
self._world.add_physics_callback('sim_step', callback_fn=srx.move)
def attach_sensor_sample_callback(self):
# un comment to enalbe wAYPOINT
self.get_world().add_physics_callback('sim_sample_step', callback_fn=self.sr.sample_sensors)
# def spawn_asset(
# self,
# asset_path,
# class_name,
# prim_name,
# x,
# y,
# z,
# scale,
# object_scale_delta,
# allow_rot,
# orign_p_name = "",
# override=False,
# rot = (0,0,0),
# ):
#
# prim_path = '/World/' + 'class_' + class_name + '/' + prim_name
#
# # if not override:
# add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
#
# stage = omni.usd.get_context().get_stage()
# prim = stage.GetPrimAtPath(prim_path)
# # prim.GetReferences().AddReference(assetPath=asset_path, primPath=prim_path)
# prim.SetInstanceable(True)
#
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
# sem.CreateSemanticTypeAttr()
# sem.CreateSemanticDataAttr()
# sem.GetSemanticTypeAttr().Set('class')
# sem.GetSemanticDataAttr().Set(class_name)
#
#
# # omni.kit.commands.execute('CopyPrim',
# # path_from=orign_p_name,
# # path_to=prim_path,
# # duplicate_layers=False,
# # combine_layers=False,
# # exclusive_select=False,
# # flatten_references=False,
# # copy_to_introducing_layer=False)
# # here we want to modify the scale
# low_lim = scale - object_scale_delta
# high_lim = scale + object_scale_delta
# scale = random.uniform(low_lim, high_lim) #/ 100
#
# random_rotation = 0
# if allow_rot:
# random_rotation = random.uniform(0, 360)
#
#
# # omni.kit.commands.execute('CreatePayloadCommand',
# # usd_context=omni.usd.get_context(),
# # path_to=Sdf.Path(prim_path),
# # asset_path=asset_path,
# # instanceable=True)
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=prim_path, # f"/World/{p_name}",
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(0, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=prim_path, # f"/World/{p_name}",
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(0, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
#
# def spawn_loop(
# self,
# path,
# class_name,
# p_name,
# coll,
# height_map,
# scale=1,
# object_scale_delta=0,
# allow_rot=True,
# ):
#
# for i, n in enumerate(coll):
# override=False
# # if i == 1:
# #
# # prim_path = '/World/' + 'class_' + class_name + '/' + p_name
# #
# # add_reference_to_stage(usd_path=path, prim_path=prim_path)
# #
# # override=True
#
# x, y = n
# x = float(x)
# y = float(y)
# mesh_scale = 10
# x_ind = x * mesh_scale
# y_ind = y * mesh_scale
# mesh_height_modifier = 10
# # if x_ind >= 2560:
# # print('x, overfilled', x_ind)
# # x_ind = 2559
# # if y_ind >= 2560:
# #
# # print('y, overfilled', y_ind)
# # y_ind = 2559
# z = float(height_map[int(y_ind/10)][int(x_ind/10)])# / mesh_height_modifier # was abs
#
# cc =(int(y_ind/10),int(x_ind/10) )
# ind = np.ravel_multi_index(cc, (len(height_map), len(height_map)))
# # print(np.asarray(self.t_normals))
# poss_rot = np.asarray(self.t_normals)[ind]
# # print("triangle normals")
# # print(poss_rot)
# # second one is iterated fasted
# if self.occupancy[int(y_ind/10)][int(x_ind/10)] != 0:
# # print("skipping oj spawn")
# continue
#
# self.occupancy[int(y_ind/10)][int(x_ind/10)]= 1
# _p_name = f'{p_name}_{i}'
# self.spawn_asset(
# path,
# class_name,
# _p_name,
# x,
# y,
# z,
# scale,
# object_scale_delta,
# allow_rot,
# override = override,
# orign_p_name = p_name,
# rot = poss_rot
# )
#
# def create_terrains(self, terrain_info):
#
# # create the parent
#
# omni.kit.commands.execute(
# 'CreatePrimWithDefaultXform',
# prim_type='Xform',
# prim_path='/World/t',
# attributes={},
# select_new_prim=True,
# )
#
# for key in terrain_info:
# mesh_path = terrain_info[key].mesh_path
# scale = terrain_info[key].scale
# mat_path = terrain_info[key].material_path
# mat_name = mat_path.split('/')[-1]
# mat_name = mat_name.replace('.mdl', '')
# mesh_path = mesh_path.replace('.obj', '.usd')
# # spawn prim
#
# prim_p = f'/World/t/class_{mat_name}'
# # prim_p = f'/World/t/terrain{key}'
#
# stage = omni.usd.get_context().get_stage()
# scale = 1#0.01
# # X SCALE SHOULD BE NEGATIVE TO FLIP IT CORRECTLY
# random_rotation = 0.0
# x, y, z = 0, 0, 0
# add_reference_to_stage(usd_path=mesh_path, prim_path=prim_p)
# self.create_material_and_bind(
# mat_name, mat_path, prim_p, scale, stage
# )
# prim=stage.GetPrimAtPath(prim_p)
# collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
# sem.CreateSemanticTypeAttr()
# sem.CreateSemanticDataAttr()
# sem.GetSemanticTypeAttr().Set('class')
# sem.GetSemanticDataAttr().Set(mat_name)
#
# scale = 1#0.1
# random_rotation = 0.0
# x, y, z = 0, 0, 0
# # stage = self.usd_context.get_stage()
#
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=f'/World/t',
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# # old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# # old_rotation_order=Gf.Vec3i(0, 1, 2),
# # new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# # new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
# omni.kit.commands.execute(
# 'TransformPrimSRTCommand',
# path=f'/World/t',
# old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
# new_scale=Gf.Vec3f(scale, scale, scale),
# old_translation=Gf.Vec3f(x, y, z),
# new_translation=Gf.Vec3f(x, y, z),
# # old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# # old_rotation_order=Gf.Vec3i(0, 1, 2),
# # new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# # new_rotation_order=Gf.Vec3i(0, 1, 2),
# time_code=Usd.TimeCode(),
# had_transform_at_key=False,
# )
#
# async def spawn_all(self, obs_to_spawn, object_dict, height_map, normals):
# self.t_normals = normals
# length = len(obs_to_spawn)
# counter = 1
# for key in obs_to_spawn:
#
# obj = object_dict[key]
# path = object_dict[key].usd_path
# print(f"{self._o} Spawning {len(obs_to_spawn[key])} of {path}. {counter} / {length}")
# class_name = obj.class_name
# if class_name == '':
# class_name = obj.unique_id
# self.spawn_loop(
# path,
# class_name,
# f'{obj.unique_id}_',
# obs_to_spawn[key],
# height_map,
# scale=obj.object_scale,
# object_scale_delta=obj.object_scale_delta,
# allow_rot=obj.allow_y_rot,
# )
# print("spawned, now we wait till stage loads")
# await update_stage_async()
# # print("some time should have passed")
# # return
# counter += 1
#
# def generate_world_generator(self, obj_path, world_path):
#
#
# print("Tring to generator worldf rom file")
# (
# obs_to_spawn,
# object_dict,
# terrain_info,
# meshGen,
# ) = AreaMaskGenerator.generate_world_from_file(obj_path, world_path)
# height_map = meshGen._points2
# self.occupancy = np.zeros((len(height_map),len(height_map)))
# self.create_terrains(terrain_info)
# meshGen.clean_up_files()
#
# return obs_to_spawn, object_dict, height_map, meshGen.normals
#
#
# def create_material_and_bind(
# self, mat_name, mat_path, prim_path, scale, stage
# ):
#
# obj_prim = stage.GetPrimAtPath(prim_path)
# mtl_created_list = []
#
# omni.kit.commands.execute(
# 'CreateAndBindMdlMaterialFromLibrary',
# mdl_name=mat_path,
# mtl_name=mat_name,
# mtl_created_list=mtl_created_list,
# )
# mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
# omni.usd.create_material_input(
# mtl_prim,
# 'project_uvw',
# True,
# Sdf.ValueTypeNames.Bool,
# )
#
# omni.usd.create_material_input(
# mtl_prim,
# 'texture_scale',
# Gf.Vec2f(scale, scale),
# Sdf.ValueTypeNames.Float2,
# )
# cube_mat_shade = UsdShade.Material(mtl_prim)
#
# UsdShade.MaterialBindingAPI(obj_prim).Bind(
# cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
# )
# def generate_world(self, obj_path, world_path):
# print('Starting world gen')
#
# if World.instance() is None:
# self._world = World(**self._world_settings)
# self.setup_scene()
# else:
# self._world = World.instance()
# print('checking if world is activev')
# print(self._world)
# obs_to_spawn, object_dict = AreaMaskGenerator.generate_world_from_file(
# obj_path, world_path
# )
# length = len(obs_to_spawn)
# counter = 1
# for key in obs_to_spawn:
# obj = object_dict[key]
# path = object_dict[key].usd_path
#
# # print("checking if world is activev")
# # print(self._world)
# print('trying to spawn ', path, ' ', counter, ' / ', length)
# class_name = obj.class_name
# if class_name == '':
# class_name = obj.unique_id
# self.spawn_loop(
# path,
# class_name,
# f'{obj.unique_id}_',
# obs_to_spawn[key],
# scale=obj.object_scale,
# object_scale_delta=obj.object_scale_delta,
# allow_rot=obj.allow_y_rot,
# )
# counter += 1
# print('AREA GENERATION FINISHED')
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/World/t.xformOp:orient'),
# value=Gf.Quatd(0.7071067811865476, Gf.Vec3d(-0.7071067811865476, 0.0, 0.0)),
# prev=Gf.Quatd(1.0, Gf.Vec3d(0.0, 0.0, 0.0)),
# )
#
#
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/World/t.xformOp:orient'),
# value=Gf.Quatd(6.123233995736766e-17, Gf.Vec3d(-4.329780281177467e-17, -0.7071067811865476, -0.7071067811865476)),
# prev=Gf.Quatd(0.7071067811865476, Gf.Vec3d(-0.7071067811865476, 0.0, 0.0)),
# )
# self.spawn_asset(
# mesh_path,
# 'terrain',
# f'terrainmesh_{key}',
# 0,
# 0,
# 0,
# 1,
# 0,
# False,
# )
# self.create_material_and_bind(mat_name,mat_path,)
# def test_spawn1(self):
#
# # asyncio.ensure_future(self.init_world())
# mesh_path = 'C:\\Users\\jonem\\Documents\\Kit\\apps\\Isaac-Sim\\exts\\IsaacSyntheticPerception\\com\\SyntheticPerception\\app\\PCG\\mesh_0.usd'
#
# add_reference_to_stage(usd_path=mesh_path, prim_path='/World/terrain')
# # self.spawn_asset(
# # mesh_path,
# # 'terrain',
# # f'terrainmesh',
# # 0,
# # 0,
# # 0,
# # 1,
# # 0,
# # False,
# # )
# def add_asset_to_stage(
# self, asset_path, prim_name, prim_path, scene, **kwargs
# ):
# # print('adding asset to stage ', asset_path, prim_path)
#
# add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
| 24,894 | Python | 34.871758 | 153 | 0.524303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/sensors.py | """
This class represents the SensorRig and supporting functions.
The goal of this class is to provide an easy control method to sample an array of sensors
and control its movement within and environment.
The SensorRig primarily has a collection of sensors that is read in from a json file.
These sensors are created and stored depending on the parameters and are contructed within
their own classes. See the Sensors folder for all available implemented sensors.
The rig also handles sampling rates and timestamps.
"""
from omni.syntheticdata.scripts.sensors import enable_sensors
from omni.syntheticdata import helpers
from omni.isaac.core.utils.prims import define_prim, delete_prim
import pathlib
import json
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import omni
import omni.kit.commands
import omni.timeline
import omni.kit.viewport
from pxr import Usd, Gf, UsdGeom
import omni.kit.commands
import omni.replicator.core as rep
import math
import numpy as np
import scipy.spatial.transform as tf
from dataclasses import dataclass
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.physx import get_physx_scene_query_interface
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
import carb
from pxr import Sdf
from .Sensors.LIDAR import Lidar
from .Sensors.IMU import IMUSensor
from .Sensors.Camera import DepthCamera
from omni.isaac.core.utils.rotations import (
lookat_to_quatf,
quat_to_euler_angles,
gf_quat_to_np_array,
)
# .
from numpy.linalg import norm
import copy
import traceback
from scipy.spatial.transform import Rotation
def quat_to_euler_angles(q):
q_img = q.GetImaginary()
q_real = q.GetReal()
# roll (x-axis rotation)
sinr_cosp = 2 * (q_real * q_img[0] + q_img[1] * q_img[2])
cosr_cosp = 1 - 2 * (q_img[0] * q_img[0] + q_img[1] * q_img[1])
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2 * (q_real * q_img[1] - q_img[2] * q_img[0])
if abs(sinp) >= 1:
pitch = math.copysign(math.pi / 2, sinp) # use 90 degrees if out of range
else:
pitch = math.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2 * (q_real * q_img[2] + q_img[0] * q_img[1])
cosy_cosp = 1 - 2 * (q_img[1] * q_img[1] + q_img[2] * q_img[2])
yaw = math.atan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
def normalize(v):
if norm(v) == 0:
traceback.print_stack()
v /= norm(v)
return v
def normalized(v):
if v is None:
return None
return normalize(copy.deepcopy(v))
def proj_orth(v1, v2, normalize_res=False, eps=1e-5):
v2_norm = norm(v2)
if v2_norm < eps:
return v1
v2n = v2 / v2_norm
v1 = v1 - np.dot(v1, v2n) * v2n
if normalize_res:
return normalized(v1)
else:
return v1
def axes_to_mat(axis_x, axis_z, dominant_axis="z"):
if dominant_axis == "z":
axis_x = proj_orth(axis_x, axis_z)
elif dominant_axis == "x":
axis_z = proj_orth(axis_z, axis_x)
elif dominant_axis is None:
pass
else:
raise RuntimeError("Unrecognized dominant_axis: %s" % dominant_axis)
axis_x = axis_x / norm(axis_x)
axis_z = axis_z / norm(axis_z)
axis_y = np.cross(axis_z, axis_x)
R = np.zeros((3, 3))
R[0:3, 0] = axis_x
R[0:3, 1] = axis_y
R[0:3, 2] = axis_z
return R
# Projects T to align with the provided direction vector v.
def proj_to_align(R, v):
max_entry = max(
enumerate([np.abs(np.dot(R[0:3, i], v)) for i in range(3)]),
key=lambda entry: entry[1],
)
return axes_to_mat(R[0:3, (max_entry[0] + 1) % 3], v)
def get_world_translation(prim):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
)
return transform.GetTranslation()
def get_world_pose(prim):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
)
return transform.GetRotation()
class SensorRig:
def __init__(self, name, path) -> None:
self.__sensors = []
self.__waypoints = []
self.__curr_waypoint_id = 0
self._prim_path = path
self._prim_name = name
self._full_prim_path = f"{self._prim_path}/{self._prim_name}"
self._prim = None
self._dc = None
self._rb = None
self.start_time = 0
self.velocity = 10
self.sample_rate = 10
self._waypoints_parent = None
self.time = 0
self.sample_time_counter = 0
self._o = "[SensorRig] "
def reset(self):
self.time = 0
def ray_cast(self, origin):
# pos, _ = self.get_pos_rot()
# print(pos)
hit = get_physx_scene_query_interface().raycast_closest(
origin, [0, 0, -1], 100000.0
)
if hit["hit"]:
distance = hit["distance"]
print(hit["position"][2])
return hit["position"][2]
return 0
def create_rig_from_file(self, path, stage, world):
self._world = world
pos, ori = self.load_sensors_from_file(path, stage)
print(
f"{self._o} Creating sensor righ with initial position of: {pos} and rot of {ori}"
)
position = np.array([pos[0], pos[1], pos[2]])
orientation = np.array([ori[0], ori[1], ori[2], ori[3]])
self._prim = XFormPrim(
name=self._prim_name,
prim_path=self._full_prim_path,
position=position / get_stage_units(),
orientation=orientation,
)
omni.kit.commands.execute(
"AddPhysicsComponent",
usd_prim=stage.GetPrimAtPath(self._full_prim_path),
component="PhysicsRigidBodyAPI",
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path(f"{self._full_prim_path}.physxRigidBody:disableGravity"),
value=True,
prev=None,
)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
def hide_waypoints_an_rig(self):
pass
def create_rig(self, position, orientation, stage):
self._dc = _dynamic_control.acquire_dynamic_control_interface()
self._prim = XFormPrim(
name=self._prim_name,
prim_path=self._full_prim_path,
position=position / get_stage_units(),
orientation=orientation,
)
self.actual_prim = stage.GetPrimAtPath(self._full_prim_path)
self.orient_val = self.actual_prim.GetAttribute("xformOp:orient")
# collisionAPI = PhysicsRigidBodyAPI.Apply(self._prim)
omni.kit.commands.execute(
"AddPhysicsComponent",
usd_prim=stage.GetPrimAtPath(self._full_prim_path),
component="PhysicsRigidBodyAPI",
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path(f"{self._full_prim_path}.physxRigidBody:disableGravity"),
value=True,
prev=None,
)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
def apply_veloc(self, veloc, ang_veloc):
# print('applying ', veloc)
self._rb = self._dc.get_rigid_body(self._full_prim_path)
self._dc.set_rigid_body_linear_velocity(self._rb, veloc)
x = ang_veloc
self._dc.set_rigid_body_angular_velocity(self._rb, x)
def setup_sensor_output_path(self, path):
print(path)
instance_mapping = helpers.get_instance_mappings()
print(" ================== initiating mapping")
print(instance_mapping)
np.save(f"{path}/mapping.npy", instance_mapping, allow_pickle=True)
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
pathlib.Path(path + "/timestamps.csv")
self._time_stamp_file = open(path + "/timestamps.csv", "a")
for sensor in self.__sensors:
sensor.init_output_folder(path)
def add_sensor_to_rig(self, sensor):
self.__sensors.append(sensor)
self.__sensors[-1].init_sensor(self._full_prim_path)
def sample_sensors(self, n):
# print("sampling sensors")
self.time += n
self.sample_time_counter += n
# print(self.time)
# log timestep
# Sample all sensors
if self.sample_time_counter >= (1 / self.sample_rate):
# print("sampling at ", self.time)
for sensor in self.__sensors:
# print(sensor)
sensor.sample_sensor()
self._time_stamp_file.write(f"{str(self.time)}\n")
self.sample_time_counter = 0
def sample_sensors_return(self):
sensor_output = []
for sensor in self.__sensors:
sensor_output.append(sensor.sample_sensor())
return sensor_output
def sample_all_sensors(self):
for sensor in self.__sensors:
sensor.sample_sensor()
def get_pos_rot(self):
self._rb = self._dc.get_rigid_body(self._full_prim_path)
object_pose = self._dc.get_rigid_body_pose(self._rb)
return object_pose.p, object_pose.r
def initialize_waypoints(self, waypoint_parent_tag, stage):
# Reset the waypoints
self.__waypoints = []
# Get the current sensor rig position and orientation
# current_pos, current_rot = self.get_pos_rot()
# iter over the stage and get all the waypoints
# go through each child and save its tranform details to the waypoints list.
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
if "_waypoints_" in prim_ref_name:
self._waypoints_parent = prim_ref
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
self.__waypoints.append(get_world_translation(prim_child))
print(f"{self._o} SensorRig waypoints initialization complete:")
print(self.__waypoints)
def initialize_waypoints_preloaded(self, waypoints, parent_prim):
self.__waypoints = []
self.__waypoints = waypoints
self._waypoints_parent = parent_prim
print(f"{self._o} loaded waypoints from file ")
for i in range(len(self.__waypoints)):
origin = self.__waypoints[i]
z = self.ray_cast(origin)
z += 0.7
self.__waypoints[i][2] = z
print(f"{self._o} Synced waypoints to ground")
def _waypoint_update(self, pos):
print(f"{self._o} Waypoint {self.__curr_waypoint_id}/{len(self.__waypoints)}")
# Get the goal position and convert it into the correct type
# print("moving")
goal_pos = self.__waypoints[self.__curr_waypoint_id]
# goal_pos[2] = z_val
goal_pos = Gf.Vec3d(goal_pos)
ori_ = lookat_to_quatf(pos, goal_pos, Gf.Vec3d(0, 0, 1))
rot_vec = quat_to_euler_angles(ori_)
rot_float = 0.0
# Calculate the diff vector
move_vec = goal_pos - pos
distance = np.linalg.norm(goal_pos - pos)
move_vec = (move_vec / distance) * self.velocity
goal_pos_arr = np.array([[goal_pos[0], goal_pos[1], 0]])
pos_arr = np.array([[pos[0], pos[1], 0]])
ori_now = self.orient_val.Get()
rvg = rot_vec
rvc = quat_to_euler_angles(ori_now)
rot_ang = Gf.Vec3d(0, 0, rvg[2] - rvc[2])
calc = rvg[2] - rvc[2]
calc *= 57.2
x_ = rvg[0] - rvc[0]
y_ = rvg[1] - rvc[1]
rot_float = Gf.Vec3d(0, 0, calc / 5.73)
if distance < 0.5:
self.__curr_waypoint_id += 1
if self.__curr_waypoint_id >= len(self.__waypoints):
self.__curr_waypoint_id = 0
timeline = omni.timeline.get_timeline_interface()
timeline.pause()
return self._waypoint_update(pos)
return move_vec, rot_vec, rot_float
def move(self, time_step):
# timeline = omni.timeline.get_timeline_interface()
# timecode = (
# timeline.get_current_time() * timeline.get_time_codes_per_seconds()
# )
self.start_time += time_step
if len(self.__waypoints) == 0:
return
# Retrieve the current position and orientation of the sensor rig
current_pos, current_rot = self.get_pos_rot()
current_pos = Gf.Vec3d(current_pos[0], current_pos[1], current_pos[2])
# Load the correct waypoint, check if we should change to next one ..
# and then calculate the required move vector.
move_vec, rot_vec, rot_float = self._waypoint_update(current_pos)
# Apply the required veloc
self.apply_veloc(move_vec, rot_float)
def load_sensors_from_file(self, file_path, stage):
with open(file_path, "r+") as infile:
print(f"{self._o} Loading sensor rig from file at {file_path}.")
data = json.load(infile)
# print(data)
pos = data["POSITION"]
ori = data["ORIENTATION"]
self.velocity = data["VELOCITY"]
self.sample_rate = data["SAMPLE_RATE"]
self.create_rig(np.array(pos), np.asarray(ori), stage)
sensors = data["SENSORS"]
print(sensors)
for key in sensors:
if key == "LIDAR":
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
lidar = Lidar()
lidar.read_from_json(sensor_settings)
self.add_sensor_to_rig(lidar)
elif key == "CAMERA":
print("creating camera")
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
cam = DepthCamera()
cam.read_from_json(sensor_settings)
self.add_sensor_to_rig(cam)
elif key == "IMU":
for sensor_id in sensors[key]["instances"]:
sensor_settings = sensors[key]["instances"][sensor_id]
imu = IMUSensor()
imu.read_from_json(sensor_settings)
self.add_sensor_to_rig(imu)
else:
print(" ERROR, tried adding sensor with type ", key)
return pos, ori
def init_output_folder(self, path):
# create any paths needed
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
pathlib.Path(path + "/timestamps.csv")
print(instance_mapping)
self._time_stamp_file = open(path + "/timestamps.csv", "a")
# create any needed directories for the sensors
for sensor in self.__sensors:
sensor.init_output_folder(path)
"""
{"POSITION" : [0,0,0],
"ORIENTATION" : [0,0,0,0],
"SENSORS":{
"IMU":{
"instances":
{"1" :
{
"name" : "imu",
"position": [0.0, 0.0, 0.0],
"rotation" : [0.0,0.0,0.0]
}
}
},
"CAMERA" :
{"instances" :
{"1" :
{
"name" : "camera",
"focal_length": 24.0,
"focus_distance" : 400.0,
"f_stop": 0.0,
"horizontal_aperture": 20.955,
"horizontal_aperture_offset": 0,
"vertical_aperture_offset": 0,
"clipping_range": [1.0, 1000000.0],
"resolution": [1024,1024],
"position" : [0.0,0.0,0.0],
"rotation" : [0.0,0.0,0.0]
}
}
},
"LIDAR":
{"instances" :
{"1" :
{"name": 1,
"min_range": 0.4,
"max_range": 100.0,
"draw_points": false,
"draw_lines" : false,
"horizontal_fov": 360,
"vertical_fov": 60.0,
"rotation_rate": 0.0,
"horizontal_resolution": 0.4,
"vertical_resolution" : 0.4,
"high_lod":true,
"yaw_offset": 0.0,
"enable_semantics":true,
"origin_pos": [0,0,0],
"rotation" : [0.0,0.0,0.0]
}
}
}
}
}
def add_depth_camera_to_rig(
self,
position=(0, 0, 0),
rotation=(0, 0, 0),
image_size=(512, 512),
attach=True,
name='/DepthCamera',
):
self.__sensors.append(
DepthCamera(
position,
rotation,
image_size,
attach,
self._full_prim_path,
name,
)
)
def add_lidar_to_rig(self, name, origin_pos):
self.__sensors.append(
Lidar(
path=name, parent=self._full_prim_path, origin_pos=origin_pos
)
)
"""
| 17,079 | Python | 30.571164 | 94 | 0.569471 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
from .synthetic_perception import SyntheticPerception
from .synthetic_perception_extension import SyntheticPerceptionExtension
# from .sensors import Lidar
| 765 | Python | 33.81818 | 76 | 0.783007 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/setup.py | from setuptools import setup, find_packages
setup(
name = 'your_package_name',
packages = find_packages(),
) | 116 | Python | 22.399996 | 43 | 0.698276 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/synthetic_perception_extension.py | """
This class and functions handles the UI/UX side of the extension.
All UX is collected and managed here and call their respective functions in the main
SyntheticPerception sample.
"""
import time
import random
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.physx import acquire_physx_interface
import os
from pxr import Usd, Gf, Ar, Pcp, Sdf, UsdRi, UsdGeom, UsdPhysics
from pxr import UsdShade, Sdf
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.kit.window.popup_dialog import FormDialog
import asyncio
import omni.ui as ui
from omni.isaac.ui.ui_utils import (
btn_builder,
int_builder,
float_builder,
dropdown_builder,
combo_floatfield_slider_builder,
str_builder,
xyz_builder,
) # , str_builder
from omni.isaac.ui import (
FloatField,
CheckBox,
StateButton,
DropDown,
StringField,
Button,
CheckBox,
)
from omni.isaac.core import SimulationContext
from .PCG.WorldGenerator import WorldManager
from .synthetic_perception import SyntheticPerception
import omni
import json
from omni.isaac.core.utils.stage import (
update_stage,
add_reference_to_stage,
is_stage_loading,
update_stage_async,
)
from omni.isaac.core.utils.prims import define_prim, delete_prim
from .PCG.MeshGenerator import MeshGen
# from .Utils.EnvDataTool.EnvCreator import EnvTool
import open3d as o3d
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import omni.kit.asset_converter
import carb
from omni.kit.window.popup_dialog.dialog import PopupDialog
class SelectedPrim:
def __init__(self) -> None:
self.prim = None
self.prim_path = None
self.object_scale = 1
self.object_scale_delta = 0
self.allow_y_rot = False
self.unique_id = ''
self.usd_path = ''
self.class_name = ''
self.posson_size = 1
def get_y_rot_state(self):
if self.allow_y_rot:
return 'Enabled'
return 'Disabled'
def __str__(self) -> str:
return f'prim: {self.prim} \n prim_path: {self.prim_path}\n Object Scale: {self.object_scale}\n \
object scale delta: {self.object_scale_delta}\n allow y rot: {self.allow_y_rot}\n usdpath: {self.usd_path}\n unique_id: {self.unique_id}'
class SyntheticPerceptionExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name='ExtensionName',
submenu_name='',
name='Synthetic perception',
title='Synthetic Perception',
doc_link='https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html',
overview="This extension provides tools to both generate procedural environments within Isaac as well as capturing and saving sensor data. This also includes waypoint movement and other such movement types.",
sample=SyntheticPerception(),
file_path=os.path.abspath(__file__),
number_of_extra_frames=12,
window_width=700,
)
self.object_data_save_path = ''
self.task_ui_elements = {}
self.world_gen_ui_elements = {}
self.usd_context = omni.usd.get_context()
self.selected_prim = SelectedPrim()
self.selected_prim_dict = {}
self._object_selector = False
self.prim = None
self._object_path = ''
self._world_path = ''
self.mm = False
self.OBJECT_EDITING_ALLOWED = False
# frame = self.get_frame(index=0)
# self.build_task_controls_ui(frame)
# frame = self.get_frame(index=0)
# self.build_sensor_ui(frame)
# frame = self.get_frame(index=1)
# self.build_worldgen_ui(frame)
self._window.visible = True
frame = self.get_frame(index=0)
self.build_pcg_env_ui(frame)
self._window.visible = True
frame = self.get_frame(index=1)
self.build_sensor_rig_ui(frame)
frame = self.get_frame(index=2)
self.setup_worldgen_ui(frame)
self.events = self.usd_context.get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self._get_obj_details, name='Object Info Selection Update'
)
def on_stage_event(self, event):
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
prim_path = (
self.usd_context.get_selection().get_selected_prim_paths()
)
if not prim_path:
return
def shutdown_cleanup(self):
self.sample.remove_all_objects()
def build_popup(self, errors):
message_out = ''.join([str + '\n' for str in errors])
dialog = FormDialog(
title='ERROR',
message=message_out,
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
def build_sensor_rig_ui(self, frame):
self.build_sensor_rig_ui_values = {}
self.build_sensor_rig_ui_values['RigPath'] = ''
self.build_sensor_rig_ui_values['WaypointPath'] = ''
self.build_sensor_rig_ui_values['MovementType'] = None
self.build_sensor_rig_ui_values['OutputSavePath'] = ''
self.sample.setup_scene()
def has_missing_inputs_init_rig():
errors = []
if self.build_sensor_rig_ui_values['RigPath'] == '':
errors.append('No Sensor Rig path supplied')
if self.build_sensor_rig_ui_values['OutputSavePath'] == '':
errors.append('No output path supplied')
# Check if both files exist
if not self._check_file_exists(
self.build_sensor_rig_ui_values['RigPath']
):
errors.append(
'Sensor rig parameter file invalid or does not exist.'
)
if len(errors) != 0:
self.build_popup(errors)
return True
return False
def has_missing_inputs_wp():
errors = []
if self.build_sensor_rig_ui_values['WaypointPath'] == '':
errors.append('No waypoint path supplied')
# Check if both files exist
if not self._check_file_exists(
self.build_sensor_rig_ui_values['WaypointPath']
):
errors.append('Waypoint file is not valid or does not exist.')
if len(errors) != 0:
self.build_popup(errors)
return True
return False
async def init_rig_and_waypoints():
# await asyncio.ensure_future(self.sample.init_world())
self.sample.init_sensor_rig_from_file(
self.build_sensor_rig_ui_values['RigPath'],
self.build_sensor_rig_ui_values['OutputSavePath'],
)
def load_sensor_rig_from_path():
if has_missing_inputs_init_rig():
return
asyncio.ensure_future(init_rig_and_waypoints())
stage = omni.usd.get_context().get_stage()
parent = stage.GetPrimAtPath('/_WAYPOINTS_')
if not parent:
# parent = XFormPrim(
# name="_WAYPOINTS_",
# prim_path = "/"
# )
parent = define_prim('/_WAYPOINTS_', 'Xform')
cube_prim = stage.GetPrimAtPath('/_WAYPOINTS_/w_01')
if not cube_prim:
cube_prim = stage.DefinePrim('/_WAYPOINTS_/w_01', 'Cube')
UsdGeom.Xformable(cube_prim).AddTranslateOp().Set((0.0, 0.0, 0.0))
def update_sensor_rig_path(val):
self.build_sensor_rig_ui_values['RigPath'] = val
def update_rig_movement_type(val):
self.build_sensor_rig_ui_values['MovementType'] = val
def update_waypoint_path(val):
self.build_sensor_rig_ui_values['WaypointPath'] = val
def load_waypoints_intermediate():
asyncio.ensure_future(load_waypoints())
async def load_waypoints():
if has_missing_inputs_wp():
return
# self.sample.force_reload()
await asyncio.ensure_future(self.sample._on_load_world_async())
# await asyncio.ensure_future(self.sample.init_world())
# print(self.sample._world.GetAttributes())
# print(self.sample._world.__dir__())
stage = omni.usd.get_context().get_stage()
# Add a physics scene prim to stage
scene = UsdPhysics.Scene.Define(
stage, Sdf.Path('/World/physicsScene')
)
stage = omni.usd.get_context().get_stage()
if not self.build_sensor_rig_ui_values['WaypointPath']:
dialog = FormDialog(
title='ERROR No path',
message='No waypoint file was given. Not saving - please input a save path.',
)
return
with open(
self.build_sensor_rig_ui_values['WaypointPath'], 'r'
) as fh:
json_data = json.load(fh)
# print('Trying to load waypoints')
# print(json_data)
initial_prim_path = '/_WAYPOINTS_'
prim_check= stage.GetPrimAtPath(initial_prim_path)
parent = prim_check
if not prim_check:
parent = define_prim('/_WAYPOINTS_', 'Xform')
initial_prim_wp = '/_WAYPOINTS_/w_01'
prim_check = stage.GetPrimAtPath(initial_prim_wp)
if prim_check:
delete_prim(initial_prim_path)
for i, c in enumerate(json_data):
# parent = define_prim('/_WAYPOINTS_', 'Xform')
cube_prim = stage.DefinePrim(
'/_WAYPOINTS_/w_{:02d}'.format(i + 1), 'Cube'
)
UsdGeom.Xformable(cube_prim).AddTranslateOp().Set(
Gf.Vec3d(c)
)
self.sample.sr.initialize_waypoints_preloaded(json_data,stage.GetPrimAtPath("/_WAYPOINTS_"))
self.sample._world.add_physics_callback(
'sim_step', callback_fn=self.sample.sr.move
)
self.sample.attach_sensor_sample_callback()
def update_output_save_path(val):
self.build_sensor_rig_ui_values['OutputSavePath'] = val
def save_waypoints():
def __n():
print('')
if not self.build_sensor_rig_ui_values['WaypointPath']:
dialog = FormDialog(
title='ERROR No path',
message='No waypoint file was given. Not saving - please input a save path.',
)
return
stage = omni.usd.get_context().get_stage()
waypoints = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
if '_WAYPOINTS_' in prim_ref_name:
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
translate = prim_child.GetAttribute(
'xformOp:translate'
).Get()
waypoints.append(
[translate[0], translate[1], translate[2]]
)
with open(
self.build_sensor_rig_ui_values['WaypointPath'], 'w'
) as fh:
json.dump(waypoints, fh, indent=1)
def run():
def run_step_temp(n):
curr_time = time.time() - self.minhan_timer_start
# print(curr_time)
if curr_time > 2:
self.minhan_timer_start = time.time()
scale = random.uniform(1.0,10.0)
self.plane_prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d(scale,scale,scale))
# self.sample.sr.hide_waypoints_an_rig()
self.minhan_timer_start = 0
stage = omni.usd.get_context().get_stage()
self.plane_prim = stage.GetPrimAtPath("/World/Plane")
# print(plane_prim)
# print(plane_prim.GetAttributes())
self.plane_prim.GetAttribute('physics:angularVelocity').Set(Gf.Vec3d(5.0,5.0,5.0))
self.sample._world.add_physics_callback(
'demo_step', callback_fn=run_step_temp
)
def sample():
print("trying to sample")
self.sample.sr.sample_all_sensors()
self._sensor_rig_ui_inputs = {}
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Sensor Rig'
frame.visible = True
self._sensor_rig_ui_inputs['RigPath'] = StringField(
'Sensor Rig settings path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_sensor_rig_path,
)
self._sensor_rig_ui_inputs['OutputSavePath'] = StringField(
'Output path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_output_save_path,
)
self._sensor_rig_ui_inputs['LoadRig'] = Button(
'Load sensor rig',
'Load',
on_click_fn=load_sensor_rig_from_path,
)
self._sensor_rig_ui_inputs['MovementType'] = DropDown(
'Movement Type: ', on_selection_fn=update_rig_movement_type
)
self._sensor_rig_ui_inputs['MovementType'].set_items(
['WAYPOINT', 'KEYBOARD']
)
self._sensor_rig_ui_inputs['WaypointPath'] = StringField(
'Waypoints path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=update_waypoint_path,
)
self._sensor_rig_ui_inputs['LoadWaypoints'] = Button(
'Load & attach waypoints',
'Load',
on_click_fn=load_waypoints_intermediate,
)
self._sensor_rig_ui_inputs['SaveWaypoints'] = Button(
'Save waypoints',
'Save',
on_click_fn=save_waypoints,
)
# self._sensor_rig_ui_inputs['Run Simulation'] = Button(
# 'Run Simulation',
# 'Run',
# on_click_fn=run_sim,
# )
self._sensor_rig_ui_inputs['run'] = Button(
'Minghan run',
'run',
on_click_fn=run,
)
self._sensor_rig_ui_inputs['sample'] = Button(
'Sample sensors',
'sampe',
on_click_fn=sample,
)
def init_semantics_in_scene(self):
self.sample.init_semantics_in_scene()
def _rebuild_update(self, e):
if str(e) == 'Manual':
self.mm = True
if str(e) == 'Waypoints':
self.mm = False
print(self.mm)
return e
def update_scale(self, val):
if self.prim and val > 0:
_ = self.prim.GetAttribute('xformOp:scale').Set(
Gf.Vec3d([val, val, val])
)
self.selected_prim.object_scale = val
self.selected_prim_dict[self.current_path].object_scale = val
def update_scale_delta(self, val):
if self.prim and val > 0:
# update the local info
# _ = self.prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d([val,val,val]))
self.selected_prim.object_scale_delta = val
self.selected_prim_dict[self.current_path].object_scale_delta = val
def update_poisson_size(self, val):
if self.prim and val > 0:
# update the local info
# _ = self.prim.GetAttribute('xformOp:scale').Set(Gf.Vec3d([val,val,val]))
self.selected_prim_dict[self.current_path].posson_size = val
def update_yrot(self, val):
if self.prim and val != 'Not Selected':
enable_y_rot = False
if (
val == 'Enabled'
): # self.world_gen_ui_elements["AllowYRot"].get_selected() == "Enabled":
enable_y_rot = True
self.selected_prim.allow_y_rot = enable_y_rot
self.selected_prim_dict[
self.current_path
].allow_y_rot = enable_y_rot
def prim_name_update(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].unique_id = val
def class_name_update(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].class_name = val
def update_usd_path(self, val):
if self.prim and val != '':
self.selected_prim_dict[self.current_path].usd_path = val
def save_path_update(self, val):
self.object_data_save_path = val
def _true(self, val):
return True
def save_object_data_to_file(self):
def where_json(file_name):
return os.path.exists(file_name)
# Guard statement to catch no save path
if self.object_data_save_path == '':
dialog = FormDialog(
title='ERROR No path',
message='No save file was given. Not saving - please input a save path.',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
if '.json' not in self.object_data_save_path:
dialog = FormDialog(
title='ERROR no specific file',
message='No save file was given. Not saving - please input a save path with a filename and the .json extension.',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
if self.selected_prim_dict[self.current_path].usd_path == '':
dialog = FormDialog(
title='ERROR no usd path',
message='No USD path was specified. This is required and must exist!',
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
# Check if file exists at path
# print('Attempting to edit or create the save obj file')
data = {}
with open(self.object_data_save_path, 'r+') as infile:
try:
data = json.load(infile)
except:
pass
selected = self.selected_prim_dict[self.current_path]
with open(self.object_data_save_path, 'w+') as outfile:
specific_data = {
'object_scale': selected.object_scale,
'object_scale_delta': selected.object_scale_delta,
'poisson_size': selected.posson_size,
'allow_y_rot': selected.allow_y_rot,
'class_name': selected.class_name,
'usd_path': selected.usd_path,
}
data[selected.unique_id] = specific_data
# data[local_selected.unique_id]=
json.dump(data, outfile)
def setup_worldgen_ui(self, frame):
def test_check(val):
self.OBJECT_EDITING_ALLOWED = val
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Object set up'
frame.visible = True
self.world_gen_ui_elements['toggle'] = CheckBox(
'Object setup mode', on_click_fn=test_check
)
# print(self.world_gen_ui_elements["toggle"].__dir__())
self.world_gen_ui_elements['SavePath'] = StringField(
'SavePath',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self.save_path_update,
)
self.world_gen_ui_elements['PrimName'] = StringField(
'Unique Name',
'None',
read_only=False,
on_value_changed_fn=self.prim_name_update,
)
self.world_gen_ui_elements['PrimName'].set_value('None')
self.world_gen_ui_elements['ClassName'] = StringField(
'Class Name',
'None',
read_only=False,
on_value_changed_fn=self.class_name_update,
)
self.world_gen_ui_elements['SelectedObjScale'] = FloatField(
'Object Scale',
default_value=1.0,
on_value_changed_fn=self.update_scale,
)
self.world_gen_ui_elements[
'SelectedObjScaleDelta'
] = FloatField(
'Object Scale Delta +/-',
on_value_changed_fn=self.update_scale_delta,
)
self.world_gen_ui_elements['PoissonSize'] = FloatField(
'Poisson Point Size',
default_value=1.0,
on_value_changed_fn=self.update_poisson_size,
)
# self.world_gen_ui_elements["AllowYRot"] = CheckBox("Allow Y-axis rotation", default_value = False, on_click_fn=self.update_yrot)
self.world_gen_ui_elements['AllowYRot'] = DropDown(
'Allow Y-axis rotation', on_selection_fn=self.update_yrot
)
self.world_gen_ui_elements['AllowYRot'].set_items(
['Not Selected', 'Enabled', 'Disabled']
)
self.world_gen_ui_elements['USDPath'] = StringField(
'USD Path',
use_folder_picker=True,
on_value_changed_fn=self.update_usd_path,
)
self.world_gen_ui_elements['SAVE'] = Button(
'Save this object to file',
'SAVE',
on_click_fn=self.save_object_data_to_file,
)
self.prim = None
self.position = [0, 0, 0]
def _get_obj_details(self, event):
# if not self._object_selector:
# return
if not self.OBJECT_EDITING_ALLOWED:
return
prim_path = self.usd_context.get_selection().get_selected_prim_paths()
self.world_gen_ui_elements['SavePath'] = self.object_data_save_path
if not prim_path:
for key in self.world_gen_ui_elements:
if type(self.world_gen_ui_elements[key]) == FloatField:
self.world_gen_ui_elements[key].set_value(0)
if type(self.world_gen_ui_elements[key]) == DropDown:
self.world_gen_ui_elements[key].set_selection(
'Not Selected'
)
if type(self.world_gen_ui_elements[key]) == StringField:
self.world_gen_ui_elements[key].set_value('')
return
stage = self.usd_context.get_stage()
prim = stage.GetPrimAtPath(prim_path[0])
self.prim = prim
self.current_path = prim_path[0]
# Check if the prim exists in our current dictionary
if self.current_path not in self.selected_prim_dict:
# Create the entry
self.selected_prim_dict[self.current_path] = SelectedPrim()
# This entry should now exist so we can use it.
self.selected_prim.prim = prim
self.selected_prim.prim_path = prim_path[0]
# print('prim: ' + str(prim), " ", self.prim.GetAttributes())
obj_scale = self.prim.GetAttribute('xformOp:scale').Get()
# self.selected_prim.object_scale = sum(obj_scale) / len(obj_scale)
self.selected_prim_dict[self.current_path].object_scale = sum(
obj_scale
) / len(obj_scale)
if (
self.selected_prim_dict[self.current_path].unique_id == ''
or self.selected_prim_dict[self.current_path].unique_id == 'None'
):
self.selected_prim_dict[
self.current_path
].unique_id = self.current_path.split('/')[-1]
self.world_gen_ui_elements['PrimName'].set_value(
self.selected_prim_dict[self.current_path].unique_id
)
self.world_gen_ui_elements['ClassName'].set_value(
self.selected_prim_dict[self.current_path].class_name
)
self.world_gen_ui_elements['SelectedObjScale'].set_value(
self.selected_prim_dict[self.current_path].object_scale
)
self.world_gen_ui_elements['SelectedObjScaleDelta'].set_value(
self.selected_prim_dict[self.current_path].object_scale_delta
)
self.world_gen_ui_elements['PoissonSize'].set_value(
self.selected_prim_dict[self.current_path].posson_size
)
self.world_gen_ui_elements['AllowYRot'].set_selection(
self.selected_prim_dict[self.current_path].get_y_rot_state()
)
self.world_gen_ui_elements['USDPath'].set_value(
self.selected_prim_dict[self.current_path].usd_path
)
def _update_object_path(self, val):
if val != '':
self._object_path = val
def _update_world_path(self, val):
if val != '':
self._world_path = val
def _check_file_exists(self, path):
try:
with open(path, 'r+') as infile:
return True
except:
return False
def _run_world_creation(self):
# (
# obs_to_spawn,
# object_dict,
# height_map,
# ) = self.sample.generate_world_generator(
# 'C:\\Users\\jonem\\Desktop\\worlddata2.json',
# 'C:\\Users\\jonem\\Desktop\\new_objects_save.json',
# )
#
# asyncio.ensure_future(self.sample._on_load_world_async())
# asyncio.ensure_future(
# self.sample.spawn_all(obs_to_spawn, object_dict, height_map)
# )
# return
errors = []
if self._object_path == '':
errors.append('No Object path specified.')
if '.json' not in self._object_path:
errors.append('Object path does not contain .json extension.')
if self._world_path == '':
errors.append('No world path environment file was specified.')
if '.json' not in self._world_path:
errors.append('World path does not contain .json exntension.')
# Check if both files exist
if not self._check_file_exists(self._object_path):
errors.append('Object path file specified does not exist.')
if not self._check_file_exists(self._world_path):
errors.append('World path file specified does not exist.')
if len(errors) != 0:
message_out = ''.join([str + '\n' for str in errors])
dialog = FormDialog(
title='ERROR',
message=message_out,
ok_handler=lambda dialog: print(
f"Form accepted: '{dialog.get_values()}'"
),
)
return
print("Starting world gen")
WG = WorldManager()
WG.create_world(self._world_path, self._object_path)
print("world creation finished")
return
# (
# obs_to_spawn,
# object_dict,
# height_map,
# normals
# ) = self.sample.generate_world_generator(
# self._world_path, self._object_path
# )
# print("Starting obj spawn")
# asyncio.ensure_future(self.sample._on_load_world_async())
# asyncio.ensure_future(
# self.sample.spawn_all(obs_to_spawn, object_dict, height_map, normals)
# )
def build_pcg_env_ui(self, frame):
def open_world_creator():
pass
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = 'Generate World Set up'
frame.visible = True
self.world_gen_ui_elements['RunCreateTool'] = Button(
'Open the world creator tool',
'Open',
on_click_fn=open_world_creator,
)
self.world_gen_ui_elements['ObjectsPath'] = StringField(
'Objects Path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self._update_object_path,
)
self.world_gen_ui_elements['WorldPath'] = StringField(
'World Path',
'None',
read_only=False,
use_folder_picker=True,
item_filter_fn=self._true,
on_value_changed_fn=self._update_world_path,
)
self.world_gen_ui_elements['SAVE'] = Button(
'Initialize world generation',
'Create World',
on_click_fn=self._run_world_creation,
)
self.world_gen_ui_elements['InitSemantics'] = Button(
'Initialize semantics and physics (must do)',
'Initialize',
on_click_fn=self.init_semantics_in_scene,
)
# async def material_test(self):
#
# shape = (256, 256)
# threshold = 0.5
# region_value = 1
# # Convert to pymeshlab mesh
# l = shape[0] * 10 # 2560
# data = generate_perlin_noise_2d(shape, (8, 8))
# data = (data - np.min(data)) / (np.max(data) - np.min(data))
# data[data < threshold] = 0
# data[data >= threshold] = region_value
# mGen = MeshGen(
# 256,
# 10,
# data,
# 'C:/Users/jonem/Documents/Kit/apps/Isaac-Sim/exts/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG',
# )
# mGen.generate_terrain_mesh()
# return
# # asyncio.ensure_future(self.sample.init_world())
# print(' =========================== ')
# mat_path = 'http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Natural/Dirt.mdl'
# prim_path = '/World/mesh_1'
# mat = '/World/Looks/Dirt'
#
# stage = omni.usd.get_context().get_stage()
# obj_prim = stage.GetPrimAtPath(prim_path)
# mat_name = 'Dirt'
# # omni.kit.commands.execute(
# # 'CreateMdlMaterialPrimCommand',
# # mtl_url=mat_path,
# # mtl_name=f'{mat_name}',
# # mtl_path=f'/World/Looks/{mat_name}',
# # )
#
# # omni.kit.commands.execute(
# # 'CreateMdlMaterialPrimCommand',
# # mtl_url=mat_path,
# # mtl_name=f'{mat_name}',
# # mtl_path=f'/World/Looks/{mat_name}',
# # )
# #
# # # update_stage()
# # _ = omni.kit.commands.execute(
# # 'BindMaterialCommand',
# # prim_path=prim_path,
# # material_path=f'/World/Looks/{mat_name}',
# # )
# mtl_created_list = []
#
# omni.kit.commands.execute(
# 'CreateAndBindMdlMaterialFromLibrary',
# mdl_name=mat_path,
# mtl_name=mat_name,
# mtl_created_list=mtl_created_list,
# )
#
# mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
#
# omni.usd.create_material_input(
# mtl_prim,
# 'project_uvw',
# True,
# Sdf.ValueTypeNames.Bool,
# )
#
# omni.usd.create_material_input(
# mtl_prim,
# 'texture_scale',
# Gf.Vec2f(0.001, 0.001),
# Sdf.ValueTypeNames.Float2,
# )
# cube_mat_shade = UsdShade.Material(mtl_prim)
#
# UsdShade.MaterialBindingAPI(obj_prim).Bind(
# cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
# )
# return
#
# # Set material inputs, these can be determined by looking at the .mdl file
#
# # or by selecting the Shader attached to the Material in the stage window and looking at the details panel
#
# print('wait')
# await update_stage_async()
# print('continue')
# update_stage()
# while is_stage_loading():
# await update_stage_async()
#
# stage = omni.usd.get_context().get_stage()
# p = stage.GetPrimAtPath(f'{mat}/Shader')
# not_set = False
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# print('wait')
# await update_stage_async()
# print('continue')
# update_stage()
# while is_stage_loading():
# await update_stage_async()
# # while not not_set:
# # try:
# # material_attributes = p.GetAttributes()
# # p.GetAttribute('inputs:project_uvw').Set(True)
# # not_set = True
# # print("success: ", _)
# # except:
# #
# # print("failure: ", _)
# # await update_stage_async()
# #
#
# material_attributes = p.GetAttributes()
# p.GetAttribute('inputs:project_uvw').Set(True)
# p.GetAttribute('inputs:texture_scale').Set((0.001, 0.001))
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
#
# omni.kit.commands.execute(
# 'SelectPrims',
# old_selected_paths=['/World'],
# new_selected_paths=['/World/Looks/Dirt'],
# expand_in_stage=True,
# )
# def build_task_controls_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'Sensor Controls'
# frame.visible = True
#
# self.add_button_title(
# 'Attach Sys To Scene', 'Attach', self._loadtest
# )
# self.add_button_title(
# 'Init waypoints & attach', 'Attach', self._testRigWaypoint
# )
#
# # self.add_button('veloc', self._save_lidar_info_event)
# # self.task_ui_elements['veloc'].enabled = True
#
# self.add_button('sample sensors', self._on_sample_sensors)
# self.task_ui_elements['sample sensors'].enabled = True
# # self.add_string_field("test", self._empty_func)
#
# self.add_button('init_world', self.ui_init_world)
# self.task_ui_elements['init_world'].enabled = True
#
# self.add_button(
# 'load_sensors', self.test_load_sensors_from_file
# )
# self.task_ui_elements['load_sensors'].enabled = True
# # OTHER UI NEEDED
# # load sensor rig
# # ^ let the above handle waypoints and everything
#
# # self.add_button('init_semantics', self.ui_init_semantics)
# # self.task_ui_elements['init_semantics'].enabled = True
# # self.add_button('area gen test', self._empty_func)
# # self.task_ui_elements['area gen test'].enabled = True
# def build_sensor_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'Sensors'
# frame.visible = True
# self.task_ui_elements['movement_mode'] = dropdown_builder(
# items=['Waypoints', 'Manual', 'Linear'],
# on_clicked_fn=self._rebuild_update,
# )
# self.task_ui_elements['movement_speed'] = int_builder(
# 'move speed'
# )
# def add_button(self, label, on_clicked_fn):
# """Adds a button to the task frame"""
# dict = {
# 'label': label,
# 'type': 'button',
# 'text': label,
# 'tooltip': label,
# 'on_clicked_fn': on_clicked_fn,
# }
#
# self.task_ui_elements[label] = btn_builder(**dict)
# self.task_ui_elements[label].enabled = False
# async def ini(self):
# await asyncio.ensure_future(self.sample.init_world())
# self.sample.init_sensor_rig_from_file()
#
# stage = omni.usd.get_context().get_stage()
# self.sample.sr.initialize_waypoints('', stage)
# print('Attach move to callback')
# self.sample.attach_sensor_waypoint_callback(self.sample.sr)
# def _add_to_scene_event(self):
# self.sample.init_sensor_and_semantics()
#
# def _on_load_scene_button_event(self):
# self._add_to_scene_event()
# def build_worldgen_ui(self, frame):
# with frame:
# with ui.VStack(spacing=5):
# # Update the Frame Title
# frame.title = 'World Gen'
# frame.visible = True
# self.add_button('init_world', self.ui_init_world)
# self.task_ui_elements['init_world'].enabled = True
#
# self.add_button('init_semantics', self.ui_init_semantics)
# self.task_ui_elements['init_semantics'].enabled = True
# # self.add_button('area gen test', self._empty_func)
# # self.task_ui_elements['area gen test'].enabled = True
| 39,562 | Python | 35.430018 | 220 | 0.518048 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/worldUtils.py |
import open3d as o3d
import numpy as np
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import colorsys
import json
import asyncio
import numpy.typing as npt
class ObjectPrim:
def __init__(
self,
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
) -> None:
self.object_scale = scale
self.object_scale_delta = scale_delta
self.allow_y_rot = y_rot
self.unique_id = u_id
self.usd_path = usd_path
self.class_name = class_name
self.poisson_size = poisson_size
def __str__(self) -> str:
return f"""
{self.unique_id}
scale: {self.object_scale} +/- {self.object_scale_delta}
allow y rot: {self.allow_y_rot}
poisson size: {self.poisson_size}
class name: {self.class_name}
usd path: {self.usd_path}
"""
pass
class TerrainPrim:
def __init__(self, mesh_path, mat_path, scale=0.001) -> None:
self.mesh_path = mesh_path
self.material_path = mat_path
self.scale = scale
class WorldHandler:
def __init__(self, world_path, object_path) -> None:
# self.objects = []
self.objects_dict = {}
self._object_path = object_path
self._world_path = world_path
self.objects_to_spawn = {}
self._WORLD_TO_POISSON_SCALE = 1.6
def _read_objects(self):
with open(self._object_path, 'r+') as infile:
data = json.load(infile)
# print(data)
for key in data:
scale = data[key]['object_scale']
scale_delta = data[key]['object_scale_delta']
y_rot = data[key]['allow_y_rot']
u_id = key
usd_path = data[key]['usd_path']
class_name = data[key]['class_name']
poisson_size = data[key]['poisson_size']
tmp = ObjectPrim(
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
)
# self.objects.append(tmp)
self.objects_dict[u_id] = tmp
# for i in self.objects:
# print(i)
def _read_world(self):
# print("here")
self.objects_to_spawn = {}
data = None
objs_per_region = {}
with open(self._world_path, 'r+') as infile:
data = json.load(infile)
if data != None:
n = data['size']
arr = np.zeros((n, n))
total_arr = np.zeros((n, n))
regions = data['regions']
terrain_info = {}
# print( " == ", np.unique(total_arr))
for region_id in regions:
region_id = str(region_id)
terrain_info[region_id] = TerrainPrim(
'',
regions[region_id]['material_path'],
regions[region_id]['material_scale'],
)
# print("terrrain info key type ", type(region_id))
new_arr = PerlinNoise.generate_region2(
seed=int(region_id),
shape=(n, n),
threshold=float(regions[region_id]['threshold']),
show_plot=False,
region_value=int(region_id),
)
arr = append_to_area(arr, new_arr, int(region_id))
total_arr = arr
# handle objects in the zone
objs = regions[region_id]['objects']
objs_per_region[region_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[str(obj_uid)]
objs_per_region[region_id].append(object_prim)
# now we need to deal with sub zones
zones = regions[region_id]['zones']
for zone_id in zones:
terrain_info[str(zone_id)] = TerrainPrim(
'',
zones[zone_id]['material_path'],
zones[zone_id]['material_scale'],
)
new_arr = PerlinNoise.generate_region2(
seed=int(zone_id),
shape=(n, n),
threshold=float(zones[zone_id]['threshold']),
show_plot=False,
region_value=int(zone_id),
)
zone_to_save = append_inside_area(
arr, new_arr, int(zone_id)
)
# print("zone == ", zone_id, " ", zone_id)
total_arr = zone_to_save
objs = zones[zone_id]['objects']
objs_per_region[zone_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[obj_uid]
objs_per_region[zone_id].append(object_prim)
for key in objs_per_region:
obs = objs_per_region[key]
if len(obs) > 0:
for obj in obs:
area, coords = fill_area(
total_arr,
obj.poisson_size / self._WORLD_TO_POISSON_SCALE,
int(key),
999,
)
self.objects_to_spawn[obj.unique_id] = coords
return total_arr, n, terrain_info
| 6,051 | Python | 33.582857 | 76 | 0.455627 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/WorldGenerator.py | import asyncio
import random
import omni
import numpy as np
from .AreaMaskGenerator import generate_world_from_file
from omni.isaac.core.utils.semantics import get_semantics
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from pxr import Usd, Gf
from omni.isaac.core.utils.stage import (
add_reference_to_stage,
is_stage_loading,
update_stage_async,
update_stage,
)
from pxr import UsdShade, Sdf
class WorldManager:
def __init__(self) -> None:
self.__undefined_class_string = "NAN"
self.occupancy = []
self._o = "[World generator] "
def add_semantic(self, p, prim_class):
"""Adds semantic to prim"""
sem_dict = get_semantics(p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(p)
if 'Semantics' not in sem_dict:
# print(
# 'adding semantics and collider to ',
# p.GetPrimPath(),
# ' of class ',
# prim_class,
# )
sem = Semantics.SemanticsAPI.Apply(p, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(prim_class)
def __add_semantics_to_all2(self, stage):
"""Add semantic information to all prims on stage based on parent xform"""
prim_class = self.__undefined_class_string
completed_classes = []
for prim_ref in stage.Traverse():
prim_ref_name = str(prim_ref.GetPrimPath())
len_of_prim = len(prim_ref_name.split('/'))
for word in prim_ref_name.split('/'):
if 'class' in word and word not in completed_classes:
prim_class = word
# self.add_semantic(prim_ref, prim_class)
for i in range(len(prim_ref.GetChildren())):
prim_child = prim_ref.GetChildren()[i]
len_of_child = len(
str(prim_child.GetPrimPath()).split('/')
)
# print(len_of_prim, ' : ', len_of_child)
if abs(len_of_prim - len_of_child) == 1:
# print(prim_child)
self.add_semantic(prim_child, prim_class)
completed_classes.append(prim_class)
def spawn_asset(
self,
asset_path,
class_name,
prim_name,
x,
y,
z,
scale,
object_scale_delta,
allow_rot,
orign_p_name = "",
override=False,
rot = (0,0,0),
):
prim_path = '/World/' + 'class_' + class_name + '/' + prim_name
# if not override:
add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(prim_path)
# prim.GetReferences().AddReference(assetPath=asset_path, primPath=prim_path)
prim.SetInstanceable(True)
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(class_name)
# omni.kit.commands.execute('CopyPrim',
# path_from=orign_p_name,
# path_to=prim_path,
# duplicate_layers=False,
# combine_layers=False,
# exclusive_select=False,
# flatten_references=False,
# copy_to_introducing_layer=False)
# here we want to modify the scale
low_lim = scale - object_scale_delta
high_lim = scale + object_scale_delta
scale = random.uniform(low_lim, high_lim) #/ 100
random_rotation = 0
if allow_rot:
random_rotation = random.uniform(0, 360)
# omni.kit.commands.execute('CreatePayloadCommand',
# usd_context=omni.usd.get_context(),
# path_to=Sdf.Path(prim_path),
# asset_path=asset_path,
# instanceable=True)
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=prim_path, # f"/World/{p_name}",
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
old_rotation_euler=Gf.Vec3f(0, 0, 0),
old_rotation_order=Gf.Vec3i(0, 1, 2),
new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=prim_path, # f"/World/{p_name}",
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
old_rotation_euler=Gf.Vec3f(0, 0, 0),
old_rotation_order=Gf.Vec3i(0, 1, 2),
new_rotation_euler=Gf.Vec3f(0, 0, random_rotation),
new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
def spawn_loop(
self,
path,
class_name,
p_name,
coll,
height_map,
scale=1,
object_scale_delta=0,
allow_rot=True,
):
for i, n in enumerate(coll):
override=False
# if i == 1:
#
# prim_path = '/World/' + 'class_' + class_name + '/' + p_name
#
# add_reference_to_stage(usd_path=path, prim_path=prim_path)
#
# override=True
x, y = n
x = float(x)
y = float(y)
mesh_scale = 10
x_ind = x * mesh_scale
y_ind = y * mesh_scale
mesh_height_modifier = 10
# if x_ind >= 2560:
# print('x, overfilled', x_ind)
# x_ind = 2559
# if y_ind >= 2560:
#
# print('y, overfilled', y_ind)
# y_ind = 2559
z = float(height_map[int(y_ind/10)][int(x_ind/10)])# / mesh_height_modifier # was abs
cc =(int(y_ind/10),int(x_ind/10) )
ind = np.ravel_multi_index(cc, (len(height_map), len(height_map)))
# print(np.asarray(self.t_normals))
poss_rot = np.asarray(self.t_normals)[ind]
# print("triangle normals")
# print(poss_rot)
# second one is iterated fasted
if self.occupancy[int(y_ind/10)][int(x_ind/10)] != 0:
# print("skipping oj spawn")
continue
self.occupancy[int(y_ind/10)][int(x_ind/10)]= 1
_p_name = f'{p_name}_{i}'
self.spawn_asset(
path,
class_name,
_p_name,
x,
y,
z,
scale,
object_scale_delta,
allow_rot,
override = override,
orign_p_name = p_name,
rot = poss_rot
)
def create_terrains(self, terrain_info):
# create the parent
omni.kit.commands.execute(
'CreatePrimWithDefaultXform',
prim_type='Xform',
prim_path='/World/t',
attributes={},
select_new_prim=True,
)
for key in terrain_info:
mesh_path = terrain_info[key].mesh_path
scale = terrain_info[key].scale
mat_path = terrain_info[key].material_path
mat_name = mat_path.split('/')[-1]
mat_name = mat_name.replace('.mdl', '')
mesh_path = mesh_path.replace('.obj', '.usd')
# spawn prim
prim_p = f'/World/t/class_{mat_name}'
# prim_p = f'/World/t/terrain{key}'
stage = omni.usd.get_context().get_stage()
scale = 1#0.01
# X SCALE SHOULD BE NEGATIVE TO FLIP IT CORRECTLY
random_rotation = 0.0
x, y, z = 0, 0, 0
add_reference_to_stage(usd_path=mesh_path, prim_path=prim_p)
self.create_material_and_bind(
mat_name, mat_path, prim_p, scale, stage
)
prim=stage.GetPrimAtPath(prim_p)
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
sem = Semantics.SemanticsAPI.Apply(prim, 'Semantics')
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set('class')
sem.GetSemanticDataAttr().Set(mat_name)
scale = 1#0.1
random_rotation = 0.0
x, y, z = 0, 0, 0
# stage = self.usd_context.get_stage()
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=f'/World/t',
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
omni.kit.commands.execute(
'TransformPrimSRTCommand',
path=f'/World/t',
old_scale=Gf.Vec3f(1.0, 1.0, 1.0),
new_scale=Gf.Vec3f(scale, scale, scale),
old_translation=Gf.Vec3f(x, y, z),
new_translation=Gf.Vec3f(x, y, z),
# old_rotation_euler=Gf.Vec3f(-90, 0, 0),
# old_rotation_order=Gf.Vec3i(0, 1, 2),
# new_rotation_euler=Gf.Vec3f(-90, 0, -180),
# new_rotation_order=Gf.Vec3i(0, 1, 2),
time_code=Usd.TimeCode(),
had_transform_at_key=False,
)
def spawn_all(self, obs_to_spawn, object_dict, height_map, normals):
self.t_normals = normals
length = len(obs_to_spawn)
counter = 1
for key in obs_to_spawn:
obj = object_dict[key]
path = object_dict[key].usd_path
print(f"{self._o} Spawning {len(obs_to_spawn[key])} of {path}. {counter} / {length}")
class_name = obj.class_name
if class_name == '':
class_name = obj.unique_id
self.spawn_loop(
path,
class_name,
f'{obj.unique_id}_',
obs_to_spawn[key],
height_map,
scale=obj.object_scale,
object_scale_delta=obj.object_scale_delta,
allow_rot=obj.allow_y_rot,
)
print("spawned, now we wait till stage loads")
update_stage()
# print("some time should have passed")
# return
counter += 1
def generate_world_generator(self, obj_path, world_path):
print("Tring to generator worldf rom file")
(
obs_to_spawn,
object_dict,
terrain_info,
meshGen,
) =generate_world_from_file(obj_path, world_path)
height_map = meshGen._points2
self.occupancy = np.zeros((len(height_map),len(height_map)))
self.create_terrains(terrain_info)
meshGen.clean_up_files()
return obs_to_spawn, object_dict, height_map, meshGen.normals
def create_material_and_bind(
self, mat_name, mat_path, prim_path, scale, stage
):
obj_prim = stage.GetPrimAtPath(prim_path)
mtl_created_list = []
omni.kit.commands.execute(
'CreateAndBindMdlMaterialFromLibrary',
mdl_name=mat_path,
mtl_name=mat_name,
mtl_created_list=mtl_created_list,
)
mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
omni.usd.create_material_input(
mtl_prim,
'project_uvw',
True,
Sdf.ValueTypeNames.Bool,
)
omni.usd.create_material_input(
mtl_prim,
'texture_scale',
Gf.Vec2f(scale, scale),
Sdf.ValueTypeNames.Float2,
)
cube_mat_shade = UsdShade.Material(mtl_prim)
UsdShade.MaterialBindingAPI(obj_prim).Bind(
cube_mat_shade, UsdShade.Tokens.strongerThanDescendants
)
def create_world(self, world_path, obj_path):
(
obs_to_spawn,
object_dict,
height_map,
normals
) =self.generate_world_generator(
world_path, obj_path
)
self.spawn_all(obs_to_spawn, object_dict, height_map, normals)
update_stage()
stage = omni.usd.get_context().get_stage()
self.__add_semantics_to_all2(stage)
| 13,308 | Python | 32.523929 | 99 | 0.514953 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/PerlinNoise.py | import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from perlin_numpy import (
generate_perlin_noise_2d, generate_fractal_noise_2d
)
def generate_region(threshold=0.5, shape=(256,256), region_value=1, show_plot=False) -> npt.NDArray[np.float64]:
# np.random.seed(0)
data = generate_perlin_noise_2d(shape, (8, 8))
data = (data-np.min(data))/(np.max(data)-np.min(data))
data[data < threshold] = 0
data[data >= threshold] = region_value
if show_plot:
plt.imshow(data, cmap='gray', interpolation='lanczos')
plt.colorbar()
plt.show()
return data
def generate_region2(seed = 1, threshold=0.5, shape=(256,256), region_value=1, show_plot=False) -> npt.NDArray[np.float64]:
np.random.seed(seed)
data = generate_perlin_noise_2d(shape, (8, 8))
data = (data-np.min(data))/(np.max(data)-np.min(data))
data[data < threshold] = 0
data[data >= threshold] = region_value
if show_plot:
plt.imshow(data, cmap='gray', interpolation='lanczos')
plt.colorbar()
plt.show()
return data
if __name__ == "__main__":
np.random.seed(0)
generate_region(show_plot=True)
| 1,189 | Python | 28.02439 | 123 | 0.640875 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
#from .AreaMaskGenerator import *
#from .PerlinNoise import *
#from .PoissonDisk import *
#from .world import *
#from .worldUtils import *
| 318 | Python | 18.937499 | 36 | 0.694969 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/PoissonDisk.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def Bridson_sampling(width=1.0, height=1.0, radius=0.025, k=30):
# References: Fast Poisson Disk Sampling in Arbitrary Dimensions
# Robert Bridson, SIGGRAPH, 2007
def squared_distance(p0, p1):
return (p0[0]-p1[0])**2 + (p0[1]-p1[1])**2
def random_point_around(p, k=1):
# WARNING: This is not uniform around p but we can live with it
R = np.random.uniform(radius, 2*radius, k)
T = np.random.uniform(0, 2*np.pi, k)
P = np.empty((k, 2))
P[:, 0] = p[0]+R*np.sin(T)
P[:, 1] = p[1]+R*np.cos(T)
return P
def in_limits(p):
return 0 <= p[0] < width and 0 <= p[1] < height
def neighborhood(shape, index, n=2):
row, col = index
row0, row1 = max(row-n, 0), min(row+n+1, shape[0])
col0, col1 = max(col-n, 0), min(col+n+1, shape[1])
I = np.dstack(np.mgrid[row0:row1, col0:col1])
I = I.reshape(I.size//2, 2).tolist()
I.remove([row, col])
return I
def in_neighborhood(p):
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
if M[i, j]:
return True
for (i, j) in N[(i, j)]:
if M[i, j] and squared_distance(p, P[i, j]) < squared_radius:
return True
return False
def add_point(p):
points.append(p)
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
P[i, j], M[i, j] = p, True
# Here `2` corresponds to the number of dimension
cellsize = radius/np.sqrt(2)
rows = int(np.ceil(width/cellsize))
cols = int(np.ceil(height/cellsize))
# Squared radius because we'll compare squared distance
squared_radius = radius*radius
# Positions cells
P = np.zeros((rows, cols, 2), dtype=np.float32)
M = np.zeros((rows, cols), dtype=bool)
# Cache generation for neighborhood
N = {}
for i in range(rows):
for j in range(cols):
N[(i, j)] = neighborhood(M.shape, (i, j), 2)
points = []
add_point((np.random.uniform(width), np.random.uniform(height)))
while len(points):
i = np.random.randint(len(points))
p = points[i]
del points[i]
Q = random_point_around(p, k)
for q in Q:
if in_limits(q) and not in_neighborhood(q):
add_point(q)
points = P[M]
return points
if __name__ == '__main__':
plt.figure()
plt.subplot(1, 1, 1, aspect=1)
points = Bridson_sampling()
X = [x for (x, y) in points]
Y = [y for (x, y) in points]
plt.scatter(X, Y, s=10)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()
| 2,673 | Python | 29.044943 | 73 | 0.546951 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/MeshGenerator.py | import open3d as o3d
import numpy as np
import os
from perlin_numpy import generate_perlin_noise_2d, generate_fractal_noise_2d
from sklearn.preprocessing import normalize
from perlin_noise import PerlinNoise
import matplotlib.pyplot as plt
import cv2
import colorsys
import asyncio
import omni.kit.asset_converter
import carb
# 0.001
# enable project uvw coordinates
class MeshGen:
def __init__(self, map_size, map_scale, regions_map, save_path) -> None:
pass
self._size = map_size
self._scale = map_scale
self._scale = 1
# REMOVE THIS NEXT LINE``
l = self._size * self._scale
self._map_shape = (self._size * self._scale, self._size * self._scale)
self._points = np.zeros(shape=(l * l, 3))
self._points2 = np.zeros(shape=(l , l))
self._noise_map_xy = None
self._faces = []
self._mesh = None
self._regions_map = cv2.resize(
regions_map,
dsize=(self._size * self._scale, self._size * self._scale),
interpolation=cv2.INTER_NEAREST,
)
self._save_path = save_path
self.meshes = []
self._o = '[MeshGenerator] '
self._files_to_clean = []
self.final_mesh_paths = []
self.final_mesh_paths_dict = {}
self.region_to_path = {}
async def convert(self, in_file, out_file, load_materials=False):
# This import causes conflicts when global
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.ignore_materials = not load_materials
converter_context.ignore_animation = True
converter_context.ignore_cameras = True
converter_context.single_mesh = True
converter_context.smooth_normals = True
# converter_context.preview_surface = False
# converter_context.support_point_instancer = False
# converter_context.embed_mdl_in_usd = False
converter_context.use_meter_as_world_unit = True
# converter_context.create_world_as_default_root_prim = False
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(
in_file, out_file, progress_callback, converter_context
)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
def cnv(self):
print(f'{self._o} Converting .obj files to .usd')
for file_path in self._files_to_clean:
new_path = file_path.replace('.obj', '.usd')
self.final_mesh_paths.append(new_path)
print(f'{self._o} Trying to convert {file_path} to {new_path}')
status = asyncio.get_event_loop().run_until_complete(
self.convert(file_path, new_path)
)
def generate_terrain_mesh(self):
self._create_noise_map()
self._compute_base_mesh()
self._save_meshes()
self.cnv()
def clean_up_files(self):
def file_exists(file_path):
return os.path.exists(file_path)
for file_path in self._files_to_clean:
if file_exists(file_path):
os.remove(file_path)
def _save_meshes(self):
print(f'{self._o} Saving meshes to folder {self._save_path}.')
for i, key in enumerate(list(self.meshes_dict.keys())):
self._files_to_clean.append(f'{self._save_path}/mesh_{i}.obj')
self._files_to_clean.append(f'{self._save_path}/mesh_{i}.usd')
self.final_mesh_paths_dict[key] = f'{self._save_path}/mesh_{i}.obj'
o3d.io.write_triangle_mesh(
filename=f'{self._save_path}/mesh_{i}.obj',
mesh=self.meshes_dict[int(key)],
compressed=False,
write_vertex_normals=True,
# write_vertex_colors=True,
# write_triangle_uvs=True,
print_progress=False,
)
def _create_noise_map(self):
scale = 5#250.0
print(f'{self._o} Creating Noise Map for terrain heights.')
# self._noise_map_xy = generate_fractal_noise_2d(
# self._map_shape, (8, 8), 5
# )
self._noise_map_xy = generate_perlin_noise_2d(
self._map_shape, (8, 8)
)
x = np.linspace(
0,
self._size * self._scale,
self._size * self._scale,
dtype=np.int32,
)
y = np.linspace(
0,
self._size * self._scale,
self._size * self._scale,
dtype=np.int32,
)
self._noise_map_xy *= scale
noise_flat = self._noise_map_xy.flatten()
X, Y = np.meshgrid(x, y)
self._points = np.column_stack(
(X.ravel(),Y.ravel(), noise_flat) # was abs::with
)
def _compute_base_mesh(self):
subdivisions = (self._size * self._scale) - 1
materials = list(np.unique(self._regions_map))
print(f"There are {len(materials)}, {materials}")
self.meshes_dict = {}
for key in materials:
self.meshes_dict[int(key)] = o3d.geometry.TriangleMesh()
print(f'{self._o} Computing the base mesh.')
self._faces = []
for j in range(subdivisions):
for i in range(subdivisions):
index = j * (subdivisions + 1) + i
face1 = [index, index + 1, index + subdivisions + 2]
face2 = [
index,
index + subdivisions + 2,
index + subdivisions + 1,
]
self._faces.append(face1)
self._faces.append(face2)
res_ind = int(self._regions_map[j,i])
self.meshes_dict[res_ind].triangles.append(face1)
self.meshes_dict[res_ind].triangles.append(face2)
self._mesh = o3d.geometry.TriangleMesh()
self._mesh.vertices = o3d.utility.Vector3dVector(self._points)
self._mesh.triangles = o3d.utility.Vector3iVector(
np.array(self._faces)
)
self._mesh.paint_uniform_color([1, 0.706, 0])
self._mesh.compute_vertex_normals()
self._mesh = self._mesh.compute_vertex_normals()
self._mesh = self._mesh.remove_unreferenced_vertices()
self._mesh = self._mesh.remove_duplicated_vertices()
self.normals = self._mesh.triangle_normals
l = self._scale * self._size
for i in range(len(self._mesh.vertices)):
ind = np.unravel_index(i, (l, l))
self._points2[ind] = self._mesh.vertices[i][2]
N = len(materials)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
for i, key in enumerate(list(self.meshes_dict.keys())):
self.meshes_dict[key].vertices = self._mesh.vertices
self.meshes_dict[key].vertex_normals = self._mesh.vertex_normals
self.meshes_dict[key] = self.meshes_dict[
key
].remove_unreferenced_vertices()
self.meshes_dict[key].paint_uniform_color(RGB_tuples[i])
self.meshes_dict[key] = self.meshes_dict[
key
].compute_vertex_normals()
self.meshes_dict[key] = self.meshes_dict[
key
].compute_triangle_normals()
print(np.array(self.meshes_dict[key].triangle_normals))
| 7,745 | Python | 33.735426 | 79 | 0.559458 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG/AreaMaskGenerator.py | """
This module handles area and point generation.
"""
from .MeshGenerator import MeshGen
# import omni.kit.commands
import json
import numpy as np
import numpy.typing as npt
import tempfile
from . import PoissonDisk
import matplotlib.colors
from . import PerlinNoise
import matplotlib.pyplot as plt
from typing import Tuple
from pxr import Usd, Sdf, Gf
def append_inside_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a new mask that is only within the first mask
"""
mask_indices = np.where((area_to_add >= area_value) & (area != 0))
area2 = np.copy(area)
area2[mask_indices] = area_value # area_value
return area2
def append_to_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a mask appended to another one
"""
mask_indices = np.where(area_to_add >= area_value)
area[mask_indices] = area_value
return area
def show_plot(area):
cvals = [0, 1, 2, 3, 4]
colors = ['lightgreen', 'green', 'yellow', 'brown', 'red']
norm = plt.Normalize(min(cvals), max(cvals))
tuples = list(zip(map(norm, cvals), colors))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', tuples)
plt.imshow(area, cmap=cmap, norm=norm)
plt.colorbar()
plt.show()
def fill_area(
area: npt.NDArray[np.float64],
size: int,
region_value: int,
object_value: int,
) -> Tuple[npt.NDArray[np.float64], list]:
# Generate points and fill the area with objects using Poisson
points = PoissonDisk.Bridson_sampling(
width=area.shape[0], height=area.shape[1], radius=size, k=30
)
new_points = []
for p in points:
x_int = int(p[0])
y_int = int(p[1])
if area[y_int][x_int] == region_value:
# area[y_int][x_int] = object_value
new_points.append(p)
return area, new_points
class ObjectPrim:
def __init__(
self,
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
) -> None:
self.object_scale = scale
self.object_scale_delta = scale_delta
self.allow_y_rot = y_rot
self.unique_id = u_id
self.usd_path = usd_path
self.class_name = class_name
self.poisson_size = poisson_size
def __str__(self) -> str:
return f"""
{self.unique_id}
scale: {self.object_scale} +/- {self.object_scale_delta}
allow y rot: {self.allow_y_rot}
poisson size: {self.poisson_size}
class name: {self.class_name}
usd path: {self.usd_path}
"""
pass
class TerrainPrim:
def __init__(self, mesh_path, mat_path, scale=0.001) -> None:
self.mesh_path = mesh_path
self.material_path = mat_path
self.scale = scale
class WorldHandler:
def __init__(self, world_path, object_path) -> None:
# self.objects = []
self.objects_dict = {}
self._object_path = object_path
self._world_path = world_path
self.objects_to_spawn = {}
self._WORLD_TO_POISSON_SCALE = 1.6
def _read_objects(self):
with open(self._object_path, 'r+') as infile:
data = json.load(infile)
# print(data)
for key in data:
scale = data[key]['object_scale']
scale_delta = data[key]['object_scale_delta']
y_rot = data[key]['allow_y_rot']
u_id = key
usd_path = data[key]['usd_path']
class_name = data[key]['class_name']
poisson_size = data[key]['poisson_size']
tmp = ObjectPrim(
scale,
scale_delta,
y_rot,
u_id,
usd_path,
class_name,
poisson_size,
)
# self.objects.append(tmp)
self.objects_dict[u_id] = tmp
# for i in self.objects:
# print(i)
def _read_world(self):
# print("here")
self.objects_to_spawn = {}
data = None
objs_per_region = {}
with open(self._world_path, 'r+') as infile:
data = json.load(infile)
if data != None:
n = data['size']
arr = np.zeros((n, n))
total_arr = np.zeros((n, n))
regions = data['regions']
terrain_info = {}
# print( " == ", np.unique(total_arr))
for region_id in regions:
region_id = str(region_id)
terrain_info[region_id] = TerrainPrim(
'',
regions[region_id]['material_path'],
regions[region_id]['material_scale'],
)
# print("terrrain info key type ", type(region_id))
new_arr = PerlinNoise.generate_region2(
seed=int(region_id),
shape=(n, n),
threshold=float(regions[region_id]['threshold']),
show_plot=False,
region_value=int(region_id),
)
arr = append_to_area(arr, new_arr, int(region_id))
total_arr = arr
# handle objects in the zone
objs = regions[region_id]['objects']
objs_per_region[region_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[str(obj_uid)]
objs_per_region[region_id].append(object_prim)
# now we need to deal with sub zones
zones = regions[region_id]['zones']
for zone_id in zones:
terrain_info[str(zone_id)] = TerrainPrim(
'',
zones[zone_id]['material_path'],
zones[zone_id]['material_scale'],
)
new_arr = PerlinNoise.generate_region2(
seed=int(zone_id),
shape=(n, n),
threshold=float(zones[zone_id]['threshold']),
show_plot=False,
region_value=int(zone_id),
)
zone_to_save = append_inside_area(
arr, new_arr, int(zone_id)
)
# print("zone == ", zone_id, " ", zone_id)
total_arr = zone_to_save
objs = zones[zone_id]['objects']
objs_per_region[zone_id] = []
if len(objs) > 0:
for obj_uid in objs:
# get corresponding object from objects
object_prim = self.objects_dict[obj_uid]
objs_per_region[zone_id].append(object_prim)
for key in objs_per_region:
obs = objs_per_region[key]
if len(obs) > 0:
for obj in obs:
print(f"{key} has poisson of size {obj.poisson_size} which ends up being {obj.poisson_size / self._WORLD_TO_POISSON_SCALE}")
area, coords = fill_area(
total_arr,
obj.poisson_size / self._WORLD_TO_POISSON_SCALE,
int(key),
999,
)
self.objects_to_spawn[obj.unique_id] = coords
return total_arr, n, terrain_info
def generate_world_from_file(world_path, object_path):
print("creating world handler")
world = WorldHandler(world_path, object_path)
print("reading objects")
world._read_objects()
print("reading world")
res = world._read_world()
mesh_scale = 10
terrain_mesh_paths = []
if res:
region_map, map_size, terrain_info = res
# print(" ------- ")
# print(map_size, 10, region_map.shape)
# print(set(region_map.flatten()))
# unique, counts = np.unique(region_map, return_counts=True)
# print(dict(zip(unique, counts)))
# return None
m_path = tempfile.gettempdir()#'C:/Users/jonem/Documents/Kit/apps/Isaac-Sim/exts/IsaacSyntheticPerception/com/SyntheticPerception/app/PCG'
meshGen = MeshGen(map_size, mesh_scale, region_map, m_path)
meshGen.generate_terrain_mesh()
regs = list(np.unique(region_map))
for key in terrain_info:
print(key)
if float(key) in regs:
terrain_info[
str(key)
].mesh_path = meshGen.final_mesh_paths_dict[int(key)]
print(
f'[AreaMaskGenerator] All terrain infos updated. Passing data back to main sample to genereate objects and load the terrain in.'
)
return (
world.objects_to_spawn,
world.objects_dict,
terrain_info,
meshGen,
) # ._points2#_noise_map_xy
return world.objects_to_spawn, world.objects_dict, None, None
| 9,469 | Python | 31.655172 | 148 | 0.50829 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/DataFormatter.py | import numpy as np
import open3d as o3d
import numpy as np
import glob
import random
from tqdm import tqdm
id = 100
from pathlib import Path
import sys
def vis_pc(pc):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
o3d.visualization.draw_geometries([pcd])
distance = 200
data_path = "/home/jon/Desktop/SparseNewDS"
folder_path = f"{data_path}/velodyne/"
core_path="/home/jon/Documents/Datasets/SparseFinal/sequences/"
def fix_orb_around_pc(data , distance):
# print(" ================= ")
start = np.array([0.0,0.0,0.0])
points =np.concatenate(data,axis=0)
# print(f"Points before: {len(points)}")
new_arr = []
indexes_to_remove = []
for i,point in enumerate(points):
dist = np.linalg.norm(start - point)
# print(dist)
if dist<distance:
new_arr.append(point)
else:
indexes_to_remove.append(i)
# print(f"Points after: {len(new_arr)}")
limit = 4096*5
if len(new_arr) < limit:
print("array too small")
return np.array(new_arr), indexes_to_remove
# load mappings
# map_dict = {}
# print(map_dict)
# sys.exit()
#
# {1: 'Asphalt', 3: 'Carpet_Beige', 2: 'Carpet_Pattern_Squares_Multi', 4: 'ladder', 5: 'sofa', 6: 'table', 7: 'tree'}
class_names = {
0:"unlabled",
1:"ground",
2:"tree",
3:"vegetation",
4:"ladder",
5:"sofa",
6:"table",
7:"bicycle",
8:"pole",
9:"fence",
}
class_to_id_remap= {
"Grass_Countryside": 1,
"Leaves" : 2,
"Carpet_Pattern_Squares_Multi":1,
"tree":2,
"vegetation":3,
"Asphalt": 1,
"Carpet_Beige":1,
"ladder":4,
"sofa":5,
"table":6,
"bicycle":7,
"fence":9,
"pole":8,
"sign":8,
}
computed_remap = {}
mappings = np.load(f"{data_path}/mapping.npy", allow_pickle=True)
print(mappings)
# print(np.unique(mappings,axis=2))
unique_dict = {}
for row in mappings:
unique_dict[row[3]] = row[2]
print(unique_dict)
# sys.exit()
for tup in mappings:
current_val = tup[2]
class_name = tup[3]
real_class_val = class_to_id_remap[class_name]
computed_remap[current_val] = real_class_val
print("Computed remap")
print(computed_remap)
mapping = computed_remap
# {3: 'Grass_Countryside', 1: 'Leaves', 2: 'Carpet_Pattern_Squares_Multi', 4: 'tree', 5: 'vegetation'}
Path(core_path+"00/velodyne").mkdir(parents=True, exist_ok=True)
Path(core_path+"00/labels").mkdir(parents=True, exist_ok=True)
txtfiles = glob.glob(f"{folder_path}/*.npy")
txtfiles = sorted(txtfiles)
num_files = len(txtfiles)
num_seq = 8
num_files_per_seq = int(num_files/num_seq)
seq_id = 0
seq_id_addresses = []
count = 0
pcs_removed = 0
for seq_id in range(num_seq):
Path(core_path+f"{seq_id:02d}/velodyne/").mkdir(parents=True, exist_ok=True)
Path(core_path+f"{seq_id:02d}/labels/").mkdir(parents=True, exist_ok=True)
seq_id_addresses.append(0)
for file in tqdm(txtfiles):
id_name = file.split("/")[-1]
data = np.load(file)
# print(data, len(data))
if len(data) == 0:
# print("data too small")
pcs_removed +=1
continue
# now handle the labels
labels = np.load(f"{data_path}/velodyneLabels/{id_name}")
labels = np.concatenate(labels,axis=0)
if len(labels) == 0:
continue
k = np.array(list(mapping.keys()))
v = np.array(list(mapping.values()))
out= np.zeros_like(labels)
for key,val in zip(k,v):
out[labels==key] = val
labels = out
original_pc, inds_to_remove = fix_orb_around_pc(data, distance)
print(original_pc)
print(original_pc.shape)
# vis_pc(original_pc)
labels = np.delete(labels, inds_to_remove)
mu, sigma = 0, 0.1
noise = np.random.normal(mu, sigma, [original_pc.shape[0],original_pc.shape[1]])
noisified_pc = original_pc + noise
# vis_pc(noisified_pc)
limit = 4096*5
if noisified_pc.shape[0] <= limit:
pcs_removed += 1
continue
# print(noisified_pc)
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(noisified_pc)
# o3d.visualization.draw_geometries([pcd])
seq_id +=1
if seq_id >= num_seq:
seq_id = 0
# sys.exit()
"""
if count >= num_files_per_seq:
seq_id+=1
count = 0
Path(core_path+f"{seq_id:02d}/velodyne/").mkdir(parents=True, exist_ok=True)
Path(core_path+f"{seq_id:02d}/labels/").mkdir(parents=True, exist_ok=True)
"""
id_name = count
id_name = seq_id_addresses[seq_id]
seq_id_addresses[seq_id]+=1
np.save(f"{core_path}{seq_id:02d}/velodyne/{id_name}",noisified_pc)
np.save(f"{core_path}{seq_id:02d}/labels/{id_name}",labels)
count +=1
print(f"removed {pcs_removed} files")
print(f"kept {count}")
"""
re_map_natural = {0:0,
1:2, # leaves to tree
2:1, #carpet to ground
3:1, #grass ground to ground
4:2, #tree to tree
5:3 # veg to veg
}
re_map_manmade = {
1:1, # asphalt to ground
2:1, # carpet to ground
3:1,#carpet to ground
4:4,# ladder to ladder
5:5,# sofa to sofa
6:6,#Table to table
7:2,# tree to tree
}
mapping= re_map_natural
mapping = re_map_manmade
"""
| 5,345 | Python | 25.73 | 117 | 0.592891 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
| 178 | Python | 16.899998 | 36 | 0.657303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/vis_results.py |
import pptk
from os.path import join
import numpy as np
import os, argparse, pickle
import open3d as o3d
import yaml
from os.path import exists, join, isfile, dirname, abspath
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import sys
np.set_printoptions(threshold=sys.maxsize)
# from main_SemanticKITTI import SemanticKITTI
import sys
def rgba2rgb( rgba, background=(255,255,255) ):
row, col, ch = rgba.shape
if ch == 3:
return rgba
assert ch == 4, 'RGBA image has 4 channels.'
rgb = np.zeros( (row, col, 3), dtype='float32' )
r, g, b, a = rgba[:,:,0], rgba[:,:,1], rgba[:,:,2], rgba[:,:,3]
a = np.asarray( a, dtype='float32' ) / 255.0
R, G, B = background
rgb[:,:,0] = r * a + (1.0 - a) * R
rgb[:,:,1] = g * a + (1.0 - a) * G
rgb[:,:,2] = b * a + (1.0 - a) * B
return np.asarray( rgb, dtype='uint8' )
def process_clouds(pc, cols):
print("begin proc")
pc_out = []
cols_out = []
for x in range(len(pc)):
for y in range(len(pc[x])):
pc_out.append(pc[x][y])
cols_out.append((cols[x][y][0]/255,cols[x][y][1]/255,cols[x][y][2]/255))
return np.array(pc_out), np.array(cols_out)
def process_clouds2(pc, cols):
print("begin proc")
pc_out = []
cols_out = []
for x in range(len(pc)):
for y in range(len(pc[x])):
pc_out.append(pc[x][y])
cols_out.append(cols[x][y])
return np.array(pc_out), np.array(cols_out)
def lin_col_to_norm(cols):
print("linear: ", cols.shape[0])
new_cols = []
for i in range(0,cols.shape[0],4):
new_cols.append((cols[i]/255,cols[i+1]/255,cols[i+2]/255))
return np.array(new_cols)
if __name__ == '__main__':
COLOR_MAP = {
0: (0, 0, 0),
1: (245, 150, 100),
2: (245, 230, 100),
3: (150, 60, 30),
4: (180, 30, 80),
5: (255, 0., 0),
6: (30, 30, 255),
7: (200, 40, 255),
8: (90, 30, 150),
9: (255, 0, 255),
10: (255, 150, 255),
11: (75, 0, 75),
12: (75, 0., 175),
13: (0, 200, 255),
14: (50, 120, 255),
15: (0, 175, 0),
16: (0, 60, 135),
17: (80, 240, 150),
18: (150, 240, 255),
19: (0, 0, 255),
}
for label in COLOR_MAP:
COLOR_MAP[label] = tuple(val/255 for val in COLOR_MAP[label])
seq_id = sys.argv[1:][0]
file_id = sys.argv[1:][1]
full_pc_path = f"{seq_id}/full_pointcloud/{file_id}.npy"
colour_path = f"{seq_id}/rgb/{file_id}.png"
full_pc_path = "_pc.npy"
colour_path = "_sem.npy"
im = np.load(colour_path, allow_pickle=True)
full_pc = np.load(full_pc_path,allow_pickle=True)
full_pc, im = process_clouds2(full_pc, im)
pcd = o3d.geometry.PointCloud()
colors = [COLOR_MAP[clr] for clr in im]
pcd.points = o3d.utility.Vector3dVector(full_pc)
pcd.colors = o3d.utility.Vector3dVector(colors)
o3d.visualization.draw_geometries([pcd])
sys.exit()
| 3,018 | Python | 26.953703 | 84 | 0.541087 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/main.py | from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
print('starting')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# plot function is created for
# plotting the graph in
# tkinterim window
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from PCG import AreaMaskGenerator
from PCG import PerlinNoise
def plot():
# the figure that will contain the plot
fig = Figure(figsize=(5, 5), dpi=100)
# adding the subplot
plot1 = fig.add_subplot(111)
n = 256
forrest_region = PerlinNoise.generate_region(
shape=(n, n), threshold=0.5, show_plot=False
)
# plotting the graph
plot1.imshow(forrest_region)
# creating the Tkinter canvas
# containing the Matplotlib figure
canvas = FigureCanvasTkAgg(fig, master=right_frame)
canvas.draw()
# placing the canvas on the Tkinter window
canvas.get_tk_widget().pack() # grid(row=0,column=0, padx=5, pady=5)
# placing the toolbar on the Tkinter window
# canvas.get_tk_widget().pack() # grid(row=0,column=0, padx=5, pady=5)
# Creating Toolbar using Matplotlib
toolbar = NavigationToolbar2Tk(canvas, right_frame)
toolbar.update()
canvas.get_tk_widget().pack()
# button that displays the plot
# plot_button = Button(master = window,
# command = plot,
# height = 2,
# width = 10,
# text = "Plot")
# place the button
# in main window
# plot_button.pack()
def draw_main_menu(m_window):
load_create_base_window_button = Button(
master=m_window,
command=plot,
height=2,
width=18,
text='Create Base Grid',
)
load_create_base_window_button.grid(row=0, column=0, padx=5, pady=5)
# the main Tkinter window
window = Tk()
root = window
# setting the title
window.title('Main Menu')
# dimensions of the main window
window.geometry('800x800')
left_frame = Frame(root, width=200, height=400, bg='grey')
left_frame.grid(row=0, column=0, padx=10, pady=5)
right_frame = Frame(root, width=650, height=400, bg='grey')
right_frame.grid(row=0, column=1, padx=10, pady=5)
draw_main_menu(window)
# run the gui
window.mainloop()
| 2,416 | Python | 22.930693 | 75 | 0.66846 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
| 178 | Python | 16.899998 | 36 | 0.657303 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/test.py | import tkinter as tk
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
print('starting')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# plot function is created for
# plotting the graph in
# tkinterim window
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from PCG import AreaMaskGenerator
from PCG import PerlinNoise
from PCG.AreaMaskGenerator import ObjectPrim, WorldHandler
# def load_objects():
# # Code for the "Load Objects" page
# print('Load Objects page')
#
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import random
import numpy as np
from matplotlib import colors
cbar = None
import json
import tkinter.ttk as ttk
from tkinter.filedialog import askopenfilename, askdirectory
class EntryInfo:
def __init__(self, name, threshold):
self.name = name
self.threshold = threshold
self.identifier = None
self.color = None
self.in_region = None
self.objects_in_zone = []
self.is_region = True
self.material_path = None
self.material_scale = None
def get_objs_as_str(self):
return ''.join(self.objects_in_zone)
worldHandler = WorldHandler(',', '')
def load_objects():
# Code for the "Load Objects" page
print('Load Objects page')
filename = askopenfilename()
worldHandler._object_path = filename
worldHandler._read_objects()
def create_regions():
# Code for the "Create Regions" page
print('Create Regions page')
# Create a new window for the "Create Regions" page
regions_window = tk.Toplevel()
regions_window.title('Create Regions')
# First column: List of entries with delete buttons
entries_frame = tk.Frame(regions_window)
entries_frame.grid(row=0, column=0, padx=10, pady=10, sticky='nsew')
yscrollbar = Scrollbar(entries_frame)
yscrollbar.pack(side=RIGHT, fill=Y)
entries_label = tk.Label(entries_frame, text='Entries:')
entries_label.pack()
options = ['Option 1', 'Option 2', 'Option 3', 'Option 4', 'Option 5']
options = [str(i) for i in range(100)]
selected_items = []
# List to store entry objects
entry_list = []
def write_data():
global n
data = {}
for entry in entry_list:
if entry.is_region:
data[entry.identifier] = {}
data[entry.identifier]['objects'] = entry.objects_in_zone
data[entry.identifier]['zones'] = {}
data[entry.identifier]['threshold'] = entry.threshold
data[entry.identifier]['material_path'] = entry.material_path
data[entry.identifier]['material_scale'] =entry.material_scale
else:
# we are in a zone - get the region we are in
id = int(entry.in_zone)
print(id)
# if not data[id]["zones"][entry.identifier]:
if not id in data.keys():
data[id]['zones'][entry.identifier] = {}
if not entry.identifier in data[id]['zones'].keys():
data[id]['zones'][entry.identifier] = {}
data[id]['zones'][entry.identifier][
'objects'
] = entry.objects_in_zone
data[id]['zones'][entry.identifier][
'threshold'
] = entry.threshold
data[id]['zones'][entry.identifier]['material_path'] = entry.material_path
data[id]['zones'][entry.identifier]['material_scale'] =entry.material_scale
# json.dump(data)
full_data = {}
full_data['seed'] = 0
full_data['regions'] = data
full_data['size'] = n
folder_path = askdirectory()
with open(f'{folder_path}/worlddata2.json', 'w') as f:
json.dump(full_data, f)
print(full_data)
# Function to delete an entry from the list
def delete_entry(entry, index):
entry.destroy()
entry_list.pop(index)
update_identifiers()
update_plot()
def update_identifiers():
for i, entry_info in enumerate(entry_list):
entry_info.identifier = i + 1
def add_entry():
name = input_entry1.get()
threshold = input_entry2.get()
parent_zone = input_entry3.get()
mat_path = input_entry_mat_path.get()
mat_scale = input_entry_mat_scale.get()
if name and threshold:
entry_frame = tk.Frame(entries_frame)
entry_frame.pack(anchor='w')
entry_info = EntryInfo(name, threshold)
entry_info.material_path = mat_path
entry_info.material_scale = mat_scale
for i in listbx.curselection():
entry_info.objects_in_zone.append(listbx.get(i))
entry_info.identifier = len(entry_list) #+ 1
id = entry_info.identifier
entry_info.color = generate_random_color()
if parent_zone != '':
entry_info.in_zone = parent_zone
entry_info.is_region = False
else:
entry_info.in_zone = 0
parent_zone = 0
entry_list.append(entry_info)
entry_label = tk.Label(
entry_frame,
text=f'ID: {id}, Name: {name}, Threshold: {threshold}, parent zone: {parent_zone}, objects: {entry_info.get_objs_as_str()}',
fg=entry_info.color,
)
entry_label.pack(side='left')
delete_button = tk.Button(
entry_frame,
text='Delete',
command=lambda entry=entry_frame, index=len(
entry_list
) - 1: delete_entry(entry, index),
)
delete_button.pack(side='left')
# entries_listbox.insert(
# tk.END, f'Name: {name}, Threshold: {threshold}'
# )
input_entry1.delete(0, tk.END)
input_entry2.delete(0, tk.END)
input_entry3.delete(0, tk.END)
update_plot()
def update_plot():
# fig.clear()
global cbar
global n
cbar.remove()
ax.clear()
arr = np.zeros((n, n))
past_id = 0
for entry in entry_list:
print(
'identigier ',
entry.identifier,
' in int form ',
int(entry.identifier),
)
print('base array')
print(arr)
# check the parent zone. if it is not 0 we need to generate it inside this zone
# we want to keep both tho.
# the inside zone one must not completely overwite the parent REGION
# in this case we dont add it to the main array we just perfrom the calculation and save it
new_arr = PerlinNoise.generate_region2(
seed=int(entry.identifier),
shape=(n, n),
threshold=float(entry.threshold),
show_plot=False,
region_value=int(entry.identifier),
)
print('new array')
print(new_arr)
# This zone will be saved and used later
if entry.in_zone != 0:
zone_to_save = AreaMaskGenerator.append_inside_area(
arr, new_arr, int(entry.identifier)
)
arr = zone_to_save
else:
print('Adding region to general area')
arr = AreaMaskGenerator.append_to_area(
arr, new_arr, int(entry.identifier)
)
i = ax.imshow(arr)
cbar = fig.colorbar(i)
cbar_ticks = [
int(e.identifier) for e in entry_list
] # np.linspace(0.0, 1.0, num=6, endpoint=True)
cbar.set_ticks(cbar_ticks)
cbar.draw_all()
# ax.bar(x, y, color=colors)
# ax.set_xlabel('Entry')
# ax.set_ylabel('Threshold')
canvas.draw()
def extract_regions(arr):
regions = []
for entry in entry_list:
if int(entry.in_zone) == 0:
# remove all the non identifier values in the array and save it
mask_indices = np.where(arr != int(entry.identifier))
area2 = np.copy(arr)
area2[mask_indices] = 0 # area_value
regions.append(area2)
def generate_random_color():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
color = f'#{r:02x}{g:02x}{b:02x}'
return color
# Second column: Text entries and a button
inputs_frame = tk.Frame(regions_window)
inputs_frame.grid(row=0, column=1, padx=10, pady=10, sticky='nsew')
input_label1 = tk.Label(inputs_frame, text='Name:')
input_label1.pack()
input_entry1 = tk.Entry(inputs_frame)
input_entry1.pack()
input_label2 = tk.Label(inputs_frame, text='Threshold:')
input_label2.pack()
input_entry2 = tk.Entry(inputs_frame)
input_entry2.pack()
input_label3 = tk.Label(inputs_frame, text='In zone ID:')
input_label3.pack()
input_entry3 = tk.Entry(inputs_frame)
input_entry3.pack()
input_label_mat_path = tk.Label(inputs_frame, text='Material Path')
input_label_mat_path.pack()
input_entry_mat_path = tk.Entry(inputs_frame)
input_entry_mat_path.pack()
input_label_mat_scale = tk.Label(inputs_frame, text='Material Scale')
input_label_mat_scale.pack()
input_entry_mat_scale = tk.Entry(inputs_frame)
input_entry_mat_scale.pack()
# process_button = tk.Button(
# inputs_frame, text='Add Entry', command=add_entry
# )
# process_button.pack()
# separator = ttk.Separator(inputs_frame, orient='horizontal')
# separator.pack(fill='x')
ttk.Label(inputs_frame, text='Add objects to zone').pack()
input_label4 = tk.Label(inputs_frame, text='Add to zone with the ID of:')
input_label4.pack()
# input_entry4 = tk.Entry(inputs_frame)
# input_entry4.pack()
# combobox = ttk.Combobox(
# inputs_frame,
# values=options,
# width=25,
# state='readonly',
# justify='left',
# selectmode="multiple",
# )
# combobox.set('Select Options')
# combobox.pack(padx=10, pady=10)
yscrollbar = Scrollbar(inputs_frame)
yscrollbar.pack(side=RIGHT, fill=Y)
listbx = Listbox(
inputs_frame, selectmode='multiple', yscrollcommand=yscrollbar.set
)
listbx.pack(padx=10, pady=10, expand=YES, fill='both')
x = []
for item in worldHandler.objects:
x.append(item.unique_id)
for each_item in range(len(x)):
listbx.insert(END, x[each_item])
listbx.itemconfig(each_item, bg='white')
yscrollbar.config(command=listbx.yview)
process_button = tk.Button(
inputs_frame, text='Add entry', command=add_entry
)
process_button.pack()
# Third column: Empty column
third_column_frame = tk.Frame(regions_window)
third_column_frame.grid(row=0, column=2, padx=10, pady=10, sticky='nsew')
save_all_button = tk.Button(
inputs_frame, text='save all', command=write_data
)
save_all_button.pack()
# Example Matplotlib plot
fig, ax = plt.subplots()
canvas = FigureCanvasTkAgg(fig, master=third_column_frame)
canvas.get_tk_widget().pack()
global n
global cbar
arr = np.zeros((n, n))
i = ax.imshow(arr)
cbar = plt.colorbar(i)
cbar.ax.set_autoscale_on(True)
def create_zones():
# Code for the "Create Zones" page
print('Create Zones page')
def main_page():
main_window = tk.Tk()
main_window.title('Main Window')
load_objects_button = tk.Button(
main_window, text='Load Objects', command=load_objects
)
load_objects_button.pack()
input_sizelabel= tk.Label(main_window, text='World Size:')
input_sizelabel.pack()
input_sizeentry = tk.Entry(main_window)
input_sizeentry.pack()
def size():
global n
n = int(input_sizeentry.get())
if not n or n < 0:
n = 256
set_size_button = tk.Button(
main_window, text='set size', command=size
)
set_size_button.pack()
create_regions_button = tk.Button(
main_window, text='Create Regions', command=create_regions
)
create_regions_button.pack()
# create_zones_button = tk.Button(
# main_window, text='Create Zones', command=create_zones
# )
# create_zones_button.pack()
main_window.mainloop()
if __name__ == '__main__':
main_page()
| 12,927 | Python | 29.92823 | 140 | 0.580104 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Utils/EnvDataTool/EnvCreator.py | import tkinter as tk
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
NavigationToolbar2Tk,
)
from PCG import PoissonDisk
import matplotlib.colors
from PCG import PerlinNoise
import matplotlib.pyplot as plt
from typing import Tuple
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
# from PCG import AreaMaskGenerator
from PCG import PerlinNoise
# from PCG.AreaMaskGenerator import ObjectPrim, WorldHandler
from PCG.worldUtils import WorldHandler,ObjectPrim
import numpy.typing as npt
# def load_objects():
# # Code for the "Load Objects" page
# print('Load Objects page')
#
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import random
import numpy as np
from matplotlib import colors
import json
import tkinter.ttk as ttk
from tkinter.filedialog import askopenfilename, askdirectory, asksaveasfile
class EntryInfo:
def __init__(self, name, threshold):
self.name = name
self.threshold = threshold
self.identifier = None
self.color = None
self.in_region = None
self.objects_in_zone = []
self.is_region = True
self.material_path = None
self.material_scale = None
def get_objs_as_str(self):
return ''.join(self.objects_in_zone)
def append_inside_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a new mask that is only within the first mask
"""
mask_indices = np.where((area_to_add >= area_value) & (area != 0))
area2 = np.copy(area)
area2[mask_indices] = area_value # area_value
return area2
def append_to_area(
area: npt.NDArray[np.float64],
area_to_add: npt.NDArray[np.float64],
area_value: float,
) -> npt.NDArray[np.float64]:
"""
Function returns a mask appended to another one
"""
mask_indices = np.where(area_to_add >= area_value)
area[mask_indices] = area_value
return area
def show_plot(area):
cvals = [0, 1, 2, 3, 4]
colors = ['lightgreen', 'green', 'yellow', 'brown', 'red']
norm = plt.Normalize(min(cvals), max(cvals))
tuples = list(zip(map(norm, cvals), colors))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', tuples)
plt.imshow(area, cmap=cmap, norm=norm)
plt.colorbar()
plt.show()
def fill_area(
area: npt.NDArray[np.float64],
size: int,
region_value: int,
object_value: int,
) -> Tuple[npt.NDArray[np.float64], list]:
# Generate points and fill the area with objects using Poisson
points = PoissonDisk.Bridson_sampling(
width=area.shape[0], height=area.shape[1], radius=size, k=30
)
new_points = []
for p in points:
x_int = int(p[0])
y_int = int(p[1])
if area[y_int][x_int] == region_value:
# area[y_int][x_int] = object_value
new_points.append(p)
return area, new_points
class EnvTool:
def __init__(self) -> None:
self.worldHandler = WorldHandler(',', '')
self.size =256
self.seed = 0
self.cbar = None
def load_objects(self):
# Code for the "Load Objects" page
print('Load Objects page')
filename = askopenfilename()
self.worldHandler._object_path = filename
self.worldHandler._read_objects()
def write_data(self):
data = {}
entry_list = self.entry_list
for entry in entry_list:
if entry.is_region:
data[entry.identifier] = {}
data[entry.identifier]['objects'] = entry.objects_in_zone
data[entry.identifier]['zones'] = {}
data[entry.identifier]['threshold'] = entry.threshold
data[entry.identifier]['material_path'] = entry.material_path
data[entry.identifier]['material_scale'] =entry.material_scale
else:
# we are in a zone - get the region we are in
id = int(entry.in_zone)
print(id)
# if not data[id]["zones"][entry.identifier]:
if not id in data.keys():
data[id]['zones'][entry.identifier] = {}
if not entry.identifier in data[id]['zones'].keys():
data[id]['zones'][entry.identifier] = {}
data[id]['zones'][entry.identifier][
'objects'
] = entry.objects_in_zone
data[id]['zones'][entry.identifier][
'threshold'
] = entry.threshold
data[id]['zones'][entry.identifier]['material_path'] = entry.material_path
data[id]['zones'][entry.identifier]['material_scale'] =entry.material_scale
# json.dump(data)
full_data = {}
full_data['seed'] = self.seed
full_data['regions'] = data
full_data['size'] = self.size
# folder_path = askdirectory()
files = [('json', "*.json")]
folder_path = asksaveasfile(filetypes=files,defaultextension=files)
print(folder_path)
folder_path = folder_path.name
with open(f'{folder_path}', 'w') as f:
json.dump(full_data, f)
print(full_data)
# Function to delete an entry from the list
def delete_entry(self, entry, index):
entry.destroy()
self.entry_list.pop(index)
self.update_identifiers()
self.update_plot()
def update_identifiers(self):
for i, entry_info in enumerate(self.entry_list):
entry_info.identifier = i + 1
def add_entry(self):
name = self.input_entry1.get()
threshold = self.input_entry2.get()
parent_zone = self.input_entry3.get()
mat_path = self.input_entry_mat_path.get()
mat_scale = self.input_entry_mat_scale.get()
if name and threshold:
self.entry_frame = tk.Frame(self.entries_frame)
self.entry_frame.pack(anchor='w')
self.entry_info = EntryInfo(name, threshold)
self.entry_info.material_path = mat_path
self.entry_info.material_scale = mat_scale
for i in self.listbx.curselection():
self.entry_info.objects_in_zone.append(self.listbx.get(i))
self.entry_info.identifier = len(self.entry_list) + 1
id = self.entry_info.identifier
self.entry_info.color = "BLACK"#generate_random_color()
if parent_zone != '':
self.entry_info.in_zone = parent_zone
self.entry_info.is_region = False
else:
self.entry_info.in_zone = 0
parent_zone = 0
self.entry_list.append(self.entry_info)
self.entry_label = tk.Label(
self.entry_frame,
text=f'ID: {id}, Name: {name}, Threshold: {threshold}, parent zone: {parent_zone}, objects: {self.entry_info.get_objs_as_str()}',
fg=self.entry_info.color,
)
self.entry_label.pack(side='left')
self.delete_button = tk.Button(
self.entry_frame,
text='Delete',
command=lambda entry=self.entry_frame, index=len(
self.entry_list
) - 1: self.delete_entry(entry, index),
)
self.delete_button.pack(side='left')
# entries_listbox.insert(
# tk.END, f'Name: {name}, Threshold: {threshold}'
# )
self.input_entry1.delete(0, tk.END)
self.input_entry2.delete(0, tk.END)
self.input_entry3.delete(0, tk.END)
self.update_plot()
def update_plot(self):
# fig.clear()
self.cbar.remove()
self.ax.clear()
self.arr = np.zeros((self.size, self.size))
self.past_id = 0
for entry in self.entry_list:
print(
'identigier ',
entry.identifier,
' in int form ',
int(entry.identifier),
)
# check the parent zone. if it is not 0 we need to generate it inside this zone
# we want to keep both tho.
# the inside zone one must not completely overwite the parent REGION
# in this case we dont add it to the main array we just perfrom the calculation and save it
print("here")
print(self.size, entry.threshold)
self.new_arr = PerlinNoise.generate_region2(
seed=int(entry.identifier),
shape=(self.size, self.size),
threshold=float(entry.threshold),
show_plot=False,
region_value=int(entry.identifier),
)
# This zone will be saved and used later
if entry.in_zone != 0:
self.zone_to_save = append_inside_area(
self.arr, self.new_arr, int(entry.identifier)
)
self.arr = self.zone_to_save
else:
print('Adding region to general area')
self.arr =append_to_area(
self.arr, self.new_arr, int(entry.identifier)
)
self.i = self.ax.imshow(self.arr)
self.cbar = self.fig.colorbar(self.i)
cbar_ticks = [
int(e.identifier) for e in self.entry_list
] # np.linspace(0.0, 1.0, num=6, endpoint=True)
self.cbar.set_ticks(cbar_ticks)
self.cbar.draw_all()
# ax.bar(x, y, color=colors)
# ax.set_xlabel('Entry')
# ax.set_ylabel('Threshold')
self.canvas.draw()
def create_regions(self):
# Code for the "Create Regions" page
print('Create Regions page')
# Create a new window for the "Create Regions" page
self.regions_window = tk.Toplevel()
self.regions_window.title('Create Regions')
# First column: List of entries with delete buttons
self.entries_frame = tk.Frame(self.regions_window)
self.entries_frame.grid(row=0, column=0, padx=10, pady=10, sticky='nsew')
self.yscrollbar = Scrollbar(self.entries_frame)
self.yscrollbar.pack(side=RIGHT, fill=Y)
self.entries_label = tk.Label(self.entries_frame, text='Entries:')
self.entries_label.pack()
options = ['Option 1', 'Option 2', 'Option 3', 'Option 4', 'Option 5']
options = [str(i) for i in range(100)]
self.selected_items = []
# List to store entry objects
self.entry_list = []
# Second column: Text entries and a button
self.inputs_frame = tk.Frame(self.regions_window)
self.inputs_frame.grid(row=0, column=1, padx=10, pady=10, sticky='nsew')
self.input_label1 = tk.Label(self.inputs_frame, text='Name:')
self.input_label1.pack()
self.input_entry1 = tk.Entry(self.inputs_frame)
self.input_entry1.pack()
self.input_label2 = tk.Label(self.inputs_frame, text='Threshold:')
self.input_label2.pack()
self.input_entry2 = tk.Entry(self.inputs_frame)
self.input_entry2.pack()
self.input_label3 = tk.Label(self.inputs_frame, text='In zone ID:')
self.input_label3.pack()
self.input_entry3 = tk.Entry(self.inputs_frame)
self.input_entry3.pack()
self.input_label_mat_path = tk.Label(self.inputs_frame, text='Material Path')
self.input_label_mat_path.pack()
self.input_entry_mat_path = tk.Entry(self.inputs_frame)
self.input_entry_mat_path.pack()
self.input_label_mat_scale = tk.Label(self.inputs_frame, text='Material Scale')
self.input_label_mat_scale.pack()
self.input_entry_mat_scale = tk.Entry(self.inputs_frame)
self.input_entry_mat_scale.pack()
ttk.Label(self.inputs_frame, text='Add objects to zone').pack()
self.input_label4 = tk.Label(self.inputs_frame, text='Add to zone with the ID of:')
self.input_label4.pack()
self.yscrollbar = Scrollbar(self.inputs_frame)
self.yscrollbar.pack(side=RIGHT, fill=Y)
self.listbx = Listbox(
self.inputs_frame, selectmode='multiple', yscrollcommand=self.yscrollbar.set
)
self.listbx.pack(padx=10, pady=10, expand=YES, fill='both')
x = []
for key in self.worldHandler.objects_dict:
x.append(key)
for each_item in range(len(x)):
self.listbx.insert(END, x[each_item])
self.listbx.itemconfig(each_item, bg='white')
self.yscrollbar.config(command=self.listbx.yview)
self.process_button = tk.Button(
self.inputs_frame, text='Add entry', command=self.add_entry
)
self.process_button.pack()
# Third column: Empty column
self.third_column_frame = tk.Frame(self.regions_window)
self.third_column_frame.grid(row=0, column=2, padx=10, pady=10, sticky='nsew')
self.save_all_button = tk.Button(
self.inputs_frame, text='save all', command=self.write_data
)
self.save_all_button.pack()
# Example Matplotlib plot
self.fig, self.ax = plt.subplots()
self.canvas = FigureCanvasTkAgg(self.fig, master=self.third_column_frame)
self.canvas.get_tk_widget().pack()
self.arr = np.zeros((self.size, self.size))
self.i = self.ax.imshow(self.arr)
self.cbar = plt.colorbar(self.i)
self.cbar.ax.set_autoscale_on(True)
def set_size(self):
print(" =========== updating size to ==============")
self.size = int(self.input_sizeentry.get())
if not self.size or self.size < 0:
self.size = 256
print(self.size)
def set_seed(self):
self.seed = int(self.input_seed_entry.get())
def main_page(self):
self.main_window = tk.Tk()
self.main_window.geometry("500x500")
self.main_window.title('Main Window')
self.load_objects_button = tk.Button(
self.main_window, text='Load Objects', command=self.load_objects
)
self.load_objects_button.pack()
self.input_sizelabel= tk.Label(self.main_window, text='World Size:')
self.input_sizelabel.pack()
self.input_sizeentry = tk.Entry(self.main_window)
self.input_sizeentry.pack()
self.set_size_button = tk.Button(
self.main_window, text='set size', command=self.set_size
)
self.set_size_button.pack()
self.input_seed_label= tk.Label(self.main_window, text='seed:')
self.input_seed_label.pack()
self.input_seed_entry = tk.Entry(self.main_window)
self.input_seed_entry.pack()
self.set_seed_button = tk.Button(
self.main_window, text='set seed', command=self.set_seed
)
self.set_seed_button.pack()
self.create_regions_button = tk.Button(
self.main_window, text='Open map creator', command=self.create_regions
)
self.create_regions_button.pack()
# create_zones_button = tk.Button(
# main_window, text='Create Zones', command=create_zones
# )
# create_zones_button.pack()
self.main_window.mainloop()
if __name__ == '__main__':
# main_page()
tool = EnvTool()
tool.main_page()
| 15,702 | Python | 32.842672 | 145 | 0.587441 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/Camera.py | import pathlib
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import numpy as np
import omni.replicator.core as rep
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from PIL import Image
class DepthCamera:
def __init__(
self,
position=(0, 0, 0),
rotation=(0, 0, 0),
image_size=(512, 512),
attach=True,
parent='/World/DepthCamera',
name='DepthCamera',
) -> None:
self.__rgb_annot: Annotator
self.__save_path = ''
self.__pos = position
self.__rot = rotation
self.__image_size = image_size
self.__attach = attach
self.__name = name
self.__focal_length = 24.0
self.__focus_distance = 400.0
self.__f_stop = 0.0
self.__horizontal_aperture = 20.955
self.__horizontal_aperture_offset = 0.0
self.__vertical_aperture_offset = 0.0
self.__clipping_range = (1.0, 10000000.0)
self.__resolution = (512, 512)
self.sample_count = 0
self.save_path = None
self._o = "[DepthCamera] "
def init_output_folder(self, path):
self.save_path = path
print(f"{self._o} Initializing output folders")
pathlib.Path(path +"/camera").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraDepth").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraLabels").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/cameraPC").mkdir(parents=True, exist_ok=True)
def init_sensor(self, parent):
print(self.__clipping_range)
self.__cam = rep.create.camera(
position=self.__pos,
parent=parent,
name=self.__name,
rotation=self.__rot,
focal_length=self.__focal_length,
focus_distance=self.__focus_distance,
f_stop=self.__f_stop,
horizontal_aperture=self.__horizontal_aperture,
horizontal_aperture_offset=self.__horizontal_aperture_offset,
vertical_aperture_offset=self.__vertical_aperture_offset,
clipping_range=self.__clipping_range,
)
print("resolution ", self.__resolution)
self.__rp: og.Node = rep.create.render_product(
self.__cam, self.__resolution
)
print(f"{self._o} Attaching annotaors to camera.")
if self.__attach:
self.__init_annotators()
self.__attach_annotoators()
def read_from_json(self, data):
# We have been given data["LIDAR"]
# for instance_ids in data:
camera_settings = data
self.__name = camera_settings['name']
self.__focal_length = camera_settings['focal_length']
self.__focus_distance = camera_settings['focus_distance']
self.__f_stop = camera_settings['f_stop']
self.__horizontal_aperture = camera_settings['horizontal_aperture']
self.__horizontal_aperture_offset = camera_settings[
'horizontal_aperture_offset'
]
self.__vertical_aperture_offset = camera_settings[
'vertical_aperture_offset'
]
self.__clipping_range = (camera_settings['clipping_range'][0],camera_settings["clipping_range"][1])
self.__resolution = camera_settings['resolution']
self.__pos = camera_settings["position"]
self.__rot = camera_settings["rotation"]
def construct_pc(self, rgb_image, depth_image):
pass
def __init_annotators(self):
self.rgb_annot = rep.AnnotatorRegistry.get_annotator('rgb')
self.depth_annot = rep.AnnotatorRegistry.get_annotator(
'distance_to_camera'
)
# self.pc_annot = rep.AnnotatorRegistry.get_annotator("pointcloud")
self.sem_annot = rep.AnnotatorRegistry.get_annotator(
'semantic_segmentation'
)
def __attach_annotoators(self):
self.depth_annot.attach(self.__rp)
self.rgb_annot.attach(self.__rp)
self.sem_annot.attach(self.__rp)
# self.pc_annot.attach(self.__rp)
def __detatch_annototators(self):
self.depth_annot.detach(self.__rp)
self.rgb_annot.detach(self.__rp)
self.sem_annot.detach(self.__rp)
# self.pc_annot.dettach(self.__rp)
def sample_sensor(self):
# return
# await rep.orchestrator.step_async()
rgb_data = self.rgb_annot.get_data()
np.save(f"{self.save_path}camera/{self.sample_count}.npy", rgb_data)
# print(rgb_data)
im = Image.fromarray(rgb_data,"RGBA")
path = f"{self.save_path}camera/{self.sample_count}_img.png"
im.save(path)
depth_data = self.depth_annot.get_data()
np.save(f"{self.save_path}cameraDepth/{self.sample_count}.npy",depth_data)
# np.save('/home/jon/Documents/temp/depth.npy', depth_data)
sem_data = self.sem_annot.get_data()
np.save(f"{self.save_path}cameraLabels/{self.sample_count}.npy",sem_data)
# pc_data = self.pc_annot.get_data()
# np.save(f"{self.save_path}cameraPC/{self.sample_count}.npy",pc_data)
self.sample_count += 1
# np.save('/home/jon/Documents/temp/sem.npy', sem_data)
return
def sample_sensor_return(self):
# return
# await rep.orchestrator.step_async()
rgb_data = self.rgb_annot.get_data()
return rgb_data
| 5,705 | Python | 35.113924 | 107 | 0.605434 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/__init__.py |
import importlib
import sys
print ("[CUSTOM] Reloading...")
L = list(sys.modules.keys())
for k in L:
if "com.copycat" in k:
print (k)
importlib.reload(sys.modules[k])
from .LIDAR import Lidar
from .Camera import DepthCamera
| 236 | Python | 18.749998 | 36 | 0.694915 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/IMU.py | import pathlib
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
import numpy as np
import omni.replicator.core as rep
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
import omni
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from pxr import Usd, Gf, UsdGeom
from omni.isaac.sensor import _sensor
class IMUSensor:
def __init__(
self,
position=(0, 0, 0),
rotation=(0, 0, 0),
orientation=(1, 1, 1, 1),
parent='/World',
name='/DepthCamera',
) -> None:
self.__pos = position
self.__ori = orientation
self.__rot = rotation
self.__name = name
# self.__imu_prim
self._is = _sensor.acquire_imu_sensor_interface()
self.__path = ''
self.save_path = ""
self.sample_count = 0
# self.__attach_annotoators()
self._o = "[IMUSensor] "
def init_output_folder(self, path):
print(f"{self._o} Initializing output folders")
self.save_path =path +"/posesIMU"
pathlib.Path(self.save_path).mkdir(parents=True, exist_ok=True)
def init_sensor(self, parent):
x,y,z = self.__pos
qx,qw,qy,qz = self.__rot
self.__path = parent + "/" + self.__name
result, self.__imu_prim = omni.kit.commands.execute(
'IsaacSensorCreateImuSensor',
path='/' + self.__name,
parent=parent,
sensor_period=-1.0,
translation=Gf.Vec3d(x,y,z),
orientation=Gf.Quatd(qx,qw,qy,qz),
visualize=True,
)
def read_from_json(self, data):
# We have been given data["LIDAR"]
self.__name = data['name']
self.__pos = data['position']
self.__rot =data['rotation']
def sample_sensor(self):
# print(self.__path)
# return
# await rep.orchestrator.step_async()
reading = self._is.get_sensor_readings(self.__path)
np.save(f"{self.save_path}/{self.sample_count}.npy",reading)
self.sample_count += 1
| 2,316 | Python | 26.915662 | 71 | 0.585924 |
RPL-CS-UCL/IsaacSyntheticPerception/com/SyntheticPerception/app/Sensors/LIDAR.py | import pathlib
from omni.syntheticdata.scripts.sensors import enable_sensors
from pxr import (
UsdGeom,
Gf,
UsdPhysics,
Semantics,
) # pxr usd imports used to create cube
from omni.isaac.range_sensor import _range_sensor
import omni
import omni.kit.commands
import omni.timeline
import omni.kit.viewport
from pxr import Usd, Gf, UsdGeom
import omni.kit.commands
import numpy as np
import omni.replicator.core as rep
import numpy as np
from typing import Any, Dict, Sequence, Tuple, Union
import omni.graph.core as og
from omni.replicator.core.scripts.annotators import Annotator
from omni.isaac.core.prims import XFormPrim, RigidPrim
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dynamic_control import _dynamic_control
from pxr import Sdf
class Lidar:
def __init__(
self,
path='/Lidar1',
parent='/World',
min_range=0.4,
max_range=100.0,
draw_points=False,
draw_lines=False,
horizontal_fov=360.0,
vertical_fov=60.0,
horizontal_resolution=0.4,
vertical_resolution=0.4,
rotation_rate=0,
high_lod=True,
yaw_offset=0.0,
enable_semantics=False,
origin_pos=(2.0, 0.0, 4.0),
):
self.__path = '/' + path
self.__min_range = min_range
self.__max_range = max_range
self.__draw_points = draw_points
self.__draw_lines = draw_lines
self.__horizontal_fov = horizontal_fov
self.__vertical_fov = vertical_fov
self.__horizontal_resolution = horizontal_resolution
self.__vertical_resolution = vertical_resolution
self.__rotation_rate = rotation_rate
self.__high_lod = high_lod
self.__yaw_offset = yaw_offset
self.__enable_semantics = enable_semantics
self.__origin_pos = origin_pos
self.__rotation = [0.0,0.0,0.0]
self.sample_count = 0
self.save_path = None
self._o = "[LiDAR] "
def init_output_folder(self, path):
print(f"{self._o} Initializing output folders")
self.save_path = path
pathlib.Path(path +"/velodyne").mkdir(parents=True, exist_ok=True)
pathlib.Path(path +"/velodyneLabels").mkdir(parents=True, exist_ok=True)
def read_from_json(self, data):
# We have been given data["LIDAR"]
# for instance_ids in data:
lidar_settings = data
print(lidar_settings["name"])
self.__path = '/' + lidar_settings['name']
self.__min_range = lidar_settings['min_range']
self.__max_range = lidar_settings['max_range']
self.__draw_points = lidar_settings['draw_points']
self.__draw_lines = lidar_settings['draw_lines']
self.__horizontal_fov = lidar_settings['horizontal_fov']
self.__vertical_fov = lidar_settings['vertical_fov']
self.__horizontal_resolution = lidar_settings[
'horizontal_resolution'
]
self.__vertical_resolution = lidar_settings['vertical_resolution']
self.__rotation_rate = lidar_settings['rotation_rate']
self.__high_lod = lidar_settings['high_lod']
self.__yaw_offset = lidar_settings['yaw_offset']
self.__enable_semantics = lidar_settings['enable_semantics']
self.__origin_pos = lidar_settings['origin_pos']
self.__rotation = lidar_settings['rotation']
def init_sensor(self, parent):
print(f'init the lidar {parent}')
# self.__lidarInterface = _range_sensor.acquire_lidar_sensor_interface()
_, self.__lidar_prim = omni.kit.commands.execute(
'RangeSensorCreateLidar',
path=self.__path,
parent=parent,
min_range=self.__min_range,
max_range=self.__max_range,
draw_points=self.__draw_points,
draw_lines=self.__draw_lines,
horizontal_fov=self.__horizontal_fov,
vertical_fov=self.__vertical_fov,
horizontal_resolution=self.__horizontal_resolution,
vertical_resolution=self.__vertical_resolution,
rotation_rate=self.__rotation_rate,
high_lod=self.__high_lod,
yaw_offset=self.__yaw_offset,
enable_semantics=self.__enable_semantics,
)
UsdGeom.XformCommonAPI(self.__lidar_prim).SetTranslate(
self.__origin_pos
)
self.__lidar_path = parent + self.__path
print(f'lidar path should be {self.__lidar_path}')
self.__lidarInterface = _range_sensor.acquire_lidar_sensor_interface()
# def sample_sensor(self):
# self.get_pc_and_semantic()
def sample_sensor(self):
# return
self.get_pc_and_semantic()
self.sample_count += 1
def get_pc_and_semantic(self, save_path='/home/jon/Documents/temp/a'):
pointcloud = self.__lidarInterface.get_point_cloud_data(
self.__lidar_path
)
semantics = self.__lidarInterface.get_semantic_data(self.__lidar_path)
# print(semantics)
np.save(f"{self.save_path}velodyne/{self.sample_count}.npy",pointcloud)
np.save(f"{self.save_path}velodyneLabels/{self.sample_count}.npy",semantics, allow_pickle=True)
return pointcloud, semantics
def __get_position(self):
transform = Gf.Transform()
transform.SetMatrix(
UsdGeom.Xformable(self.__lidar_prim).ComputeLocalToWorldTransform(
Usd.TimeCode.Default()
)
)
return transform.GetTranslation()
def __clear_max_lidar_points(self, pc, sem, lidar_pos, max_dist):
new_points = []
new_sems = []
for seq_id in range(len(pc)):
for point_id in range(len(pc[seq_id])):
point = pc[seq_id][point_id]
dist = np.linalg.norm(point - lidar_pos)
if dist < max_dist - 10:
new_points.append(pc[seq_id][point_id])
new_sems.append(sem[seq_id][point_id])
return np.array(new_points), np.array(new_sems)
| 6,074 | Python | 35.377245 | 103 | 0.609318 |
RPL-CS-UCL/IsaacSyntheticPerception/config/extension.toml | [core]
reloadable = true
display_name = "Synthetic Perception"
[package]
title="Synthetic Perception"
description="Synthetic Perception desc"
category=""
authors=['Jon']
keywords=['custom']
[dependencies]
"omni.isaac.dynamic_control" = {}
"omni.isaac.range_sensor" = {}
"omni.syntheticdata" = {}
[[python.module]]
name = "com.SyntheticPerception.app"
[[native.plugin]]
recursive = false
| 393 | TOML | 15.416666 | 39 | 0.722646 |
RPL-CS-UCL/IsaacSyntheticPerception/docs/source/index.rst | .. SyntheticPerception documentation master file, created by
sphinx-quickstart on Tue Mar 14 13:24:15 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to SyntheticPerception's documentation!
===============================================
.. toctree::
:maxdepth: 2
:caption: Contents:
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 473 | reStructuredText | 21.571428 | 76 | 0.625793 |
RPL-CS-UCL/IsaacSyntheticPerception/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'SyntheticPerception'
copyright = '2023, Jonathan Edward Embley-Riches'
author = 'Jonathan Edward Embley-Riches'
release = '0.1'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = []
templates_path = ['_templates']
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| 978 | Python | 32.75862 | 87 | 0.635992 |
vstrozzi/FRL-SHAC-Extension/diffrl_conda.yml | name: shac
channels:
- pytorch
- defaults
dependencies:
- python=3.8.13=h12debd9_0
- pytorch=1.11.0=py3.8_cuda11.3_cudnn8.2.0_0
- torchvision=0.12.0=py38_cu113
- pip:
- pyyaml==6.0
- tensorboard==2.8.0
- tensorboardx==2.5
- urdfpy==0.0.22
- usd-core==22.3
| 288 | YAML | 18.266665 | 46 | 0.611111 |
vstrozzi/FRL-SHAC-Extension/README.md | # SHAC
This repository contains the implementation for the paper [Accelerated Policy Learning with Parallel Differentiable Simulation](https://short-horizon-actor-critic.github.io/) (ICLR 2022).
In this paper, we present a GPU-based differentiable simulation and propose a policy learning method named SHAC leveraging the developed differentiable simulation. We provide a comprehensive benchmark set for policy learning with differentiable simulation. The benchmark set contains six robotic control problems for now as shown in the figure below.
<p align="center">
<img src="figures/envs.png" alt="envs" width="800" />
</p>
## Installation
- `git clone https://github.com/NVlabs/DiffRL.git --recursive`
- The code has been tested on
- Operating System: Ubuntu 16.04, 18.04, 20.04, 21.10, 22.04
- Python Version: 3.7, 3.8
- GPU: TITAN X, RTX 1080, RTX 2080, RTX 3080, RTX 3090, RTX 3090 Ti
#### Prerequisites
- In the project folder, create a virtual environment in Anaconda:
```
conda env create -f diffrl_conda.yml
conda activate shac
```
- dflex
```
cd dflex
pip install -e .
```
- rl_games, forked from [rl-games](https://github.com/Denys88/rl_games) (used for PPO and SAC training):
````
cd externals/rl_games
pip install -e .
````
- Install an older version of protobuf required for TensorboardX:
````
pip install protobuf==3.20.0
````
#### Test Examples
A test example can be found in the `examples` folder.
```
python test_env.py --env AntEnv
```
If the console outputs `Finish Successfully` in the last line, the code installation succeeds.
## Training
Running the following commands in `examples` folder allows to train Ant with SHAC.
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --logdir ./logs/Ant/shac
```
We also provide a one-line script in the `examples/train_script.sh` folder to replicate the results reported in the paper for both our method and for baseline method. The results might slightly differ from the paper due to the randomness of the cuda and different Operating System/GPU/Python versions. The plot reported in paper is produced with TITAN X on Ubuntu 16.04.
#### SHAC (Our Method)
For example, running the following commands in `examples` folder allows to train Ant and SNU Humanoid (Humanoid MTU in the paper) environments with SHAC respectively for 5 individual seeds.
```
python train_script.py --env Ant --algo shac --num-seeds 5
```
```
python train_script.py --env SNUHumanoid --algo shac --num-seeds 5
```
#### Baseline Algorithms
For example, running the following commands in `examples` folder allows to train Ant environment with PPO implemented in RL_games for 5 individual seeds,
```
python train_script.py --env Ant --algo ppo --num-seeds 5
```
## Testing
To test the trained policy, you can input the policy checkpoint into the training script and use a `--play` flag to indicate it is for testing. For example, the following command allows to test a trained policy (assume the policy is located in `logs/Ant/shac/policy.pt`)
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --checkpoint ./logs/Ant/shac/policy.pt --play [--render]
```
The `--render` flag indicates whether to export the video of the task execution. If does, the exported video is encoded in `.usd` format, and stored in the `examples/output` folder. To visualize the exported `.usd` file, refer to [USD at NVIDIA](https://developer.nvidia.com/usd).
## Citation
If you find our paper or code is useful, please consider citing:
```kvk
@inproceedings{xu2021accelerated,
title={Accelerated Policy Learning with Parallel Differentiable Simulation},
author={Xu, Jie and Makoviychuk, Viktor and Narang, Yashraj and Ramos, Fabio and Matusik, Wojciech and Garg, Animesh and Macklin, Miles},
booktitle={International Conference on Learning Representations},
year={2021}
}
``` | 3,885 | Markdown | 34.327272 | 370 | 0.73668 |
vstrozzi/FRL-SHAC-Extension/examples/train_script.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import argparse
configs = {'Ant': 'ant.yaml', 'CartPole': 'cartpole_swing_up.yaml', 'Hopper': 'hopper.yaml', 'Cheetah': 'cheetah.yaml', 'Humanoid': 'humanoid.yaml', 'SNUHumanoid': 'snu_humanoid.yaml'}
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Ant', choices=['Ant', 'CartPole', 'Hopper', 'Cheetah', 'Humanoid', 'SNUHumanoid'])
parser.add_argument('--algo', type=str, default='shac', choices=['shac', 'ppo', 'sac', 'bptt'])
parser.add_argument('--num-seeds', type=int, default=5)
parser.add_argument('--save-dir', type=str, default='./logs/')
args = parser.parse_args()
''' generate seeds '''
seeds = []
for i in range(args.num_seeds):
seeds.append(i * 10)
''' generate commands '''
commands = []
for i in range(len(seeds)):
seed = seeds[i]
save_dir = os.path.join(args.save_dir, args.env, args.algo, str(seed))
config_path = os.path.join('./cfg', args.algo, configs[args.env])
if args.algo == 'shac':
script_name = 'train_shac.py'
elif args.algo == 'ppo' or args.algo == 'sac':
script_name = 'train_rl.py'
elif args.algo == 'bptt':
script_name = 'train_bptt.py'
else:
raise NotImplementedError
cmd = 'python {} '\
'--cfg {} '\
'--seed {} '\
'--logdir {} '\
'--no-time-stamp'\
.format(script_name, config_path, seed, save_dir)
commands.append(cmd)
for command in commands:
os.system(command) | 1,898 | Python | 34.830188 | 184 | 0.653319 |
vstrozzi/FRL-SHAC-Extension/examples/train_bptt.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# gradient-based policy optimization by actor critic method
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import argparse
import envs
import algorithms.bptt as bptt
import os
import sys
import yaml
import torch
import numpy as np
import copy
from utils.common import *
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--cfg", "type": str, "default": "./cfg/ac/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights"},
{"name": "--logdir", "type": str, "default": "logs/tmp/ac/"},
{"name": "--save-interval", "type": int, "default": 0},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"},
{"name": "--device", "type": str, "default": "cuda:0"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."}]
# parse arguments
args = parse_arguments(
description="BPTT",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
args.device = torch.device(args.device)
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
traj_optimizer = bptt.BPTT(cfg_train)
if args.train:
traj_optimizer.train()
else:
traj_optimizer.play(cfg_train) | 4,024 | Python | 34.307017 | 124 | 0.601392 |
vstrozzi/FRL-SHAC-Extension/examples/combine_batch_logs.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
based on https://stackoverflow.com/questions/43068200/how-to-display-the-average-of-multiple-runs-on-tensorboard
'''
import os
from collections import defaultdict
import numpy as np
import shutil
import tensorflow as tf
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from tensorboardX import SummaryWriter
import argparse
tag_mapping = {#'rewards0/frame': 'policy_loss/step', 'rewards0/iter': 'policy_loss/iter', 'rewards0/time': 'policy_loss/time',
'rewards0/frame': 'rewards/step', 'rewards0/iter': 'rewards/iter', 'rewards0/time': 'rewards/time',
# 'rewards/frame': 'policy_loss/step', 'rewards/iter': 'policy_loss/iter', 'rewards/time': 'policy_loss/time',
'rewards/frame': 'rewards/step', 'rewards/step': 'rewards/step', 'rewards/iter': 'rewards/iter', 'rewards/time': 'rewards/time',
'policy_loss/step': 'policy_loss/step', 'policy_loss/iter': 'policy_loss/iter', 'policy_loss/time': 'policy_loss/time',
'actor_loss/iter': 'actor_loss/iter', 'actor_loss/step': 'actor_loss/step',
# 'policy_loss/step': 'rewards/step', 'policy_loss/iter': 'rewards/iter', 'policy_loss/time': 'rewards/time',
'training_loss/step': 'training_loss/step', 'training_loss/iter': 'training_loss/iter', 'training_loss/time': 'training_loss/time',
'best_policy_loss/step': 'best_policy_loss/step',
'episode_lengths/iter': 'episode_lengths/iter', 'episode_lengths/step': 'episode_lengths/step', 'episode_lengths/frame': 'episode_lengths/step',
'value_loss/step': 'value_loss/step', 'value_loss/iter': 'value_loss/iter'}
def tabulate_events(dpath):
summary_iterators = []
for dname in os.listdir(dpath):
for subfolder_name in args.subfolder_names:
if os.path.exists(os.path.join(dpath, dname, subfolder_name)):
summary_iterators.append(EventAccumulator(os.path.join(dpath, dname, subfolder_name)).Reload())
break
tags = summary_iterators[0].Tags()['scalars']
# for it in summary_iterators:
# assert it.Tags()['scalars'] == tags
out_values = dict()
out_steps = dict()
for tag in tags:
if tag not in tag_mapping.keys():
continue
# gathering steps
steps_set = set()
for summary in summary_iterators:
for event in summary.Scalars(tag):
steps_set.add(event.step)
is_reward = ('reward' in tag)
is_loss = ('loss' in tag)
steps = list(steps_set)
steps.sort()
# steps = steps[:500]
new_tag_name = tag_mapping[tag]
out_values[new_tag_name] = np.zeros((len(steps), len(summary_iterators)))
out_steps[new_tag_name] = np.array(steps)
for summary_id, summary in enumerate(summary_iterators):
events = summary.Scalars(tag)
i = 0
for step_id, step in enumerate(steps):
while i + 1 < len(events) and events[i + 1].step <= step:
i += 1
# if events[i].value > 100000. or events[i].value < -100000.:
# import IPython
# IPython.embed()
out_values[new_tag_name][step_id, summary_id] = events[i].value
return out_steps, out_values
def write_combined_events(dpath, acc_steps, acc_values, dname='combined'):
fpath = os.path.join(dpath, dname)
if os.path.exists(fpath):
shutil.rmtree(fpath)
writer = SummaryWriter(fpath)
tags = acc_values.keys()
for tag in tags:
for i in range(len(acc_values[tag])):
mean = np.array(acc_values[tag][i]).mean()
writer.add_scalar(tag, mean, acc_steps[tag][i])
writer.flush()
parser = argparse.ArgumentParser()
parser.add_argument('--batch-folder', type = str, default='path/to/batch/folder')
parser.add_argument('--subfolder-names', type = str, nargs = '+', default=['log', 'runs']) # 'runs' for rl
args = parser.parse_args()
dpath = args.batch_folder
acc_steps, acc_values = tabulate_events(dpath)
write_combined_events(dpath, acc_steps, acc_values) | 4,683 | Python | 39.730434 | 160 | 0.62823 |
vstrozzi/FRL-SHAC-Extension/examples/test_env.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import torch
import random
import envs
from utils.common import *
import argparse
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument('--env', type = str, default = 'AntEnv')
parser.add_argument('--num-envs', type = int, default = 64)
parser.add_argument('--render', default = False, action = 'store_true')
args = parser.parse_args()
seeding()
env_fn = getattr(envs, args.env)
env = env_fn(num_envs = args.num_envs, \
device = 'cuda:0', \
render = args.render, \
seed = 0, \
stochastic_init = True, \
MM_caching_frequency = 16, \
no_grad = True)
obs = env.reset()
num_actions = env.num_actions
t_start = time.time()
reward_episode = 0.
for i in range(1000):
actions = torch.randn((args.num_envs, num_actions), device = 'cuda:0')
obs, reward, done, info = env.step(actions)
reward_episode += reward
t_end = time.time()
print('fps = ', 1000 * args.num_envs / (t_end - t_start))
print('mean reward = ', reward_episode.mean().detach().cpu().item())
print('Finish Successfully')
| 1,731 | Python | 25.646153 | 76 | 0.677643 |
vstrozzi/FRL-SHAC-Extension/examples/train_shac.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# gradient-based policy optimization by actor critic method
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import argparse
import envs
import algorithms.shac as shac
import os
import sys
import yaml
import torch
import numpy as np
import copy
from utils.common import *
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--cfg", "type": str, "default": "./cfg/shac/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights"},
{"name": "--logdir", "type": str, "default": "logs/tmp/shac/"},
{"name": "--save-interval", "type": int, "default": 0},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"},
{"name": "--device", "type": str, "default": "cuda:0"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."}]
# parse arguments
args = parse_arguments(
description="SHAC",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
args.device = torch.device(args.device)
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
traj_optimizer = shac.SHAC(cfg_train)
if args.train:
traj_optimizer.train()
else:
traj_optimizer.play(cfg_train) | 4,024 | Python | 34.307017 | 124 | 0.602386 |
vstrozzi/FRL-SHAC-Extension/examples/train_rl.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
from rl_games.common import env_configurations, experiment, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.torch_runner import Runner
from rl_games.algos_torch import torch_ext
import argparse
import envs
import os
import sys
import yaml
import numpy as np
import copy
import torch
from utils.common import *
def create_dflex_env(**kwargs):
env_fn = getattr(envs, cfg_train["params"]["diff_env"]["name"])
env = env_fn(num_envs=cfg_train["params"]["config"]["num_actors"], \
render=args.render, seed=args.seed, \
episode_length=cfg_train["params"]["diff_env"].get("episode_length", 1000), \
no_grad=True, stochastic_init=cfg_train['params']['diff_env']['stochastic_env'], \
MM_caching_frequency=cfg_train['params']['diff_env'].get('MM_caching_frequency', 1))
print('num_envs = ', env.num_envs)
print('num_actions = ', env.num_actions)
print('num_obs = ', env.num_obs)
frames = kwargs.pop('frames', 1)
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
return env
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
self.full_state = {}
self.rl_device = "cuda:0"
self.full_state["obs"] = self.env.reset(force_reset=True).to(self.rl_device)
print(self.full_state["obs"].shape)
def step(self, actions):
self.full_state["obs"], reward, is_done, info = self.env.step(actions.to(self.env.device))
return self.full_state["obs"].to(self.rl_device), reward.to(self.rl_device), is_done.to(self.rl_device), info
def reset(self):
self.full_state["obs"] = self.env.reset(force_reset=True)
return self.full_state["obs"].to(self.rl_device)
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
print(info['action_space'], info['observation_space'])
return info
vecenv.register('DFLEX', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('dflex', {
'env_creator': lambda **kwargs: create_dflex_env(**kwargs),
'vecenv_type': 'DFLEX'})
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--num_envs", "type": int, "default": 0, "help": "Number of envirnments"},
{"name": "--cfg", "type": str, "default": "./cfg/rl/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights, only for rl_games RL library"},
{"name": "--rl_device", "type": str, "default": "cuda:0",
"help": "Choose CPU or GPU device for inferencing policy network"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."},
{"name": "--logdir", "type": str, "default": "logs/tmp/rl/"},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"}]
# parse arguments
args = parse_arguments(
description="RL Policy",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
if args.num_envs > 0:
cfg_train["params"]["config"]["num_actors"] = args.num_envs
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
# save config
if cfg_train['params']['general']['train']:
log_dir = cfg_train["params"]["general"]["logdir"]
os.makedirs(log_dir, exist_ok = True)
# save config
yaml.dump(cfg_train, open(os.path.join(log_dir, 'cfg.yaml'), 'w'))
runner = Runner()
runner.load(cfg_train)
runner.reset()
runner.run(vargs)
| 6,658 | Python | 34.801075 | 124 | 0.611745 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/hopper.pth
config:
name: 'Hopper_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'ant' | 982 | YAML | 17.203703 | 31 | 0.602851 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 512, 512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/humanoid_mtu.pth
config:
name: 'Humanoid_SNU_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 10000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.0002
actor_lr: 0.0003
critic_lr: 0.0003
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 2
replay_buffer_size: 1000000
num_actors: 256
env_config:
env_name: 'snu_humanoid' | 1,011 | YAML | 17.74074 | 33 | 0.614243 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/humanoid.pth
config:
name: 'Humanoid_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.0002
actor_lr: 0.0003
critic_lr: 0.0003
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 2
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'humanoid' | 985 | YAML | 17.259259 | 31 | 0.611168 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: 'Ant_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 128
env_config:
env_name: 'ant' | 974 | YAML | 17.055555 | 31 | 0.599589 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [64, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/cartpole_swing_up.pth
config:
name: 'CartPoleSwingUp_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 1000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 1024
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 32
env_config:
env_name: 'ant' | 1,003 | YAML | 17.592592 | 37 | 0.611166 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/sac/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/cheetah.pth
config:
name: 'Cheetah_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'ant' | 985 | YAML | 17.259259 | 31 | 0.604061 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam # ['gd', 'adam', 'sgd', 'lbfgs']
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_hopp_bptt
env_name: dflex
actor_learning_rate: 1e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 128
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 727 | YAML | 18.675675 | 48 | 0.580468 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [512, 256]
activation: elu
actor_logstd_init: -1.0
config:
name: df_humanoid_ac
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 16
save_interval: 200
player:
determenistic: True
games_num: 4
num_actors: 1
print_stats: True
| 716 | YAML | 17.868421 | 48 | 0.599162 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [256, 128]
activation: elu
actor_logstd_init: -1.0
config:
name: df_humanoid_bptt
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 32
save_interval: 200
player:
determenistic: True
games_num: 4
num_actors: 1
print_stats: True
| 723 | YAML | 18.052631 | 48 | 0.593361 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_ant_bptt
env_name: dflex
actor_learning_rate: 4e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 691 | YAML | 17.702702 | 48 | 0.586107 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [64, 64]
activation: elu
actor_logstd_init: -1.0
config:
name: df_cartpole_swing_up_bptt
env_name: dflex
actor_learning_rate: 1e-2 # adam with linear schedule
lr_schedule: linear # ['constant', 'linear']
betas: [0.7, 0.95] # adam
max_epochs: 500
steps_num: 240
grad_norm: 1.0
truncate_grads: True
num_actors: 64
player:
# render: True
determenistic: True
games_num: 12
num_actors: 4
print_stats: True
| 711 | YAML | 18.777777 | 57 | 0.609001 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/bptt/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam # ['gd', 'adam', 'sgd', 'lbfgs']
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_cheetah_bptt
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 128
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 731 | YAML | 18.783783 | 48 | 0.582763 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.