# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/rl_swing_up.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
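    # e.g. with the defaults above: sim_dt = (1.0/60.0)/4 = 1/240 s, so
    # sim_steps = int(4.0 * 240) = 960 substeps per episode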
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = True
name = "cartpole"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
link_width = 0.5
max_depth = depth
# if (True):
# # create a branched tree
# builder.add_articulation()
# test_util.build_tree(builder, angle=0.0, width=link_width, max_depth=max_depth)
# self.ground = False
# # add weight
# if (False):
# radius = 0.5
# X_pj = df.transform((link_width * 2.0, 0.0, 0.0), df.quat_from_axis_angle( (0.0, 0.0, 1.0), 0.0))
# X_cm = df.transform((radius, 0.0, 0.0), df.quat_identity())
# parent = len(builder.body_mass)-1
# link = builder.add_link(parent, X_pj, (0.0, 0.0, 1.0), df.JOINT_REVOLUTE)
# shape = builder.add_shape_sphere(link, pos=(0.0, 0.0, 0.0), radius=radius)
# builder.joint_q[0] = -math.pi*0.45
# cartpole
test_util.urdf_load(builder, "assets/" + self.name + ".urdf", df.transform((0.0, 2.5, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)), floating=False)
builder.joint_q[1] = -math.pi
self.pole_angle_penalty = 10.0
self.pole_velocity_penalty = 0.5
self.cart_action_penalty = 1.e-7
self.cart_velocity_penalty = 1.0
self.cart_position_penalty = 2.0
if self.name == "cartpole":
self.marker_body = 2
self.marker_offset = 1.0
self.discount_scale = 2.0
self.discount_factor = 0.5
if self.name == "cartpole_double":
self.marker_body = 3
self.marker_offset = 0.5
self.discount_scale = 6.0
self.discount_factor = 0.5
# # humanoid
# test_util.urdf_load(
# builder,
# "assets/humanoid.urdf",
# df.transform((0.0, 1.5, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
# floating=True,
# shape_ke=1.e+3*5.0,
# shape_kd=1.e+3,
# shape_kf=1.e+2,
# shape_mu=0.5)
# # set pd-stiffness
# for i in range(len(builder.joint_target_ke)):
# builder.joint_target_ke[i] = 10.0
# builder.joint_target_kd[i] = 1.0
#builder.joint_q[0] = -math.pi*0.45
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros(self.sim_steps, device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
traj = []
for i in range(0, self.sim_steps):
# df.config.no_grad = True
#df.config.verify_fp = True
self.state.joint_act[0] = self.actions[i]
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
                if (self.render and render and (i % self.sim_substeps == 0)):
with torch.no_grad():
X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
traj.append((X_pole[0], X_pole[1], X_pole[2]))
self.renderer.add_line_strip(traj, (1.0, 1.0, 1.0), self.render_time)
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# reward
if self.sim_time > 2.0:
discount_time = (self.sim_time - 2.0)
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
if self.name == "cartpole":
loss = loss + (torch.pow(self.state.joint_q[1], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[1], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[0], 2.0)*self.cart_position_penalty +
torch.pow(self.state.joint_qd[0], 2.0)*self.cart_velocity_penalty)*discount
if self.name == "cartpole_double":
loss = loss + (torch.pow(self.state.joint_q[1], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[1], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[2], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[2], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[0], 2.0)*self.cart_position_penalty +
torch.pow(self.state.joint_qd[0], 2.0)*self.cart_velocity_penalty)*discount
return loss + torch.dot(self.actions, self.actions)*self.cart_action_penalty
def run(self):
#with torch.no_grad():
l = self.loss()
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
                    if (self.render):  # 'render' is local to closure(); use the instance flag here
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
robot.load()
robot.run()
#robot.train(mode='lbfgs')
#df.config.verify_fp = True
#robot.verify(eps=1.e-2)
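# Example usage (hypothetical; trains the per-step cart actions from scratch
# instead of replaying a saved trajectory):
#   robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#   robot.train(mode='adam')
#   robot.save()   # writes outputs/cartpole.pt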
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_articulation_fk.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 10.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 0.05 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
x = 0.0
w = 0.5
max_depth = 3
# create a branched tree
builder.add_articulation()
test_util.build_tree(builder, angle=0.0, width=w, max_depth=max_depth)
# add weight
if (True):
radius = 0.1
X_pj = df.transform((w * 2.0, 0.0, 0.0), df.quat_from_axis_angle( (0.0, 0.0, 1.0), 0.0))
X_cm = df.transform((radius, 0.0, 0.0), df.quat_identity())
parent = len(builder.body_mass)-1
link = builder.add_link(parent, X_pj, (0.0, 0.0, 1.0), df.JOINT_REVOLUTE)
shape = builder.add_shape_sphere(link, pos=(0.0, 0.0, 0.0), radius=radius)
self.model = builder.finalize(adapter)
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 100.0
self.model.contact_mu = 0.75
self.model.ground = False
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# base state
self.state = self.model.state()
self.state.joint_q.requires_grad_()
# ik target
self.target = torch.tensor((1.0, 2.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/articulation_fk.usda")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
if (True):
self.state.body_X_sc, self.state.body_X_sm = df.adjoint.launch(df.eval_rigid_fk,
1,
[ # inputs
self.model.articulation_start,
self.model.joint_type,
self.model.joint_parent,
self.model.joint_q_start,
self.model.joint_qd_start,
self.state.joint_q,
self.model.joint_X_pj,
self.model.joint_X_cm,
self.model.joint_axis
],
[ # outputs
self.state.body_X_sc,
self.state.body_X_sm
],
adapter='cpu',
preserve_output=True)
p = self.state.body_X_sm[3][0:3]
err = torch.norm(p - self.target)
# try:
# art_start = self.art.articulation_start.clone()
# art_end = self.art.articulation_end.clone()
# joint_type = self.art.joint_type.clone()
# joint_parent = self.art.joint_parent.clone()
# joint_q_start = self.art.joint_q_start.clone()
# joint_qd_start = self.art.joint_qd_start.clone()
# joint_q = self.art.joint_q.clone()
# joint_X_pj = self.art.joint_X_pj.clone()
# joint_X_cm = self.art.joint_X_cm.clone()
# joint_axis = self.art.joint_axis.clone()
# torch.autograd.gradcheck(df.EvalRigidFowardKinematicsFunc.apply, (
# art_start,
# art_end,
# joint_type,
# joint_parent,
# joint_q_start,
# joint_qd_start,
# joint_q,
# joint_X_pj,
# joint_X_cm,
# joint_axis,
# 'cpu'), eps=1e-3, atol=1e-3, raise_exception=True)
# except Exception as e:
# print("failed: " + str(e))
# render
with df.ScopedTimer("render", False):
if (self.stage):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
#self.stage.Save()
self.sim_time += self.sim_dt
return err
def run(self):
#with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [self.state.joint_q]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=0.2, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
                    if (self.stage):  # 'render' is local to closure(); guard on the stage instead
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
robot = Robot(adapter='cpu')
#robot.run()
mode = 'lbfgs'
robot.set_target((1.0, 2.0, 0.0), "target_1")
robot.train(mode)
robot.set_target((1.0, -2.0, 0.0), "target_2")
robot.train(mode)
robot.set_target((-1.0, -2.0, 0.0), "target_3")
robot.train(mode)
robot.set_target((-2.0, 2.0, 0.0), "target_4")
robot.train(mode)
#rigid.stage.Save()
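# Example (hypothetical): plain gradient descent towards a single IK target
#   robot = Robot(adapter='cpu')
#   robot.set_target((0.5, 1.5, 0.0), "target_gd")
#   robot.train('gd')   # updates state.joint_q directly using train_rate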
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_fem.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class FEM:
sim_duration = 5.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 16
train_rate = 0.01 #1.0/(sim_dt*sim_dt)
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 2.5
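    # the controller input is phase_count sinusoids offset by
    # phase_step = 2*pi/phase_count; with the defaults, 8 channels spaced
    # pi/4 apart and oscillating at phase_freq = 2.5 rad/s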
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
mesh = Usd.Stage.Open("assets/prop.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/mesh"))
points = geom.GetPointsAttr().Get()
tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
tri_indices = geom.GetFaceVertexIndicesAttr().Get()
tri_counts = geom.GetFaceVertexCountsAttr().Get()
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.0))
builder.add_soft_mesh(pos=(0.0, 2.0, 0.0),
rot=r,
scale=1.0,
vel=(1.5, 0.0, 0.0),
vertices=points,
indices=tet_indices,
density=1.0,
k_mu=1000.0,
k_lambda=1000.0,
k_damp=1.0)
#builder.add_soft_grid(pos=(0.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0), vel=(0.0, 0.0, 0.0), dim_x=1, dim_y=2, dim_z=1, cell_x=0.5, cell_y=0.5, cell_z=0.5, density=1.0)
# s = 2.0
# builder.add_particle((0.0, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((s, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, 0.5, s), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, s + 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_tetrahedron(1, 3, 0, 2)
self.model = builder.finalize(adapter)
#self.model.tet_kl = 1000.0
#self.model.tet_km = 1000.0
#self.model.tet_kd = 1.0
# disable triangle dynamics (just used for rendering)
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1.0
self.model.contact_kf = 10.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.05
self.model.ground = True
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.3
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/fem.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# build sinusoidal input phases
with df.ScopedTimer("inference", False):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
for p in range(self.phase_count):
phases[p] = math.sin(self.phase_freq * self.sim_time + p * self.phase_step)
# compute activations (rest angles)
self.model.tet_activations = self.network(phases) * self.activation_strength
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_qd, 0)
                #act_loss = torch.norm(self.model.tet_activations)*self.activation_penalty
loss = loss - com_loss[0] # - act_loss
return loss
def run(self, profile=False, render=True):
df.config.no_grad = True
with torch.no_grad():
with df.ScopedTimer("run"):
if profile:
cp = cProfile.Profile()
cp.clear()
cp.enable()
# run forward dynamics
if profile:
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
else:
l = self.loss(render)
if profile:
cp.disable()
cp.print_stats(sort='tottime')
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
                    with torch.no_grad():
                        # step every network parameter and clear its gradient
                        for param in self.network.parameters():
                            param -= self.train_rate * param.grad
                            param.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
                    if (self.stage):  # 'render' is local to closure(); guard on the stage instead
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
fem = FEM(adapter='cuda')
fem.run(profile=False, render=True)
#fem.train('lbfgs')
#fem.train('sgd')
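# Example (hypothetical): train the activation network instead of replaying
#   fem = FEM(adapter='cuda')
#   fem.train('adam')                # optimizes self.network parameters
#   fem.save('outputs/fem_net.pt')   # path is illustrative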
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_chain.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Chain:
sim_duration = 10.0 # seconds
sim_substeps = 2
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 20
train_rate = 0.1 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
builder = df.sim.ModelBuilder()
# anchor
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
for i in range(1, 10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0., 0.0), 1.0)
builder.add_spring(i - 1, i, 1.e+6, 0.0, 0)
self.model = builder.finalize(adapter)
self.model.ground = False
self.impulse = torch.tensor((0.0, 0.0, 0.0), requires_grad=True, device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/chain.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
#self.integrator = df.sim.SemiImplicitIntegrator()
self.integrator = df.sim.XPBDIntegrator()
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
self.state.particle_qd[1] = self.impulse
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
target = torch.tensor((0.0, 2.0, 0.0), device=self.model.adapter)
loss = torch.norm(self.state.particle_q[1] - target)
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
param = self.impulse
# Gradient Descent
if (mode == 'gd'):
for i in range(self.train_iters):
l = self.loss()
l.backward()
print(l)
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.zero_()
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([param], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")
def closure():
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
return l
for i in range(self.train_iters):
optimizer.step(closure)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)
for i in range(self.train_iters):
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
optimizer.step()
self.stage.Save()
#---------
chain = Chain(adapter='cpu')
#chain.train('lbfgs')
chain.run()
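# Example (hypothetical): fit the initial impulse so that particle 1 ends
# at the (0.0, 2.0, 0.0) target defined in loss()
#   chain = Chain(adapter='cpu')
#   chain.train(mode='gd')   # plain gradient descent on self.impulse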
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_cloth_collisions.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = True
class Cloth:
sim_duration = 10.0 # seconds
sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps  # 60 frames per second, 16 substeps per frame
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 100
train_rate = 0.01
phase_count = 4
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 4.0
builder = df.sim.ModelBuilder()
# builder.add_particle(pos=(2.5, 3.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
# builder.add_particle(pos=(2.5, 4.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
# builder.add_particle(pos=(2.5, 5.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi / 2),
vel=(0, 5.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
usd = Usd.Stage.Open("assets/box.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Cube/Cube"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
mesh = df.sim.Mesh(points, indices)
rigid = builder.add_rigid_body(pos=(2.5, 3.0, 2.5),
rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0),
vel=(0.0, 0.0, 0.0),
omega=(0.0, 0.0, 0.0))
shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(0.5, 0.5, 0.5), density=100.0, ke=1.e+5, kd=1000.0, kf=1000.0, mu=0.5)
# rigid = builder.add_rigid_body(pos=(2.5, 5.0, 2.5), rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi*0.0), vel=(0.0, 0.0, 0.0), omega=(0.0, 0.0, 0.0))
# shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(0.5, 0.5, 0.5), density=100.0, ke=1.e+5, kd=1000.0, kf=1000.0, mu=0.5)
# attach0 = 1
# attach1 = 21
# attach2 = 423
# attach3 = 443
# anchor0 = builder.add_particle(pos=builder.particle_x[attach0]-(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor1 = builder.add_particle(pos=builder.particle_x[attach1]+(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor2 = builder.add_particle(pos=builder.particle_x[attach2]-(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor3 = builder.add_particle(pos=builder.particle_x[attach3]+(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# builder.add_spring(anchor0, attach0, 500.0, 1000.0, 0)
# builder.add_spring(anchor1, attach1, 10000.0, 1000.0, 0)
# builder.add_spring(anchor2, attach2, 10000.0, 1000.0, 0)
# builder.add_spring(anchor3, attach3, 10000.0, 1000.0, 0)
self.model = builder.finalize(adapter)
# self.model.tri_ke = 10000.0
# self.model.tri_ka = 10000.0
# self.model.tri_kd = 100.0
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 50.0
self.model.tri_drag = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.1
self.model.ground = True
self.model.tri_collisions = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth_collision.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_pos[1]
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
        def closure():
            # L-BFGS may call the closure several times per step, so clear
            # gradients here rather than only once outside
            if (optimizer):
                optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward", False):
l = self.loss(render)
with df.ScopedTimer("backward", False):
l.backward()
with df.ScopedTimer("save", False):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
param = self.model.spring_rest_length
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.data.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
lr=0.01,
tolerance_grad=1.e-5,
tolerance_change=0.01,
line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.zero_grad()
optimizer.step(closure)
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cuda')
cloth.run()
# cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
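# Example (hypothetical): a coarser cloth on CPU for quick smoke tests
#   cloth = Cloth(dim=10, adapter='cpu')
#   cloth.run()   # writes outputs/cloth_collision.usd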
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_cloth_sphere_collisions.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = True
class Cloth:
sim_duration = 10.0 # seconds
sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps  # 60 frames per second, 16 substeps per frame
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 100
train_rate = 0.01
phase_count = 4
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 4.0
builder = df.sim.ModelBuilder()
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi / 2),
vel=(0, 5.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
usd = Usd.Stage.Open("assets/sphere.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Sphere/Sphere"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
mesh = df.sim.Mesh(points, indices)
rigid = builder.add_rigid_body(pos=(2.5, 1.0, 2.5),
rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0),
vel=(0.0, 0.0, 0.0),
omega=(0.0, 0.0, 0.0))
shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(1 / 100, 1 / 100, 1 / 100), density=0.0, ke=1e3, kd=1e3, kf=1e3, mu=1.0)
self.model = builder.finalize(adapter)
# self.model.tri_ke = 10000.0
# self.model.tri_ka = 10000.0
# self.model.tri_kd = 100.0
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 50.0
self.model.tri_drag = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.1
self.model.ground = False
self.model.tri_collisions = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth_sphere.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_pos[1]
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
        def closure():
            # L-BFGS may call the closure several times per step, so clear
            # gradients here rather than only once outside
            if (optimizer):
                optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward", False):
l = self.loss(render)
with df.ScopedTimer("backward", False):
l.backward()
with df.ScopedTimer("save", False):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
param = self.model.spring_rest_length
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.data.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
lr=0.01,
tolerance_grad=1.e-5,
tolerance_change=0.01,
line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.zero_grad()
optimizer.step(closure)
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cuda')
cloth.run()
# cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
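# Example (hypothetical): optimize spring rest lengths to pull the cloth down
#   cloth = Cloth(adapter='cuda')
#   cloth.train('adam')   # updates self.model.spring_rest_length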
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_beam.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Beam:
sim_duration = 3.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 1.0
def __init__(self, device='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
builder.add_soft_grid(pos=(0.0, 0.0, 0.0),
rot=df.quat_identity(),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=2,
dim_z=2,
cell_x=0.1,
cell_y=0.1,
cell_z=0.1,
density=10.0,
k_mu=1000.0,
k_lambda=1000.0,
k_damp=5.0,
fix_left=True,
fix_right=True)
self.model = builder.finalize(device)
# disable triangle dynamics (just used for rendering)
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
self.model.particle_radius = 0.05
self.model.ground = False
self.target = torch.tensor((-0.5)).to(device)
self.material = torch.tensor((100.0, 50.0, 5.0), requires_grad=True, device=device)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/beam.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# clamp material params to reasonable range
mat_min = torch.tensor((1.e+1, 1.e+1, 5.0), device=self.model.adapter)
mat_max = torch.tensor((1.e+5, 1.e+5, 5.0), device=self.model.adapter)
mat_val = torch.max(torch.min(mat_max, self.material), mat_min)
# broadcast stiffness params to all tets
self.model.tet_materials = mat_val.expand((self.model.tet_count, 3)).contiguous()
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_q, 0)
# minimize y
loss = loss - torch.norm(com_loss[1] - self.target)
return loss
def run(self):
with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [
self.material,
]
def closure():
if optimizer:
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward"):
l = self.loss(render)
with df.ScopedTimer("backward"):
l.backward()
print(self.material)
print(self.material.grad)
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
                        for param in params:
                            param -= self.train_rate * param.grad
                            param.grad.zero_()  # avoid accumulating gradients across iterations
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.5, nesterov=True)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
                    if (self.stage):  # 'render' is local to closure(); guard on the stage instead
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
beam = Beam(device='cpu')
#beam.run()
#beam.train('lbfgs')
beam.train('gd')
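# Example (hypothetical): inspect the fitted material parameters afterwards;
# the three entries are presumably (k_mu, k_lambda, k_damp), initialised
# to (100.0, 50.0, 5.0) above
#   print(beam.material)
#   print(beam.material.grad)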
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_rigid_bounce.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class RigidBounce:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 16
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 0.01
ground = True
name = "rigid_bounce"
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
builder.add_articulation()
# add sphere
link = builder.add_link(-1, df.transform((0.0, 0.0, 0.0), df.quat_identity()), (0,0,0), df.JOINT_FREE)
shape = builder.add_shape_sphere(
link,
(0.0, 0.0, 0.0),
df.quat_identity(),
radius=0.1,
ke=1.e+4,
kd=10.0,
kf=1.e+2,
mu=0.25)
builder.joint_q[1] = 1.0
#v_s = df.get_body_twist((0.0, 0.0, 0.0), (1.0, -1.0, 0.0), builder.joint_q[0:3])
w_m = (0.0, 0.0, 3.0) # angular velocity (expressed in world space)
v_m = (0.0, 0.0, 0.0) # linear velocity at center of mass (expressed in world space)
p_m = builder.joint_q[0:3] # position of the center of mass (expressed in world space)
# set body0 twist
builder.joint_qd[0:6] = df.get_body_twist(w_m, v_m, p_m)
# get decomposed velocities
print(df.get_body_angular_velocity(builder.joint_qd[0:6]))
print(df.get_body_linear_velocity(builder.joint_qd[0:6], p_m))
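        # consistency check: the two decomposition calls above should recover
        # the world-space inputs, i.e. angular velocity (0.0, 0.0, 3.0) and
        # zero linear velocity at the center of mass p_m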
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
# initial velocity
#self.model.joint_qd[3] = 0.5
#self.model.joint_qd[4] = -0.5
#self.model.joint_qd[2] = 1.0
self.model.joint_qd.requires_grad_()
self.target = torch.tensor((1.0, 1.0, 0.0), dtype=torch.float32, device=adapter)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for f in range(0, self.episode_frames):
# df.config.no_grad = True
#df.config.verify_fp = True
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
for i in range(0, self.sim_substeps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
                if (self.render and render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
try:
self.stage.Save()
except:
print("USD save error")
#loss = loss + torch.dot(self.state.joint_qd[3:6], self.state.joint_qd[3:6])*self.balance_penalty*discount
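        # the reassignment below means the returned loss is simply the
        # distance from the last simulated position to the target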
pos = self.state.joint_q[0:3]
loss = torch.norm(pos-self.target)
return loss
def run(self):
df.config.no_grad = True
#with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.model.joint_qd
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.model.joint_qd.grad.tolist()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.model.joint_qd]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
                    if (self.render):  # 'render' is local to closure(); use the instance flag here
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.model.joint_qd, "outputs/" + self.name + ".pt")
def load(self):
self.model.joint_qd = torch.load("outputs/" + self.name + ".pt")
#---------
robot = RigidBounce(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.check_grad = True
#df.config.no_grad = True
robot.run()
#df.config.verify_fp = True
#robot.load()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e-3)
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_rigid_slide.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
from pxr import Usd, UsdGeom, Gf
class RigidSlide:
sim_duration = 3.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 0.1
discount_scale = 1.0
discount_factor = 0.5
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
# load mesh
usd = Usd.Stage.Open("assets/suzanne.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Suzanne/Suzanne"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
builder = df.sim.ModelBuilder()
mesh = df.sim.Mesh(points, indices)
articulation = builder.add_articulation()
rigid = builder.add_link(
parent=-1,
X_pj=df.transform((0.0, 0.0, 0.0), df.quat_identity()),
axis=(0.0, 0.0, 0.0),
type=df.JOINT_FREE)
ke = 1.e+4
kd = 1.e+3
kf = 1.e+3
mu = 0.5
# shape = builder.add_shape_mesh(
# rigid,
# mesh=mesh,
# scale=(0.2, 0.2, 0.2),
# density=1000.0,
# ke=1.e+4,
# kd=1000.0,
# kf=1000.0,
# mu=0.75)
radius = 0.1
#shape = builder.add_shape_sphere(rigid, pos=(0.0, 0.0, 0.0), ke=ke, kd=kd, kf=kf, mu=mu, radius=radius)
#shape = builder.add_shape_capsule(rigid, pos=(0.0, 0.0, 0.0), radius=radius, half_width=0.5)
shape = builder.add_shape_box(rigid, pos=(0.0, 0.0, 0.0), hx=radius, hy=radius, hz=radius, ke=ke, kd=kd, kf=kf, mu=mu)
builder.joint_q[1] = radius
self.model = builder.finalize(adapter)
self.model.joint_qd.requires_grad = True
self.vel = torch.tensor((1.0, 0.0, 0.0), dtype=torch.float32, device=adapter, requires_grad=True)
self.target = torch.tensor((3.0, 0.2, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/rigid_slide.usda")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#---------------
# run simulation
# construct contacts once at startup
self.model.joint_qd = torch.cat((torch.tensor((0.0, 0.0, 0.0), dtype=torch.float32, device=self.model.adapter), self.vel))
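        # only the linear part of the 6-D spatial velocity is optimized; the
        # first three (angular) components are pinned to zero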
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
#com = self.state.joint_q[0:3]
com = self.state.body_X_sm[0, 0:3]
loss = loss + torch.norm(com - self.target)
return loss
def run(self):
#with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [self.vel]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
                    if (self.stage):  # 'render' is local to closure(); guard on the stage instead
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
rigid = RigidSlide(adapter='cpu')
#rigid.run()
rigid.train('adam')
# ----------------------------------------------------------------------
# NVlabs/DiffRL/dflex/tests/test_snu_mlp.py
# ----------------------------------------------------------------------
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class MultiLayerPerceptron(nn.Module):
def __init__(self, n_in, n_out, n_hd, adapter, inference=False):
super(MultiLayerPerceptron,self).__init__()
self.n_in = n_in
self.n_out = n_out
self.n_hd = n_hd
#self.ll = nn.Linear(n_in, n_out)
self.fc1 = nn.Linear(n_in, n_hd).to(adapter)
self.fc2 = nn.Linear(n_hd, n_hd).to(adapter)
self.fc3 = nn.Linear(n_hd, n_out).to(adapter)
self.bn1 = nn.LayerNorm(n_in, elementwise_affine=False).to(adapter)
self.bn2 = nn.LayerNorm(n_hd, elementwise_affine=False).to(adapter)
self.bn3 = nn.LayerNorm(n_out, elementwise_affine=False).to(adapter)
def forward(self, x: torch.Tensor):
x = F.leaky_relu(self.bn2(self.fc1(x)))
x = F.leaky_relu(self.bn2(self.fc2(x)))
x = torch.tanh(self.bn3(self.fc3(x))-2.0)
return x
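    # note: the -2.0 shift biases torch.tanh towards -1 at initialisation,
    # which maps to near-zero muscle activation after the (x*0.5 + 0.5)
    # remap applied in HumanoidSNU.loss()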
class HumanoidSNU:
train_iters = 100000000
train_rate = 0.001
train_size = 128
train_batch_size = 4
train_batch_iters = 128
train_batch_count = int(train_size/train_batch_size)
train_data = None
ground = True
name = "humanoid_snu_lower"
regularization = 1.e-3
inference = False
initial_y = 1.0
def __init__(self, depth=1, mode='numpy', render=True, sim_duration=1.0, adapter='cpu', inference=False):
self.sim_duration = sim_duration # seconds
self.sim_substeps = 16
self.sim_dt = (1.0 / 60.0) / self.sim_substeps
self.sim_steps = int(self.sim_duration / self.sim_dt)
self.sim_time = 0.0
torch.manual_seed(41)
np.random.seed(41)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
self.filter = {}
if self.name == "humanoid_snu_arm":
self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
self.ground = False
if self.name == "humanoid_snu_neck":
self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL" }
self.ground = False
if self.name == "humanoid_snu_lower":
self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}
self.ground = True
self.initial_y = 1.0
if self.name == "humanoid_snu":
self.filter = {}
self.ground = True
self.skeletons = []
self.inference = inference
# if (self.inference):
# self.train_batch_size = 1
for i in range(self.train_batch_size):
skeleton = test_util.Skeleton("assets/snu/arm.xml", "assets/snu/muscle284.xml", builder, self.filter)
# set initial position 1m off the ground
builder.joint_q[skeleton.coord_start + 0] = i*1.5
builder.joint_q[skeleton.coord_start + 1] = self.initial_y
# offset on z-axis
#builder.joint_q[skeleton.coord_start + 2] = 10.0
            # initial velocity
#builder.joint_qd[skeleton.dof_start + 5] = 3.0
self.skeletons.append(skeleton)
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
#self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
#self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
self.network = MultiLayerPerceptron(3, len(self.skeletons[0].muscles), 128, adapter)
self.model.joint_q.requires_grad = True
self.model.joint_qd.requires_grad = True
self.model.muscle_activation.requires_grad = True
self.target_penalty = 1.0
self.velocity_penalty = 0.1
self.action_penalty = 0.0
self.muscle_strength = 40.0
self.discount_scale = 2.0
self.discount_factor = 1.0
# generate training data
targets = []
for i in range(self.train_size):
            # generate a random target point with x, y in [-1, 1], offset upward and fixed at z = 1.0 (in front of the head)
t = np.random.rand(2)*2.0 - 1.0
t[1] += 0.5
targets.append((t[0], t[1] + 0.5, 1.0))
self.train_data = torch.tensor(targets, dtype=torch.float32, device=self.adapter)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
else:
self.renderer = None
self.set_target(torch.tensor((0.75, 0.4, 0.5), dtype=torch.float32, device=self.adapter), "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = x
if (self.renderer):
self.renderer.add_sphere(self.target.tolist(), 0.05, name, self.render_time)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
# apply actions
#self.model.muscle_activation = self.activations[0]*self.muscle_strength
# compute activations for each target in the batch
targets = self.train_data[0:self.train_batch_size]
activations = torch.flatten(self.network(targets))
self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength
# one time collision
self.model.collide(self.state)
for i in range(self.sim_steps):
# apply random actions per-frame
#self.model.muscle_activation = (activations*0.5 + 0.5 + torch.rand_like(activations,dtype=torch.float32, device=self.model.adapter))*self.muscle_strength
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
#if self.inference:
#x = math.cos(self.sim_time*0.5)*0.5
#y = math.sin(self.sim_time*0.5)*0.5
# t = self.sim_time*0.5
# x = math.sin(t)*0.5
# y = math.sin(t)*math.cos(t)*0.5
# self.set_target(torch.tensor((x, y + 0.5, 1.0), dtype=torch.float32, device=self.adapter), "target")
# activations = self.network(self.target)
# self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
muscle_start = 0
skel_index = 0
for s in self.skeletons:
for mesh, link in s.mesh_map.items():
if link != -1:
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
#self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
                            for m in range(len(s.muscles)):  # per-skeleton muscle count
start = self.model.muscle_start[muscle_start + m].item()
end = self.model.muscle_start[muscle_start + m + 1].item()
points = []
for w in range(start, end):
link = self.model.muscle_links[w].item()
point = self.model.muscle_points[w].cpu().numpy()
X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)
muscle_start += len(s.muscles)
skel_index += 1
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# loss
if self.name == "humanoid_snu_arm":
hand_pos = self.state.body_X_sc[self.node_map["HandR"]][0:3]
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
# loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
# torch.norm(self.state.joint_qd)*self.velocity_penalty +
# torch.norm(self.model.muscle_activation)*self.action_penalty)*discount
#loss = loss + torch.norm(self.state.joint_qd)
loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty
if self.name == "humanoid_snu_neck":
            # rotate a vector by the quaternion part (elements 3:7) of a spatial transform
            def transform_vector_torch(t, x):
axis = t[3:6]
w = t[6]
return x * (2.0 *w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0
forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)
for i in range(self.train_batch_size):
skel = self.skeletons[i]
head_pos = self.state.body_X_sc[skel.node_map["Head"]][0:3]
head_forward = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], forward_dir)
head_up = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], up_dir)
target_dir = self.train_data[i] - head_pos
loss_forward = torch.dot(head_forward, target_dir)*self.target_penalty
loss_up = torch.dot(head_up, up_dir)*self.target_penalty*0.5
loss_penalty = torch.dot(activations, activations)*self.action_penalty
loss = loss - loss_forward - loss_up + loss_penalty
#self.writer.add_scalar("loss_forward", loss_forward.item(), self.step_count)
#self.writer.add_scalar("loss_up", loss_up.item(), self.step_count)
#self.writer.add_scalar("loss_penalty", loss_penalty.item(), self.step_count)
return loss
def run(self):
df.config.no_grad = True
self.inference = True
with torch.no_grad():
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
        params = self.actions  # note: HumanoidSNU defines no `self.actions`; this check is carried over from the action-space tests and is unused here
        n = 1  # check only the first parameter (a full check would use len(params))
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
self.writer = SummaryWriter()
self.writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})
# param to train
self.step_count = 0
self.best_loss = math.inf
optimizer = None
scheduler = None
        params = list(self.network.parameters())  # materialize the generator so it can be indexed below; previously: [self.activations]
def closure():
batch = int(self.step_count/self.train_batch_iters)%self.train_batch_count
print("Batch: " + str(batch) + " Iter: " + str(self.step_count%self.train_batch_iters))
if (optimizer):
optimizer.zero_grad()
# compute loss on all examples
with df.ScopedTimer("forward"):#, detailed=True):
l = self.loss()
# compute gradient
with df.ScopedTimer("backward"):#, detailed=True):
l.backward()
# batch stats
self.writer.add_scalar("loss_batch", l.item(), self.step_count)
self.writer.flush()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
self.stage.Save()
                except Exception:
print("USD save error")
# save network
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
                    with torch.no_grad():
                        for p in params:
                            if p.grad is not None:
                                p -= self.train_rate * p.grad
                                p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)#, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
last_LR = 1e-5
init_LR = 1e-3
decay_LR_steps = 2000
gamma = math.exp(math.log(last_LR/init_LR)/decay_LR_steps)
optimizer = torch.optim.Adam(params, lr=self.train_rate, weight_decay=1e-5)
#scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = gamma)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
if optimizer:
optimizer.step(closure)
if scheduler:
scheduler.step()
# final save
try:
self.stage.Save()
        except Exception:
print("USD save error")
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
def load(self, suffix=""):
self.network = torch.load("outputs/" + self.name + suffix + ".pt")
if self.inference:
self.network.eval()
else:
self.network.train()
#---------
#env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda')
#env.train(mode='adam')
env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda', inference=True)
#env.load()
env.run()
| 17,357 | Python | 32.445087 | 235 | 0.526358 |
NVlabs/DiffRL/dflex/tests/test_allegro.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 64
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = False
name = "allegro"
regularization = 1.e-3
env_count = 1
env_dofs = 2
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# allegro
for i in range(self.env_count):
test_util.urdf_load(
builder,
#"assets/franka_description/robots/franka_panda.urdf",
"assets/allegro_hand_description/allegro_hand_description_right.urdf",
df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi*0.5)),
floating=False,
limit_ke=0.0,#1.e+3,
limit_kd=0.0)#1.e+2)
# set fingers to mid-range of their limits
for i in range(len(builder.joint_q_start)):
if (builder.joint_type[i] == df.JOINT_REVOLUTE):
dof = builder.joint_q_start[i]
mid = (builder.joint_limit_lower[dof] + builder.joint_limit_upper[dof])*0.5
builder.joint_q[dof] = mid
builder.joint_target[dof] = mid
builder.joint_target_kd[i] = 0.02
builder.joint_target_ke[i] = 1.0
solid = False
# create FEM block
if (solid):
builder.add_soft_grid(
pos=(-0.05, 0.2, 0.0),
rot=(0.0, 0.0, 0.0, 1.0),
vel=(0.0, 0.0, 0.0),
dim_x=10,
dim_y=5,
dim_z=5,
cell_x=0.01,
cell_y=0.01,
cell_z=0.01,
density=1000.0,
k_mu=500.0,
k_lambda=1000.0,
k_damp=1.0)
else:
builder.add_cloth_grid(
pos=(-0.1, 0.2, -0.1),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi*0.5),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=20,
cell_x=0.01,
cell_y=0.01,
mass=0.0125)
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.contact_ke = 1.e+3
self.model.contact_kd = 2.0
self.model.contact_kf = 0.1
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
if (solid):
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
else:
self.model.tri_ke = 100.0
self.model.tri_ka = 100.0
self.model.tri_kd = 1.0
self.model.tri_kb = 0.0
self.model.edge_ke = 0.01
self.model.edge_kd = 0.001
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
return loss
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
        n = 1  # check only the first entry (a full check would use len(params))
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
                except Exception:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
        try:
            if (self.render):
                self.stage.Save()
        except Exception:
            print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
robot.run()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 10,608 | Python | 27.986339 | 165 | 0.488028 |
NVlabs/DiffRL/dflex/tests/test_adjoint.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
| 626 | Python | 25.124999 | 82 | 0.785942 |
NVlabs/DiffRL/dflex/tests/assets/humanoid.xml | <!-- ======================================================
This file is part of MuJoCo.
Copyright 2009-2015 Roboti LLC.
Model :: Humanoid
Mujoco :: Advanced physics simulation engine
Source : www.roboti.us
Version : 1.31
Released : 23Apr16
Author :: Vikash Kumar
Contacts : [email protected]
Last edits : 30Apr'16, 30Nov'15, 26Sept'15
====================================================== -->
<mujoco model='humanoid (v1.31)'>
<compiler inertiafromgeom='true' angle='degree'/>
<default>
<joint limited='true' damping='1' armature='0' />
<geom contype='1' conaffinity='1' condim='1' rgba='0.8 0.6 .4 1'
margin="0.001" solref=".02 1" solimp=".8 .8 .01" material="geom"/>
<motor ctrlrange='-.4 .4' ctrllimited='true'/>
</default>
<option timestep='0.002' iterations="50" solver="PGS">
<flag energy="enable"/>
</option>
<size nkey='5'/>
<visual>
<map fogstart="3" fogend="5" force="0.1"/>
<quality shadowsize="2048"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" width="100" height="100" rgb1=".4 .6 .8"
rgb2="0 0 0"/>
<texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278"
rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
<texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2"
width="100" height="100"/>
<material name='MatPlane' reflectance='0.5' texture="texplane" texrepeat="1 1" texuniform="true"/>
<material name='geom' texture="texgeom" texuniform="true"/>
</asset>
<worldbody>
<geom name='floor' pos='0 0 0' size='10 10 0.125' type='plane' material="MatPlane" condim='3'/>
<body name='torso' pos='0 0 1.4'>
<light mode='trackcom' directional='false' diffuse='.8 .8 .8' specular='0.3 0.3 0.3' pos='0 0 4.0' dir='0 0 -1'/>
<joint name='root' type='free' pos='0 0 0' limited='false' damping='0' armature='0' stiffness='0'/>
<geom name='torso1' type='capsule' fromto='0 -.07 0 0 .07 0' size='0.07' />
<geom name='head' type='sphere' pos='0 0 .19' size='.09'/>
<geom name='uwaist' type='capsule' fromto='-.01 -.06 -.12 -.01 .06 -.12' size='0.06'/>
<body name='lwaist' pos='-.01 0 -0.260' quat='1.000 0 -0.002 0' >
<geom name='lwaist' type='capsule' fromto='0 -.06 0 0 .06 0' size='0.06' />
<joint name='abdomen_z' type='hinge' pos='0 0 0.065' axis='0 0 1' range='-45 45' damping='5' stiffness='20' armature='0.02' />
<joint name='abdomen_y' type='hinge' pos='0 0 0.065' axis='0 1 0' range='-75 30' damping='5' stiffness='10' armature='0.02' />
<body name='pelvis' pos='0 0 -0.165' quat='1.000 0 -0.002 0' >
<joint name='abdomen_x' type='hinge' pos='0 0 0.1' axis='1 0 0' range='-35 35' damping='5' stiffness='10' armature='0.02' />
<geom name='butt' type='capsule' fromto='-.02 -.07 0 -.02 .07 0' size='0.09' />
<body name='right_thigh' pos='0 -0.1 -0.04' >
<joint name='right_hip_x' type='hinge' pos='0 0 0' axis='1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_z' type='hinge' pos='0 0 0' axis='0 0 1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='right_thigh1' type='capsule' fromto='0 0 0 0 0.01 -.34' size='0.06' />
<body name='right_shin' pos='0 0.01 -0.403' >
<joint name='right_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='right_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='right_foot' pos='0 0 -.39' >
<joint name='right_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='right_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='right_foot_cap1' type='capsule' fromto='-.07 -0.02 0 0.14 -0.04 0' size='0.027' />
<geom name='right_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 0.02 0' size='0.027' />
</body>
</body>
</body>
<body name='left_thigh' pos='0 0.1 -0.04' >
<joint name='left_hip_x' type='hinge' pos='0 0 0' axis='-1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_z' type='hinge' pos='0 0 0' axis='0 0 -1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='left_thigh1' type='capsule' fromto='0 0 0 0 -0.01 -.34' size='0.06' />
<body name='left_shin' pos='0 -0.01 -0.403' >
<joint name='left_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='left_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='left_foot' pos='0 0 -.39' >
<joint name='left_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='left_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='left_foot_cap1' type='capsule' fromto='-.07 0.02 0 0.14 0.04 0' size='0.027' />
<geom name='left_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 -0.02 0' size='0.027' />
</body>
</body>
</body>
</body>
</body>
<body name='right_upper_arm' pos='0 -0.17 0.06' >
<joint name='right_shoulder1' type='hinge' pos='0 0 0' axis='2 1 1' range='-85 60' stiffness='1' armature='0.0068' />
<joint name='right_shoulder2' type='hinge' pos='0 0 0' axis='0 -1 1' range='-85 60' stiffness='1' armature='0.0051' />
<geom name='right_uarm1' type='capsule' fromto='0 0 0 .16 -.16 -.16' size='0.04 0.16' />
<body name='right_lower_arm' pos='.18 -.18 -.18' >
<joint name='right_elbow' type='hinge' pos='0 0 0' axis='0 -1 1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='right_larm' type='capsule' fromto='0.01 0.01 0.01 .17 .17 .17' size='0.031' />
<geom name='right_hand' type='sphere' pos='.18 .18 .18' size='0.04'/>
</body>
</body>
<body name='left_upper_arm' pos='0 0.17 0.06' >
<joint name='left_shoulder1' type='hinge' pos='0 0 0' axis='2 -1 1' range='-60 85' stiffness='1' armature='0.0068' />
<joint name='left_shoulder2' type='hinge' pos='0 0 0' axis='0 1 1' range='-60 85' stiffness='1' armature='0.0051' />
<geom name='left_uarm1' type='capsule' fromto='0 0 0 .16 .16 -.16' size='0.04 0.16' />
<body name='left_lower_arm' pos='.18 .18 -.18' >
<joint name='left_elbow' type='hinge' pos='0 0 0' axis='0 -1 -1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='left_larm' type='capsule' fromto='0.01 -0.01 0.01 .17 -.17 .17' size='0.031' />
<geom name='left_hand' type='sphere' pos='.18 -.18 .18' size='0.04'/>
</body>
</body>
</body>
</worldbody>
<tendon>
<fixed name='left_hipknee'>
<joint joint='left_hip_y' coef='-1'/>
<joint joint='left_knee' coef='1'/>
</fixed>
<fixed name='right_hipknee'>
<joint joint='right_hip_y' coef='-1'/>
<joint joint='right_knee' coef='1'/>
</fixed>
</tendon>
<keyframe>
<key qpos='-0.0233227 0.00247283 0.0784829 0.728141 0.00223397 -0.685422 -0.00181805 -0.000580139 -0.245119 0.0329713 -0.0461148 0.0354257 0.252234 -0.0347763 -0.4663 -0.0313013 0.0285638 0.0147285 0.264063 -0.0346441 -0.559198 0.021724 -0.0333332 -0.718563 0.872778 0.000260393 0.733088 0.872748' />
<key qpos='0.0168601 -0.00192002 0.127167 0.762693 0.00191588 0.646754 -0.00210291 -0.000199049 0.0573113 -4.05731e-005 0.0134177 -0.00468944 0.0985945 -0.282695 -0.0469067 0.00874203 0.0263262 -0.00295056 0.0984851 -0.282098 -0.044293 0.00475795 0.127371 -0.42895 0.882402 -0.0980573 0.428506 0.88193' />
<key qpos='0.000471586 0.0317577 0.210587 0.758805 -0.583984 0.254155 0.136322 -0.0811633 0.0870309 -0.0935227 0.0904958 -0.0278004 -0.00978614 -0.359193 0.139761 -0.240168 0.060149 0.237062 -0.00622109 -0.252598 -0.00376874 -0.160597 0.25253 -0.278634 0.834376 -0.990444 -0.169065 0.652876' />
<key qpos='-0.0602175 0.048078 0.194579 -0.377418 -0.119412 -0.675073 -0.622553 0.139093 0.0710746 -0.0506027 0.0863461 0.196165 -0.0276685 -0.521954 -0.267784 0.179051 0.0371897 0.0560134 -0.032595 -0.0480022 0.0357436 0.108502 0.963806 0.157805 0.873092 -1.01145 -0.796409 0.24736' />
</keyframe>
<actuator>
<motor name='abdomen_y' gear='200' joint='abdomen_y' />
<motor name='abdomen_z' gear='200' joint='abdomen_z' />
<motor name='abdomen_x' gear='200' joint='abdomen_x' />
<motor name='right_hip_x' gear='200' joint='right_hip_x' />
<motor name='right_hip_z' gear='200' joint='right_hip_z' />
<motor name='right_hip_y' gear='600' joint='right_hip_y' />
<motor name='right_knee' gear='400' joint='right_knee' />
<motor name='right_ankle_x' gear='100' joint='right_ankle_x' />
<motor name='right_ankle_y' gear='100' joint='right_ankle_y' />
<motor name='left_hip_x' gear='200' joint='left_hip_x' />
<motor name='left_hip_z' gear='200' joint='left_hip_z' />
<motor name='left_hip_y' gear='600' joint='left_hip_y' />
<motor name='left_knee' gear='400' joint='left_knee' />
<motor name='left_ankle_x' gear='100' joint='left_ankle_x' />
<motor name='left_ankle_y' gear='100' joint='left_ankle_y' />
<motor name='right_shoulder1' gear='100' joint='right_shoulder1' />
<motor name='right_shoulder2' gear='100' joint='right_shoulder2' />
<motor name='right_elbow' gear='200' joint='right_elbow' />
<motor name='left_shoulder1' gear='100' joint='left_shoulder1' />
<motor name='left_shoulder2' gear='100' joint='left_shoulder2' />
<motor name='left_elbow' gear='200' joint='left_elbow' />
</actuator>
</mujoco>
| 11,517 | XML | 68.385542 | 314 | 0.528784 |
NVlabs/DiffRL/dflex/tests/assets/ant.xml | <mujoco model="ant">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.001" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<worldbody>
<body name="torso" pos="0 0 0.75">
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="150"/>
</actuator>
</mujoco> | 4,043 | XML | 61.215384 | 125 | 0.550829 |
NVlabs/DiffRL/dflex/docs/index.rst | Welcome to dFlex's documentation!
==================================
dFlex is a differentiable multiphysics engine for PyTorch. It is written entirely in Python and supports reverse-mode differentiation with respect to any simulation input.
It includes a USD-based visualization module (:class:`dflex.render`), which can generate time-sampled USD files or update an existing stage on the fly.
Prerequisites
-------------
* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)
Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once downloaded, follow the included instructions to add them to your PYTHONPATH environment variable.
.. toctree::
:maxdepth: 3
:caption: Contents:
modules/model
modules/sim
modules/render
Quick Start
-----------------
First ensure that the package is installed in your local Python environment (use the -e option if you will be doing development):
.. code-block::
pip install -e dflex
Then, to use the engine you can import the simulation module as follows:
.. code-block::
import dflex
To build physical models there is a helper class available in :class:`dflex.model.ModelBuilder`. This can be used to create models programmatically from Python. For example, to create a chain of particles:
.. code-block::
builder = dflex.model.ModelBuilder()
# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# build chain
for i in range(1,10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
# add ground plane
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)
Once you have built your model you must convert it to a finalized PyTorch simulation data structure using :func:`dflex.model.ModelBuilder.finalize()`:
.. code-block::
model = builder.finalize('cpu')
The model object represents static (non-time varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.
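Because both the model and the state are stored in PyTorch tensors, gradients can flow through an entire rollout with ordinary autograd. The following is a minimal sketch, assuming the particle chain built above; the ``particle_q`` attribute name and the toy loss are illustrative assumptions:
.. code-block::
    # assumed attribute name: initial particle positions stored on the model
    model.particle_q.requires_grad_()
    state = model.state()
    integrator = dflex.sim.SemiImplicitIntegrator()
    for i in range(60):
        state = integrator.forward(model, state, 1.0/60.0)
    # toy scalar objective on the final particle positions (illustrative)
    loss = state.particle_q.sum()
    loss.backward()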
Time Stepping
-------------
To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers a semi-implicit integrator (a fully implicit one is planned) via the :class:`dflex.sim.SemiImplicitIntegrator` class, used as follows:
.. code-block::
    sim_dt = 1.0/60.0
    sim_steps = 100
    integrator = dflex.sim.SemiImplicitIntegrator()
    state = model.state()
    for i in range(0, sim_steps):
        state = integrator.forward(model, state, sim_dt)
Rendering
---------
To visualize the scene dFlex supports USD-based rendering via the :class:`dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage and the physical model.
.. code-block::
import dflex.render
stage = Usd.Stage.CreateNew("test.usda")
renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True
Each frame the renderer should be updated with the current model state and the current elapsed simulation time:
.. code-block::
renderer.update(state, sim_time)
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 3,311 | reStructuredText | 27.8 | 228 | 0.700393 |
NVlabs/DiffRL/dflex/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../dflex'))
# -- Project information -----------------------------------------------------
project = 'dFlex'
copyright = '2020, NVIDIA'
author = 'NVIDIA'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
# 'sphinx.ext.autosummary',
'sphinx.ext.todo',
'autodocsumm'
]
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = 'description'
# document class *and* __init__ methods
autoclass_content = 'both'
todo_include_todos = True
intersphinx_mapping = {
'python': ("https://docs.python.org/3", None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'PyTorch': ('http://pytorch.org/docs/master/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| 2,515 | Python | 32.105263 | 81 | 0.659245 |
NVlabs/DiffRL/dflex/docs/modules/sim.rst | dflex.sim
===========
.. currentmodule:: dflex.sim
.. toctree::
:maxdepth: 2
.. automodule:: dflex.sim
:members:
:undoc-members:
:show-inheritance:
| 171 | reStructuredText | 12.230768 | 28 | 0.567251 |
NVlabs/DiffRL/dflex/docs/modules/model.rst | dflex.model
===========
.. currentmodule:: dflex.model
.. toctree::
:maxdepth: 2
model.modelbuilder
model.model
model.state
| 151 | reStructuredText | 10.692307 | 30 | 0.569536 |
NVlabs/DiffRL/dflex/docs/modules/model.model.rst | dflex.model.Model
========================
.. autoclasssumm:: dflex.model.Model
.. autoclass:: dflex.model.Model
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
NVlabs/DiffRL/dflex/docs/modules/render.rst | dflex.render
============
.. currentmodule:: dflex.render
.. toctree::
:maxdepth: 2
.. automodule:: dflex.render
:members:
:undoc-members:
:show-inheritance:
| 178 | reStructuredText | 11.785713 | 31 | 0.595506 |
NVlabs/DiffRL/dflex/docs/modules/model.state.rst | dflex.model.State
========================
.. autoclasssumm:: dflex.model.State
.. autoclass:: dflex.model.State
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
NVlabs/DiffRL/dflex/docs/modules/model.modelbuilder.rst | dflex.model.ModelBuilder
========================
.. autoclasssumm:: dflex.model.ModelBuilder
.. autoclass:: dflex.model.ModelBuilder
:members:
:undoc-members:
:show-inheritance:
| 194 | reStructuredText | 16.727271 | 43 | 0.628866 |
NVlabs/DiffRL/utils/common.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
# if there's overlap between args_list and commandline input, use commandline input
def solve_argv_conflict(args_list):
arguments_to_be_removed = []
arguments_size = []
for argv in sys.argv[1:]:
if argv.startswith('-'):
size_count = 1
for i, args in enumerate(args_list):
if args == argv:
arguments_to_be_removed.append(args)
for more_args in args_list[i+1:]:
if not more_args.startswith('-'):
size_count += 1
else:
break
arguments_size.append(size_count)
break
for args, size in zip(arguments_to_be_removed, arguments_size):
args_index = args_list.index(args)
for _ in range(size):
args_list.pop(args_index)
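# Example (hypothetical values): with args_list = ['--seed', '0', '--device', 'cuda']
# and the script invoked as `python train.py --seed 7`, the pair '--seed', '0'
# is removed from args_list so the command-line value takes precedence.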
def print_error(*message):
print('\033[91m', 'ERROR ', *message, '\033[0m')
raise RuntimeError
def print_ok(*message):
print('\033[92m', *message, '\033[0m')
def print_warning(*message):
print('\033[93m', *message, '\033[0m')
def print_info(*message):
print('\033[96m', *message, '\033[0m')
from datetime import datetime
def get_time_stamp():
now = datetime.now()
year = now.strftime('%Y')
month = now.strftime('%m')
day = now.strftime('%d')
hour = now.strftime('%H')
minute = now.strftime('%M')
second = now.strftime('%S')
return '{}-{}-{}-{}-{}-{}'.format(month, day, year, hour, minute, second)
import argparse
def parse_model_args(model_args_path):
    with open(model_args_path, 'r') as fp:
        model_args = eval(fp.read())  # note: eval() executes the file contents, so only load trusted args files
    model_args = argparse.Namespace(**model_args)
    return model_args
import torch
import numpy as np
import random
import os
def seeding(seed=0, torch_deterministic=False):
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed | 2,965 | Python | 31.23913 | 91 | 0.629005 |
NVlabs/DiffRL/utils/torch_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
print(s)
global log_output
log_output = log_output + s + "\n"
# short hands
# torch quat/vector utils
def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False):
return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad)
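# quat_mul computes the Hamilton product of quaternions stored as (x, y, z, w),
# using an 8-multiplication scheme instead of the naive 16-product expansion.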
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([x, y, z, w], dim=-1).view(shape)
return quat
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, :3]
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape)
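# quat_rotate applies the sandwich product q v q^-1 for a unit quaternion
# q = (q_vec, w), expanded as: v' = v (2w^2 - 1) + 2w (q_vec x v) + 2 q_vec (q_vec . v);
# quat_rotate_inverse negates the cross term to rotate by q^-1 instead.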
@torch.jit.script
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a - b + c
@torch.jit.script
def quat_axis(q, axis=0):
# type: (Tensor, int) -> Tensor
basis_vec = torch.zeros(q.shape[0], 3, device=q.device)
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([xyz, w], dim=-1))
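# normalize_angle wraps an angle to the range (-pi, pi] via atan2(sin(x), cos(x))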
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def tf_inverse(q, t):
q_inv = quat_conjugate(q)
return q_inv, -quat_apply(q_inv, t)
@torch.jit.script
def tf_apply(q, t, v):
return quat_apply(q, v) + t
@torch.jit.script
def tf_vector(q, v):
return quat_apply(q, v)
@torch.jit.script
def tf_combine(q1, t1, q2, t2):
return quat_mul(q1, q2), quat_apply(q1, t2) + t1
@torch.jit.script
def get_basis_vector(q, v):
return quat_rotate(q, v)
def mem_report():
    '''Report the memory usage of tensor storage in PyTorch,
    covering tensors on both CPUs and GPUs.'''
def _mem_report(tensors, mem_type):
        '''Print the selected tensors of the given storage type.
There are two major storage types in our major concern:
- GPU: tensors transferred to CUDA devices
- CPU: tensors remaining on the system memory (usually unimportant)
Args:
- tensors: the tensors of specified type
- mem_type: 'CPU' or 'GPU' in current implementation '''
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
            mem = numel * element_size / 1024 / 1024  # storage bytes -> MBytes
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
gc.collect()
LEN = 65
objects = gc.get_objects()
#print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN)
def grad_norm(params):
grad_norm = 0.
for p in params:
if p.grad is not None:
grad_norm += torch.sum(p.grad ** 2)
return torch.sqrt(grad_norm)
def print_leaf_nodes(grad_fn, id_set):
if grad_fn is None:
return
if hasattr(grad_fn, 'variable'):
mem_id = id(grad_fn.variable)
if not(mem_id in id_set):
print('is leaf:', grad_fn.variable.is_leaf)
print(grad_fn.variable)
id_set.add(mem_id)
# print(grad_fn)
for i in range(len(grad_fn.next_functions)):
print_leaf_nodes(grad_fn.next_functions[i][0], id_set)
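# Closed-form KL divergence between diagonal Gaussians p0 = N(mu0, sigma0^2) and
# p1 = N(mu1, sigma1^2), per dimension:
#   KL(p0 || p1) = log(sigma1 / sigma0) + (sigma0^2 + (mu1 - mu0)^2) / (2 sigma1^2) - 1/2
# The implementation below sums this over action dimensions; the 1e-5 terms
# guard against division by zero.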
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = torch.log(p1_sigma/p0_sigma + 1e-5)
c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
    kl = kl.sum(dim=-1)  # sum over action dimensions; the mean below averages over batch/time steps
return kl.mean() | 6,536 | Python | 27.176724 | 114 | 0.568696 |
NVlabs/DiffRL/utils/average_meter.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import numpy as np
class AverageMeter(nn.Module):
def __init__(self, in_shape, max_size):
super(AverageMeter, self).__init__()
self.max_size = max_size
self.current_size = 0
self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))
def update(self, values):
size = values.size()[0]
if size == 0:
return
new_mean = torch.mean(values.float(), dim=0)
size = np.clip(size, 0, self.max_size)
old_size = min(self.max_size - size, self.current_size)
size_sum = old_size + size
self.current_size = size_sum
self.mean = (self.mean * old_size + new_mean * size) / size_sum
def clear(self):
self.current_size = 0
self.mean.fill_(0)
def __len__(self):
return self.current_size
def get_mean(self):
return self.mean.squeeze(0).cpu().numpy() | 1,368 | Python | 34.102563 | 82 | 0.65424 |
NVlabs/DiffRL/utils/load_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import torch
import random
import xml.etree.ElementTree as ET
import dflex as df
def set_np_formatting():
np.set_printoptions(edgeitems=30, infstr='inf',
linewidth=4000, nanstr='nan', precision=2,
suppress=False, threshold=10000, formatter=None)
def set_seed(seed, torch_deterministic=False):
if seed == -1 and torch_deterministic:
seed = 42
elif seed == -1:
seed = np.random.randint(0, 10000)
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
# add geometry
for collision in collisions:
origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
geo = collision.geometry
if (geo.box):
builder.add_shape_box(
link,
pos,
rot,
geo.box.size[0]*0.5,
geo.box.size[1]*0.5,
geo.box.size[2]*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.sphere):
builder.add_shape_sphere(
link,
pos,
rot,
geo.sphere.radius,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.cylinder):
# cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
builder.add_shape_capsule(
link,
pos,
df.quat_multiply(rot, r),
geo.cylinder.radius,
geo.cylinder.length*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.mesh):
for m in geo.mesh.meshes:
faces = []
vertices = []
for v in m.vertices:
vertices.append(np.array(v))
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
mesh = df.Mesh(vertices, faces)
builder.add_shape_mesh(
link,
pos,
rot,
mesh,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
def urdf_load(
builder,
filename,
xform,
floating=False,
armature=0.0,
shape_ke=1.e+4,
shape_kd=1.e+4,
shape_kf=1.e+2,
shape_mu=0.25,
limit_ke=100.0,
limit_kd=1.0):
robot = urdfpy.URDF.load(filename)
# maps from link name -> link index
link_index = {}
builder.add_articulation()
# add base
if (floating):
root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
# set dofs to transform
start = builder.joint_q_start[root]
builder.joint_q[start + 0] = xform[0][0]
builder.joint_q[start + 1] = xform[0][1]
builder.joint_q[start + 2] = xform[0][2]
builder.joint_q[start + 3] = xform[1][0]
builder.joint_q[start + 4] = xform[1][1]
builder.joint_q[start + 5] = xform[1][2]
builder.joint_q[start + 6] = xform[1][3]
else:
root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
link_index[robot.links[0].name] = root
# add children
for joint in robot.joints:
type = None
axis = (0.0, 0.0, 0.0)
if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
type = df.JOINT_REVOLUTE
axis = joint.axis
if (joint.joint_type == "prismatic"):
type = df.JOINT_PRISMATIC
axis = joint.axis
if (joint.joint_type == "fixed"):
type = df.JOINT_FIXED
if (joint.joint_type == "floating"):
type = df.JOINT_FREE
parent = -1
if joint.parent in link_index:
parent = link_index[joint.parent]
origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
lower = -1.e+3
upper = 1.e+3
damping = 0.0
# limits
if (joint.limit):
            if (joint.limit.lower is not None):
                lower = joint.limit.lower
            if (joint.limit.upper is not None):
                upper = joint.limit.upper
# damping
if (joint.dynamics):
if (joint.dynamics.damping):
damping = joint.dynamics.damping
# add link
link = builder.add_link(
parent=parent,
X_pj=df.transform(pos, rot),
axis=axis,
type=type,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
damping=damping)
# add collisions
urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
# add ourselves to the index
link_index[joint.child] = link
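# Minimal usage sketch (asset path illustrative):
#   builder = df.sim.ModelBuilder()
#   urdf_load(builder, "path/to/robot.urdf", df.transform((0.0, 0.0, 0.0), df.quat_identity()), floating=False)
#   model = builder.finalize('cpu')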
# build an articulated tree
def build_tree(
builder,
angle,
max_depth,
width=0.05,
length=0.25,
density=1000.0,
joint_stiffness=0.0,
joint_damping=0.0,
shape_ke = 1.e+4,
shape_kd = 1.e+3,
shape_kf = 1.e+2,
shape_mu = 0.5,
floating=False):
def build_recursive(parent, depth):
if (depth >= max_depth):
return
X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
type = df.JOINT_REVOLUTE
axis = (0.0, 0.0, 1.0)
if (depth == 0 and floating == True):
X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
type = df.JOINT_FREE
link = builder.add_link(
parent,
X_pj,
axis,
type,
stiffness=joint_stiffness,
damping=joint_damping)
# capsule
shape = builder.add_shape_capsule(
link,
pos=(length, 0.0, 0.0),
radius=width,
half_width=length,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
# recurse
#build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating)
build_recursive(link, depth + 1)
#
build_recursive(-1, 0)
# Mujoco file format parser
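# Usage sketch (asset path illustrative; parses bodies/joints/geoms into the builder):
#   parse_mjcf("assets/ant.xml", builder, density=1000.0, armature=0.05)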
def parse_mjcf(
filename,
builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=1e4,
contact_kd=1e4,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=100.0,
limit_kd=10.0,
armature=0.01,
radians=False,
load_stiffness=False,
load_armature=False):
file = ET.parse(filename)
root = file.getroot()
type_map = {
"ball": df.JOINT_BALL,
"hinge": df.JOINT_REVOLUTE,
"slide": df.JOINT_PRISMATIC,
"free": df.JOINT_FREE,
"fixed": df.JOINT_FIXED
}
def parse_float(node, key, default):
if key in node.attrib:
return float(node.attrib[key])
else:
return default
def parse_bool(node, key, default):
if key in node.attrib:
if node.attrib[key] == "true":
return True
else:
return False
else:
return default
def parse_vec(node, key, default):
if key in node.attrib:
return np.fromstring(node.attrib[key], sep=" ")
else:
return np.array(default)
def parse_body(body, parent, last_joint_pos):
body_name = body.attrib["name"]
body_pos = np.fromstring(body.attrib["pos"], sep=" ")
# last_joint_pos = np.zeros(3)
#-----------------
# add body for each joint, we assume the joints attached to one body have the same joint_pos
for i, joint in enumerate(body.findall("joint")):
joint_name = joint.attrib["name"]
joint_type = type_map[joint.attrib.get("type", 'hinge')]
joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
joint_limited = parse_bool(joint, "limited", True)
if joint_limited:
if radians:
joint_range = parse_vec(joint, "range", (np.deg2rad(-170.), np.deg2rad(170.)))
else:
joint_range = np.deg2rad(parse_vec(joint, "range", (-170.0, 170.0)))
else:
joint_range = np.array([-1.e+6, 1.e+6])
if load_stiffness:
joint_stiffness = parse_float(joint, 'stiffness', stiffness)
else:
joint_stiffness = stiffness
joint_damping = parse_float(joint, 'damping', damping)
if load_armature:
joint_armature = parse_float(joint, "armature", armature)
else:
joint_armature = armature
joint_axis = df.normalize(joint_axis)
if (parent == -1):
body_pos = np.array((0.0, 0.0, 0.0))
#-----------------
# add body
link = builder.add_link(
parent,
X_pj=df.transform(body_pos + joint_pos - last_joint_pos, df.quat_identity()),
axis=joint_axis,
type=joint_type,
limit_lower=joint_range[0],
limit_upper=joint_range[1],
limit_ke=limit_ke,
limit_kd=limit_kd,
stiffness=joint_stiffness,
damping=joint_damping,
armature=joint_armature)
# assume that each joint is one body in simulation
parent = link
body_pos = [0.0, 0.0, 0.0]
last_joint_pos = joint_pos
#-----------------
# add shapes to the last joint in the body
for geom in body.findall("geom"):
geom_name = geom.attrib["name"]
geom_type = geom.attrib["type"]
geom_size = parse_vec(geom, "size", [1.0])
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
if (geom_type == "sphere"):
builder.add_shape_sphere(
link,
pos=geom_pos - last_joint_pos, # position relative to the parent frame
rot=geom_rot,
radius=geom_size[0],
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
elif (geom_type == "capsule"):
if ("fromto" in geom.attrib):
geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = geom_fromto[0:3]
end = geom_fromto[3:6]
# compute rotation to align dflex capsule (along x-axis), with mjcf fromto direction
axis = df.normalize(end-start)
                    angle = math.acos(np.clip(np.dot(axis, (1.0, 0.0, 0.0)), -1.0, 1.0))  # clamp to guard against floating-point values just outside [-1, 1]
axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
geom_pos = (start + end)*0.5
geom_rot = df.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_width = np.linalg.norm(end-start)*0.5
else:
geom_radius = geom_size[0]
geom_width = geom_size[1]
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
if ("axisangle" in geom.attrib):
axis_angle = parse_vec(geom, "axisangle", (0.0, 1.0, 0.0, 0.0))
geom_rot = df.quat_from_axis_angle(axis_angle[0:3], axis_angle[3])
if ("quat" in geom.attrib):
q = parse_vec(geom, "quat", df.quat_identity())
geom_rot = q
geom_rot = df.quat_multiply(geom_rot, df.quat_from_axis_angle((0.0, 1.0, 0.0), -math.pi*0.5))
builder.add_shape_capsule(
link,
pos=geom_pos - last_joint_pos,
rot=geom_rot,
radius=geom_radius,
half_width=geom_width,
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
else:
print("Type: " + geom_type + " unsupported")
#-----------------
# recurse
for child in body.findall("body"):
parse_body(child, link, last_joint_pos)
#-----------------
# start articulation
builder.add_articulation()
world = root.find("worldbody")
for body in world.findall("body"):
parse_body(body, -1, np.zeros(3))
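    # Example usage (a sketch; the enclosing loader's name is not visible in
    # this excerpt, so `parse_mjcf` below is a hypothetical stand-in for it):
    #   builder = df.sim.ModelBuilder()
    #   parse_mjcf("assets/ant.xml", builder, radians=False)
    #   model = builder.finalize('cpu')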
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
self.muscle_strength = 0.0
class Skeleton:
def __init__(self, skeleton_file, muscle_file, builder,
filter={},
visualize_shapes=True,
stiffness=5.0,
damping=2.0,
contact_ke=5000.0,
contact_kd=2000.0,
contact_kf=1000.0,
contact_mu=0.5,
limit_ke=1000.0,
limit_kd=10.0,
armature = 0.05):
self.armature = armature
self.stiffness = stiffness
self.damping = damping
self.contact_ke = contact_ke
self.contact_kd = contact_kd
self.contact_kf = contact_kf
self.limit_ke = limit_ke
self.limit_kd = limit_kd
self.contact_mu = contact_mu
self.visualize_shapes = visualize_shapes
self.parse_skeleton(skeleton_file, builder, filter)
if muscle_file != None:
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
        self.mesh_map = {} # map mesh base names to link indices
self.coord_start = len(builder.joint_q)
self.dof_start = len(builder.joint_qd)
type_map = {
"Ball": df.JOINT_BALL,
"Revolute": df.JOINT_REVOLUTE,
"Prismatic": df.JOINT_PRISMATIC,
"Free": df.JOINT_FREE,
"Fixed": df.JOINT_FIXED
}
builder.add_articulation()
for child in root:
if (child.tag == "Node"):
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = df.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
body_type = body.attrib["type"]
body_mass = float(body.attrib["mass"])
x=body_size[0]
y=body_size[1]
z=body_size[2]
density = body_mass / (x*y*z)
max_body_mass = 15.0
mass_scale = body_mass / max_body_mass
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = -1.e+3
joint_upper = 1.e+3
if (joint_type == type_map["Revolute"]):
if ("lower" in joint.attrib):
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")[0]
if ("upper" in joint.attrib):
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")[0]
# print(joint_type, joint_lower, joint_upper)
if ("axis" in joint.attrib):
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
mesh_file = mesh_base + ".usd"
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)
if (parent_link == -1):
joint_X_p = df.transform_identity()
# add link
link = builder.add_link(
parent=parent_link,
X_pj=joint_X_p,
axis=joint_axis,
type=joint_type,
limit_lower=joint_lower,
limit_upper=joint_upper,
limit_ke=self.limit_ke * mass_scale,
limit_kd=self.limit_kd * mass_scale,
damping=self.damping,
stiffness=self.stiffness * math.sqrt(mass_scale),
armature=self.armature)
# armature=self.armature * math.sqrt(mass_scale))
# add shape
shape = builder.add_shape_box(
body=link,
pos=body_X_c[0],
rot=body_X_c[1],
hx=x*0.5,
hy=y*0.5,
hz=z*0.5,
density=density,
ke=self.contact_ke,
kd=self.contact_kd,
kf=self.contact_kf,
mu=self.contact_mu)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if (child.tag == "Unit"):
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
m.muscle_strength = unit_f0
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if (way_link == -1):
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)
self.muscles = muscles
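# Example usage (a sketch; the asset file names are illustrative):
#   builder = df.sim.ModelBuilder()
#   skel = Skeleton("assets/human.xml", "assets/muscle.xml", builder)
#   model = builder.finalize('cpu')  # muscle entries start at skel.muscle_start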
| 22,759 | Python | 30.523546 | 130 | 0.482622 |
NVlabs/DiffRL/utils/dataset.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
class CriticDataset:
def __init__(self, batch_size, obs, target_values, shuffle = False, drop_last = False):
self.obs = obs.view(-1, obs.shape[-1])
self.target_values = target_values.view(-1)
self.batch_size = batch_size
if shuffle:
self.shuffle()
if drop_last:
self.length = self.obs.shape[0] // self.batch_size
else:
self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1
def shuffle(self):
index = np.random.permutation(self.obs.shape[0])
self.obs = self.obs[index, :]
self.target_values = self.target_values[index]
def __len__(self):
return self.length
def __getitem__(self, index):
start_idx = index * self.batch_size
end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
return {'obs': self.obs[start_idx:end_idx, :], 'target_values': self.target_values[start_idx:end_idx]}
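# Example usage (a sketch; shapes and the critic call are illustrative):
#   obs = torch.randn(32, 64, 76)        # (steps, envs, obs_dim), flattened internally
#   target_values = torch.randn(32, 64)  # one value per (step, env)
#   dataset = CriticDataset(256, obs, target_values, shuffle=True, drop_last=False)
#   for batch in dataset:
#       loss = ((critic(batch['obs']).squeeze(-1) - batch['target_values']) ** 2).mean()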
| 1,420 | Python | 37.405404 | 110 | 0.645775 |
NVlabs/DiffRL/utils/time_report.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import time
from utils.common import *
class Timer:
def __init__(self, name):
self.name = name
self.start_time = None
self.time_total = 0.
def on(self):
assert self.start_time is None, "Timer {} is already turned on!".format(self.name)
self.start_time = time.time()
def off(self):
assert self.start_time is not None, "Timer {} not started yet!".format(self.name)
self.time_total += time.time() - self.start_time
self.start_time = None
def report(self):
print_info('Time report [{}]: {:.2f} seconds'.format(self.name, self.time_total))
def clear(self):
self.start_time = None
self.time_total = 0.
class TimeReport:
def __init__(self):
self.timers = {}
def add_timer(self, name):
assert name not in self.timers, "Timer {} already exists!".format(name)
self.timers[name] = Timer(name = name)
def start_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].on()
def end_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].off()
def report(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
else:
print_info("------------Time Report------------")
for timer_name in self.timers.keys():
self.timers[timer_name].report()
print_info("-----------------------------------")
def clear_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].clear()
else:
for timer_name in self.timers.keys():
self.timers[timer_name].clear()
def pop_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
del self.timers[name]
else:
self.report()
self.timers = {}
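# Example usage (a sketch):
#   tr = TimeReport()
#   tr.add_timer("forward")
#   tr.start_timer("forward")
#   ...                      # timed work
#   tr.end_timer("forward")
#   tr.report()              # prints the accumulated seconds per timer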
| 2,688 | Python | 34.853333 | 90 | 0.58631 |
NVlabs/DiffRL/utils/running_mean_std.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Tuple
import torch
class RunningMeanStd(object):
def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = (), device = 'cuda:0'):
"""
        Calculates the running mean and std of a data stream
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param epsilon: helps with arithmetic issues
:param shape: the shape of the data stream's output
"""
self.mean = torch.zeros(shape, dtype = torch.float32, device = device)
self.var = torch.ones(shape, dtype = torch.float32, device = device)
self.count = epsilon
def to(self, device):
rms = RunningMeanStd(device = device)
rms.mean = self.mean.to(device).clone()
rms.var = self.var.to(device).clone()
rms.count = self.count
return rms
@torch.no_grad()
def update(self, arr: torch.tensor) -> None:
batch_mean = torch.mean(arr, dim = 0)
batch_var = torch.var(arr, dim = 0, unbiased = False)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean: torch.tensor, batch_var: torch.tensor, batch_count: int) -> None:
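        # Parallel moment combination (Chan et al., per the Wikipedia link above):
        # with n_a = self.count and n_b = batch_count,
        #   mean' = mean_a + delta * n_b / (n_a + n_b)
        #   M2'   = M2_a + M2_b + delta**2 * n_a * n_b / (n_a + n_b)
        # where M2 = var * n and delta = mean_b - mean_a, so var' = M2' / (n_a + n_b).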
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + torch.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
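    # Example usage (a sketch; the shape and device are illustrative):
    #   rms = RunningMeanStd(shape=(obs_dim,), device='cuda:0')
    #   rms.update(obs)                        # obs: (batch, obs_dim) tensor
    #   obs_n = rms.normalize(obs)             # standardize
    #   obs_raw = rms.normalize(obs_n, un_norm=True)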
def normalize(self, arr:torch.tensor, un_norm = False) -> torch.tensor:
if not un_norm:
result = (arr - self.mean) / torch.sqrt(self.var + 1e-5)
else:
result = arr * torch.sqrt(self.var + 1e-5) + self.mean
return result | 2,462 | Python | 40.745762 | 111 | 0.638099 |
NVlabs/fast-explicit-teleop/README.md | # Fast Explicit-Input Assistance for Teleoperation in Clutter
This repository contains the research code for [Fast Explicit-Input Assistance for Teleoperation in Clutter](https://arxiv.org/abs/2402.02612).
The performance of prediction-based assistance for robot teleoperation degrades in unseen or goal-rich environments due to incorrect or quickly-changing intent inferences.
Poor predictions can confuse operators or cause them to change their control input to implicitly signal their goal, resulting in unnatural movement. We present a new assistance algorithm and interface for robotic manipulation where an operator can explicitly communicate a manipulation goal by pointing the end-effector. Rapid optimization and parallel collision checking in a local region around the pointing target enable direct, interactive control over grasp and place pose candidates.
This codebase enables running the explicit and implicit assistance conditions on a simulated environment in Isaac Sim, as used in the experiments. It has a dependency on the [spacemouse extension](https://github.com/NVlabs/spacemouse-extension), which is the device used for teleoperating the robot. Some of the tools and utilities might be helpful as a guidance in developing your own simulation environments and teleoperation interfaces.
# srl.teleop
The codebase is structured as an Isaac Sim Extension. It is currently supported on Isaac Sim 2022.2.1.
## Installation
Clone into `~/Documents/Kit/apps/Isaac-Sim/exts`, and ensure the folder is titled `srl.teleop`.
You could also clone the extension to a different directory and add it to the list of extension search paths in Isaac Sim; the path above is just used by default.
OpenSCAD is required for trimesh boolean operations (used in collision checking):
sudo apt-get install openscad
### SpaceMouse Setup
Clone the [SpaceMouse extension](https://github.com/NVlabs/spacemouse-extension) and carefully follow the setup instructions to install it.
Currently, the assistance extension won't function without the SpaceMouse extension.
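If you need to bring up Isaac Sim and enable the extension from a standalone Python script rather than through the UI, a minimal sketch (mirroring what `scripts/render_demo.py` in this repository does; the `headless` flag is the only assumption) looks like:
```
from omni.isaac.kit import SimulationApp

# Start Isaac Sim first; extensions can only be enabled once the app exists.
simulation_app = SimulationApp({"headless": False})  # render_demo.py runs headless

from omni.isaac.core.utils.extensions import enable_extension
enable_extension("srl.teleop")
```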
<!-- #### 2022.2.0:
Cortex doesn't declare a module, which seems to prevent imports from other extensions. Add the following to its `extension.toml`:
[[python.module]]
name = "omni.isaac" -->
## Usage
* Run Isaac Sim from the OV Launcher. Activate the extension in the `Window > Extensions` menu by searching for `SRL` and toggling the extension on. Set it to autoload if you don't want to have to enable it manually every launch.
* Click on the new `Teleop Assistance` pane that appeared near the `Stage` pane (right side menus).
* Click `Load World`.
* Open the `SpaceMouse` pane, select your device, and click the `Engage` checkbox.
SpaceMouse input moves the target, visualized with a small axis marker, that the robot tries to move towards. Suggestions will appear, indicated by a ghost gripper; you can hold the `Pull` button to have the target slowly moved to match the suggestion. When you have an object in your gripper, you will see a ghost version of the held object floating along planes in the scene. The ghost moves in tandem as you move the robot around as normal; once you're happy with where the marker is, use `Pull` to have the object placed down onto the plane.
| Function | SpaceMouse | SpaceMouse Pro |
|--------------------|------------|----------------|
| Gripper open/close | Left click | Ctrl |
| Pull | Right hold | Alt |
| Home | - | Menu |
| Left View | - | F |
| Right View | - | R |
| Top View | - | T |
| Free View | - | Clockwise (Center on right pad) |
| Rotate View | - | Roll (Top-left on right pad) |
### Recording a Demonstration
Under the Data Logging panel of the extension, enter the operator's name, then click "Start" to begin collecting a demonstration. Press pause when finished and click "Save Data" to store the information into a JSON file at the "Output Directory".
## Development
Run `source ${ISAAC_SIM_ROOT}/setup_python_env.sh` in a shell, then run `code .` in the repository. The included `.vscode` config is based on the one distributed with Isaac Sim.
Omniverse will monitor the Python source files making up the extension and automatically "hot reload" the extension when you save changes.
This repo tracks a VS Code configuration for connecting the debugger to the Python environment while Isaac Sim is running. Enabling the debugger by enabling its host extension brings a performance penalty (even if it isn't connected), so be sure to disable it before judging frame rates.
**Note: You must start with a fresh Omniverse stage every time you open the plugin. Use `File > New From Stage Template > Empty` to clear the stage. Then you can `Load World` and proceed.**
# Contributions
Some parts of this codebase reuse and modify the "Isaac Sim Examples" plugin and Cortex from NVIDIA Isaac Sim.
# Citation
If you find this work useful, please star or fork this repository and cite the following paper:
```
@misc{walker2024fast,
title={Fast Explicit-Input Assistance for Teleoperation in Clutter},
author={Nick Walker and Xuning Yang and Animesh Garg and Maya Cakmak and Dieter Fox and Claudia P\'{e}rez-D'Arpino},
year={2024},
eprint={2402.02612},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
```
| 5,545 | Markdown | 60.622222 | 590 | 0.732372 |
NVlabs/fast-explicit-teleop/PACKAGE-LICENSES/omni.isaac.examples-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
NVlabs/fast-explicit-teleop/scripts/render_demo.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import argparse
import os
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='Path to hdf5 file')
args = parser.parse_args()
selection_path = Path(args.input_file)
# selection path but without file extension
prefix = ""
out_path = prefix + str(selection_path.parent) + "/" + str(selection_path.stem)
if os.path.exists(out_path + "/operator_view.mp4"):
print("Already rendered, skipping")
exit()
if "lifting" in out_path or "reaching" in out_path:
print("Skipping warm up")
exit()
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
from omni.isaac.core.utils.extensions import enable_extension
# Enable the layers and stage windows in the UI
enable_extension("srl.teleop")
import atexit
def exit_handler():
simulation_app.close()
atexit.register(exit_handler)
import numpy as np
import h5py
import asyncio
import os
from omni.isaac.core.world import World
from srl.teleop.analysis.playback import Playback
import math
import subprocess
import os
import argparse
from tqdm import tqdm
import shlex
def get_hdf5_files(dir_path):
hdf5_files = []
for dirpath, dirnames, filenames in os.walk(dir_path):
for filename in filenames:
if filename.endswith('.hdf5'):
hdf5_files.append(os.path.join(dirpath, filename))
return hdf5_files
def main(input_path):
selection_path = Path(input_path)
# selection path but without file extension
prefix = ""
out_path = prefix + str(selection_path.parent) + "/" + str(selection_path.stem)
if os.path.exists(out_path + "/operator_view.mp4"):
print("Already rendered, skipping")
return
if "lifting" in out_path or "reaching" in out_path:
print("Skipping warm up")
return
# Clear out old renders
os.system(f"rm -rf {out_path}/main {out_path}/secondary {out_path}/table {out_path}/gripper")
with h5py.File(selection_path, 'r') as f:
task = f.attrs["task"]
scene_description = f.attrs["scene_description"]
trajectory = f["frames"][()]
print("**********************")
print(input_path)
print(f"Frames in trajectory: {len(trajectory)}")
frame_duration = len(trajectory) / 60
print(f"Frame duration: {int(frame_duration//60)}:{math.floor(frame_duration % 60)}")
duration = sum([frame['time'] for frame in trajectory])
print(f"Wall clock length: {int(duration//60)}:{math.floor(duration % 60)} ")
filepath_no_ext, ext = os.path.splitext(selection_path)
playback = Playback(task, scene_description, trajectory, save_images_path=filepath_no_ext, half_res=True, every_other_frame=True)
playback._world = World()
playback.setup_scene()
loop = asyncio.get_event_loop()
playback._world.reset()
loop.run_until_complete(playback.setup_post_load())
playback._world.play()
with tqdm(total=len(trajectory)) as pbar:
pbar.set_description("Rendering " + str(selection_path))
while True:
playback._world.step(render=True)
if not playback._world.is_playing():
break
pbar.update(2)
# Rename RenderProduct_Replicator_01 folder to something more descriptive
os.rename(out_path + "/RenderProduct_Replicator_01/rgb", out_path + "/table")
os.rename(out_path + "/RenderProduct_Replicator/rgb", out_path + "/gripper")
os.system(f"rmdir {out_path}/RenderProduct_Replicator {out_path}/RenderProduct_Replicator_01")
# Remove rgb_ prefix from filenames (sudo apt install rename)
os.system(f"find {out_path}/table -type f -name '*' | rename 's/rgb_//'")
os.system(f"find {out_path}/gripper -type f -name '*' | rename 's/rgb_//'")
os.mkdir(out_path + "/main")
os.mkdir(out_path + "/secondary")
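    # Symlink each frame into main/secondary according to which camera the
    # operator had as primary at that timestep.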
for i, frame in enumerate(tqdm(trajectory)):
# frame number as string with leading zeros
frame_str = str(i).zfill(5)
# check if frame file exists
if not os.path.isfile(f"{out_path}/table/{frame_str}.png") or not os.path.isfile(f"{out_path}/gripper/{frame_str}.png"):
continue
if frame["ui_state"]["primary_camera"] == 0:
os.system(f"ln -s ../gripper/{frame_str}.png {out_path}/secondary/{frame_str}.png")
os.system(f"ln -s ../table/{frame_str}.png {out_path}/main/{frame_str}.png")
else:
os.system(f"ln -s ../table/{frame_str}.png {out_path}/secondary/{frame_str}.png")
os.system(f"ln -s ../gripper/{frame_str}.png {out_path}/main/{frame_str}.png")
commands = [f"ffmpeg -framerate 30 -i '{out_path}/main/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/main.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/secondary/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/secondary.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/table/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/table.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/gripper/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/gripper.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/main/%05d.png' -framerate 30 -i '{out_path}/secondary/%05d.png' -filter_complex '[1]scale=iw/3:ih/3 [pip]; [0][pip] overlay=main_w-overlay_w:0[v]' -map '[v]' -vcodec libx264 -y {out_path}/operator_view.mp4",
]
processes = set()
for cmd in commands:
p = subprocess.Popen(shlex.split(cmd), stdin=subprocess.PIPE)
processes.add(p)
for process in processes:
fout = process.stdin
process.wait()
if process.returncode !=0: raise subprocess.CalledProcessError(process.returncode, process.args)
#os.system(f"rm -rf {out_path}/main {out_path}/secondary {out_path}/table {out_path}/gripper")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='Path to hdf5 file')
args = parser.parse_args()
main(args.input_file)
simulation_app.close()
| 6,125 | Python | 35.464286 | 247 | 0.657469 |
NVlabs/fast-explicit-teleop/srl/teleop/base_sample/base_sample.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.core import World
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.stage import create_new_stage_async, update_stage_async
import gc
from abc import abstractmethod
class BaseSample(object):
def __init__(self) -> None:
self._world = None
self._current_tasks = None
self._world_settings = {"physics_dt": 1.0 / 60.0, "stage_units_in_meters": 1.0, "rendering_dt": 1.0 / 60.0}
# self._logging_info = ""
return
def get_world(self) -> World:
return self._world
def set_world_settings(self, physics_dt=None, stage_units_in_meters=None, rendering_dt=None):
if physics_dt is not None:
self._world_settings["physics_dt"] = physics_dt
if stage_units_in_meters is not None:
self._world_settings["stage_units_in_meters"] = stage_units_in_meters
if rendering_dt is not None:
self._world_settings["rendering_dt"] = rendering_dt
return
async def load_world_async(self):
"""Function called when clicking load buttton
"""
if World.instance() is None:
await create_new_stage_async()
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = World.instance()
self._current_tasks = self._world.get_current_tasks()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load()
if len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
async def reset_async(self):
"""Function called when clicking reset buttton
"""
if self._world.is_tasks_scene_built() and len(self._current_tasks) > 0:
self._world.remove_physics_callback("tasks_step")
await self._world.play_async()
await update_stage_async()
await self.setup_pre_reset()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_reset()
if self._world.is_tasks_scene_built() and len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
@abstractmethod
def setup_scene(self, scene: Scene) -> None:
"""used to setup anything in the world, adding tasks happen here for instance.
Args:
scene (Scene): [description]
"""
return
@abstractmethod
async def setup_post_load(self):
"""called after first reset of the world when pressing load,
intializing provate variables happen here.
"""
return
@abstractmethod
async def setup_pre_reset(self):
""" called in reset button before resetting the world
to remove a physics callback for instance or a controller reset
"""
return
@abstractmethod
async def setup_post_reset(self):
""" called in reset button after resetting the world which includes one step with rendering
"""
return
@abstractmethod
async def setup_post_clear(self):
"""called after clicking clear button
or after creating a new stage and clearing the instance of the world with its callbacks
"""
return
def _world_cleanup(self):
self._world.stop()
# check callbacks on stage close
if self._world.physics_callback_exists("tasks_step"):
self._world.remove_physics_callback("tasks_step")
self._current_tasks = None
self.world_cleanup()
return
def world_cleanup(self):
"""Function called when extension shutdowns and starts again, (hot reloading feature)
"""
return
async def clear_async(self):
"""Function called when clicking clear buttton
"""
await create_new_stage_async()
if self._world is not None:
self._world_cleanup()
self._world.clear_instance()
self._world = None
gc.collect()
await self.setup_post_clear()
return
| 4,687 | Python | 34.786259 | 115 | 0.628334 |
NVlabs/fast-explicit-teleop/srl/teleop/base_sample/__init__.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.base_sample.base_sample import BaseSample
| 201 | Python | 32.666661 | 79 | 0.781095 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/analysis_extension.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import asyncio
import omni.ui as ui
from omni.isaac.ui.ui_utils import btn_builder, setup_ui_headers, get_style
import asyncio
import carb
from omni.kit.viewport.utility import get_active_viewport_window
import omni
from srl.teleop.analysis.playback import Playback
from srl.teleop.assistance.logging import is_hdf5_file
from srl.teleop.assistance.ui import str_builder
from srl.spacemouse.ui_utils import xyz_plot_builder
from .ui import joint_state_plot_builder
import numpy as np
import carb
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
import weakref
import omni.ext
import asyncio
from omni.isaac.core import World
from functools import partial
import h5py
class AnalysisExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
self._ext_id = ext_id
menu_items = [MenuItemDescription(name="Teleop Analysis", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
self._menu_items = menu_items
add_menu_items(self._menu_items, "SRL")
self._viewport = get_active_viewport_window("Viewport")
self.timeline = omni.timeline.get_timeline_interface()
self._world_buttons = {}
self._plots = {}
self.build_ui(name="Teleop Analysis",
title="Teleop Analysis",
doc_link="",
overview="Provides playback and analysis of saved trajectories",
file_path=os.path.abspath(__file__),
number_of_extra_frames=3,
window_width=350,)
self.build_control_ui(self.get_frame(index=0))
self.build_joint_state_plotting_ui(self.get_frame(index=1))
self._joint_states_plotting_buffer = np.zeros((360, 14))
self._control_plotting_buffer = np.zeros((360, 6))
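        # Rolling 360-sample plot windows: the joint buffer holds 7 positions +
        # 7 velocities per row; the control buffer holds xyz translation + rpy rotation.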
self._plotting_event_subscription = None
self.playback = None
def get_frame(self, index):
if index >= len(self._extra_frames):
raise Exception("there were {} extra frames created only".format(len(self._extra_frames)))
return self._extra_frames[index]
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def shutdown_cleanup(self):
pass
def _on_snapping_button_event(self, value):
pass
def post_reset_button_event(self):
pass
def post_load_button_event(self):
pass
def _on_load_world(self):
self._world_buttons["Load World"].enabled = False
if self.playback:
self.playback._world_cleanup()
self.playback._world.clear_instance()
self.playback = None
else:
World.clear_instance()
async def _on_load_world_async():
selection_path = self._world_buttons["Trajectory Selection"].get_value_as_string()
if os.path.isdir(selection_path):
return
elif os.path.isfile(selection_path):
with h5py.File(selection_path, 'r') as f:
task = f.attrs["task"]
user = f.attrs["user"]
objects = f.attrs["objects"]
scene_description = f.attrs["scene_description"]
trajectory = f["frames"][()]
filepath_no_ext, ext = os.path.splitext(selection_path)
self.playback = Playback(task, scene_description, trajectory, save_images_path=filepath_no_ext)
if not self._plotting_event_subscription:
self. _plotting_event_subscription = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
else:
return
await self.playback.load_world_async()
await omni.kit.app.get_app().next_update_async()
self.playback._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(True)
self.post_load_button_event()
self.playback._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
self._world_buttons["Load World"].enabled = True
asyncio.ensure_future(_on_load_world_async())
return
def _on_reset(self):
async def _on_reset_async():
if self.playback:
await self.playback.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
asyncio.ensure_future(_on_reset_async())
return
def _on_plotting_step(self, e: carb.events.IEvent):
if not self.playback:
return
robot = self.playback.franka
if robot is not None:
positions = robot.get_joint_positions()[:7]
velocities = robot.get_joint_velocities()[:7]
if positions is not None:
self._joint_states_plotting_buffer = np.roll(self._joint_states_plotting_buffer, shift=1, axis=0)
self._joint_states_plotting_buffer[0, :7] = positions
self._joint_states_plotting_buffer[0, 7:] = velocities
for i in range(7):
self._plots["joint_positions"][i].set_data(*self._joint_states_plotting_buffer[:, i])
self._plots["joint_velocities"][i].set_data(*self._joint_states_plotting_buffer[:, 7 + i])
control = self.playback.control
if control is not None:
self._control_plotting_buffer = np.roll(self._control_plotting_buffer, shift=1, axis=0)
self._control_plotting_buffer[0, :3] = control["trans"]
self._control_plotting_buffer[0, 3:] = control["rot"]
for i in range(3):
self._plots["xyz_plot"][i].set_data(*self._control_plotting_buffer[:, i])
self._plots["xyz_vals"][i].set_value(self._control_plotting_buffer[0, i])
self._plots["rpy_plot"][i].set_data(*self._control_plotting_buffer[:, 3 + i])
self._plots["rpy_vals"][i].set_value(self._control_plotting_buffer[0, 3 + i])
if len(self._plots["xyz_plot"]) == 4:
self._plots["xyz_plot"][3].set_data(*np.linalg.norm(self._control_plotting_buffer[:, :3], axis=1))
self._plots["xyz_vals"][3].set_value(np.linalg.norm(self._control_plotting_buffer[0,:3]))
if len(self._plots["rpy_plot"]) == 4:
self._plots["rpy_plot"][3].set_data(*np.linalg.norm(self._control_plotting_buffer[:, 3:], axis=1))
self._plots["rpy_vals"][3].set_value(np.linalg.norm(self._control_plotting_buffer[0,3:]))
def _enable_all_buttons(self, flag):
for btn_name, btn in self._world_buttons.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = flag
return
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def on_shutdown(self):
self._extra_frames = []
if self._menu_items is not None:
self._window_cleanup()
if self._world_buttons is not None:
self._world_buttons["Load World"].enabled = True
self._enable_all_buttons(False)
self.shutdown_cleanup()
return
def _window_cleanup(self):
remove_menu_items(self._menu_items, "SRL")
self._window = None
self._menu_items = None
self._world_buttons = None
return
def on_stage_event(self, event):
# event_type = omni.usd.StageEventType(event.type)
if event.type == int(omni.usd.StageEventType.CLOSED):
self. _plotting_event_subscription = None
# If the stage is closed before on_startup has run, all of our fields will be undefined
if World.instance() is not None and hasattr(self, "playback"):
self.playback._world_cleanup()
# There's no World now, so in any case the user can load anew!
if hasattr(self, "_world_buttons"):
self._enable_all_buttons(False)
self._world_buttons["Load World"].enabled = True
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
self._world_buttons["Load World"].enabled = False
self._world_buttons["Reset"].enabled = True
self.post_clear_button_event()
return
def build_ui(self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=True, dockPreference=ui.DockPreference.RIGHT_TOP
)
self._window.deferred_dock_in("Stage", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
self._extra_frames = []
with self._window.frame:
with ui.VStack(spacing=5, height=0):
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="Log Loading",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
def update_load_button_enabled(new_val):
if os.path.splitext(new_val.lower())[1] == ".hdf5":
self._world_buttons["Load World"].enabled = True
else:
self._world_buttons["Load World"].enabled = False
dict = {
"label": "Trajectory File",
"type": "stringfield",
"default_val": os.path.expanduser('~/Documents/trajectories'),
"tooltip": "Output Directory",
"on_clicked_fn": update_load_button_enabled,
"use_folder_picker": True,
"item_filter_fn": is_hdf5_file,
"read_only": False,
}
self._world_buttons["Trajectory Selection"] = str_builder(**dict)
dict = {
"label": "Load",
"type": "button",
"text": "Load",
"tooltip": "Load World and Task",
"on_clicked_fn": self._on_load_world,
}
self._world_buttons["Load World"] = btn_builder(**dict)
self._world_buttons["Load World"].enabled = False
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._world_buttons["Reset"] = btn_builder(**dict)
self._world_buttons["Reset"].enabled = False
with ui.VStack(style=get_style(), spacing=5, height=0):
for i in range(number_of_extra_frames):
self._extra_frames.append(
ui.CollapsableFrame(
title="",
width=ui.Fraction(0.33),
height=0,
visible=False,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
)
def build_control_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Data"
frame.visible = True
kwargs = {
"label": "XYZ",
"data": [[],[],[]],
"include_norm": False
}
self._plots["xyz_plot"], self._plots[
"xyz_vals"
] = xyz_plot_builder(**kwargs)
kwargs = {
"label": "RPY",
"data": [[],[],[]],
"value_names": ("R", "P", "Y"),
"include_norm": False
}
self._plots["rpy_plot"], self._plots[
"rpy_vals"
] = xyz_plot_builder(**kwargs)
return
def build_joint_state_plotting_ui(self, frame):
frame.collapsed = True
with frame:
with ui.VStack(spacing=5):
frame.title = "Joint States"
frame.visible = True
kwargs = {
"label": "Positions",
"data": [[] for i in range(7)],
"min": -3.14,
"max": 3.14
}
self._plots["joint_positions"] = joint_state_plot_builder(**kwargs)
kwargs = {
"label": "Velocities",
"data": [[] for i in range(7)],
"min": -.45,
"max": .45
}
self._plots["joint_velocities"] = joint_state_plot_builder(**kwargs)
| 14,001 | Python | 40.922156 | 156 | 0.526605 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/playback.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.core.utils.types import ArticulationAction
from srl.teleop.assistance.camera_controls import SwappableViewControls
from srl.teleop.assistance.tasks.lifting import LiftingTask
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.assistance.tasks.sorting import SortingTask
from srl.teleop.assistance.tasks.stacking import StackingTask
from srl.teleop.assistance.tasks.subset_stacking import SubsetStackingTask
from srl.teleop.assistance.viewport import configure_main_viewport, configure_realsense_viewport, get_realsense_viewport, layout_picture_in_picture
from srl.teleop.assistance.viz import viz_axis
from srl.teleop.base_sample.base_sample import BaseSample
import numpy as np
from omni.kit.viewport.utility import get_active_viewport_window
import os
import aiofiles
async def save_frame(im, path):
from io import BytesIO
buffer = BytesIO()
im.save(buffer, format="png")
async with aiofiles.open(path, "wb") as file:
await file.write(buffer.getbuffer())
class Playback(BaseSample):
def __init__(self, task, scene_description, trajectory, save_images_path=None) -> None:
super().__init__()
self.set_world_settings(rendering_dt= 1 / 30, physics_dt=1/60)
self._articulation_controller = None
self.trajectory = trajectory
self.target_marker = None
self.mode = "play_state"
self.franka = None
self.control = None
self.control_raw = None
self._writer = None
self._render_products = []
self._save_images_path = save_images_path
if task == "sorting":
self.task = SortingTask(initial_scene_description=scene_description)
elif task =="stacking":
self.task = StackingTask(initial_scene_description=scene_description)
elif task == "lifting":
self.task = LiftingTask(initial_scene_description=scene_description)
elif task =="subset_stacking":
self.task = SubsetStackingTask(initial_scene_description=scene_description)
elif task =="reaching":
self.task = ReachingTask(initial_scene_description=scene_description)
else:
raise NotImplementedError("No playback for task " + task)
def setup_scene(self):
world = self.get_world()
world.add_task(self.task)
return
def world_cleanup(self):
self._clear_recorder()
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("replay_scene"):
world.remove_physics_callback("replay_scene")
return
async def setup_post_load(self):
scene = self._world.scene
self.franka = scene.get_object(self.task.get_params()["robot_name"])
self.ghosts = [scene.get_object("ghost_franka0"),scene.get_object("ghost_franka1")]
self._object_ghosts = self.task.get_ghost_objects()
self.target_marker = viz_axis("/target_marker", (0,0,0.), (0,0,0,1.), (0.2, 0.2, 0.2))
self._articulation_controller = self.franka.get_articulation_controller()
self.realsense_vp = get_realsense_viewport(self.franka.camera.prim.GetPath())
configure_realsense_viewport(self.realsense_vp)
self.main_vp = get_active_viewport_window("Viewport")
configure_main_viewport(self.main_vp)
layout_picture_in_picture(self.main_vp, self.realsense_vp)
#self._camera_controls = ArcballCameraControls("/OmniverseKit_Persp", focus_delegate=get_focus)
self._camera_controls = SwappableViewControls("/OmniverseKit_Persp",self.main_vp, self.realsense_vp)
self._camera_controls.set_fixed_view()
self._camera_controls.camera.set_resolution((1280 // 2,720 // 2))
self.franka.camera.set_resolution((1280 // 2,720 // 2))
world = self.get_world()
world.play()
world.add_physics_callback("replay_scene", self._on_replay_scene_step)
if self._save_images_path:
self._init_recorder(self._save_images_path, [self._camera_controls.camera, self.franka.camera])
def _clear_recorder(self):
import omni.replicator.core as rep
rep.orchestrator.stop()
if self._writer:
self._writer.detach()
self._writer = None
import omni
stage = omni.usd.get_context().get_stage()
"""for rp in self._render_products:
stage.RemovePrim(rp)
self._render_products.clear()"""
rep.scripts.utils.viewport_manager.destroy_hydra_textures()
def _init_recorder(self, out_path, cameras) -> bool:
import omni.replicator.core as rep
# Init the writer
writer_params = {
"rgb": True
}
try:
self._writer = rep.BasicWriter(output_dir=out_path, **writer_params)
except Exception as e:
return False
# Create or get existing render products
self._render_prods = []
for camera in cameras:
            # NOTE: render products are created by the Camera objects themselves
            # (see camera._render_product_path below), so nothing is created here.
            pass
# Attach the render products to the writer
try:
self._writer.attach([camera._render_product_path for camera in cameras])
#self._writer.attach(self._render_prods)
except Exception as e:
return False
rep.orchestrator.run()
return True
def _on_replay_scene_step(self, step_size):
from omni.kit.viewport.utility import get_active_viewport, capture_viewport_to_file
from PIL import Image
from io import BytesIO
import omni.renderer_capture
import asyncio
import time
current_step_i = self._world.current_time_step_index
capture_filename = f"{os.path.expanduser('~/out/')}test{current_step_i}.png"
"""async def wait_on_result():
await cap_obj.wait_for_result(completion_frames=30)
asyncio.ensure_future(wait_on_result())"""
if current_step_i < len(self.trajectory):
frame = self.trajectory[current_step_i]
self.target_marker.set_world_pose(*frame["robot_state"]["target_pose"])
self.control = frame["controls_state"]["filtered"]
self.control_raw = frame["controls_state"]["raw"]
if frame["ui_state"]["primary_camera"] != self._camera_controls.active_index:
self._camera_controls.swap()
if self.mode == "play_actions":
if current_step_i == 0:
self.task.set_object_poses(frame["scene_state"]["poses"])
self._articulation_controller.apply_action(
ArticulationAction(joint_positions=frame["robot_state"]["applied_joint_positions"])
)
else:
self.franka.set_joint_positions(frame["robot_state"]["joint_positions"])
self.task.set_object_poses(frame["scene_state"]["poses"])
ui_state = frame["ui_state"]
ghost_joint_pos = ui_state["robot_ghost_joint_positions"]
if not np.isnan(ghost_joint_pos[0]):
ghost = self.ghosts[0]
ghost.set_joint_positions(ghost_joint_pos)
ghost.show(gripper_only=True)
else:
ghost = self.ghosts[0]
ghost.hide()
ghost_obj_index = ui_state["object_ghost_index"]
if ghost_obj_index != -1:
ghost = list(self._object_ghosts.values())[ghost_obj_index]
ghost.show()
ghost.set_world_pose(*ui_state["object_ghost_pose"])
else:
for _, ghost in self._object_ghosts.items():
ghost.hide()
else:
self.get_world().pause()
self._clear_recorder()
| 8,277 | Python | 41.891191 | 147 | 0.627764 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/__init__.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from .analysis_extension import AnalysisExtension
from .playback import Playback | 224 | Python | 31.142853 | 79 | 0.794643 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/ui.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.ui import color as cl
from scipy.spatial.transform import Rotation
import omni.ui as ui
from omni.isaac.ui.ui_utils import add_separator
LABEL_WIDTH = 160
LABEL_WIDTH_LIGHT = 235
LABEL_HEIGHT = 18
HORIZONTAL_SPACING = 4
colors = [0xFF1515EA, 0xFF5FC054, 0xFFC5822A, 0xFFFF00FF, 0xFF00FFFF, 0xFFFFFF00, 0xFFFF77FF]
def joint_state_plot_builder(label="", data=[], num_joints=7, min=-1, max=1, tooltip=""):
"""Creates a stylized static XYZ plot
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
data (list(float), optional): Data to plot. Defaults to [].
min (int, optional): Minimum Y Value. Defaults to -1.
max (int, optional): Maximum Y Value. Defaults to "".
tooltip (str, optional): Tooltip to display over the Label.. Defaults to "".
Returns:
list(ui.Plot): list(x_plot, y_plot, z_plot)
"""
with ui.VStack(spacing=5):
with ui.HStack():
ui.Label(label, width=LABEL_WIDTH, alignment=ui.Alignment.LEFT_TOP, tooltip=tooltip)
plot_height = LABEL_HEIGHT * 5 + 13
plot_width = ui.Fraction(1)
with ui.ZStack():
ui.Rectangle(width=plot_width, height=plot_height)
plots = []
for i in range(num_joints):
plot = ui.Plot(
ui.Type.LINE,
min,
max,
*data[i],
value_stride=1,
width=plot_width,
height=plot_height,
style={"color": colors[i], "background_color": 0x0},
)
plots.append(plot)
def update_min(model):
for plot in plots:
plot.scale_min = model.as_float
def update_max(model):
for plot in plots:
plot.scale_max = model.as_float
ui.Spacer(width=5)
with ui.Frame(width=0):
with ui.VStack(spacing=5):
max_model = ui.FloatDrag(
name="Field", width=40, alignment=ui.Alignment.LEFT_BOTTOM, tooltip="Max"
).model
max_model.set_value(max)
min_model = ui.FloatDrag(
name="Field", width=40, alignment=ui.Alignment.LEFT_TOP, tooltip="Min"
).model
min_model.set_value(min)
min_model.add_value_changed_fn(update_min)
max_model.add_value_changed_fn(update_max)
ui.Spacer(width=20)
add_separator()
return plots | 2,886 | Python | 34.207317 | 97 | 0.52876 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/experiment.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.tasks.lifting import LiftingTask
from srl.teleop.assistance.tasks.subset_stacking import SubsetStackingTask
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.assistance.tasks.sorting import SortingTask
from srl.teleop.assistance.tasks.stacking import StackingTask
import numpy as np
from itertools import permutations
SLOT_NAMES = ["3D Mouse Demo", "Control Demo", "Reaching", "Reaching Assist", "Stacking A Warmup", "Stacking A", "Multi-Stacking A", "Stacking B Warmup", "Stacking B", "Multi-Stacking B", "Stacking C Warmup", "Stacking C", "Multi-Stacking C"]
PARTICIPANT_ID = 0
TASK_BY_INDEX = [0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]
CONDITION_BY_INDEX = [0, 0, 1, 0,0,0, 1,1,1, 2,2,2]
CONDITION_ORDERS = list(permutations([0,1,2]))
LATIN_SQUARE = [[0,1,2],
[1,2,0],
[2,0,1]]
def get_ordering(participant_id):
return CONDITION_ORDERS[participant_id % len(CONDITION_ORDERS)]
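# For example, get_ordering(0) == (0, 1, 2) and get_ordering(1) == (0, 2, 1);
# each participant is assigned one of the 6 permutations of the three conditions.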
def configure_for_condition_index(i, task_ui_models, participant_id):
task_i = TASK_BY_INDEX[i]
condition_i = CONDITION_BY_INDEX[i]
if i > 2:
# Counterbalance actual experimental tasks
condition_i = get_ordering(participant_id)[condition_i]
if task_i == 0:
task = LiftingTask(n_cuboids=1, rng=np.random.RandomState(0), max_duration=None)
elif task_i == 1:
task = ReachingTask()
elif task_i == 2:
task = ReachingTask(max_duration=None)
elif task_i == 3:
task = StackingTask(n_cuboids=2, rng=np.random.RandomState(participant_id + 1000 * condition_i), max_duration=None, repeat=False)
elif task_i == 4:
task = StackingTask(n_cuboids=2, rng=np.random.RandomState(participant_id + 1000 * (condition_i + 1)), max_duration=60 * 2, repeat=True)
elif task_i == 5:
task = SubsetStackingTask(rng=np.random.RandomState(LATIN_SQUARE[participant_id % 3][condition_i]))
elif task_i == 6:
task = SortingTask(rng=np.random.RandomState(LATIN_SQUARE[participant_id % 3][condition_i]))
else:
raise Exception("Unknown task index")
if condition_i == 0:
task_ui_models["Surrogates"].set_value(False)
task_ui_models["Suggest Grasps"].set_value(False)
task_ui_models["Suggest Placements"].set_value(False)
elif condition_i == 1:
task_ui_models["Surrogates"].set_value(False)
task_ui_models["Suggest Grasps"].set_value(True)
task_ui_models["Suggest Placements"].set_value(True)
elif condition_i == 2:
task_ui_models["Surrogates"].set_value(True)
task_ui_models["Suggest Grasps"].set_value(True)
task_ui_models["Suggest Placements"].set_value(True)
return task, condition_i
| 2,875 | Python | 43.246153 | 242 | 0.676522 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/scene.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.ui_scene import scene as sc
from omni.ui import color as cl
from typing import List
import numpy as np
from scipy.spatial.transform import Rotation
import omni.ui as ui
from srl.teleop.assistance.proposals import InvalidReason
from .proposals import GroupedPoseProposalTable, PlanePlaneProposalTable
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
import omni
import time
class ViewportScene():
def __init__(self, viewport_window: ui.Window, ext_id: str, use_scene_camera: bool=True) -> None:
self._scene_view = None
self._viewport_window = viewport_window
self._ext_id = ext_id
self.manipulator = None
self.use_scene_camera = use_scene_camera
with self._viewport_window.get_frame(ext_id):
if use_scene_camera:
# scene view (default camera-model)
self._scene_view = sc.SceneView()
# register the scene view to get projection and view updates
self._viewport_window.viewport_api.add_scene_view(self._scene_view)
else:
projection = [1e-1, 0, 0, 0]
projection += [0, 1e-1, 0, 0]
projection += [0, 0, 2e-2, 0]
projection += [0, 0, 1, 1]
view = sc.Matrix44.get_translation_matrix(8.5, -4.25, 0) * sc.Matrix44.get_rotation_matrix(-0.5,0.,0.)
self._scene_view = sc.SceneView(projection=projection, view=view)
def add_manipulator(self, manipulator_class: sc.Manipulator):
# add handlers into the scene view's scene
with self._scene_view.scene:
self.manipulator = manipulator_class()
def __del__(self):
self.destroy()
def destroy(self):
if self.manipulator:
self.manipulator.clear()
if self._scene_view:
# empty the scene view
self._scene_view.scene.clear()
# un-register the scene view
if self._viewport_window and self.use_scene_camera:
self._viewport_window.viewport_api.remove_scene_view(self._scene_view)
# remove references
self._viewport_window = None
self._scene_view = None
class AssistanceManipulator(sc.Manipulator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._plane_table = None
self._grasp_table = None
self._placement_table = None
self._grasp_distribution = None
self._plane_distribution = None
self.cfg_frames_show = True
self.cfg_frames_color = [1.0, 1.0, 1.0, 1.0]
self.cfg_frames_size = 4
self.cfg_names_show = True
self.cfg_names_color = [1.0, 1.0, 0.0, 1.0]
self.cfg_names_size = 20
self.cfg_axes_show = True
self.cfg_axes_length = 0.1
self.cfg_axes_thickness = 4
self.cfg_arrows_show = True
self.cfg_arrows_color = [0.0, 1.0, 1.0, 1.0]
self.cfg_arrows_thickness = 4
self.cm = cm.hot
ncolors = 256
color_array = cm.hot(np.linspace(0.,1., ncolors))
# change alpha values
color_array[:,-1] = np.linspace(0.05,0.7,ncolors)
# create a colormap object
self.cm = LinearSegmentedColormap.from_list(name='hot_alpha',colors=color_array)
def on_build(self):
if not self._plane_table:
return
grasps = self._grasp_table
planes = self._plane_table
if self._plane_distribution is not None:
positions = planes.get_centroids_world()[planes._valid == InvalidReason.VALID.value]
sc.Points(positions.tolist(), colors=[cl(*self.cfg_frames_color)] * len(positions), sizes=[self.cfg_frames_size] * len(positions))
if self._grasp_distribution is not None:
start = time.time()
# This'll only exist if we're actively inferring
valid_mask = grasps._valid == InvalidReason.VALID.value
positions = grasps._poses_world[:, :3, 3][valid_mask]
if len(positions) == 0:
return
score_probs = np.exp(self._grasp_distribution[valid_mask])
score_probs /= np.max(np.abs(score_probs),axis=0)
colors = self.cm(score_probs)
#sc.Points(positions.tolist(), colors=[cl(*color) for color in colors], sizes=[self.cfg_frames_size] * len(positions))
for grasp, color in zip(grasps._poses_world[valid_mask], colors):
with sc.Transform(transform=sc.Matrix44(*grasp.T.flatten())):
sc.Line([0, 0, -0.04], [0, 0, -0.09], color=cl(*color), thickness=3)
sc.Line([0, -.04, -0.04], [0, 0.04, -0.04], color=cl(*color), thickness=3)
sc.Line([0, 0.04, -0.04], [0, 0.04, 0], color=cl(*color), thickness=3)
sc.Line([0, -0.04, -0.04], [0, -0.04, 0], color=cl(*color), thickness=3)
end = time.time()
#print(end - start)
return
# draw names and axes
T = np.eye(4)
for name, position, quaternion in zip(names, positions, quaternions):
# names
T[:3,3] = position
if self.cfg_names_show:
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
sc.Label(name, alignment=ui.Alignment.CENTER_TOP, color=cl(*self.cfg_names_color), size=self.cfg_names_size)
# axes
if self.cfg_axes_show:
T[:3,:3] = Rotation.from_quat(quaternion).as_matrix()
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
k = self.cfg_axes_length
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff0000"), thickness=self.cfg_axes_thickness)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff00"), thickness=self.cfg_axes_thickness)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff"), thickness=self.cfg_axes_thickness)
def update(self, grasp_table: GroupedPoseProposalTable, placement_table: GroupedPoseProposalTable, plane_table: PlanePlaneProposalTable):
self._grasp_table = grasp_table
self._placement_table = placement_table
self._plane_table = plane_table
# Triggers rebuilding.
self.invalidate()
def set_grasp_distribution(self, distribution):
self._grasp_distribution = distribution
def reset(self):
self._grasp_table = self._placement_table = self._plane_table = None
self.invalidate()
| 6,690 | Python | 38.591716 | 142 | 0.589686 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/suggestions.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_type_name, delete_prim, get_all_matching_child_prims
import math
from srl.teleop.assistance.shapenet_import import ShapeNetPrim
from srl.teleop.assistance.transforms import normalized, T2pq, pq2T
import scipy.spatial.transform
def get_cube_symmetry_rotations():
octahedral_group = scipy.spatial.transform.Rotation.create_group('O')
return octahedral_group.as_matrix()
def get_cylinder_symmetry_rotations(n_rotational_steps=20):
results = np.empty((1 + n_rotational_steps, 3, 3))
# X flip
results[0] = np.diag((1,-1,-1))
theta = np.linspace(0, 2 * math.pi, n_rotational_steps)
results[1:, 0,0] = np.cos(theta)
results[1:, 0,1] = -np.sin(theta)
results[1:, 1,0] = np.sin(theta)
results[1:, 1,1] = np.cos(theta)
results[1:, 2,2] = 1
return results
CUBE_SYMMETRY_Rs = get_cube_symmetry_rotations()
CYLINDER_SYMMETRY_Rs = get_cylinder_symmetry_rotations()
def make_grasp_T(t, ay):
az = normalized(-t)
ax = np.cross(ay, az)
T = np.eye(4)
T[:3, 0] = ax
T[:3, 1] = ay
T[:3, 2] = az
T[:3, 3] = t
return T
def make_cuboid_grasp_Ts(block_pick_height):
R = np.eye(3)
t_i = 0
Ts = np.empty((24, 4, 4))
for i in range(3):
t = block_pick_height * R[:, i]
for j in range(2):
ay = R[:, (i + j + 1) % 3]
for sign_1 in [1, -1]:
for sign_2 in [1, -1]:
Ts[t_i] = make_grasp_T(sign_1 * t, sign_2 * ay)
t_i += 1
return Ts
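# Usage sketch (comments only): 24 candidate grasps for a 6 cm cube, parameterized
# by its half-extents (3 axes x 2 lateral axes x 2 x 2 signs).
#
#     Ts = make_cuboid_grasp_Ts(np.array((0.03, 0.03, 0.03)))
#     Ts.shape   # -> (24, 4, 4)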
def make_cylinder_grasp_Ts(r, h):
    # The cylinder is centered at (0,0,0) with its axis pointing up in +Z.
    # Some of these are redundant, and the ones that aren't don't lend themselves to stable placement...
as_cuboid_grasps = make_cuboid_grasp_Ts(np.array((r,r,h/2)))
# Point gripper z toward the grasp point, x toward negative world Z
rotational_steps = 20
side_candidates = np.empty((rotational_steps * 2, 4, 4))
for k in range(rotational_steps):
x = (2 * math.pi / rotational_steps) * k
point = np.array((r * np.cos(x), r * np.sin(x), 0))
ay = np.array((-np.sin(x), np.cos(x), 0))
side_candidates[k] = make_grasp_T(point, ay)
side_candidates[k + rotational_steps] = make_grasp_T(point, -ay)
top_candidates = np.empty((rotational_steps * 2, 4, 4))
for k in range(rotational_steps):
x = (2 * math.pi / rotational_steps) * k
point = np.array((0, 0, h / 2))
ay = np.array((np.cos(x), np.sin(x), 0))
top_candidates[k] = make_grasp_T(point, ay)
top_candidates[k + rotational_steps] = make_grasp_T(-point, ay)
return np.vstack((side_candidates, top_candidates))
def make_cone_grasp_Ts(r, h):
return []
def make_cuboid_cuboid_placement_Ts(to_place_size, to_align_with_size):
# Strategy: centroids aligned. Compute all possible pairs of orientations. Put to_place up against
# the side of to_align_with along the x axis
# See https://en.wikipedia.org/wiki/Octahedral_symmetry
Ts = []
for align_R in CUBE_SYMMETRY_Rs:
# We're transforming the sizes to determine the depth of the cube
# in the X direction. Sign doesn't matter.
v_align = np.abs(align_R.dot(to_align_with_size))
for place_R in CUBE_SYMMETRY_Rs:
v_place = np.abs(place_R.dot(to_place_size))
            # We have the two cuboids in an arbitrary orientation. Now we stack them next to each other in X
T = np.identity(4)
# X displacement, with a little epsilon so the collision checker stays clear
T[0,3] = 0.001 + (v_place[0] + v_align[0]) / 2.0
            # Orientation wrt to_align_with. Subtract out the anchor frame to get just the relative orientation
inv_align_R_4 = np.identity(4)
inv_align_R_4[:3,:3] = align_R.T
# How we should rotate the placement...
T[:3,:3] = place_R
# but in the alignment frame
T = inv_align_R_4.dot(T)
Ts.append(T)
return np.array(Ts)
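# Usage sketch (comments only): relative poses that stack one 4 cm cube flush against
# another; 24 x 24 orientation pairs gives 576 candidates.
#
#     size = np.array((0.04, 0.04, 0.04))
#     Ts = make_cuboid_cuboid_placement_Ts(size, size)
#     len(Ts)   # -> 576; each T offsets the placed cube by the 1 mm epsilon plus the
#               #    two half-depths, expressed in the anchor cube's frame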
def make_cylinder_cylinder_placement_Ts(to_place_h, anchor_h):
# Placements only for planar faces (+Z, -Z)
Ts = []
for align_R in CYLINDER_SYMMETRY_Rs:
for place_R in CYLINDER_SYMMETRY_Rs:
T = np.identity(4)
# Z displacement
T[2,3] = 0.001 + (to_place_h + anchor_h) / 2.0
            # Orientation wrt to_align_with. Subtract out the anchor frame to get just the relative orientation
inv_align_R_4 = np.identity(4)
inv_align_R_4[:3,:3] = align_R.T
# How we should rotate the placement...
T[:3,:3] = place_R
# but in the alignment frame
T = inv_align_R_4.dot(T)
Ts.append(T)
return np.array(Ts)
def check_grasp_orientation_similarity(
world_grasp_T,
axis_x_filter=None,
axis_x_filter_thresh=0.1,
axis_y_filter=None,
axis_y_filter_thresh=0.1,
axis_z_filter=None,
axis_z_filter_thresh=0.1,
):
to_use_i = []
filters = np.zeros((3,3))
for i, filter in enumerate((axis_x_filter, axis_y_filter, axis_z_filter)):
if filter is None:
continue
to_use_i.append(i)
filters[i,:] = filter
thresh = np.array((axis_x_filter_thresh, axis_y_filter_thresh, axis_z_filter_thresh))
axes_to_check = world_grasp_T[:, :3, to_use_i]
# Get dot products between the axes of the grasps and the filter directions. Batch over the leading
# indices.
scores = 1.0 - np.einsum('...ij,...ji->...i', filters[to_use_i,:], axes_to_check)
# count num thresholds we are under,
threshes_satisfied = (scores < thresh[to_use_i,]).sum(1)
# Should be under all of them
return threshes_satisfied == len(to_use_i)
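# Usage sketch (comments only; `world_grasp_Ts` is a hypothetical (N, 4, 4) array of
# grasp poses in the world frame): keep only roughly top-down grasps, i.e. those whose
# tool z-axis is within ~26 degrees of world -Z (1 - cos(26 deg) ~= 0.1).
#
#     mask = check_grasp_orientation_similarity(
#         world_grasp_Ts, axis_z_filter=np.array((0., 0., -1.)), axis_z_filter_thresh=0.1)
#     top_down_Ts = world_grasp_Ts[mask]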
def generate_candidate_grasps(obj):
prim_type = get_prim_type_name(obj.prim_path)
as_prim = obj.prim
to_world_tf = pq2T(*obj.get_world_pose())
if isinstance(obj, ShapeNetPrim):
#return []
return obj.grasp_annotations
elif prim_type == "Cube":
size = obj.get_world_scale()
block_grasp_Ts = make_cuboid_grasp_Ts(size / 2 - .015)
#res = get_world_block_grasp_Ts(to_world_tf, block_grasp_Ts, axis_z_filter=np.array((0.,0.,-1.)))
return block_grasp_Ts
"""for T in res:
p,q = T2pq(T)
viz_axis(viz_prefix, p, q)"""
elif prim_type == "Cylinder":
height = obj.get_height()
radius = obj.get_radius()
return make_cylinder_grasp_Ts(radius - 0.01, height - 0.01)
elif prim_type == "Mesh":
mesh = dict()
mesh["points"] = mesh.GetPointsAttr().Get()
mesh["normals"] = mesh.GetNormalsAttr().Get()
mesh["vertex_counts"] = mesh.GetFaceVertexCountsAttr().Get()
mesh["vertex_indices"] = mesh.GetFaceVertexIndicesAttr().Get()
else:
# Ignore other objects for now
pass
return np.empty((0,4,4))
def generate_candidate_placements(to_place, to_align_with):
to_place_type = get_prim_type_name(to_place.prim_path)
to_place_prim = to_place.prim
align_T = pq2T(*to_align_with.get_world_pose())
place_T = pq2T(*to_place.get_world_pose())
to_place_type = get_prim_type_name(to_place.prim_path)
to_align_with_type = get_prim_type_name(to_align_with.prim_path)
if to_place_type == "Cube":
to_place_size = to_place.get_world_scale()
if to_align_with_type == "Cube":
to_align_with_size = to_align_with.get_world_scale()
return make_cuboid_cuboid_placement_Ts(to_place_size, to_align_with_size)
elif to_place_type == "Cylinder":
if to_align_with_type == "Cylinder":
return make_cylinder_cylinder_placement_Ts(to_place.get_height(), to_align_with.get_height())
elif to_align_with_type == "Cube":
pass
elif to_place_type == "Mesh":
pass
return np.empty((0,4,4))
| 8,138 | Python | 35.173333 | 121 | 0.603219 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ghost_franka.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Optional, List
import numpy as np
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema, UsdShade, Sdf
import omni
from omni.isaac.core.materials.visual_material import VisualMaterial
from srl.teleop.assistance.camera_franka import CameraFranka
import os
import srl.teleop.assistance
def load_ghost_material(to_path="/Looks/GhostVolumetric"):
if not is_prim_path_valid(to_path):
success = omni.kit.commands.execute(
"CreateMdlMaterialPrim",
mtl_url=os.path.join(srl.teleop.assistance.DATA_DIR, "GhostVolumetric.mdl"),
mtl_name="voltest_02",
mtl_path=Sdf.Path(to_path),
)
shader = UsdShade.Shader(get_prim_at_path(f"{to_path}/Shader"))
material = UsdShade.Material(get_prim_at_path(to_path))
shader.CreateInput("absorption", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(0.8, 0.8, 0.8))
shader.CreateInput("scattering", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(0.5, 0.5, 0.5))
shader.CreateInput("transmission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.1, 1.0, 0.3)
)
shader.CreateInput("emission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.1, 1.0, 0.3)
)
shader.CreateInput("distance_scale", Sdf.ValueTypeNames.Float).Set(1.0)
shader.CreateInput("emissive_scale", Sdf.ValueTypeNames.Float).Set(300.0)
shader.CreateInput("transmission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.3, 1.0, 0.3)
)
else:
shader = UsdShade.Shader(get_prim_at_path(f"{to_path}/Shader"))
material = UsdShade.Material(get_prim_at_path(to_path))
material = VisualMaterial(
name="GhostVolumetric",
prim_path=to_path,
prim=get_prim_at_path(to_path),
shaders_list=[shader],
material=material,
)
material_inputs = {}
for input in material.shaders_list[0].GetInputs():
material_inputs[input.GetFullName()] = input
return material, material_inputs
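# Usage sketch (comments only; `some_prim` is a hypothetical object exposing
# apply_visual_material, e.g. an XFormPrim): reuse the shared ghost material and
# drive its shader inputs directly.
#
#     material, inputs = load_ghost_material("/Looks/GhostVolumetric")
#     inputs["inputs:emissive_scale"].Set(100.0)
#     some_prim.apply_visual_material(material)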
class GhostFranka(CameraFranka):
"""[summary]
Args:
prim_path (str): [description]
name (str, optional): [description]. Defaults to "franka_robot".
usd_path (Optional[str], optional): [description]. Defaults to None.
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
end_effector_prim_name (Optional[str], optional): [description]. Defaults to None.
gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None.
gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None.
gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "franka_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
material_path="/Looks/GhostVolumetric"
) -> None:
super().__init__(prim_path, name, usd_path, position, orientation,end_effector_prim_name, gripper_dof_names, gripper_open_position, gripper_closed_position, collision_sensors=False, camera_sensor=False)
self.material, self.material_inputs = load_ghost_material(material_path)
self.material_inputs["inputs:transmission_color"].Set((1.5, 1.5, 1.5))
self.material_inputs["inputs:emission_color"].Set((1.25, 1.25, 1.25))
self.material_inputs["inputs:emissive_scale"].Set(300.)
self._imageable = UsdGeom.Imageable(self.prim)
self.apply_visual_material(self.material)
self.disable_collisions(remove=True)
self.hide()
self._current_color = None
self._current_opacity = None
        # Populate simplified meshes under the right links of the robot
if not is_prim_path_valid(prim_path + "/panda_hand/viz"):
self.viz_palm = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_hand_viz.usd"), prim_path=prim_path + "/panda_hand/viz")
self.viz_left_finger = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_leftfinger_viz.usd"), prim_path=prim_path + "/panda_leftfinger/viz")
self.viz_right_finger = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_rightfinger_viz.usd"), prim_path=prim_path + "/panda_rightfinger/viz")
else:
self.viz_palm = get_prim_at_path(prim_path + "/panda_hand/viz")
self.viz_left_finger = get_prim_at_path(prim_path + "/panda_leftfinger/viz")
self.viz_right_finger = get_prim_at_path(prim_path + "/panda_rightfinger/viz")
for p in [self.viz_left_finger, self.viz_right_finger, self.viz_palm]:
viz_mesh = get_prim_at_path(f"{p.GetPath()}/mesh")
def disable_collisions(self, remove=False):
for p in Usd.PrimRange(self.prim):
if p.HasAPI(UsdPhysics.CollisionAPI):
collision_api = UsdPhysics.CollisionAPI(p)
collision_api.GetCollisionEnabledAttr().Set(False)
if remove:
p.RemoveAPI(UsdPhysics.CollisionAPI)
@property
def visible(self):
return self._imageable.GetVisibilityAttr().Get() != "invisible"
def hide(self):
self._imageable.MakeInvisible()
def show(self, gripper_only=False):
if not gripper_only:
self._imageable.MakeVisible()
else:
for p in [self.viz_left_finger, self.viz_right_finger, self.viz_palm]:
UsdGeom.Imageable(p).MakeVisible()
def set_color(self, color, opacity=1.0):
if color == self._current_color and opacity == self._current_opacity:
# idempotent
return
transmission = 1.0 - opacity
def clip(value):
# Inputs seem to behave differently for 0 and close to 0 for some reason...
return Gf.Vec3f(*np.clip(value, 0.0001, 1.0))
# The colors you don't absorb will shine through.
# The color you emit shows in the absence of other colors
if color == "red":
self.material_inputs["inputs:absorption"].Set((transmission, 0, 0))
elif color == "yellow":
self.material_inputs["inputs:absorption"].Set(clip((.0, .0, transmission)))
elif color == "green":
self.material_inputs["inputs:absorption"].Set(clip((transmission, .0, transmission)))
elif color == "white":
self.material_inputs["inputs:absorption"].Set(clip((opacity, opacity, opacity)))
else:
return
self._current_color = color
self._current_opacity = opacity
| 7,588 | Python | 45.558282 | 214 | 0.635213 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/check_collision.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Union
from srl.teleop.assistance.transforms import T2pq_array
import warp as wp
import warp.render
import numpy as np
import time
from pxr import Usd, UsdGeom, UsdSkel, Gf
import trimesh
import quaternion
import carb
DEVICE = wp.get_preferred_device()
#DEVICE = 'cpu'
@wp.func
def cw_min(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.min(a[0], b[0]),
wp.min(a[1], b[1]),
wp.min(a[2], b[2]))
@wp.func
def cw_max(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.max(a[0], b[0]),
wp.max(a[1], b[1]),
wp.max(a[2], b[2]))
@wp.kernel
def intersect(query_mesh: wp.uint64,
query_mesh_scale: wp.float32,
query_xforms: wp.array(dtype=wp.transform),
fixed_mesh: wp.uint64,
result: wp.array(dtype=int, ndim=2)):
batch, face = wp.tid()
    # query_mesh is the mesh being queried; we launch one thread
    # for each of its faces and test it against the fixed mesh's BVH
# transforms from query -> fixed space
xform = query_xforms[batch]
# load query triangles points and transform to mesh_1's space
# Local scale is useful for checking whether the interior (roughly) of the object would overlap.
v0 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 1.0, 0.0) * query_mesh_scale)
v1 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 0.0, 1.0) * query_mesh_scale)
v2 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 0.0, 0.0) * query_mesh_scale)
# compute bounds of the query triangle
lower = cw_min(cw_min(v0, v1), v2)
upper = cw_max(cw_max(v0, v1), v2)
query = wp.mesh_query_aabb(fixed_mesh, lower, upper)
result[batch][face] = 0
for f in query:
u0 = wp.mesh_eval_position(fixed_mesh, f, 1.0, 0.0)
u1 = wp.mesh_eval_position(fixed_mesh, f, 0.0, 1.0)
u2 = wp.mesh_eval_position(fixed_mesh, f, 0.0, 0.0)
# test for triangle intersection
i = wp.intersect_tri_tri(v0, v1, v2,
u0, u1, u2)
if i > 0:
result[batch][face] = 1
return
# use if you want to count all intersections
#wp.atomic_add(result, batch, i)
@wp.kernel
def grasp_contacts(
mesh_1: wp.uint64,
left_finger_pad_point: wp.vec3,
right_finger_pad_point: wp.vec3,
palm_point: wp.vec3,
xforms: wp.array(dtype=wp.transform),
result: wp.array(dtype=float, ndim=2),
points: wp.array(dtype=wp.vec3, ndim=2)):
batch = wp.tid()
    # One thread per batch element; each one casts the finger pad and palm rays
    # of a candidate grasp against mesh_1's BVH
    # transforms take gripper coordinates into mesh_1's space
xform = xforms[batch]
# load query triangles points and transform to mesh_1's space
left_ray_origin = wp.transform_point(xform, left_finger_pad_point)
right_ray_origin = wp.transform_point(xform, right_finger_pad_point)
palm_ray_origin = wp.transform_point(xform, palm_point)
left_ray_dir = wp.transform_vector(xform, wp.vec3(0., -1., 0.))
right_ray_dir = wp.transform_vector(xform, wp.vec3(0., 1., 0.))
palm_ray_dir = wp.transform_vector(xform, wp.vec3(0., 0., 1.))
left_ray_t = float(0.)
left_ray_sign = float(0.)
u = float(0.)
v = float(0.0)
normal = wp.vec3()
face = int(0)
left_hit = wp.mesh_query_ray(mesh_1, left_ray_origin, left_ray_dir, .1, left_ray_t, u, v, left_ray_sign, normal, face)
right_ray_t = float(0.)
right_ray_sign = float(0.)
right_hit = wp.mesh_query_ray(mesh_1, right_ray_origin, right_ray_dir, .1, right_ray_t, u, v, right_ray_sign, normal, face)
palm_ray_t = float(100.)
palm_ray_sign = float(0.)
palm_hit = wp.mesh_query_ray(mesh_1, palm_ray_origin, palm_ray_dir, .04, palm_ray_t, u, v, palm_ray_sign, normal, face)
#points[batch][0] = left_ray_origin + left_ray_t * left_ray_dir
#points[batch][1] = right_ray_origin + right_ray_t * right_ray_dir
#points[batch][2] = palm_ray_origin + palm_ray_t * palm_ray_dir
result[batch][2] = palm_ray_t
    if not (left_hit and right_hit):
# Usually, _both_ rays will hit. If only one doesn't, report both as zero anyways
# to let the outside code assume as much.
result[batch][0] = 0.
result[batch][1] = 0.
else:
result[batch][0] = left_ray_t
result[batch][1] = right_ray_t
class WarpGeometeryScene:
def __init__(self):
self._warp_mesh_cache = {}
self._trimesh_cache = {}
def query(self, Ts, from_mesh, to_mesh, render=False, query_name=None, from_mesh_scale=1.0):
# Transforms take "from-mesh" coordinates into "to-mesh" coordinates
from_mesh = self._load_and_cache_geometry(from_mesh, "warp")
to_mesh = self._load_and_cache_geometry(to_mesh, "warp")
pq_array = T2pq_array(Ts)
xforms = wp.array(pq_array[:, (0,1,2,4,5,6,3)], dtype=wp.transform, device=DEVICE)
with wp.ScopedTimer("intersect", active=False):
carb.profiler.begin(1, f"collision check (N={len(Ts)})", active=True)
query_num_faces = len(from_mesh.indices) // 3
shape = (len(xforms),query_num_faces)
array_results = wp.empty(shape=shape, dtype=int, device=DEVICE)
wp.launch(kernel=intersect, dim=shape, inputs=[from_mesh.id, from_mesh_scale, xforms, to_mesh.id, array_results], device=DEVICE)
wp.synchronize()
# Get num contacts per transform by summing over all faces
results = array_results.numpy()
if len(Ts) == 0:
# warp 0.5.1
results = np.empty(shape)
results = results.sum(1)
carb.profiler.end(1, True)
if render:
if query_name is None:
query_name = str(self._get_mesh_name(to_mesh)).split("/")[-1]
self.viz_query(results, xforms, from_mesh, to_mesh, query_name)
return results
def query_grasp_contacts(self, Ts, from_mesh, to_mesh, render=False, query_name=None):
# Transforms take "from-mesh" coordinates into "to-mesh" coordinates
carb.profiler.begin(1, "Prep meshes", active=True)
from_mesh = self._load_and_cache_geometry(from_mesh, "warp")
to_mesh = self._load_and_cache_geometry(to_mesh, "warp")
carb.profiler.end(1, True)
carb.profiler.begin(1, "Prep transforms", active=True)
carb.profiler.begin(1, "T2pq", active=True)
pq_array = T2pq_array(Ts)
carb.profiler.end(1, True)
xforms = wp.array(pq_array[:, (0,1,2,4,5,6,3)], dtype=wp.transform, device=DEVICE)
carb.profiler.end(1, True)
with wp.ScopedTimer("intersect_and_contact", active=False):
carb.profiler.begin(1, f"collision and contact measure (N={len(Ts)})", active=True)
query_num_faces = len(from_mesh.indices) // 3
shape = (len(xforms),query_num_faces)
contacts_shape = (len(xforms), 3)
contact_results = wp.empty(shape=contacts_shape, dtype=float, device=DEVICE)
points = wp.empty(shape=(len(xforms), 3), dtype=wp.vec3, device=DEVICE)
intersect_results = wp.empty(shape=shape, dtype=int, device=DEVICE)
wp.launch(kernel=intersect, dim=shape, inputs=[from_mesh.id, 1.0, xforms, to_mesh.id, intersect_results], device=DEVICE)
wp.launch(kernel=grasp_contacts, dim=(len(xforms),), inputs=[to_mesh.id, (0.0, 0.04, 0.005), (0.0, -0.04, 0.005), (0.0,0.0,-0.025), xforms, contact_results, points], device=DEVICE)
wp.synchronize()
# Get num contacts per transform by summing over all faces
intersections = intersect_results.numpy()
contacts = contact_results.numpy()
if len(Ts) == 0:
# warp 0.5.1
intersections = np.empty(shape)
contacts = np.empty(shape)
intersections = intersections.sum(1)
carb.profiler.end(1, True)
if render:
if query_name is None:
query_name = str(self._get_mesh_name(to_mesh)).split("/")[-1]
self.viz_query(intersections, xforms, from_mesh, to_mesh, query_name, contacts=points.numpy())
return intersections, contacts
def viz_query(self, collisions, xforms, from_mesh, to_mesh, target_name, contacts=None):
if len(xforms) == 0:
return
renderer = wp.render.UsdRenderer(f"/tmp/collision_viz/{target_name}-{time.time()}.usd", upaxis="z")
#renderer.render_ground()
with wp.ScopedTimer("render", active=True):
renderer.begin_frame(0.0)
to_mesh_points = to_mesh.points.numpy()
to_mesh_indices = to_mesh.indices.numpy()
from_mesh_points = from_mesh.points.numpy()
from_mesh_indices = from_mesh.indices.numpy()
to_extents = np.max(to_mesh_points, axis=0) - np.min(to_mesh_points, axis=0)
spacing_x = to_extents[0] + .3
spacing_y = to_extents[1] + .3
row_size = int(np.sqrt(len(xforms)))
for i, xform in enumerate(xforms.numpy()):
x_offset = (i % row_size) * spacing_x
y_offset = (i // row_size) * spacing_y
renderer.render_mesh(f"to_{target_name}_{i}", points=to_mesh_points, indices=to_mesh_indices, pos=wp.vec3(x_offset, y_offset, 0))
p, q = xform[:3], xform[3:]
renderer.render_mesh(f"frommesh_{i}", points=from_mesh_points, indices=from_mesh_indices, pos=wp.vec3(p[0] + x_offset, p[1] + y_offset, p[2]), rot=q)
if contacts is not None:
for j, contact in enumerate(contacts[i]):
renderer.render_sphere(f"contact_{i}_{j}", pos=wp.vec3(contact[0] + x_offset, contact[1] + y_offset, contact[2]), rot=q, radius=.01)
# if pair intersects then draw a small box above the pair
if collisions[i] > 0:
renderer.render_box(f"result_{i}", pos=wp.vec3(x_offset, y_offset, .15), rot=wp.quat_identity(), extents=(0.01, 0.01, 0.02))
renderer.end_frame()
renderer.save()
def get_support_surfaces(self, geom):
as_trimesh = self._load_and_cache_geometry(geom, "trimesh")
facet_centroids = np.empty((len(as_trimesh.facets), 3))
for i, (facet, total_area) in enumerate(zip(as_trimesh.facets, as_trimesh.facets_area)):
weighted_centroid = 0
for tri_index in facet:
weighted_centroid += as_trimesh.area_faces[tri_index] * as_trimesh.triangles_center[tri_index]
facet_centroids[i] = weighted_centroid / total_area
if len(facet_centroids) == 0:
return facet_centroids, np.empty((0,3)), as_trimesh.facets, as_trimesh.facets_area, as_trimesh.facets_boundary
return facet_centroids, as_trimesh.facets_normal, as_trimesh.facets, as_trimesh.facets_area, as_trimesh.facets_boundary
def combine_geometries_to_mesh(self, geoms, xforms) -> wp.Mesh:
tri = self.combine_geometries_to_trimesh(geoms, xforms)
mesh = warp_from_trimesh(tri)
return mesh
def combine_geometries_to_trimesh(self, geoms, xforms) -> trimesh.Trimesh:
assert len(geoms) == len(xforms)
trimeshes = [self._load_and_cache_geometry(geom, target="trimesh").copy(include_cache=True).apply_transform(xform) for geom, xform in zip(geoms, xforms)]
tri = trimesh.util.concatenate(trimeshes)
return tri
def _load_and_cache_geometry(self, obj, target='warp') -> Union[wp.Mesh, trimesh.Trimesh]:
if target == 'warp':
if isinstance(obj, wp.Mesh):
return obj
cached = self._warp_mesh_cache.get(obj.GetPath(), None)
if cached is not None:
return cached
else:
# Assume that the object is a usd geom
tri = self._load_and_cache_geometry(obj, target='trimesh')
processed = warp_from_trimesh(tri)
self._warp_mesh_cache[obj.GetPath()] = processed
return processed
elif target == "trimesh":
if isinstance(obj, trimesh.Trimesh):
return obj
cached = self._trimesh_cache.get(obj.GetPath(), None)
if cached is not None:
return cached
else:
# Assume that the object is a usd geom
tri = geom_to_trimesh(obj)
self._trimesh_cache[obj.GetPath()] = tri
return tri
else:
assert(False)
def _get_mesh_name(self, mesh):
return list(self._warp_mesh_cache.keys())[list(self._warp_mesh_cache.values()).index(mesh)]
def warp_from_trimesh(trimesh: trimesh.Trimesh):
mesh = wp.Mesh(
points=wp.array(trimesh.vertices, dtype=wp.vec3, device=DEVICE),
indices=wp.array(trimesh.faces.flatten(), dtype=int, device=DEVICE))
return mesh
def get_support_surfaces_trimesh(mesh: trimesh.Trimesh, for_normal=None, threshold=None):
# No caching at the moment so don't put this in any loops
facet_centroids = []
    if for_normal is not None:
        scores = mesh.facets_normal.dot(for_normal)
        support_mask = scores < threshold
    else:
        support_mask = np.ones(len(mesh.facets), dtype=bool)
facets = []
for facet, total_area, is_support in zip(mesh.facets, mesh.facets_area, support_mask):
if not is_support:
continue
facets.append(facet)
weighted_centroid = 0
for tri_index in facet:
weighted_centroid += mesh.area_faces[tri_index] * mesh.triangles_center[tri_index]
facet_centroids.append(weighted_centroid / total_area)
return facets, mesh.facets_area[support_mask], np.array(facet_centroids), mesh.facets_normal[support_mask]
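# Usage sketch (comments only; `mesh` is a hypothetical trimesh.Trimesh): select the
# downward-facing facets an object could rest on.
#
#     up = np.array((0., 0., 1.))
#     facets, areas, centroids, normals = get_support_surfaces_trimesh(
#         mesh, for_normal=up, threshold=-0.99)
#     # scores = normal . up, so threshold -0.99 keeps facets pointing nearly straight down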
def geom_to_trimesh(geom):
if isinstance(geom, UsdGeom.Mesh):
trimesh = load_trimesh_from_usdgeom(geom)
elif isinstance(geom, UsdGeom.Cube):
trimesh = get_trimesh_for_cube(geom)
elif isinstance(geom, UsdGeom.Cylinder):
trimesh = get_trimesh_for_cylinder(geom)
elif isinstance(geom, UsdGeom.Cone):
trimesh = get_trimesh_for_cone(geom)
elif isinstance(geom, UsdGeom.Sphere):
trimesh = get_trimesh_for_sphere(geom)
else:
raise Exception("No mesh representation for obj" + str(geom))
return trimesh
def get_trimesh_for_cube(cube: UsdGeom.Cube):
transform = cube.GetLocalTransformation()
translate, rotation, scale = UsdSkel.DecomposeTransform(transform)
transform = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], scale[2], 1))
size = cube.GetSizeAttr().Get()
baked_trimesh = trimesh.creation.box(extents=(size, size, size))
baked_trimesh.apply_transform(transform)
return baked_trimesh
def get_trimesh_for_cylinder(cylinder: UsdGeom.Cylinder):
transform = cylinder.GetLocalTransformation()
translate, rotation, scale = UsdSkel.DecomposeTransform(transform)
transform = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], scale[2], 1))
baked_trimesh = trimesh.creation.cylinder(radius=cylinder.GetRadiusAttr().Get(), height=cylinder.GetHeightAttr().Get())
baked_trimesh.apply_transform(transform)
return baked_trimesh
def get_trimesh_for_cone(cone: UsdGeom.Cone):
baked_trimesh = trimesh.creation.cone(radius=cone.GetRadiusAttr().Get(), height=cone.GetHeightAttr().Get())
baked_trimesh.apply_transform(trimesh.transformations.translation_matrix([0,0,-cone.GetHeightAttr().Get() / 2]))
return baked_trimesh
def get_trimesh_for_sphere(sphere: UsdGeom.Sphere):
    transform = sphere.GetLocalTransformation()
    baked_trimesh = trimesh.creation.icosphere(radius=sphere.GetRadiusAttr().Get())
baked_trimesh.apply_transform(transform)
return baked_trimesh
def load_trimesh_from_usdgeom(mesh: UsdGeom.Mesh):
transform = mesh.GetLocalTransformation()
baked_trimesh = trimesh.Trimesh(vertices=mesh.GetPointsAttr().Get(), faces=np.array(mesh.GetFaceVertexIndicesAttr().Get()).reshape(-1,3))
baked_trimesh.apply_transform(transform)
return baked_trimesh
| 16,494 | Python | 42.637566 | 192 | 0.619619 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/motion_commander.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
from typing import Optional, Union
import numpy as np
import omni.isaac.cortex.math_util as math_util
import omni.isaac.motion_generation.interface_config_loader as icl
import quaternion
from omni.isaac.core.objects import VisualSphere
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from omni.isaac.core.utils.math import normalized
from omni.isaac.core.utils.prims import (
get_prim_at_path,
is_prim_path_valid,
)
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.math_util import to_stage_units, matrix_to_quat, unpack_T, invert_T
from omni.isaac.motion_generation import MotionPolicyController, ArticulationMotionPolicy, RmpFlowSmoothed, PathPlannerVisualizer
from omni.isaac.motion_generation.lula import RRT
from pxr import Gf, UsdGeom, Usd, Sdf
import srl.teleop.assistance
from srl.teleop.assistance.transforms import integrate_twist
def build_motion_commander(physics_dt, robot, obstacles):
""" Build the motion commander object.
Creates an RmpFlowSmoothed motion policy to govern the motion generation using the
RMPflowCortex motion policy config. This policy is a wrapped version of RmpFlowSmoothed which
measures jerk and both dynamically adjusts the system's speed if a large jerk is predicted,
and truncates small/medium sized jerks.
Also, adds the target prim, adds end-effector prim to the hand prim returned by
get_robot_hand_prim_path(robot), and adds the provided obstacles to the underlying policy.
Params:
- physics_dt: The time delta used by physics in seconds. Default: 1./60 seconds.
- robot: The robot object. Supported robots are currently Franka and UR10.
- obstacles: A dictionary of obstacles to be added to the underlying motion policy.
"""
"""motion_policy = RmpFlowSmoothed(
**icl.load_supported_motion_policy_config("Franka", "RMPflow", policy_config_dir=get_extension_path_from_name("srl.teleop") + "/data/rmpflow")
)"""
motion_policy = RmpFlowSmoothed(
**icl.load_supported_motion_policy_config("Franka", "RMPflowCortex")
)
# Setup the robot commander and replace its (xform) target prim with a visible version.
motion_policy_controller = MotionPolicyController(
name="rmpflow_controller",
articulation_motion_policy=ArticulationMotionPolicy(
robot_articulation=robot, motion_policy=motion_policy
),
)
# Lula config files for supported robots are stored in the motion_generation extension under
# "/path_planner_configs" and "motion_policy_configs"
mg_extension_path = get_extension_path_from_name("omni.isaac.motion_generation")
rrt_config_dir = os.path.join(mg_extension_path, "path_planner_configs")
rmp_config_dir = os.path.join(mg_extension_path, "motion_policy_configs")
# Initialize an RRT object
rrt = RRT(
robot_description_path = rmp_config_dir + "/franka/rmpflow/robot_descriptor.yaml",
urdf_path = rmp_config_dir + "/franka/lula_franka_gen.urdf",
rrt_config_path = rrt_config_dir + "/franka/rrt/franka_planner_config.yaml",
end_effector_frame_name = "right_gripper"
)
target_prim = make_target_prim("/motion_controller_target")
commander = MotionCommander(robot, motion_policy_controller, rrt, target_prim)
hand_prim_path = robot.prim_path + "/panda_hand"
add_end_effector_prim_to_robot(commander, hand_prim_path, "eff")
for obs in obstacles.values():
commander.add_obstacle(obs)
return commander
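# Usage sketch (comments only; `robot` is a Franka articulation already added to the
# world): typical wiring during scene setup, followed by a position-only command.
#
#     commander = build_motion_commander(physics_dt=1.0 / 60.0, robot=robot, obstacles={})
#     commander.set_command(MotionCommand(np.array((0.4, 0.0, 0.3))))
#     commander.step(1.0 / 60.0)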
def make_target_prim(prim_path="/cortex/belief/motion_controller_target"):
""" Create the prim to be used as the motion controller target and add it to the stage.
Creates an axis marker.
"""
target_prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=prim_path)
target_prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
xformable = XFormPrim(target_prim.GetPath(), "motion_controller_target")
xformable.set_local_scale((.4,.4,.4))
return CortexObject(xformable)
def add_end_effector_prim_to_robot(motion_commander, hand_prim_path, eff_prim_name):
""" Add an end-effector prim as a child of the specified hand prim.
In general, a motion policy consuming commands from the motion commander may not use an
end-effector explicitly represented as a prim in the underlying robot USD. This method measures
the location of the underlying policy's end-effector, computes the relative transform between
the specified hand prim and that end-effector, and adds an explicit end-effector prim as a child
of the hand prim to represent the end-effector in USD.
This call uses MotionCommander.calc_policy_eff_pose_rel_to_hand(hand_prim_path) to calculate
where the end-effector transform used by the underlying motion policy is relative to the
specified hand prim.
The end-effector prim is added to the path <hand_prim_path>/<eff_prim_name>
"""
eff_prim_path = hand_prim_path + "/" + eff_prim_name
# Only add the prim if it doesn't already exist.
if not is_prim_path_valid(eff_prim_path):
print("No end effector detected. Adding one.")
eff_prim = XFormPrim(eff_prim_path, "eff_transform")
eff_prim_viz = VisualSphere(eff_prim_path + "/viz", "eff_viz", radius=0.003)
eff_prim_viz.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
eff_prim = eff_prim.prim
else:
eff_prim = get_prim_at_path(eff_prim_path)
pose = calc_policy_eff_pose_rel_to_hand(motion_commander, hand_prim_path)
p = to_stage_units(pose[0])
q = pose[1]
eff_prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(*p.tolist()))
eff_prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(*q.tolist()))
#eff_prim.GetAttribute("xformOp:scale").Set(Gf.Vec3d(.1,.1,.1))
def calc_policy_eff_pose_rel_to_hand(commander, ref_prim_path):
""" Calculates the pose of the controlled end-effector in coordinates of the reference prim
in the named path.
The underlying motion policy uses an end-effector that's not necessarily available in the
franka robot. It's that control end-effector pose that's returned by the forward kinematics
(fk) methods below. This method gets that control end-effector pose relative to a given prim
(such as the hand frame) so, for instance, a new prim can be added relative to that frame
for reference elsewhere.
"""
ref_T = get_prim_world_T_meters(ref_prim_path)
#print("hand_prim_T_meter:\n", ref_T)
#note
eff_T = commander.get_fk_T()
#print("eff_T from mg:\n", eff_T)
eff_T_rel2ref = invert_T(ref_T).dot(eff_T)
R, p = unpack_T(eff_T_rel2ref)
q = matrix_to_quat(R)
return p, q
class ApproachParams(object):
""" Parameters describing how to approach a target (in position).
    The direction is a 3D vector pointing in the direction of approach. Its magnitude defines the
    max offset from the position target that the intermediate approach target will be shifted by. The std
    dev defines the length scale of a radial basis (Gaussian) weight function that determines what
    fraction of the shift we take. The radial basis function is defined on the orthogonal distance
    to the line defined by the target and the direction vector.
    Intuitively, the normalized direction of the direction vector defines which direction to
    approach from, and its magnitude defines how far back we want the end effector to come in from.
    The std dev defines how tightly the end-effector approaches along that line. Small std dev is
    tight around that approach line, large std dev is looser. A good value is often between 1 and 3
cm.
See calc_shifted_approach_target() for the specific implementation of how these parameters are
used.
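    Example (illustrative): approach moving along -Z, with the intermediate target
    shifted up to 10 cm above the final target and a ~2 cm convergence length scale:
        approach = ApproachParams(direction=np.array((0., 0., -0.10)), std_dev=0.02)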
"""
def __init__(self, direction, std_dev):
self.direction = direction
self.std_dev = std_dev
def __str__(self):
return "{direction: %s, std_dev %s}" % (str(self.approach), str(self.std_dev))
class MotionCommand:
""" A motion command includes the motion API parameters: a target pose (required), optional
approach parameters, and an optional posture configuration.
The target pose is a full position and orientation target. The approach params define how the
end-effector should approach that target. And the posture config defines how the system should
resolve redundancy and generally posture the arm on approach.
"""
    def __init__(self, target_position: Optional[np.ndarray], target_orientation: Optional[quaternion.quaternion]=None, approach_params=None, posture_config=None):
self.target_position = target_position
self.target_orientation = target_orientation
self.approach_params = approach_params
self.posture_config = posture_config
@property
def has_approach_params(self):
return self.approach_params is not None
@property
def has_posture_config(self):
return self.posture_config is not None
class VelocityMotionCommand:
def __init__(self, target_linear_velocity: np.ndarray, target_angular_velocity: np.ndarray, frame_trans=np.identity(3), frame_rot=np.identity(3)):
self.target_linear_velocity = target_linear_velocity
self.target_angular_velocity = target_angular_velocity
self.frame_trans = frame_trans
self.frame_rot = frame_rot
class PlannedMoveCommand:
def __init__(self, cspace_goal: Optional[np.ndarray]=None, end_effector_goal: Optional[np.ndarray]=None):
self.cspace_target = cspace_goal
self.end_effector_goal = end_effector_goal
def __eq__(self, obj):
if not isinstance(obj, PlannedMoveCommand):
return False
if self.cspace_target is not None and obj.cspace_target is not None:
return np.allclose(self.cspace_target, obj.cspace_target)
else:
return False
class SmoothedCommand(object):
""" Represents a smoothed command.
The API includes:
- reset(): Clear the current smoothed target data.
- update(): Updating the data given a new target.
A command consists of a position target, an optional rotation matrix target, and a posture
config. The smoothed command is stored in members x (position), R (rotation matrix), q (posture
config), and can be accessed from there. On first update of any given component, the component
is set directly to the value provided. On subsequent updates the currently value is averaged
with the new value, creating an exponentially weighted average of values received. If a
particular component is never received (e.g. the posture config, or the rotation matrix) the
corresponding member is never initialized and remains None.
Rotation recursive averaging is done by averaging the matrices themselves then projecting using
math_util.proj_R(), which converts the (invalid) rotation matrix to a quaternion, normalizes,
then converts back to a matrix.
If use_distance_based_smoothing_regulation is set to True (default) the degree of smoothing
diminishes to a minimum value of 0.5 as the system approaches the target. This feature is
    optimized for discrete jumps in targets. When a large jump is detected, the smoothing increases
to the interpolation_alpha provided on initialization, but then decreases to the minimum value
as it nears the target. Note that the distance between rotation matrices factors into the
distance to target.
"""
def __init__(self, interpolation_alpha=0.95, use_distance_based_smoothing_regulation=True):
""" Initialize to use interpolation_alpha as the alpha blender. Larger values mean higher
smoothing. interpolation_alpha should be between 0 and 1; a good default (for use with 60hz
updates) is given by SmoothedCommand_a.
"""
self.x = None
self.R = None
self.q = None
self.init_interpolation_alpha = interpolation_alpha
self.use_distance_based_smoothing_regulation = use_distance_based_smoothing_regulation
self.reset()
def reset(self):
""" Reset the smoother back to its initial state.
"""
self.x = None
self.R = None
self.q = None
self.interpolation_alpha = self.init_interpolation_alpha
def update(self, target_p, target_R, posture_config, eff_x, eff_R):
""" Update the smoothed target given the current command (target, posture_config) and the
current end-effector frame (eff_{x,R}).
Params:
        - target_p: The position component of the commanded target.
        - target_R: The rotational component of the commanded target. None is valid.
- posture_config: The posture configuration for this command. None is valid.
- eff_x: The position component of the current end-effector frame.
- eff_R: The rotational component of the current end-effector frame.
"""
x_curr = target_p
R_curr = None
if target_R is not None:
R_curr = target_R
q_curr = None
if posture_config is not None:
q_curr = np.array(posture_config)
if self.x is None:
self.x = eff_x
if self.R is None:
self.R = eff_R
if self.q is None:
self.q = q_curr
# Clear the R if there's no rotation command. But don't do the same for the posture config.
# Always keep around the previous posture config.
if R_curr is None:
self.R = None
if self.use_distance_based_smoothing_regulation:
d = np.linalg.norm([eff_x - x_curr])
if self.R is not None:
d2 = np.linalg.norm([eff_R - self.R]) * 1.0
d = max(d, d2)
std_dev = 0.05
scalar = 1.0 - np.exp(-0.5 * (d / std_dev) ** 2)
alpha_min = 0.5
a = scalar * self.interpolation_alpha + (1.0 - scalar) * alpha_min
else:
a = self.interpolation_alpha
self.x = a * self.x + (1.0 - a) * x_curr
if self.R is not None and R_curr is not None:
self.R = math_util.proj_R(a * self.R + (1.0 - a) * R_curr)
if self.q is not None and q_curr is not None:
self.q = a * self.q + (1.0 - a) * q_curr
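# Usage sketch (comments only; eff_x/eff_R are the current end-effector position and
# rotation matrix, target_p/target_R the raw command): blend a stream of targets
# before handing them to the policy.
#
#     smoother = SmoothedCommand(interpolation_alpha=0.95)
#     smoother.update(target_p, target_R, None, eff_x, eff_R)
#     blended_p, blended_R = smoother.x, smoother.R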
def calc_shifted_approach_target(target_T, eff_T, approach_params):
""" Calculates how the target should be shifted to implement the approach given the current
end-effector position.
    - target_T: Final target pose as a 4x4 homogeneous transform.
    - eff_T: Current end-effector pose as a 4x4 homogeneous transform.
    - approach_params: The approach parameters.
"""
target_R, target_p = math_util.unpack_T(target_T)
eff_R, eff_p = math_util.unpack_T(eff_T)
direction = approach_params.direction
std_dev = approach_params.std_dev
v = eff_p - target_p
an = normalized(direction)
norm = np.linalg.norm
dist = norm(v - np.dot(v, an) * an)
dist += 0.5 * norm(target_R - eff_R) / 3
alpha = 1.0 - np.exp(-0.5 * dist * dist / (std_dev * std_dev))
shifted_target_p = target_p - alpha * direction
return shifted_target_p
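# Behavior sketch (comments only, ignoring the rotation term in dist): on the approach
# line dist == 0 so alpha == 0 and the true target is commanded; far off the line
# alpha -> 1 and the target is pulled back by the full `direction` offset.
#
#     e.g. direction = (0, 0, -0.10), std_dev = 0.02, orthogonal distance 0.02 m
#     -> alpha = 1 - exp(-0.5) ~= 0.39, so the target shifts ~3.9 cm along +Z.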
def get_prim_world_T_meters(prim_path):
""" Computes and returns the world transform of the prim at the provided prim path in units of
meters.
"""
prim = get_prim_at_path(prim_path)
prim_tf = UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
transform = Gf.Transform()
transform.SetMatrix(prim_tf)
position = transform.GetTranslation()
orientation = transform.GetRotation().GetQuat()
p = np.array(position)
R = np.array(Gf.Matrix3d(orientation).GetTranspose())
T = math_util.pack_Rp(R, math_util.to_meters(p))
return T
class MotionCommander:
""" The motion commander provides an abstraction of motion for the cortex wherein a lower-level
policy implements the motion commands defined by MotionCommand objects.
    This class adds an end-effector prim to the robot's hand and creates a target prim for setting
targets. The target prim can be set to a target manually via a call to set_target() or it can be
controlled using a gizmo through the OV viewport.
Independent of what the stage units currently are, this class provides an SI interface. Commands
are specified in units of meters and forward kinematics is returned in units of meters.
"""
def __init__(self, robot, motion_controller, rrt, target_prim):
self.robot = robot
self.motion_controller = motion_controller
self.smoothed_command = SmoothedCommand()
self.rrt = rrt
# Use the PathPlannerVisualizer wrapper to generate a trajectory of ArticulationActions
self.path_planner_visualizer = PathPlannerVisualizer(robot,rrt)
self.robot_prim = get_prim_at_path(self.amp.get_robot_articulation().prim_path)
self.target_prim = None
self.register_target_prim(target_prim)
self.is_target_position_only = False
self.last_command = None
def set_target_position_only(self):
self.is_target_position_only = True
def set_target_full_pose(self):
self.is_target_position_only = False
def register_target_prim(self, target_prim):
""" Register the specified target prim with this commander. This prim will both visualize
the commands being sent to the motion commander, and it can be used to manually control the
robot using the OV viewport's gizmo.
"""
self.target_prim = CortexObject(target_prim) # Target prim will be in units of meters.
self.set_command(MotionCommand(*self.get_fk_pq()))
def calc_policy_eff_pose_rel_to_hand(self, ref_prim_path):
""" Calculates the pose of the controlled end-effector in coordinates of the reference prim
in the named path.
The underlying motion policy uses an end-effector that's not necessarily available in the
franka robot. It's that control end-effector pose that's returned by the forward kinematics
(fk) methods below. This method gets that control end-effector pose relative to a given prim
(such as the hand frame) so, for instance, a new prim can be added relative to that frame
for reference elsewhere.
"""
ref_T = get_prim_world_T_meters(ref_prim_path)
print("hand_prim_T_meter:\n", ref_T)
eff_T = self.get_fk_T()
print("eff_T from mg:\n", eff_T)
eff_T_rel2ref = math_util.invert_T(ref_T).dot(eff_T)
R, p = math_util.unpack_T(eff_T_rel2ref)
q = math_util.matrix_to_quat(R)
return p, q
def reset(self):
""" Reset this motion controller. This method ensures that any internal integrators of the
motion policy are reset, as is the smoothed command.
"""
self.motion_policy.reset()
self.smoothed_command.reset()
@property
def amp(self):
""" Accessor for articulation motion policy from the motion controller.
"""
return self.motion_controller.get_articulation_motion_policy()
@property
def motion_policy(self):
""" The motion policy used to command the robot.
"""
return self.motion_controller.get_articulation_motion_policy().get_motion_policy()
@property
def aji(self):
""" Active joint indices. These are the indices into the full C-space configuration vector
of the joints which are actively controlled.
"""
return self.amp.get_active_joints_subset().get_joint_subset_indices()
def get_end_effector_pose(self, config=None):
""" Returns the control end-effector pose in units of meters (the end-effector used by
motion gen).
Motion generation returns the end-effector pose in stage units. We convert it to meters
here. Returns the result in the same (<position>, <rotation_matrix>) tuple form as motion
generation.
If config is None (default), it uses the current applied action (i.e. current integration
state of the underlying motion policy which the robot is trying to follow). By using the
applied action (rather than measured simulation state) the behavior is robust and consistent
regardless of simulated PD control nuances. Otherwise, if config is set, calculates the
forward kinematics for the provided joint config. config should be the full C-space
configuration of the robot.
"""
if config is None:
# No active joints config was specified, so fill it in with the current applied action.
action = self.robot.get_applied_action()
config = np.array(action.joint_positions)
active_joints_config = config[self.aji]
p, R = self.motion_policy.get_end_effector_pose(active_joints_config)
p = math_util.to_meters(p)
return p, R
def get_eef_T(self):
"""
Return the true, current end effect pose, using latest joint angle measurements
"""
return self.get_fk_T(self.robot.get_joint_positions()[:-2])
def get_fk_T(self, config=None):
""" Returns the forward kinematic transform to the control frame as a 4x4 homogeneous
matrix. Uses currently applied joint position goal, which may differ from real joint positions
in cases where the controller is oscillating.
"""
p, R = self.get_end_effector_pose(config)
return math_util.pack_Rp(R, p)
def get_fk_pq(self, config=None):
""" Returns the forward kinematic transform to the control frame as a
(<position>,<quaternion>) pair.
"""
p, R = self.get_end_effector_pose(config)
return p, quaternion.from_rotation_matrix(R)
def get_fk_p(self, config=None):
""" Returns the position components of the forward kinematics transform to the end-effector
control frame.
"""
p, _ = self.get_end_effector_pose(config)
return p
def get_fk_R(self, config=None):
""" Returns the rotation matrix components of the forward kinematics transform to the
end-effector control frame.
"""
_, R = self.get_end_effector_pose(config)
return R
def set_command(self, command: Union[MotionCommand, VelocityMotionCommand]):
""" Set the active command to the specified value. The command is smoothed before passing it
into the underlying policy to ensure it doesn't change too quickly.
If the command does not have a rotational target, the end-effector's current rotation is
used in its place.
Note the posture configure should be a full C-space configuration for the robot.
"""
eff_T = self.get_fk_T()
eff_p = eff_T[:3, 3]
eff_R = eff_T[:3, :3]
if isinstance(command, VelocityMotionCommand):
screw_T = integrate_twist(3 * command.frame_trans @ command.target_linear_velocity, 12 * command.frame_rot @ command.target_angular_velocity, 2)
target_posture = None
self.smoothed_command.interpolation_alpha = .6
new_T = eff_T @ screw_T
self.smoothed_command.update(new_T[:3,3], new_T[:3,:3], None, eff_p, eff_R)
elif isinstance(command, MotionCommand):
target_p, target_q = command.target_position, command.target_orientation
if target_q is None:
target_q = quaternion.from_rotation_matrix(eff_R)
if command.has_approach_params:
target_T = math_util.pack_Rp(quaternion.as_rotation_matrix(target_q), target_p)
target_p = calc_shifted_approach_target(target_T, eff_T, command.approach_params)
self.smoothed_command.interpolation_alpha = .95
self.smoothed_command.update(target_p, quaternion.as_rotation_matrix(target_q), command.posture_config, eff_p, eff_R)
elif isinstance(command, PlannedMoveCommand):
need_replan = True
if isinstance(self.last_command, PlannedMoveCommand):
if self.last_command == command:
need_replan = False
if need_replan:
self.rrt.set_cspace_target(command.cspace_target)
self.plan = self.path_planner_visualizer.compute_plan_as_articulation_actions(max_cspace_dist = .01)
if self.plan:
next_action = self.plan[0]
p, q = self.get_fk_pq(config=next_action.joint_positions)
self.smoothed_command.interpolation_alpha = 0
self.smoothed_command.update(p, quaternion.as_rotation_matrix(q), None, eff_p, eff_R)
target_p = self.smoothed_command.x
target_R = self.smoothed_command.R
target_T = math_util.pack_Rp(target_R, target_p)
target_posture = self.smoothed_command.q
self.target_prim.set_world_pose(position=target_p, orientation=math_util.matrix_to_quat(target_R))
if target_posture is not None:
self.set_posture_config(target_posture)
self.last_command = command
def set_posture_config(self, posture_config):
""" Set the posture configuration of the underlying motion policy.
The posture configure should be a full C-space configuration for the robot.
"""
policy = self.motion_policy._policy
policy.set_cspace_attractor(posture_config)
def _sync_end_effector_target_to_motion_policy(self):
""" Set the underlying motion generator's target to the pose in the target prim.
Note that the world prim is a CortexObject which is always in units of meters. The motion
generator uses stage units, so we have to convert.
"""
target_translation, target_orientation = self.target_prim.get_world_pose()
if self.is_target_position_only:
self.motion_policy.set_end_effector_target(math_util.to_stage_units(target_translation))
p, _ = self.target_prim.get_world_pose()
q = self.get_fk_pq().q
self.target_prim.set_world_pose(p, q)
else:
self.motion_policy.set_end_effector_target(math_util.to_stage_units(target_translation), target_orientation)
def get_action(self, dt):
""" Get the next action from the underlying motion policy. Returns the result as an
ArticulationAction object.
"""
self.amp.physics_dt = dt
self._sync_end_effector_target_to_motion_policy()
self.motion_policy.update_world()
action = self.amp.get_next_articulation_action()
if isinstance(self.last_command, PlannedMoveCommand):
if self.plan:
action = self.plan.pop(0)
return action
def step(self, dt):
""" Convenience method for both getting the current action and applying it to the
underlying robot's articulation controller.
"""
action = self.get_action(dt)
self.robot.get_articulation_controller().apply_action(action)
def add_obstacle(self, obs):
""" Add the provided obstacle to the underlying motion policy so they will be avoided.
The obstacles must be core primitive types. See omni.isaac.core/omni/isaac/core/objects for
options.
See also omni.isaac.motion_generation/omni/isaac/motion_generation/world_interface.py:
WorldInterface.add_obstacle(...)
"""
self.motion_policy.add_obstacle(obs)
def disable_obstacle(self, obj):
""" Distable the given object as an obstacle in the underlying motion policy.
Disabling can be done repeatedly safely. The object can either be a core api object or a
cortex object.
"""
try:
# Handle cortex objects -- extract the underlying core api object.
if hasattr(obj, "obj"):
obj = obj.obj
self.motion_policy.disable_obstacle(obj)
except Exception as e:
err_substr = "Attempted to disable an already-disabled obstacle"
if err_substr in str(e):
print("<lula error caught and ignored (obj already disabled)>")
else:
raise e
def enable_obstacle(self, obj):
""" Enable the given object as an obstacle in the underlying motion policy.
Enabling can be done repeatedly safely. The object can either be a core api object or a
cortex object.
"""
try:
# Handle cortex objects -- extract the underlying core api object.
if hasattr(obj, "obj"):
obj = obj.obj
self.motion_policy.enable_obstacle(obj)
except Exception as e:
err_substr = "Attempted to enable an already-enabled obstacle"
if err_substr in str(e):
print("<lula error caught and ignored (obj already enabled)>")
else:
raise e
| 29,384 | Python | 42.340708 | 161 | 0.672441 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/camera_controls.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import omni
from omni.isaac.sensor import Camera
import math
from srl.teleop.assistance.transforms import rotate_vec_by_quat
from omni.isaac.core.utils.viewports import set_camera_view
import time
import quaternion
class ArcballCameraControls:
def __init__(self, camera_path, focus_delegate) -> None:
self.camera_path = camera_path
self.last_free_camera_view = None
self._last_holdable_control = None
self._hold_stamp = time.time()
self._hold_duration = 0
self.camera = Camera(self.camera_path, name="persp")
self.focus_delegate = focus_delegate
def update(self, control_input):
if control_input in {"ROTATE RIGHT", "ROTATE LEFT", "PITCH DOWN", "PITCH UP", "ZOOM IN", "ZOOM OUT"}:
now = time.time()
if self._last_holdable_control != control_input or now > self._hold_stamp + 0.2:
# Interpret as a new press
self._hold_duration = 0
elif now > self._hold_stamp:
self._hold_duration += 1
self._hold_stamp = now
self._last_holdable_control = control_input
focus_point = self.focus_delegate()
if control_input == "ROTATE RIGHT" or control_input == "ROTATE LEFT":
sign = 1
if control_input == "ROTATE LEFT":
sign = -1
self._rotate_camera_eye_by_quat(
quaternion.from_euler_angles(0,0,sign * .02 * min(math.log(math.e + self._hold_duration), 3)),
focus_point)
elif control_input == "PITCH UP" or control_input == "PITCH DOWN":
sign = 1
if control_input == "PITCH DOWN":
sign = -1
self._rotate_camera_eye_by_quat(
quaternion.from_euler_angles(0,sign * .02 * min(math.log(math.e + self._hold_duration), 3),0),
focus_point)
elif control_input == "ZOOM IN" or control_input == "ZOOM OUT":
sign = 1
if control_input == "ZOOM OUT":
sign = -1
current_cam_pose = self.camera.get_world_pose()
set_camera_view(
eye=current_cam_pose[0] + (sign * .02 * min(math.log(math.e + self._hold_duration), 3)) * (focus_point - current_cam_pose[0]),
target=focus_point,
camera_prim_path=self.camera_path
)
def _rotate_camera_eye_by_quat(self, quat: quaternion.quaternion, focus):
current_cam_pose = self.camera.get_world_pose()
set_camera_view(
eye=rotate_vec_by_quat(current_cam_pose[0], quat),
target=focus,
camera_prim_path=self.camera_path
)
class SwappableViewControls:
def __init__(self, camera_path, main_viewport, secondary_viewport, on_flip=lambda x: x):
self.main_viewport = main_viewport
self.secondary_viewport = secondary_viewport
self.camera_path = camera_path
# Outside expects us to have a handle to a controllable camera.
self.camera = Camera(self.camera_path, name="persp")
#self.camera.pause()
self._hold_stamp = time.time()
self._hold_duration = 0
self.on_flip = on_flip
def update(self, control_input):
if control_input == 0:
return
now = time.time()
if now > self._hold_stamp + 0.2:
# Interpret as a new press
self._hold_duration = 0
else:
self._hold_duration += 1
self._hold_stamp = now
if self._hold_duration > 0:
return
self.swap()
def swap(self):
prev_main_camera = self.main_viewport.viewport_api.get_active_camera()
prev_secondary_camera = self.secondary_viewport.viewport_api.get_active_camera()
self.main_viewport.viewport_api.set_active_camera(prev_secondary_camera)
self.secondary_viewport.viewport_api.set_active_camera(prev_main_camera)
self.on_flip(prev_secondary_camera == self.camera_path)
@property
def active_index(self):
return 0 if self.camera_path == self.main_viewport.viewport_api.get_active_camera() else 1
def set_fixed_view(self):
omni.kit.commands.execute("UnlockSpecs", spec_paths=[self.camera.prim_path])
#set_camera_view((-.35, -1.16, 1.29), (.35, 0, 0), self.camera_path, self.main_viewport.viewport_api)
set_camera_view((1.79, 0, 1.35), (.25, 0, 0), self.camera_path, self.main_viewport.viewport_api)
def lock_fixed(self):
omni.kit.commands.execute("LockSpecs", spec_paths=[self.camera.prim_path])
| 4,743 | Python | 40.252174 | 142 | 0.598777 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/__init__.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.assistance import Assistance
from srl.teleop.assistance.assistance_extension import AssistanceExtension
import os
# Conveniences to other module directories via relative paths
EXT_DIR = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../../"))
DATA_DIR = os.path.join(EXT_DIR, "data")
__all__ = [
# global paths
"EXT_DIR",
"DATA_DIR",
] | 551 | Python | 28.05263 | 96 | 0.720508 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/assistance.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import datetime
import weakref
from srl.teleop.assistance.camera_controls import ArcballCameraControls, SwappableViewControls
from srl.teleop.assistance.logging import CONTROLS_STATE_DTYPE, ROBOT_STATE_DTYPE, UI_STATE_DTYPE
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.base_sample import BaseSample
from .behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.network import build_control_behavior, build_suggestion_display_behavior, build_suggestion_selection_behavior
from srl.teleop.assistance.check_collision import WarpGeometeryScene
from srl.teleop.assistance.proposals import FixedTargetProposal, build_proposal_tables
from srl.spacemouse.spacemouse_extension import get_global_spacemouse
from omni.isaac.core.world import World
from omni.isaac.core.prims.xform_prim import XFormPrim
import numpy as np
import omni
import carb
import time
import quaternion
from omni.kit.viewport.utility import get_active_viewport_window
from srl.teleop.assistance.transforms import invert_T, pack_Rp
from srl.teleop.assistance.viewport import configure_main_viewport, configure_realsense_viewport, disable_viewport_interaction, get_realsense_viewport, layout_picture_in_picture
from srl.teleop.assistance.viz import viz_laser_rooted_at
from srl.teleop.assistance.motion_commander import build_motion_commander, add_end_effector_prim_to_robot
from srl.teleop.assistance.ui import AssistanceMode, ControlFrame, strfdelta
from pxr import UsdGeom, PhysxSchema
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.debug_draw import _debug_draw
class Assistance(BaseSample):
def __init__(self, task, viewport_manipulator) -> None:
super().__init__()
self.set_world_settings(rendering_dt= 1/30, physics_dt=1/60)
self._timeline = omni.timeline.get_timeline_interface()
self._stage = None
self.scene_context = None
self.control_behavior = None
self.suggestion_selection_behavior = None
self.suggestion_display_behavior = None
self.models = None
self.start_stamp = None
self.last_stamp = time.time()
self._camera_controls = None
self._draw = _debug_draw.acquire_debug_draw_interface()
self._task = task
self.viewport_manipulator = viewport_manipulator
self.viewport_disable_handles = None
def setup_scene(self):
"""
        Called by the superclass once the world and stage are set up.
"""
# Turn off scientific notation to make debug prints scannable
np.set_printoptions(suppress=True)
world = self.get_world()
world.add_task(self._task)
self._stage = omni.usd.get_context().get_stage()
def physics_step(self, step):
if self._world.is_stopped():
return
if self._task.is_done():
self._world.stop()
return
carb.profiler.begin(1, "physics step", active=True)
if self.start_stamp is None:
self.start_stamp = time.time()
# Force everyone to redraw anything they want shown each frame
self._draw.clear_lines()
self._draw.clear_points()
# Make sure we've let the simulation settle a few steps before updating the eff prim. Otherwise
# the hand prim starts in a strange place which disagrees with joint states
if self._world.current_time_step_index > 10:
hand_prim_path = self.franka.prim_path + "/panda_hand"
# FIXME: This gets called for the first time when the commander is built, but at that point
# the hand prim position is wrong relative to the controller's FK frame. We call it again here
# to put the eff prim in the right place.
add_end_effector_prim_to_robot(self.commander, hand_prim_path, "eff")
spacemouse = get_global_spacemouse()
if spacemouse and not self.control_behavior:
self.configure_behaviors()
elif not spacemouse:
self.control_behavior = None
self.suggestion_selection_behavior = None
self.suggestion_display_behavior = None
with profile("scene_context.monitors", True):
for mon in self.scene_context.monitors:
mon(self.scene_context)
if self.control_behavior:
#HACK: For basic assistance familiarization in study
if isinstance(self._task, ReachingTask) and self.models["suggest_grasps"].as_bool:
if self.control_behavior.context.button_command[2]:
self.selection_behavior.context.fixed_proposal = FixedTargetProposal(self._task._current_target_T)
with profile("control_behavior.monitors", True):
for mon in self.control_behavior.context.monitors:
mon(self.control_behavior.context)
with profile("control_behavior.step", True):
self.control_behavior.step()
with profile("selection.monitors", True):
for mon in self.selection_behavior.context.monitors:
mon(self.selection_behavior.context)
with profile("selection.step", True):
self.selection_behavior.step()
with profile("suggestion_display_behavior.monitors", True):
for mon in self.suggestion_display_behavior.context.monitors:
mon(self.suggestion_display_behavior.context)
with profile("suggestion_display_behavior.step", True):
self.suggestion_display_behavior.step()
action = self.commander.get_action(World.instance().get_physics_dt())
self.franka.get_articulation_controller().apply_action(action)
if self.models is not None and self.models["use_laser"].as_bool:
viz_laser_rooted_at(f"{self.franka.prim_path}/panda_hand/guide", pack_Rp(np.identity(3), np.array((0, 0, .07))))
orig_style = self.models["left_label"][1].style
if hasattr(self._task, "time_remaining") and self._task.time_remaining:
to_display = datetime.timedelta(seconds=self._task.time_remaining)
self.models["left_label"][0].text = strfdelta(to_display, '%M:%S')
if to_display.total_seconds() < 60:
orig_style["background_color"] = 0x330000FF
else:
orig_style["background_color"] = 0x33000000
else:
to_display = datetime.timedelta(seconds=time.time() - self.start_stamp)
self.models["left_label"][0].text = strfdelta(to_display, '%M:%S')
orig_style["background_color"] = 0x33000000
self.models["left_label"][1].set_style(orig_style)
carb.profiler.end(1, True)
async def setup_post_reset(self):
self.commander.reset()
omni.usd.get_context().get_selection().set_selected_prim_paths([], True)
def world_cleanup(self):
self._world.remove_physics_callback("sim_step")
if self.viewport_disable_handles:
self.viewport_disable_handles = None
return
async def setup_pre_reset(self):
return
async def setup_post_load(self):
scene = self._world.scene
self.ghosts = [scene.get_object("ghost_franka0"),scene.get_object("ghost_franka1")]
self.franka = scene.get_object("franka")
await self._world.play_async()
if self.franka is None:
carb.log_error("Grasp Suggestion load failed trying to retrieve Franka from scene. Make sure you have"
"cleared the stage completely before attempted to load.")
assert False
self.realsense_vp = get_realsense_viewport(self.franka.camera.prim.GetPath())
configure_realsense_viewport(self.realsense_vp)
self.main_vp = get_active_viewport_window("Viewport")
configure_main_viewport(self.main_vp)
self.viewport_disable_handles = disable_viewport_interaction(self.main_vp), disable_viewport_interaction(self.realsense_vp)
self.models["control_frame"].get_item_value_model().set_value(2)
layout_picture_in_picture(self.main_vp, self.realsense_vp)
def get_focus():
point = self.commander.get_fk_p()
point[2] = 0
return point
#self._camera_controls = ArcballCameraControls("/OmniverseKit_Persp", focus_delegate=get_focus)
def on_flip(main_is_original):
if main_is_original:
self.models["control_frame"].get_item_value_model().set_value(2)
else:
self.models["control_frame"].get_item_value_model().set_value(0)
self._camera_controls = SwappableViewControls("/OmniverseKit_Persp",self.main_vp, self.realsense_vp, on_flip=on_flip)
self._camera_controls.set_fixed_view()
self._objects = self._task.get_task_objects()
self._scene_objects = self._task.get_scene_objects()
self._object_ghosts = self._task.get_ghost_objects()
#self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.ghosts))
# NOTE: motioncommander requires the articulation view to already exist, which it isn't before setup_post_load
self.commander = build_motion_commander(self.get_world().get_physics_dt(), self.franka, {})
self.eff_prim = XFormPrim(self.franka.prim_path + "/panda_hand/eff")
self.target_prim = XFormPrim("/motion_controller_target")
await self._world.play_async()
self._camera_controls.lock_fixed()
# Generate all possible suggestions we could have based on object geometry
ee_T = self.commander.get_eef_T()
inv_ee_T = invert_T(ee_T)
part_Ts = self.franka.get_gripper_collision_Ts()
ee_to_part_Ts = [inv_ee_T.dot(part_T) for part_T in part_Ts]
self.ee_to_part_Ts = ee_to_part_Ts
self.collision_checker = WarpGeometeryScene()
self.gripper_collision_mesh = self.collision_checker.combine_geometries_to_mesh(self.franka.get_gripper_collision_meshes(), self.ee_to_part_Ts)
with profile("filter_proposal_tables"):
self.grasp_table, self.placement_table, self.plane_table = build_proposal_tables(self.collision_checker, list(self._objects.values()), list(self._scene_objects.values()), self.gripper_collision_mesh)
#self.viewport_manipulator.update(self.grasp_table, self.placement_table, self.plane_table)
self.scene_context = SceneContext(ContextTools(self._world, self.viewport_manipulator, self._objects, self._scene_objects, {}, self._object_ghosts, self.franka, self.ghosts, self.commander, self.grasp_table, self.placement_table, self.plane_table, self.collision_checker, self.gripper_collision_mesh), self.models["suggest_grasps"].as_bool, self.models["suggest_placements"].as_bool)
self._world.add_physics_callback("sim_step", callback_fn=self.physics_step)
omni.usd.get_context().get_selection().set_selected_prim_paths([], True)
def configure_behaviors(self):
assistance_mode = AssistanceMode(self.models["assistance_mode"].get_item_value_model().as_int)
control_frame = ControlFrame(self.models["control_frame"].get_item_value_model().as_int)
self.control_behavior = build_control_behavior(weakref.proxy(self.scene_context.tools), get_global_spacemouse(), control_frame, weakref.proxy(self.scene_context), assistance_mode, weakref.proxy(self._camera_controls).update, self.models["avoid_obstacles"].as_bool)
self.selection_behavior = build_suggestion_selection_behavior(weakref.proxy(self.scene_context.tools), weakref.proxy(self.scene_context), weakref.proxy(self.control_behavior.context), self.models["use_surrogates"].as_bool, self.models["snapping"].as_bool)
self.control_behavior.context.selection_context = weakref.proxy(self.selection_behavior.context)
self.suggestion_display_behavior = build_suggestion_display_behavior(weakref.proxy(self.scene_context.tools), weakref.proxy(self.scene_context), weakref.proxy(self.control_behavior.context), weakref.proxy(self.selection_behavior.context), self.models["center_label"])
def register_ui_models(self, models):
self.models = models
def overlay_opacity_change(model):
value = model.get_value_as_float()
self.suggestion_display_behavior.context.overlay_opacity = value
def control_frame_change(model,_):
if self.control_behavior:
self.control_behavior.context.control_frame = ControlFrame(model.get_item_value_model().as_int)
def assistance_mode_change(model, _):
if self.control_behavior:
self.control_behavior.context.assistance_mode = AssistanceMode(model.get_item_value_model().as_int)
def should_assist_change(model):
self.scene_context.should_suggest_placements = self.models["suggest_placements"].as_bool
self.scene_context.should_suggest_grasps = self.models["suggest_grasps"].as_bool
if self.selection_behavior:
self.selection_behavior.context.use_surrogates = self.models["use_surrogates"].as_bool
self.selection_behavior.context.use_snapping = self.models["snapping"].as_bool
self.models["overlay_opacity"][0].add_value_changed_fn(overlay_opacity_change)
self.models["control_frame"].add_item_changed_fn(control_frame_change)
self.models["assistance_mode"].add_item_changed_fn(assistance_mode_change)
self.models["suggest_grasps"].add_value_changed_fn(should_assist_change)
self.models["suggest_placements"].add_value_changed_fn(should_assist_change)
self.models["use_surrogates"].add_value_changed_fn(should_assist_change)
self.models["snapping"].add_value_changed_fn(should_assist_change)
async def _on_ui_value_change(self, name, value):
if name == "suggest_grasps":
self.scene_context.should_suggest_grasps = value
elif name == "suggest_placements":
self.scene_context.should_suggest_placements = value
elif name == "avoid_obstacles":
if self.control_behavior:
self.control_behavior.context.avoid_obstacles = value
elif name == "use_laser":
imageable = UsdGeom.Imageable(get_prim_at_path(f"{self.franka.prim_path}/panda_hand/guide"))
if not value:
imageable.MakeInvisible()
else:
imageable.MakeVisible()
elif name == "use_surrogates":
if self.selection_behavior:
self.selection_behavior.context.use_surrogates = value
else:
print("unhandled ui event", name, value)
def _on_logging_event(self, val):
world = self.get_world()
data_logger = world.get_data_logger()
if not world.get_data_logger().is_started():
data_logger.add_data_frame_logging_func(self.frame_logging_func)
if val:
data_logger.start()
else:
data_logger.pause()
return
def frame_logging_func(self, tasks, scene):
if self.suggestion_display_behavior.context is None:
return {}
# return always a dict
applied_action = self.franka.get_applied_action()
spacemouse = get_global_spacemouse()
trans, rot, buttons = (0,0,0), (0,0,0), 0
trans_raw, rot_raw, buttons_raw = (0,0,0), (0,0,0), 0
if spacemouse:
stamp, trans, rot, buttons = spacemouse.get_controller_state()
stamp, trans_raw, rot_raw, buttons_raw = spacemouse._control
p,q = self.commander.get_fk_pq()
target_p, target_q = self.commander.target_prim.get_world_pose()
data = {}
robot_state = np.empty((1,), dtype=ROBOT_STATE_DTYPE)
robot_state['eef_pose']["position"] = p
robot_state['eef_pose']["orientation"] = quaternion.as_float_array(q)
robot_state['target_pose']["position"] = target_p
robot_state['target_pose']["orientation"] = target_q
#frame['eef_vel_lin'] = self.franka.gripper.get_linear_velocity()
#frame['eef_vel_ang'] = self.franka.gripper.get_angular_velocity()
twist = self.scene_context.ee_vel_tracker.get_twist()
if twist is None:
twist = np.zeros(6)
robot_state['eef_vel_lin'] = twist[:3]
robot_state['eef_vel_ang'] = twist[3:]
robot_state['joint_positions'] = self.franka.get_joint_positions()
robot_state['joint_velocities'] = self.franka.get_joint_velocities()
robot_state['applied_joint_positions'] = applied_action.joint_positions
robot_state['applied_joint_velocities'] = applied_action.joint_velocities
ui_state = np.empty((1,), dtype=UI_STATE_DTYPE)
cam_p, cam_q = self._camera_controls.camera.get_world_pose()
ui_state['primary_camera'] = self._camera_controls.active_index
ui_state['camera_pose']['position'] = cam_p
ui_state['camera_pose']['orientation'] = cam_q
ghost_i, (ghost_p, ghost_q) = self.suggestion_display_behavior.context.get_current_object_ghost_index_and_pose()
ui_state['object_ghost_pose']['position'] = ghost_p
ui_state['object_ghost_pose']['orientation'] = ghost_q
ui_state['object_ghost_index'] = ghost_i
ui_state["robot_ghost_joint_positions"] = self.suggestion_display_behavior.context.get_current_robot_ghost_joint_positions()
ui_state["ghost_is_snapped"] = self.selection_behavior.context.suggestion_is_snap
controls_state = np.empty((1,), dtype=CONTROLS_STATE_DTYPE)
controls_state["filtered"] = trans, rot, buttons
controls_state["raw"] = trans_raw, rot_raw, buttons_raw
data["robot_state"] = robot_state
data["controls_state"] = controls_state
data["scene_state"] = self._task.get_observations()
data["ui_state"] = ui_state
return data
| 18,162 | Python | 51.799418 | 391 | 0.665345 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/viz.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import omni
import srl.teleop
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pq2T, invert_T, normalized
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_at_path
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.objects.cone import VisualCone
from omni.isaac.core.objects.cylinder import VisualCylinder
from omni.isaac.core.objects.sphere import VisualSphere
from pxr import Usd, UsdGeom, Sdf, UsdLux
import quaternion
import numpy as np
import math
from pxr import Sdf, Usd, UsdGeom, Gf
from omni.isaac.debug_draw import _debug_draw
def ray_cast(
position: np.array, orientation: np.array, offset: np.array, max_dist: float = 100.0
, viz=False):
"""Projects a raycast forward along x axis with specified offset
If a hit is found within the maximum distance, then the object's prim path and distance to it is returned.
Otherwise, a None and 10000 is returned.
Args:
position (np.array): origin's position for ray cast
orientation (np.array): origin's orientation for ray cast
offset (np.array): offset for ray cast
max_dist (float, optional): maximum distance to test for collisions in stage units. Defaults to 100.0.
Returns:
typing.Tuple[typing.Union[None, str], float]: path to geometry that was hit and hit distance, returns None, 10000 if no hit occurred
"""
# based on omni.isaac.core.utils.collisions.ray_cast
if viz:
draw = _debug_draw.acquire_debug_draw_interface()
draw.clear_lines()
input_tr = Gf.Matrix4f()
input_tr.SetTranslate(Gf.Vec3f(*position.tolist()))
input_tr.SetRotateOnly(Gf.Quatf(*orientation.tolist()))
offset_transform = Gf.Matrix4f()
offset_transform.SetTranslate(Gf.Vec3f(*offset.tolist()))
raycast_tf = offset_transform * input_tr
trans = raycast_tf.ExtractTranslation()
direction = raycast_tf.ExtractRotation().TransformDir((1, 0, 0))
origin = (trans[0], trans[1], trans[2])
ray_dir = (direction[0], direction[1], direction[2])
if viz:
draw.draw_lines([np.array(trans)], [np.array(trans) + np.array(direction) * max_dist], [np.array((1,0,0, 1))], [1])
hit = omni.physx.get_physx_scene_query_interface().raycast_closest(origin, ray_dir, max_dist)
if hit["hit"]:
usdGeom = UsdGeom.Mesh.Get(get_current_stage(), hit["rigidBody"])
distance = hit["distance"]
return usdGeom.GetPath().pathString, distance
return None, 10000.0
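# Illustrative usage sketch (added; hypothetical, and only meaningful while a stage
# with physics is loaded): cast a ray straight down from 1m above the origin. The
# z-y-z euler rotation (0, pi/2, 0) turns the +X ray direction toward -Z.
def _ray_cast_example():
    down = quaternion.as_float_array(quaternion.from_euler_angles(0, math.pi / 2, 0))
    return ray_cast(np.array((0.0, 0.0, 1.0)), down, np.zeros(3), max_dist=10.0)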
def viz_axis(parent_path, position, orientation, scale=(1,1,1)):
prim_path = omni.usd.get_stage_next_free_path(get_current_stage(), parent_path, False)
prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=prim_path)
prim = XFormPrim(str(prim.GetPath()), position=position, orientation=orientation)
prim.prim.SetInstanceable(True)
prim.set_local_scale(scale)
return prim
def viz_axis_named_T(name: str, T: np.ndarray, scale=(1,1,1)):
p, q = T2pq(T, as_float_array=True)
viz_axis_named(name,p, q, scale)
def viz_axis_named_Rp(name: str, R: np.ndarray, p: np.ndarray, scale=(1,1,1)):
q = quaternion.from_rotation_matrix(R)
viz_axis_named(name, p, quaternion.as_float_array(q), scale)
def viz_axis_named_Ts(name: str, Ts: np.ndarray, scale=(1,1,1)):
path = f"/Viz/{name}"
proto_path = "/Viz/axis_proto"
if not is_prim_path_valid(proto_path):
proto = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=proto_path)
#UsdGeom.Imageable(proto).MakeInvisible()
p, q = T2pq(Ts)
QF = quaternion.as_float_array(q)
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
axes_prim.GetOrientationsAttr().Set(QF[:, (1,2,3,0)])
axes_prim.GetScalesAttr().Set([scale] * len(p))
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateOrientationsAttr(QF[:, (1,2,3,0)])
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * len(p))
def viz_axis_named(name: str, position: np.ndarray, orientation: np.ndarray, scale=(1,1,1)):
path = f"/Viz/{name}"
if is_prim_path_valid(path):
axis_prim = XFormPrim(path)
else:
axis_prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=path)
axis_prim = XFormPrim(str(axis_prim.GetPath()))
axis_prim.prim.SetInstanceable(True)
axis_prim.set_world_pose(position, orientation)
axis_prim.set_local_scale(scale)
return axis_prim
def viz_point_named(name: str, point, scale=(1,1,1)):
path = f"/Viz/{name}"
prim = VisualSphere(path, name, radius=scale[0] * .05 / 8)
prim.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
prim.set_world_pose(position=point)
def viz_points_named(name: str, points: np.ndarray, scale=(1,1,1), max_instances=None):
path = f"/Viz/{name}"
proto_path = "/Viz/sphere_proto"
p = points
assert len(points.shape) == 2 and points.shape[-1] == 3
if not is_prim_path_valid(proto_path):
proto = VisualSphere(proto_path, "sphere_Proto", radius=.05 / 8)
proto.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
if max_instances is None:
max_instances = len(points)
else:
p = np.resize(points, (max_instances, 3))
visible = np.arange(0, max_instances)
invisible = visible[len(points):]
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
#axes_prim.GetScalesAttr().Set([scale] * max_instances)
axes_prim.GetInvisibleIdsAttr().Set(invisible)
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * max_instances)
axes_prim.CreateInvisibleIdsAttr(invisible)
def viz_dirs_named_Ts(name, Ts, scale=(1,1,1), max_instances=None):
path = f"/Viz/{name}"
proto_path = "/Viz/cone_proto"
if not is_prim_path_valid(proto_path):
proto = VisualCone(proto_path, "cone_proto", height=0.05, radius=.05 / 8)
proto.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
p, q = T2pq(Ts)
QF = quaternion.as_float_array(q)
if max_instances is None:
max_instances = len(Ts)
else:
p = np.resize(p, (max_instances, 3))
QF = np.resize(QF, (max_instances, 4))
visible = np.arange(0, max_instances)
invisible = visible[len(Ts):]
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
axes_prim.GetOrientationsAttr().Set(QF[:, (1,2,3,0)])
#axes_prim.GetScalesAttr().Set([scale] * max_instances)
axes_prim.GetInvisibleIdsAttr().Set(invisible)
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateOrientationsAttr(QF[:, (1,2,3,0)])
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * max_instances)
axes_prim.CreateInvisibleIdsAttr(invisible)
def viz_delta(name, from_prim, to_prim, radius=0.001):
path = f"/Viz/delta/{name}"
if not is_prim_path_valid(path):
prim = VisualCylinder(path, f"delta{name}", height=0, radius=radius)
prim.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
else:
prim = VisualCylinder(path, f"delta{name}", height=0, radius=radius)
from_pq = from_prim.get_world_pose()
from_p, from_q = from_pq[0], quaternion.from_float_array(from_pq[1])
from_T = pq2T(*from_pq)
to_T = pq2T(*to_prim.get_world_pose())
direction = to_T[:3,3] - from_T[:3,3]
prim.set_height(np.linalg.norm(direction))
ori = quaternion.from_rotation_matrix(make_rotation_matrix(normalized(direction), (1,0,0)))
prim.set_world_pose(from_p + (direction / 2), quaternion.as_float_array(ori))
def viz_delta_rooted_at(name, root_path, to_prim, radius=0.0005):
path = f"{root_path}/{name}"
prim = XFormPrim(path)
marker_prim = VisualCylinder(path + "/marker", f"delta{name}", height=0, radius=radius)
marker_prim.geom.GetAxisAttr().Set("Z")
from_prim = XFormPrim(root_path)
from_pq = from_prim.get_world_pose()
from_T = pq2T(*from_pq)
to_T = pq2T(*to_prim.get_world_pose())
diff = invert_T(from_T) @ to_T
direction = diff[:3,3]
ori = quaternion.from_rotation_matrix(make_rotation_matrix((direction), (1,0,0)))
prim.set_local_pose((0,0,0), quaternion.as_float_array(ori))
dist = np.linalg.norm(direction)
marker_prim.set_height(dist)
marker_prim.set_local_pose((0,0, dist / 2), (1,0,0,0))
def viz_laser_rooted_at(root_path, T):
beam_path = f"{root_path}/beam"
hit_path = f"{root_path}/hit"
if not is_prim_path_valid(root_path):
root = XFormPrim(root_path)
p, q = T2pq(T)
# Rotate to point Y in direction of X. No axis attr on CylinderLight
q = q * quaternion.from_euler_angles(np.array((0,-math.pi / 2,0)))
root.set_local_pose(p, quaternion.as_float_array(q))
beam = UsdLux.CylinderLight.Define(get_current_stage(), beam_path)
beam.AddTranslateOp()
beam.CreateColorAttr((1.,.1,.1))
beam.CreateIntensityAttr(50000.)
beam.CreateRadiusAttr(0.00075)
beam.CreateLengthAttr(0.0)
raw_beam = get_prim_at_path(beam_path)
raw_beam.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool, True).Set(True)
hit = UsdLux.SphereLight.Define(get_current_stage(), hit_path)
hit.CreateColorAttr((1.,.8,.8))
hit.CreateIntensityAttr(300.)
hit.CreateRadiusAttr(0.0025)
hit.CreateExposureAttr(2.0)
hit.CreateDiffuseAttr(0.1)
hit.CreateSpecularAttr(0.9)
hit.AddTranslateOp()
raw_hit = get_prim_at_path(hit_path)
raw_hit.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool, True).Set(True)
else:
root = XFormPrim(root_path)
beam = UsdLux.CylinderLight(get_prim_at_path(beam_path))
hit = UsdLux.SphereLight(get_prim_at_path(hit_path))
p,q = root.get_world_pose()
_, dist = ray_cast(p, q, np.zeros(3), 100)
beam.GetLengthAttr().Set(dist)
beam.GetOrderedXformOps()[0].Set((dist / 2.0, 0, 0))
hit.GetOrderedXformOps()[0].Set((dist, 0, 0))
| 11,334 | Python | 41.453183 | 140 | 0.667461 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/camera_franka.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Optional, List
import numpy as np
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import get_stage_units, get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid, delete_prim, find_matching_prim_paths
from omni.isaac.core.prims.rigid_prim_view import RigidContactView, RigidPrimView
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.franka import Franka
from pxr import Sdf, UsdGeom, Gf
import omni
import omni.kit
import quaternion
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.transforms import T2pq, pq2T, rotate_vec_by_quat
from omni.isaac.sensor import Camera, ContactSensor
FINGER_CONTACT_OFFSET = np.array((0,0,.045))
class GripperContentsDebouncer:
def __init__(self) -> None:
self.last_contents_path = None
        # Stamp with the construction time so the age comparison in update() is valid
        # on the very first differing reading.
        self.last_contents_timestamp = time.time()
self.to_report = None
self.to_report_stamp = None
self.last_update = time.time()
def update(self, content_path):
now = time.time()
self.last_update = now
if self.last_contents_path == content_path:
self.last_contents_timestamp = now
elif now - self.last_contents_timestamp > 0.4:
#print("change to " + str(content_path))
self.last_contents_path = content_path
self.last_contents_timestamp = now
else:
pass
#print("ignoring change to " + str(content_path))
return self.last_contents_path
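# Illustrative sketch (added; the paths are hypothetical): a changed reading is only
# adopted once it has differed from the stable value for more than 0.4s, so momentary
# flickers in the contact reports are smoothed over.
def _debouncer_example():
    debouncer = GripperContentsDebouncer()
    debouncer.last_contents_path = "/World/cube"
    debouncer.last_contents_timestamp = time.time()  # reading is fresh
    assert debouncer.update(None) == "/World/cube"  # flicker ignored
    debouncer.last_contents_timestamp = time.time() - 0.5  # stale by more than 0.4s
    assert debouncer.update(None) is None  # change adopted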
class CameraFranka(Franka):
HOME_CONFIG = np.array([-0.01561307, -1.2717055 , -0.02706644, -2.859138, -0.01377442, 2.0233166, 0.7314064])
"""[summary]
Args:
prim_path (str): [description]
name (str, optional): [description]. Defaults to "franka_robot".
usd_path (Optional[str], optional): [description]. Defaults to None.
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
end_effector_prim_name (Optional[str], optional): [description]. Defaults to None.
gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None.
gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None.
gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "franka_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
deltas: Optional[np.ndarray] = None,
collision_sensors=True,
contact_paths=None,
camera_sensor=True
) -> None:
        # Fetch the assets root unconditionally; it is also needed below for the
        # detailed link geometry and realsense rig even when usd_path is provided.
        assets_root_path = get_assets_root_path()
        if usd_path is None:
            usd_path = assets_root_path + "/Isaac/Robots/Franka/franka.usd"
super().__init__(prim_path, name, usd_path, position, orientation,end_effector_prim_name, gripper_dof_names, gripper_open_position, gripper_closed_position)
stage = get_current_stage()
prim = stage.GetPrimAtPath(prim_path + "/panda_link0/geometry")
prim.GetReferences().ClearReferences()
prim.GetReferences().AddReference(assets_root_path + "/Isaac/Robots/Franka/DetailedProps/panda_link0.usd")
realsense_path = self.prim_path + "/panda_hand/geometry/realsense"
alt_fingers_realsense_path = f"{self.prim_path}/panda_hand/geometry/realsense/realsense_camera"
self._default_camera_transform = ((0.00,0.049,0.053), (.5,-.5,-.5,-.5))
if camera_sensor:
if not is_prim_path_valid(realsense_path):
realsense = UsdGeom.Xformable(add_reference_to_stage(assets_root_path + "/Isaac/Robots/Franka/DetailedProps/realsense.usd",realsense_path))
realsense.AddRotateXYZOp().Set((180.,180.,90.))
self._camera = Camera(alt_fingers_realsense_path)
self._camera.set_horizontal_aperture(200)
self._camera.set_focal_length(48.0)
self._camera.set_clipping_range(0.001, 10000000.0)
self._camera.set_local_pose(*self._default_camera_transform)
self._camera.set_resolution((1280,720))
#self._camera.pause()
else:
self._camera = Camera(alt_fingers_realsense_path)
else:
self._camera = None
self._physx_query_interface = omni.physx.get_physx_scene_query_interface()
self._gripper_contents_debouncer = GripperContentsDebouncer()
if self._end_effector_prim_name is None:
self._end_effector_prim_path = prim_path + "/panda_rightfinger"
if gripper_dof_names is None:
gripper_dof_names = ["panda_finger_joint1", "panda_finger_joint2"]
if gripper_open_position is None:
gripper_open_position = np.array([0.05, 0.05]) / get_stage_units()
if gripper_closed_position is None:
gripper_closed_position = np.array([0.0, 0.0])
if gripper_dof_names is not None:
if deltas is None:
deltas = np.array([0.05, 0.05]) / get_stage_units()
self._gripper = ParallelGripper(
end_effector_prim_path=self._end_effector_prim_path,
joint_prim_names=gripper_dof_names,
joint_opened_positions=gripper_open_position,
joint_closed_positions=gripper_closed_position,
action_deltas=deltas,
)
if not is_prim_path_valid(self.prim_path + "/panda_hand/leftfinger_collider"):
left_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/leftfinger_collider")
left_cube.AddTranslateOp().Set((0.0, 0.0525, 0.09))
left_cube.AddScaleOp().Set((0.01, 0.013, 0.025))
UsdGeom.Imageable(left_cube).MakeInvisible()
right_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/rightfinger_collider")
right_cube.AddTranslateOp().Set((0.0, -0.0525, 0.09))
right_cube.AddScaleOp().Set((0.01, 0.013, 0.025))
UsdGeom.Imageable(right_cube).MakeInvisible()
gripper_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/hand_collider")
gripper_cube.AddTranslateOp().Set((0.025, 0.0, 0.016))
gripper_cube.AddScaleOp().Set((0.045, 0.1, 0.05))
UsdGeom.Imageable(gripper_cube).MakeInvisible()
else:
left_cube = get_prim_at_path(self.prim_path + "/panda_hand/leftfinger_collider")
right_cube = get_prim_at_path(self.prim_path + "/panda_hand/rightfinger_collider")
gripper_cube = get_prim_at_path(self.prim_path + "/panda_hand/hand_collider")
self._gripper_collision_meshes = [gripper_cube, left_cube, right_cube]
self._gripper_collision_views = [XFormPrim(f"{part.GetPath()}") for part in self._gripper_collision_meshes]
self._palm_prim = XFormPrim(self.prim_path + "/panda_hand")
self.contact_sensors = []
self.contact_views = []
self.contact_path_filter = None
if collision_sensors:
if contact_paths:
for part in ["panda_leftfinger", "panda_rightfinger"]:
self.contact_views.append(RigidContactView(f"{prim_path}/{part}", contact_paths, name=f"{part}_rigid_contact_view"))
else:
if is_prim_path_valid(prim_path + "/panda_leftfinger/contact_sensor"):
delete_prim(prim_path + "/panda_leftfinger/contact_sensor")
delete_prim(prim_path + "/panda_rightfinger/contact_sensor")
left = ContactSensor(prim_path + "/panda_leftfinger/contact_sensor", "left_finger_contact_sensor", translation=FINGER_CONTACT_OFFSET, radius=.03)
right = ContactSensor(prim_path + "/panda_rightfinger/contact_sensor", "right_finger_contact_sensor", translation=FINGER_CONTACT_OFFSET, radius=.03)
left.add_raw_contact_data_to_frame()
right.add_raw_contact_data_to_frame()
self.contact_sensors = [left, right]
self.reset_camera_position()
@property
def camera(self) -> Camera:
"""[summary]
Returns:
RigidPrim: [description]
"""
return self._camera
def initialize(self, physics_sim_view=None) -> None:
"""[summary]
"""
for sensor in self.contact_sensors:
sensor.initialize(physics_sim_view)
for view in self.contact_views:
view.initialize(physics_sim_view)
super().initialize(physics_sim_view)
if self.camera:
# Prevent scrolling or clicking from moving the wrist camera
omni.kit.commands.execute("LockSpecs", spec_paths=[self.camera.prim_path])
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
self.reset_camera_position()
return
def reset_camera_position(self) -> None:
if self.camera:
self.camera.set_local_pose(*self._default_camera_transform)
def set_contact_path_filter(self, path_filter):
self.contact_path_filter = path_filter
def check_gripper_contents(self, threshold=None) -> Optional[str]:
"""Get the path of a prim that is colliding with the gripper's palm and/or either finger
Args:
threshold (_type_, optional): _description_. Defaults to None.
Returns:
str: _description_
"""
if len(self.contact_views) > 0:
forces = np.zeros(2)
finger_contact_ids = np.full(2, -1)
for i, view in enumerate(self.contact_views):
reading = np.squeeze(view.get_contact_force_matrix())
per_obj_norm = np.linalg.norm(reading, axis=-1)
highest_j = np.argmax(per_obj_norm)
forces[i] = per_obj_norm[highest_j]
finger_contact_ids[i] = highest_j
#print(finger_contact_paths, finger_contact_forces, finger_contact_times, overlapping)
if sum(forces != 0) == 2 and finger_contact_ids[0] == finger_contact_ids[1]:
# Optionally ensure that we're applying at least a certain amount of force
if threshold is not None and sum(forces) < threshold:
return None
return self.contact_path_filter[finger_contact_ids[0]]
return None
finger_contact_forces = []
finger_contact_paths = []
finger_contact_times = []
def check_non_robot_overlap():
paths = []
true_path = None
x_offset = (self.gripper.get_joint_positions()[0] - self.gripper.get_joint_positions()[1]) / 2
aperture = max(self.gripper.get_joint_positions()[0] + self.gripper.get_joint_positions()[1] - 0.01, 0)
if aperture == 0.0:
return None
def report_hit(hit):
nonlocal true_path
nonlocal paths
path = hit.rigid_body
if self.prim_path in path:
return True
paths.append(path)
if self.contact_path_filter is not None and self.contact_path_filter(path):
true_path = path
return False
return True # return True to continue the query
gripper_mesh = self._palm_prim
#left_mesh, right_mesh = self._gripper_collision_meshes[1], self._gripper_collision_meshes[2]
position, orientation = gripper_mesh.get_world_pose()[0], gripper_mesh.get_world_pose()[1]
position += rotate_vec_by_quat(np.array((0.,x_offset, .0895)), quaternion.from_float_array(orientation))
scale = (0.02, aperture ,0.045)
#cube = VisualCuboid("/viz/overlap", position=position, orientation=orientation,scale=scale)
numHits = self._physx_query_interface.overlap_box(np.array(scale) / 2, position, orientation, report_hit, False)
return true_path
overlapping = check_non_robot_overlap()
for sensor in self.contact_sensors:
reading = sensor.get_current_frame()
if len(reading["contacts"]) == 0:
continue
contact = reading["contacts"][0]
body0 = contact["body0"]
body1 = contact["body1"]
# Make sure we're getting the body that _isn't_ the robot
if self.prim_path not in body0.lower():
to_report = body0
elif self.prim_path not in body1.lower():
to_report = body1
else:
# Might happen if self collision is enabled?
assert False
finger_contact_forces.append(reading["force"])
finger_contact_paths.append(to_report)
finger_contact_times.append(reading["time"])
reading["contacts"].clear()
finger_contact_forces = tuple(finger_contact_forces)
#print(finger_contact_paths, finger_contact_forces, finger_contact_times, overlapping)
if len(finger_contact_forces) == 2:
# Optionally ensure that we're applying at least a certain amount of force
if threshold is not None and sum(finger_contact_forces) < threshold:
return None
if overlapping != finger_contact_paths[0]:
pass #print("gripper contents mismatch")
return overlapping
elif len(finger_contact_forces) == 1:
# Object isn't grasped unless both fingers are in contact, but sometimes the sensor is not correct
# so we just trust the overlap query
return overlapping
else:
return None
@property
def gripper_contents(self):
if time.time() - self._gripper_contents_debouncer.last_update > 0.01:
return self._gripper_contents_debouncer.update(self.check_gripper_contents(threshold=0.0001))
else:
return self._gripper_contents_debouncer.last_contents_path
def get_gripper_collision_meshes(self):
return self._gripper_collision_meshes
def get_gripper_collision_Ts(self):
self._gripper_collision_transforms = [pq2T(*view.get_world_pose()) for view in self._gripper_collision_views]
return self._gripper_collision_transforms
| 15,255 | Python | 45.941538 | 164 | 0.615011 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/transforms.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import math
from pxr import Gf
import quaternion
from quaternion import quaternion as quat
from numpy.linalg import norm
import copy
import traceback
from omni.isaac.core.utils.rotations import quat_to_rot_matrix, matrix_to_euler_angles, euler_angles_to_quat
from typing import Tuple, List, Optional
from omni.isaac.core.prims.rigid_prim import RigidPrim
from scipy.spatial.transform import Rotation
def orthogonalize(R: np.ndarray, prioritize=(0,1,2)) -> np.ndarray:
reverse_mapping = tuple(prioritize.index(i) for i in range(3))
# QR decomp will preserve the first axis. The priority
# arg lets the caller decide what they want to preserve.
ordered = R[:, prioritize]
ortho_R, r = np.linalg.qr(ordered)
# Sign of the upper-triangular component diagonals indicate
# whether the sign of the original axes were flipped. The
# result is still orthogonal, but we
# choose to flip them all back so that we have a unique
# solution that respects the input signs.
if r[0,0] < 0:
ortho_R[:, 0] *= -1
if r[1,1] < 0:
ortho_R[:, 1] *= -1
if r[2,2] < 0:
ortho_R[:, 2] *= -1
reordered = ortho_R[:, reverse_mapping]
return reordered
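# Minimal usage sketch (added for illustration): re-orthonormalize a numerically
# drifted rotation matrix while preserving its z-axis first.
def _orthogonalize_example():
    rng = np.random.default_rng(0)
    R_noisy = np.eye(3) + 1e-3 * rng.standard_normal((3, 3))
    R_fixed = orthogonalize(R_noisy, prioritize=(2, 0, 1))
    assert np.allclose(R_fixed @ R_fixed.T, np.eye(3))
    return R_fixed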
def matrix_to_quat(rot_mat):
    # NOTE: this name is shadowed by the quaternion-based matrix_to_quat defined later
    # in this module, so this euler-angle round-trip version is never the one called.
    return euler_angles_to_quat(matrix_to_euler_angles(rot_mat))
def unpack_T(T) -> Tuple[np.ndarray, np.ndarray]:
""" Returns the rotation matrix and translation separately
Returns (R, p)
"""
return T[..., :3, :3], T[..., :3, 3]
def unpack_R(R) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Returns the individual axes of the rotation matrix.
"""
return R[...,:3, 0], R[...,:3, 1], R[...,:3, 2]
def pack_R(ax, ay, az, as_homogeneous=False):
""" Returns a rotation matrix with the supplied axis columns.
R = [ax, ay, az]
"""
ax_v = np.atleast_2d(ax)
ay_v = np.atleast_2d(ay)
az_v = np.atleast_2d(az)
assert ax_v.shape[0] == ay_v.shape[0] == az_v.shape[0]
if as_homogeneous:
R = np.empty((ax_v.shape[0], 4, 4))
R[:] = np.eye(4)
else:
R = np.empty((ax_v.shape[0], 3, 3))
R[:] = np.eye(3)
R[...,:3, 0] = ax
R[...,:3, 1] = ay
R[...,:3, 2] = az
return np.squeeze(R)
def pack_Rp(R: np.ndarray, p: np.ndarray) -> np.ndarray:
""" Packs the provided rotation matrix (R) and position (p) into a homogeneous transform
matrix.
"""
# np.atleast_3d puts the extra dimension at the back but we need it at the front
Rv = np.atleast_2d(R)
Rb = Rv.view()
if Rv.ndim == 2:
Rb = Rv[None, :, :]
# The user can pass in a single R for many P, or a single P for many R. We'll size the output for
# the expected result of broadcasting.
pb = np.atleast_2d(p)
num_results = max(Rb.shape[0], pb.shape[0])
T = np.tile(np.eye(4)[None,...], (num_results, 1,1))
T[..., :3, :3] = Rb
T[..., :3, 3] = pb
if Rv.ndim == 2:
return T.squeeze()
else:
return T
def invert_T(T: np.ndarray):
""" Inverts the provided transform matrix using the explicit formula leveraging the
orthogonality of R and the sparsity of the transform.
Specifically, denote T = h(R, t) where h(.,.) is a function mapping the rotation R and
translation t to a homogeneous matrix defined by those parameters. Then
inv(T) = inv(h(R,t)) = h(R', -R't).
"""
R, t = unpack_T(T)
R_trans = np.swapaxes(R, -1, -2)
return pack_Rp(R_trans, np.squeeze(-R_trans @ t[..., None]))
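# Quick sanity sketch (added for illustration): the closed-form inverse composes with
# the original transform to the identity, without a general 4x4 inversion.
def _invert_T_example():
    R = quaternion.as_rotation_matrix(quaternion.from_euler_angles(0.1, 0.2, 0.3))
    T = pack_Rp(R, np.array((0.5, -0.2, 1.0)))
    assert np.allclose(invert_T(T) @ T, np.eye(4))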
def T2pq(T: np.ndarray, as_float_array=False) -> Tuple[np.ndarray, quat]:
""" Converts a 4d homogeneous matrix to a position-quaternion representation.
"""
R, p = unpack_T(T)
q = quaternion.from_rotation_matrix(R)
if as_float_array:
q = quaternion.as_float_array(q)
return p, q
def T2pq_array(T: np.ndarray) -> np.ndarray:
"""
Converts 4d homogeneous matrices to position-quaternion representation and stores them
in a (N,7) array. Rotation components of the transforms are assumed to already be orthonormal
"""
result = np.empty((len(T), 7), dtype=float)
R, result[:, :3] = unpack_T(T)
result[:, 3:] = quaternion.as_float_array(quaternion.from_rotation_matrix(R, nonorthogonal=False))
return result
def pq2T(p: np.ndarray, q: np.ndarray):
""" Converts a pose given as (<position>,<quaternion>) to a 4x4 homogeneous transform matrix.
"""
q_view = q
if q_view.dtype != "quaternion":
q_view = quaternion.from_float_array(q)
return pack_Rp(quaternion.as_rotation_matrix(q_view), p)
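# Round-trip sketch (added for illustration): position + quaternion -> homogeneous
# matrix -> back. Rotations are compared as matrices to sidestep the quaternion
# double cover (q and -q encode the same rotation).
def _pq_roundtrip_example():
    p = np.array((0.1, 0.2, 0.3))
    q = quaternion.from_euler_angles(0.4, 0.5, 0.6)
    p2, q2 = T2pq(pq2T(p, quaternion.as_float_array(q)))
    assert np.allclose(p2, p)
    assert np.allclose(quaternion.as_rotation_matrix(q2), quaternion.as_rotation_matrix(q))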
def euler2R(angles: np.array):
return pq2T((0,0,0), euler_angles_to_quat(angles))
def np_to_gfquat(q: np.array) -> Gf.Quatf:
qf = q.astype(float)
return Gf.Quatf(qf[0], Gf.Vec3f(qf[1], qf[2], qf[3]))
def rotate_vec_by_quat(v: np.ndarray, q: quat) -> np.ndarray:
q_view = quaternion.as_float_array(q)
u = q_view[1:]
s = q_view[0]
return 2.0 * np.dot(u, v) * u + (s*s - np.dot(u, u)) * v + 2.0 * s * np.cross(u, v)
def quat_vector_part(q):
"""Create an array of vector parts from an array of quaternions.
Parameters
----------
q : quaternion array_like
Array of quaternions.
Returns
-------
v : array
Float array of shape `q.shape + (3,)`
"""
q = np.asarray(q, dtype=np.quaternion)
return quaternion.as_float_array(q)[..., 1:]
def transform_dist(T1: np.ndarray, T2: np.ndarray, R_weight: float):
# eq 7 from 10.1007/978-3-319-33714-2_10
# Here the R distance is based on the magnitude of the geodesic, calculated directly via the trace
# If the translational distance is 0, the maximum distance is 2 * R_weight * sqrt(2/3). Set R_weight based on the size of the rigid bodies
# you are measuring between. So, around .15 is reasonable for a gripper
T1_v = T1.view()
T2_v = T2.view()
if len(T1.shape) == 2:
T1_v = T1[None,:]
if len(T2.shape) == 2:
T2_v = T2[None, :]
R1_inv = np.swapaxes(T1_v[...,:3,:3], -1, -2)
R2 = T2_v[...,:3,:3]
dists = np.linalg.norm(T2_v[..., :3, 3] - T1_v[...,:3,3], axis=-1) ** 2 + (2 * R_weight ** 2 * (1 - (np.trace(R1_inv @ R2, axis1=-1, axis2=-2) / 3)))
np.sqrt(dists, dists, where=dists>0)
return np.squeeze(dists)
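# Worked example (added for illustration): with identical rotations the metric
# reduces to pure translational distance, independent of R_weight.
def _transform_dist_example():
    T1 = np.eye(4)
    T2 = pq2T(np.array((1.0, 0.0, 0.0)), np.array((1.0, 0.0, 0.0, 0.0)))
    assert np.isclose(transform_dist(T1, T2, R_weight=0.15), 1.0)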
def quat_angle(q1: np.ndarray, q2: np.ndarray):
# Angle of rotation to get from one orientation to another
return np.arccos(2. * np.inner(q1, q2) ** 2 - 1)
def matrix_to_quat(mat: np.ndarray) -> np.ndarray:
""" Converts the provided rotation matrix into a quaternion in (w, x, y, z) order.
"""
return quaternion.as_float_array(quaternion.from_rotation_matrix(mat))
def matrix_to_euler_angles(mat: np.ndarray) -> np.ndarray:
"""Convert rotation matrix to Euler XYZ angles.
Args:
mat (np.ndarray): A 3x3 rotation matrix.
Returns:
np.ndarray: Euler XYZ angles (in radians).
"""
cy = np.sqrt(mat[0, 0] * mat[0, 0] + mat[1, 0] * mat[1, 0])
singular = cy < 0.00001
if not singular:
roll = math.atan2(mat[2, 1], mat[2, 2])
pitch = math.atan2(-mat[2, 0], cy)
yaw = math.atan2(mat[1, 0], mat[0, 0])
else:
roll = math.atan2(-mat[1, 2], mat[1, 1])
pitch = math.atan2(-mat[2, 0], cy)
yaw = 0
return np.array([roll, pitch, yaw])
def slerp_quat(quaternion_0: quat, quaternion_1: quat, alpha: float) -> quat:
return quaternion.slerp(quaternion_0, quaternion_1, 0, 1, alpha)
def normalize(v, axis=-1):
l2 = np.atleast_1d(norm(v, axis=axis))
l2[l2==0] = 1
return np.squeeze(v / np.expand_dims(l2, axis))
def normalized(v, axis=-1):
if v is None:
return None
return normalize(copy.deepcopy(v), axis=axis)
def proj_orth(v1, v2, normalize_res=False, eps=1e-5):
""" Projects v1 orthogonal to v2. If v2 is zero (within eps), v1 is returned
unchanged. If normalize_res is true, normalizes the result before returning.
"""
v1v = np.atleast_2d(v1)
v2_norm = np.atleast_1d(np.linalg.norm(v2, axis=-1))
unproj_mask = v2_norm < eps
v2n = v2 / np.expand_dims(v2_norm,axis=-1)
res = v1v - np.expand_dims(np.einsum('ij,ij->i',np.atleast_2d(v1), np.atleast_2d(v2n)), axis=-1) * v2n
res[unproj_mask] = v1v[unproj_mask]
res = np.squeeze(res)
if normalize_res:
return normalized(res)
else:
return res
def make_rotation_matrix(az_dominant: np.array, ax_suggestion: np.array):
""" Constructs a rotation matrix with the z-axis given by az_dominant (normalized), and the
x-axis given by a orthogonally projected version of ax_suggestion. The y-axis is formed via the
right hand rule.
"""
az_v = np.atleast_1d(az_dominant)
ax_v = np.atleast_1d(ax_suggestion)
az_norm = normalized(az_v)
ax_proj = proj_orth(ax_v, az_norm, normalize_res=True)
ay = np.cross(az_norm, ax_proj)
return pack_R(ax_proj, ay, az_norm)
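# Minimal sketch (added for illustration): the dominant axis lands exactly on the
# requested z direction and the result is orthonormal.
def _make_rotation_matrix_example():
    R = make_rotation_matrix(np.array((1.0, 0.0, 0.0)), np.array((0.0, 0.0, 1.0)))
    assert np.allclose(R[:, 2], (1.0, 0.0, 0.0))  # dominant z-axis honored
    assert np.allclose(R @ R.T, np.eye(3))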
def axes_to_mat(axis_x, axis_z, dominant_axis="z"):
if dominant_axis == "z":
axis_x = proj_orth(axis_x, axis_z)
elif dominant_axis == "x":
axis_z = proj_orth(axis_z, axis_x)
elif dominant_axis is None:
pass
else:
raise RuntimeError("Unrecognized dominant_axis: %s" % dominant_axis)
axis_x = axis_x / norm(axis_x)
axis_z = axis_z / norm(axis_z)
axis_y = np.cross(axis_z, axis_x)
R = np.zeros((3, 3))
R[0:3, 0] = axis_x
R[0:3, 1] = axis_y
R[0:3, 2] = axis_z
return R
# Projects T to align with the provided direction vector v.
def proj_to_align(R, v):
max_entry = max(enumerate([np.abs(np.dot(R[0:3, i], v)) for i in range(3)]), key=lambda entry: entry[1])
return axes_to_mat(R[0:3, (max_entry[0] + 1) % 3], v)
def shortest_arc(normal_1: np.ndarray, normal_2: np.ndarray) -> quat:
# Are the normals already parallel?
normal_dot = normal_1.dot(normal_2)
if normal_dot > .99999:
# Same direction -> identity quat
return quaternion.quaternion(1,0,0,0)
elif normal_dot < -.999999:
# Exactly opposing -> 180 about arbitrary axis
return quaternion.quaternion(0,0,1,0)
else:
# Shortest arc between the vectors
a = np.cross(normal_1, normal_2)
# w is simple because we have unit normals: sqrt(norm(v1)**2 * norm(v2)**2) -> 1
return quaternion.quaternion(1 + normal_dot, *a).normalized()
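# Minimal sketch (added for illustration): the returned quaternion rotates the first
# unit vector onto the second along the shortest arc.
def _shortest_arc_example():
    q = shortest_arc(np.array((1.0, 0.0, 0.0)), np.array((0.0, 1.0, 0.0)))
    assert np.allclose(quaternion.rotate_vectors(q, np.array((1.0, 0.0, 0.0))), (0.0, 1.0, 0.0))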
def transform_point(p: np.ndarray, T: np.ndarray) -> np.ndarray:
return (T @ np.array((*p, 1)))[:3]
def R_to_rot_vector(R: np.ndarray) -> np.ndarray:
theta = R_to_angle(R)
with np.errstate(invalid='ignore', divide='ignore'):
# undefined if theta is 0 but we handle that in the following line
aa = theta /(2 * np.sin(theta))*np.array([R[...,2,1]-R[...,1,2], R[...,0,2]-R[...,2,0], R[...,1,0]-R[...,0,1]])
return np.where(~np.isnan(theta) & (theta != 0.0), aa, 0).T
def R_to_angle(R: np.ndarray) -> np.ndarray:
return np.arccos(np.clip((np.trace(R, axis1=-1, axis2=-2) - 1) / 2.,-1, 1))
def random_vector_in_spherical_cap(theta, dir, n, rng=None) -> np.ndarray:
result = np.empty((n,3))
if rng is None:
rng = np.random.default_rng()
result[:, 2] = rng.uniform(size=n, low=np.cos(theta), high=1.)
    # Draw phi from the provided generator (not the global np.random state) so the
    # azimuthal samples are reproducible along with the z samples above.
    phi = rng.uniform(size=n) * 2 * math.pi
result[:, 0] = np.sqrt(1-result[:,2]**2)*np.cos(phi)
result[:, 1] = np.sqrt(1-result[:,2]**2)*np.sin(phi)
if np.allclose(dir, (0,0,1)):
return result
rot = shortest_arc(np.array((0,0,1)), dir)
return quaternion.rotate_vectors(rot, result)
def cone_vectors(theta, phi_steps):
"""
Generate unit vectors along the surface of the cone with aperture theta pointing toward -Z, taking
phi_steps stops along the circle
"""
theta_v = np.atleast_1d(theta)
result = np.empty((len(theta_v), phi_steps, 3), dtype=float)
phi = np.linspace(0, math.pi * 2, phi_steps, endpoint=False)
# These are spherical coordinates
result[:,:,0] = np.sin(theta_v)[:,None] * np.cos(phi)
result[:,:,1] = np.sin(theta_v)[:,None] * np.sin(phi)
result[:,:,2] = np.cos(theta_v)[:,None]
return result.squeeze()
class FrameVelocityEstimator:
def __init__(self, dt):
self.T_prev = None
self.T_diff = None
self.last_dt = None
self.dt = dt
@property
def is_available(self):
return self.T_diff is not None
def update(self, T, dt=None):
if self.T_prev is not None:
self.T_diff = (invert_T(self.T_prev) @ T)
self.T_prev = T
self.last_dt = dt
def get_twist(self, small_angle=False) -> Optional[np.ndarray]:
if self.T_diff is None:
return None
dt = self.last_dt if self.last_dt is not None else self.dt
diff = np.reshape(self.T_diff, (-1, 4,4))
out = np.zeros((diff.shape[0], 6))
out[:, :3] = self.T_diff[...,:3,3]
if small_angle:
# If the angle is small, the difference matrix is very close to I + an infinitesimal rotation.
# This is good up to about theta=0.1
out[:, 3] = self.T_diff[..., 2,1]
out[:, 4] = self.T_diff[..., 0,2]
out[:, 5] = self.T_diff[..., 1,0]
else:
out[:, 3:] = R_to_rot_vector(self.T_diff[...,:3, :3])
return np.squeeze(out / dt)
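# Usage sketch (added for illustration): feeding two frames 1cm apart along x to an
# estimator configured at 60Hz yields a linear velocity of about 0.6 m/s.
def _velocity_estimator_example():
    est = FrameVelocityEstimator(dt=1 / 60)
    est.update(np.eye(4))
    est.update(pq2T(np.array((0.01, 0.0, 0.0)), np.array((1.0, 0.0, 0.0, 0.0))))
    twist = est.get_twist()
    assert np.allclose(twist[:3], (0.6, 0.0, 0.0))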
def get_obj_poses(objects: List[RigidPrim]) -> np.ndarray:
N = len(objects)
positions = np.empty((N, 3))
quats = np.empty((N, 4))
for i, obj in enumerate(objects):
p, q = obj.get_world_pose()
positions[i, :] = p
quats[i, :] = q
return pq2T(positions, quaternion.from_float_array(quats))
def integrate_twist(v: np.ndarray, w: np.ndarray, time=1):
"""
    Find the matrix exponential of the 6 element twist, parameterized by time.
    Integrates the application of this twist over time.
"""
v = np.atleast_1d(v)
theta = np.linalg.norm(w)
if theta == 0:
return np.array([[1, 0, 0, v[0] * time],
[0, 1, 0, v[1] * time],
[0, 0, 1, v[2] * time],
[0, 0, 0, 1]])
else:
w_n = normalized(w)
theta *= time
# theta = time / theta
skew_w = np.array([[0, -w_n[2], w_n[1]],
[w_n[2], 0, -w_n[0]],
[-w_n[1], w_n[0], 0]])
skew_w_2 = skew_w @ skew_w
# Rodrigues' formula, forward exponential map (modern robotics 3.51)
R = np.eye(3) + (np.sin(theta) * skew_w) + ((1-np.cos(theta)) * skew_w_2)
        # Modern Robotics eq. 3.88, but we scale the amount we move down the screw
        # axis by the magnitude of the rotation
p = ((np.eye(3) * theta) + (1 - np.cos(theta)) * skew_w + (theta - np.sin(theta)) * (skew_w_2)) @ (v / np.linalg.norm(w))
return np.array([[R[0,0], R[0,1], R[0,2], p[0]],
[R[1,0], R[1,1], R[1,2], p[1]],
[R[2,0], R[2,1], R[2,2], p[2]],
[0, 0, 0, 1]])
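# Worked example (added for illustration): a pure angular velocity of pi/2 rad/s
# about z, integrated for one second, is a 90 degree rotation about z with no
# translation.
def _integrate_twist_example():
    T = integrate_twist(np.zeros(3), np.array((0.0, 0.0, math.pi / 2)), time=1)
    assert np.allclose(T[:3, 3], 0.0)
    assert np.allclose(T[:3, :3] @ np.array((1.0, 0.0, 0.0)), (0.0, 1.0, 0.0))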
def integrate_twist_stepwise(v: np.ndarray, w: np.ndarray, until_time: float, n_steps: int) -> np.ndarray:
""" Integrate the twist (v,w), providing 1 + until_time * n_steps points, beginning with (0,0,0)
"""
step = 1 / n_steps
result = np.empty((1 + int(until_time * n_steps), 3))
result[0] = (0,0,0)
R = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(w * step))
for i in range(1, len(result)):
result[i] = (R @ result[i-1]) + v * step
return result
def homogeneous_to_twist(Ts):
diff = np.reshape(Ts, (-1, 4,4))
out = np.zeros((diff.shape[0], 6))
out[:, :3] = Ts[...,:3,3]
out[:, 3:] = R_to_rot_vector(Ts[...,:3, :3])
return np.squeeze(out)
def lognormalize(x):
# Calculate log of all components exponentiated
a = np.logaddexp.reduce(x)
if a == float('-inf'):
# Return unchanged dist for all 0s
return x.copy()
# "Divide" all values by the max
return x - a
| 16,151 | Python | 32.861635 | 153 | 0.590737 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ghost_object.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Optional
import numpy as np
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf
from omni.isaac.core.prims.xform_prim import XFormPrim
import omni
from typing import Sequence
from pxr import Gf
from omni.physxcommands import UnapplyAPISchemaCommand
from srl.teleop.assistance.ghost_franka import load_ghost_material
def make_ghost(from_object_at_path, ghost_path, ghost_name, material_path="/Looks/GhostVolumetric"):
if is_prim_path_valid(ghost_path):
return
result = omni.kit.commands.execute(
"CopyPrimCommand", path_from=from_object_at_path, path_to=ghost_path, duplicate_layers=False, combine_layers=False
)
return GhostObject(ghost_path, ghost_name, material_path=material_path)
class GhostObject(XFormPrim):
def __init__(self, prim_path: str, name: str = "xform_prim", position: Optional[Sequence[float]] = None, translation: Optional[Sequence[float]] = None, orientation: Optional[Sequence[float]] = None, scale: Optional[Sequence[float]] = None, visible: Optional[bool] = False, material_path="/Looks/GhostVolumetric") -> None:
super().__init__(prim_path, name, position, translation, orientation, scale, visible)
self.material, self.material_inputs = load_ghost_material(material_path)
self.material_inputs["inputs:transmission_color"].Set((1.5, 1.5, 1.5))
self.material_inputs["inputs:emission_color"].Set((1.25, 1.25, 1.25))
self.material_inputs["inputs:emissive_scale"].Set(300.)
self._current_color = None
self._current_opacity = None
self._imageable = UsdGeom.Imageable(self.prim)
self.apply_visual_material(self.material)
self.remove_physics()
# Shadows give better depth cues, but have strange artifacts (z-fighting, and slow pop in)
#self.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
def disable_collisions(self):
# Disable colliders
for p in Usd.PrimRange(self.prim):
if p.HasAPI(UsdPhysics.CollisionAPI):
collision_api = UsdPhysics.CollisionAPI(p)
collision_api.GetCollisionEnabledAttr().Set(False)
if p.HasAPI(UsdPhysics.RigidBodyAPI):
physx_api = UsdPhysics.RigidBodyAPI(p)
physx_api.CreateRigidBodyEnabledAttr(False)
physx_api.GetRigidBodyEnabledAttr().Set(False)
def remove_physics(self):
UnapplyAPISchemaCommand(UsdPhysics.CollisionAPI, self.prim).do()
UnapplyAPISchemaCommand(UsdPhysics.RigidBodyAPI, self.prim).do()
@property
def visible(self):
return self._imageable.GetVisibilityAttr().Get() != "invisible"
def hide(self):
self._imageable.MakeInvisible()
def show(self):
self._imageable.MakeVisible()
def set_color(self, color, opacity=1.0):
if color == self._current_color and opacity == self._current_opacity:
# idempotent
return
transmission = 1.0 - opacity
def clip(value):
# Inputs seem to behave differently for 0 and close to 0 for some reason...
return Gf.Vec3f(*np.clip(value, 0.0001, 1.0))
# The colors you don't absorb will shine through.
# The color you emit shows in the absence of other colors
if color == "red":
self.material_inputs["inputs:absorption"].Set((transmission, 0, 0))
elif color == "yellow":
self.material_inputs["inputs:absorption"].Set((transmission, transmission, 0))
elif color == "green":
self.material_inputs["inputs:absorption"].Set((0, transmission, 0))
elif color == "white":
self.material_inputs["inputs:absorption"].Set(clip((opacity, opacity, opacity)))
else:
return
self._current_color = color
self._current_opacity = opacity
| 4,158 | Python | 42.778947 | 325 | 0.664502 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/swing_twist.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
from quaternion import quaternion as quat
from typing import Tuple
from .transforms import quat_vector_part
def swing_twist_decomposition(q: quat, axis: np.ndarray) -> Tuple[quat, quat]:
dir = quat_vector_part(q)
dot_dir_axis = dir.dot(axis)
projected = dot_dir_axis / np.linalg.norm(axis) * axis
twist = quat(q.w ,projected[0], projected[1], projected[2])
if dot_dir_axis < 0.0:
twist *= -1
twist /= twist.norm()
swing = q * twist.conjugate()
swing /= swing.norm()
return swing, twist
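# Minimal sketch (added for illustration): the decomposition recomposes to the input
# rotation, and the twist factor rotates purely about the given axis.
def _swing_twist_example():
    q = quat(0.9, 0.3, 0.2, 0.1).normalized()
    swing, twist = swing_twist_decomposition(q, np.array((1.0, 0.0, 0.0)))
    recomposed = swing * twist
    assert np.allclose((recomposed.w, recomposed.x, recomposed.y, recomposed.z),
                       (q.w, q.x, q.y, q.z))
    assert np.allclose((twist.y, twist.z), 0.0)  # twist is purely about the x axis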
class SwingTwistLeash:
def __init__(self, trans_limit, rot_limit) -> None:
self.set_limits(trans_limit, rot_limit)
    def set_limits(self, trans: float, rot: float):
        # trans is in meters, rot in radians
        self.rot_limit = rot
        self.trans_limit = trans
        # (1 - cos(theta)) / 2 == sin^2(theta / 2): squared norm of the swing's vector part at the limit
        self.max_swing_mag = (1. - np.cos(self.rot_limit)) / 2
        # sin(theta / 2): norm of the swing's vector part at the limit
        self.max_swing_mag2 = np.sin(.5 * self.rot_limit)
        # cos(theta / 2) == sqrt(1 - sin^2(theta / 2)): the matching scalar part
        self.max_swing_w = np.sqrt(1.0 - self.max_swing_mag)
def apply(self, anchor_p: np.ndarray, anchor_q: quat, new_p: np.ndarray, new_q: quat):
# And now we'll apply limits to keep the target within a certain delta from the current gripper pose
limited_p = new_p
pos_diff = np.array(new_p - anchor_p)
pos_diff_norm = np.linalg.norm(pos_diff)
        if pos_diff_norm > self.trans_limit:
            pos_dir = pos_diff / pos_diff_norm
            # Project the desired position target onto the surface of the sphere with the limit radius
            limited_p = anchor_p + (pos_dir * self.trans_limit)
# Orientation limits
limited_q = new_q
# Just the relative rotation from current orientation to the proposed new target
r_delta = new_q * anchor_q.conjugate()
# Get the part of the rotation that twists about (1,0,0) and the part that swings that axis
swing, twist = swing_twist_decomposition(r_delta, np.array((1,0,0)))
swing_vec = quat_vector_part(swing)
swing_magnitude = np.linalg.norm(swing_vec)
        # Cone constraint: limit the swing angle. A unit swing quaternion's vector
        # part has norm sin(angle / 2), so the bound is max_swing_mag2.
        if swing_magnitude > self.max_swing_mag2:
            limited_swing_vec = swing_vec / swing_magnitude * self.max_swing_mag2
w_sign = -1 if swing.w < 0 else 1
swing = quat(w_sign * self.max_swing_w, *limited_swing_vec)
limited_q = swing * twist * anchor_q
return limited_p, limited_q | 2,559 | Python | 38.384615 | 109 | 0.63306 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/assistance_extension.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance import Assistance
from omni.isaac.ui.ui_utils import cb_builder, dropdown_builder, btn_builder, combo_floatfield_slider_builder, state_btn_builder
from omni.kit.viewport.utility import get_active_viewport_window
import omni
from srl.teleop.assistance.experiment import PARTICIPANT_ID, SLOT_NAMES, configure_for_condition_index, get_ordering
from srl.teleop.assistance.logging import is_folder
from srl.teleop.assistance.spacemouse_demo import SpaceMouseManipulator
from srl.teleop.assistance.ui import ASSISTANCE_MODES, CONTROL_FRAMES, add_overlay, str_builder, multi_btn_builder
from srl.teleop.assistance.scene import ViewportScene
from srl.spacemouse.spacemouse_extension import get_global_spacemouse, get_global_spacemouse_extension
import os
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style
import numpy as np
import carb
from omni.isaac.core.utils.viewports import set_camera_view
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
import weakref
import omni.ext
import asyncio
from omni.isaac.core import World
from omni.kit.quicklayout import QuickLayout
from .logging import save_log
from functools import partial
import time
class AssistanceExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
        # Imported for its side effect: installs the lightweight carb.profiler shims defined in profiling.py
from . import profiling
self._ext_id = ext_id
menu_items = [MenuItemDescription(name="Teleop Assistance", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
self._menu_items = menu_items
add_menu_items(self._menu_items, "SRL")
self._settings = carb.settings.get_settings()
self._viewport = get_active_viewport_window("Viewport")
self.timeline = omni.timeline.get_timeline_interface()
self.task_ui_elements = {}
self._world_buttons = {}
self._plots = {}
self.build_ui(name="Teleop Assistance",
title="Teleop Assistance",
doc_link="",
overview="Provides assistance during human operated pick and place",
file_path=os.path.abspath(__file__),
number_of_extra_frames=3,
window_width=350,)
frame = self.get_frame(index=0)
self.build_assistance_ui(frame)
self.logging_ui = {}
frame = self.get_frame(index=1)
self.build_data_logging_ui(frame)
self.center_label, self.left_label = add_overlay(self._viewport, ext_id)
self._viewport_scene = ViewportScene(self._viewport, ext_id)
self._assistance_system = None
self._plotting_event_subscription = None
def get_frame(self, index):
if index >= len(self._extra_frames):
raise Exception("there were {} extra frames created only".format(len(self._extra_frames)))
return self._extra_frames[index]
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def shutdown_cleanup(self):
if self.center_label:
self.center_label[0].destroy()
self.center_label[1].destroy()
self.center_label = None
if self.left_label:
self.left_label[0].destroy()
self.left_label[1].destroy()
self.left_label = None
# destroy scene
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = None
def _on_logging_button_event(self, val):
self._assistance_system._on_logging_event(val)
self.logging_ui["Save Data"].enabled = True
return
def _on_save_data_button_event(self):
world = World.instance()
data_logger = world.get_data_logger()
frames = data_logger._data_frames
current_task_name = list(world.get_current_tasks())[0]
current_task = world.get_current_tasks()[current_task_name]
#user_name = self.logging_ui["User"].get_value_as_string()
user_name = str(PARTICIPANT_ID)
timestamp = time.time()
timestamp_str = time.strftime("%Y%m%d-%H%M%S")
log_path = self.logging_ui["Output Directory"].get_value_as_string()
log_name = f"{user_name}-{current_task_name}-{self.condition_i}-{timestamp_str}"
log_path = f"{log_path}/{log_name}.hdf5"
metadata = {"collected_timestamp": timestamp, "task": current_task_name, "user": user_name, "condition_id": self.condition_i, "experiment_i": self.world_i}
metadata = {**metadata, **current_task.get_params()}
def done_saving():
data_logger.reset()
# If we're saving at a shutdown point, UI and self will vanish
if hasattr(self, "logging_ui") and self.logging_ui:
self.logging_ui["Save Data"].enabled = False
self.logging_ui["Start Logging"].text = "START"
carb.log_info("Saved " + log_path)
self._viewport._post_toast_message("Saved log", "test")
asyncio.ensure_future(
save_log(log_path, frames, metadata, done=done_saving)
)
def _on_option_button_event(self, name, value):
asyncio.ensure_future(
self._assistance_system._on_ui_value_change(name, value)
)
def post_reset_button_event(self):
self.logging_ui["Start Logging"].enabled = True
self.logging_ui["Save Data"].enabled = False
def post_load_button_event(self):
self.logging_ui["Start Logging"].enabled = True
self.logging_ui["Save Data"].enabled = False
def _on_load_world(self, world_index):
self._enable_all_buttons(False, False)
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = ViewportScene(self._viewport, self._ext_id, use_scene_camera=False)
# This will close the current stage and stop the world, causing any logs to be saved
omni.usd.get_context().new_stage()
task, condition_i = configure_for_condition_index(world_index, self.task_ui_elements, PARTICIPANT_ID)
self.condition_i = condition_i
self.world_i = world_index
self._assistance_system = Assistance(task, None)
self._assistance_system.viewport_scene = self._viewport_scene
self._assistance_system.register_ui_models({
"control_frame": self.task_ui_elements["Control Frame"],
"overlay_opacity": self.task_ui_elements["Overlay Opacity"],
"assistance_mode": self.task_ui_elements["Assistance Mode"],
"avoid_obstacles": self.task_ui_elements["Avoid Obstacles"],
"suggest_grasps": self.task_ui_elements["Suggest Grasps"],
"suggest_placements": self.task_ui_elements["Suggest Placements"],
"snapping": self.task_ui_elements["Snapping"],
"use_laser": self.task_ui_elements["Laser"],
"use_surrogates": self.task_ui_elements["Surrogates"],
"center_label": self.center_label,
"left_label": self.left_label
})
async def _on_load_world_async():
found_mouse = await get_global_spacemouse_extension().discover_mouse()
if not found_mouse:
self._enable_all_buttons(True, True)
carb.log_error("Can't connect to spacemouse")
return
await self._assistance_system.load_world_async()
await omni.kit.app.get_app().next_update_async()
#self._viewport_scene.add_manipulator(lambda: SpaceMouseManipulator(grid=False))
self._assistance_system._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(True, True)
self.post_load_button_event()
self._assistance_system._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
self.timeline.play()
self._assistance_system._on_logging_event(True)
asyncio.ensure_future(_on_load_world_async())
"""if not self._plotting_event_subscription:
self._plotting_event_subscription = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
)"""
return
def _on_load_spacemouse_demo(self):
from srl.teleop.assistance import DATA_DIR
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = ViewportScene(self._viewport, self._ext_id, use_scene_camera=True)
# This will close the current stage and stop the world, causing any logs to be saved
omni.usd.get_context().new_stage()
QuickLayout.load_file(f"{DATA_DIR}/experiment_layout.json", False)
async def _load_async():
set_camera_view((-1., -3, 3), (0.,0.,0.))
found_mouse = await get_global_spacemouse_extension().discover_mouse()
if not found_mouse:
carb.log_error("Can't connect to spacemouse")
return
await omni.kit.app.get_app().next_update_async()
self._viewport_scene.add_manipulator(SpaceMouseManipulator)
self._enable_all_buttons(True, True)
self.post_load_button_event()
self._plotting_event_subscription = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
)
asyncio.ensure_future(_load_async())
def _on_plotting_step(self, step):
device = get_global_spacemouse()
if self._viewport_scene.manipulator:
self._viewport_scene.manipulator.update(device.get_controller_state())
def _on_reset(self):
async def _on_reset_async():
await self._assistance_system.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
asyncio.ensure_future(_on_reset_async())
return
def _on_stop(self):
async def _on_stop_async():
world = World.instance()
world.stop()
asyncio.ensure_future(_on_stop_async())
return
def _enable_all_buttons(self, load_flag, other_flag):
for btn in self._world_buttons["Load World"]:
btn.enabled=load_flag
for btn_name, btn in self._world_buttons.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = other_flag
self._world_buttons["Stop"].enabled = other_flag
def on_shutdown(self):
self._extra_frames = []
        if self._assistance_system is None:
            carb.log_warn("self._assistance_system is None during shutdown; this may be benign")
if self._assistance_system is not None and self._assistance_system._world is not None:
self._assistance_system._world_cleanup()
if self._menu_items is not None:
self._window_cleanup()
if self._world_buttons is not None:
self._enable_all_buttons(True, False)
self.shutdown_cleanup()
return
def _window_cleanup(self):
remove_menu_items(self._menu_items, "SRL")
self._window = None
self._menu_items = None
self._world_buttons = None
return
def on_stage_event(self, event):
# event_type = omni.usd.StageEventType(event.type)
if event.type == int(omni.usd.StageEventType.CLOSED):
# If the stage is closed before on_startup has run, all of our fields will be undefined
if World.instance() is not None and hasattr(self, "_assistance_system") and self._assistance_system:
self._assistance_system._world_cleanup()
self._assistance_system._world.clear_instance()
self._assistance_system = None
# There's no World now, so in any case the user can load anew!
if hasattr(self, "_world_buttons"):
self._enable_all_buttons(True, False)
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
if self._assistance_system:
self._enable_all_buttons(True, False)
self._on_save_data_button_event()
# NOTE(3-8-22): Trying to close the world here produces segfaults
return
def build_ui(self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=True, dockPreference=ui.DockPreference.RIGHT_TOP
)
self._window.deferred_dock_in("Stage", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
self._extra_frames = []
with self._window.frame:
with ui.VStack(spacing=5, height=0):
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="World Controls",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
ui.Label(f"You are participant {PARTICIPANT_ID}", width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip="Use this ID whenever prompted")
ui.Spacer(height=5)
dict = {
"label": "Load World",
"type": "button",
"text": SLOT_NAMES,
"tooltip": ["Load World and Task" for _ in range(len(SLOT_NAMES) + 1)],
"on_clicked_fn": [self._on_load_spacemouse_demo] + [partial(self._on_load_world,i) for i in range(len(SLOT_NAMES) - 1)],
}
self._world_buttons["Load World"] = multi_btn_builder(**dict)
for btn in self._world_buttons["Load World"]:
btn.enabled=True
dict = {
"label": "Stop",
"type": "button",
"text": "Stop",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_stop,
}
self._world_buttons["Stop"] = btn_builder(**dict)
self._world_buttons["Stop"].enabled = False
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._world_buttons["Reset"] = btn_builder(**dict)
self._world_buttons["Reset"].enabled = False
ui.Spacer(height=10)
ui.Label(f"Version 6430.{''.join(map(str,get_ordering(PARTICIPANT_ID)))}", width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip="")
with ui.VStack(style=get_style(), spacing=5, height=0):
for i in range(number_of_extra_frames):
self._extra_frames.append(
ui.CollapsableFrame(
title="",
width=ui.Fraction(0.33),
height=0,
visible=False,
collapsed=True,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
)
def build_assistance_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Settings"
frame.visible = True
dict = {
"label": "Control Frame",
"tooltip": "The coordinate system used to map control input to robot motion",
#"on_clicked_fn": self._on_control_frame_event,
"default_val": 2,
"items": CONTROL_FRAMES
}
self.task_ui_elements["Control Frame"] = dropdown_builder(**dict)
dict = {
"label": "Assistance Overlay Opacity",
"tooltip": ["How opaque the overlaid suggestions should be", ""],
"default_val": .2,
"min": 0.0,
"max": 1.0
}
self.task_ui_elements["Overlay Opacity"] = combo_floatfield_slider_builder(**dict)
dict = {
"label": "Assistance Mode",
"tooltip": "The format of assistance provided",
#"on_clicked_fn": self._on_assistance_mode_event,
"items": ASSISTANCE_MODES
}
self.task_ui_elements["Assistance Mode"] = dropdown_builder(**dict)
dict = {
"label": "Use Surrogates",
"tooltip": "Whether to use interactive surrogates to select suggestions",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "use_surrogates"),
}
self.task_ui_elements["Surrogates"] = cb_builder(**dict)
dict = {
"label": "Avoid Obstacles",
"tooltip": "Avoid Obstacles",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "avoid_obstacles"),
}
self.task_ui_elements["Avoid Obstacles"] = cb_builder(**dict)
dict = {
"label": "Suggest Grasps",
"tooltip": "Whether to suggest grasps",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "suggest_grasps"),
}
self.task_ui_elements["Suggest Grasps"] = cb_builder(**dict)
dict = {
"label": "Suggest Placements",
"tooltip": "Whether to suggest placements",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "suggest_placements"),
}
self.task_ui_elements["Suggest Placements"] = cb_builder(**dict)
dict = {
"label": "Snapping",
"tooltip": "Whether to snap suggestions",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "snapping"),
}
self.task_ui_elements["Snapping"] = cb_builder(**dict)
dict = {
"label": "Laser",
"tooltip": "Enable a laser pointer attached to the gripper",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "use_laser"),
}
self.task_ui_elements["Laser"] = cb_builder(**dict)
return
def build_data_logging_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Data Logging"
frame.visible = True
dict = {
"label": "Output Directory",
"type": "stringfield",
"default_val": os.path.expanduser('~/Documents/trajectories'),
"tooltip": "Output Directory",
"on_clicked_fn": None,
"use_folder_picker": True,
"item_filter_fn": is_folder,
"read_only": False,
}
self.logging_ui["Output Directory"] = str_builder(**dict)
dict = {
"label": "User",
"type": "stringfield",
"default_val": "unspecified",
"tooltip": "Name of operator",
"on_clicked_fn": None,
"use_folder_picker": False,
"read_only": False,
}
self.logging_ui["User"] = str_builder(**dict)
dict = {
"label": "Start Logging",
"type": "button",
"a_text": "START",
"b_text": "PAUSE",
"tooltip": "Start Logging",
"on_clicked_fn": self._on_logging_button_event,
}
self.logging_ui["Start Logging"] = state_btn_builder(**dict)
self.logging_ui["Start Logging"].enabled = False
dict = {
"label": "Save Data",
"type": "button",
"text": "Save Data",
"tooltip": "Save Data",
"on_clicked_fn": self._on_save_data_button_event,
}
self.logging_ui["Save Data"] = btn_builder(**dict)
self.logging_ui["Save Data"].enabled = False
return
| 22,036 | Python | 43.609312 | 167 | 0.548058 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/profiling.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from time import perf_counter
import carb.profiler
profile_table = {}
name_stack = []
class profile:
    def __init__(self, name="", active=True) -> None:
        self.name = name
        self.active = active
def __enter__(self):
self.time = perf_counter()
carb.profiler.begin(1, self.name, active=self.active)
return self
def __exit__(self, type, value, traceback):
self.time = perf_counter() - self.time
self.readout = f'{self.name} Time: {self.time * 1000:.2f} milliseconds'
carb.profiler.end(1, self.active)
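# Illustrative usage of the context manager above:
#   with profile("ik_solve") as p:
#       run_solver()      # hypothetical workload
#   print(p.readout)      # e.g. "ik_solve Time: 1.23 milliseconds"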
def is_profiler_active():
# Flip this to True if you want profiling information to print out
return False
def begin(mask, name, stack_offset=0, active=False):
if not is_profiler_active() or not active:
return
profile_table[name] = perf_counter()
name_stack.append(name)
def end(mask, active=False):
if not is_profiler_active() or not active:
return
start_stack_depth = len(name_stack)
if start_stack_depth == 0:
return
name = name_stack.pop()
print(" " * (start_stack_depth - 1) + f"{name}: {(perf_counter() - profile_table[name]) * 1000:.2f}ms")
del profile_table[name]
# Monkey-patch carb.profiler so begin/end route through the lightweight timers above
carb.profiler.begin = begin
carb.profiler.end = end
| 1,432 | Python | 26.557692 | 108 | 0.638966 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/shapenet_import.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from collections import defaultdict
import omni.client
import omni.kit
import omni.usd
import asyncio
import os
from pxr import UsdGeom, Gf, Tf, Usd, UsdShade, UsdPhysics
import random
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_type_name, delete_prim, get_all_matching_child_prims, get_prim_at_path
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.prims.rigid_prim import RigidPrim
from typing import Optional, Sequence
import numpy as np
from srl.teleop.assistance.transforms import invert_T
from omni.physx.scripts.utils import setColliderSubtree
from os import listdir
from os.path import isfile, join
ACRONYM_BY_CAT = None
ACRONYM_ROOT = os.environ["HOME"] + '/data/acronym/grasps'
def load_acronym_index():
    if not os.path.exists(ACRONYM_ROOT):
        return None
acronym_paths = [f for f in listdir(ACRONYM_ROOT) if isfile(join(ACRONYM_ROOT, f))]
acronym_tuples = [f[:f.rfind(".")].split("_") for f in acronym_paths]
by_cat = defaultdict(lambda: defaultdict(list))
for i, (cat, obj, scale) in enumerate(acronym_tuples):
by_cat[cat][obj].append((float(scale), acronym_paths[i]))
return by_cat
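# Illustrative shape of the returned index, given ACRONYM's "<category>_<object>_<scale>.h5"
# file naming (values here are made up):
#   by_cat["Mug"]["<object_hash>"] -> [(0.0128, "Mug_<object_hash>_0.0128.h5"), ...]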
def file_exists_on_omni(file_path):
result, _ = omni.client.stat(file_path)
if result == omni.client.Result.OK:
return True
return False
async def create_folder_on_omni(folder_path):
if not file_exists_on_omni(folder_path):
result = await omni.client.create_folder_async(folder_path)
return result == omni.client.Result.OK
async def convert(in_file, out_file):
    # Imported lazily: the asset converter extension may not be loaded at module import time.
import omni.kit.asset_converter as assetimport
    # Folders must be created first through usd_ext, or omni won't be able to create the files placed in them in the current session.
out_folder = out_file[0 : out_file.rfind("/") + 1]
# only call create_folder_on_omni if it's connected to an omni server
if out_file.startswith("omniverse://"):
await create_folder_on_omni(out_folder + "materials")
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.as_shapenet = True
converter_context.single_mesh = True
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
class ShapeNetPrim(RigidPrim):
def __init__(self, prim_path: str, metadata, name: str = "rigid_prim", position: Optional[Sequence[float]] = None, translation: Optional[Sequence[float]] = None, orientation: Optional[Sequence[float]] = None, scale: Optional[Sequence[float]] = None, visible: Optional[bool] = None, mass: Optional[float] = None, density: Optional[float] = None, linear_velocity: Optional[np.ndarray] = None, angular_velocity: Optional[np.ndarray] = None) -> None:
super().__init__(prim_path, name, position, translation, orientation, scale, visible, mass, density, linear_velocity, angular_velocity)
unit = metadata["unit"]
self.materials = []
self.material_inputs = {}
self.shaders = []
self.shader_inputs = {}
self._geometry_prims = []
for p in Usd.PrimRange(self.prim):
prim_type = get_prim_type_name(p.GetPath())
if p.GetPath() != self.prim_path and prim_type == "Xform":
as_xform = XFormPrim(p.GetPath())
as_xform.set_local_scale((unit, unit, unit))
self._geometery_xform = as_xform
self._geometry_prims = p.GetChildren()
self._geometry_prims = [UsdGeom.Mesh(raw) for raw in self._geometry_prims]
elif prim_type == "Material":
as_material = UsdShade.Material(p)
self.materials.append(as_material)
elif prim_type == "Shader":
as_shader = UsdShade.Shader(p)
inputs = as_shader.GetInputs()
self.shaders.append(as_shader)
self.shader_inputs[p.GetPath()] = {}
for input in inputs:
self.shader_inputs[p.GetPath()][input.GetFullName()] = input
self.add_colliders()
# Merge component meshes
all_points = []
all_indices = []
all_counts = []
index_offset = 0
for component in self._geometry_prims:
points = component.GetPointsAttr().Get()
indices = component.GetFaceVertexIndicesAttr().Get()
counts = component.GetFaceVertexCountsAttr().Get()
offset_indices = [x + index_offset for x in indices]
all_points.extend(points)
all_indices.extend(offset_indices)
all_counts.extend(counts)
index_offset = index_offset + len(points)
self.collision_geom = UsdGeom.Mesh.Define(get_current_stage(), prim_path + "/merged")
scale = self.collision_geom.AddXformOp(UsdGeom.XformOp.TypeScale, UsdGeom.XformOp.PrecisionDouble, "")
scale.Set(Gf.Vec3d(unit, unit, unit))
self.collision_geom.CreatePointsAttr(all_points)
self.collision_geom.CreateFaceVertexIndicesAttr(all_indices)
self.collision_geom.CreateFaceVertexCountsAttr(all_counts)
UsdGeom.Imageable(self.collision_geom).MakeInvisible()
self.make_visible()
def make_visible(self):
        # Restore full opacity on every shader that exposes an opacity constant.
for shader in self.shaders:
opacity_input = self.shader_inputs[shader.GetPath()].get("inputs:opacity_constant", None)
if opacity_input:
opacity_input.Set(1.0)
def add_colliders(self, approximationShape="convexDecomposition"):
        # Apply colliders to every mesh in the subtree with the requested approximation.
setColliderSubtree(self.prim, approximationShape)
"""for mesh in self._geometry_prims:
UsdPhysics.CollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI.CreateApproximationAttr().Set("none")"""
@property
def geom(self):
return self.collision_geom
async def add_shapenetsem_model(category, nth, prim_path, position, name):
global ACRONYM_BY_CAT
try:
import meshsets
        # meshsets resolves dataset files relative to this local root.
os.environ['MESHSETS_LOCAL_ROOT_DIR'] = os.environ['HOME'] + '/data/meshes'
dataset = meshsets.load_dataset('ShapeNetSem watertight')
obj_filepath = dataset.get_filenames(category)[nth]
obj_filename = obj_filepath[obj_filepath.rfind("/",1) + 1:]
obj_name = obj_filename[:obj_filename.rfind(".")]
except ImportError:
print("Couldn't import nvidia-meshsets. Can't add shapenet model.")
return None
if ACRONYM_BY_CAT is None:
ACRONYM_BY_CAT = load_acronym_index()
scale = None
if ACRONYM_BY_CAT is not None:
import h5py
scales = ACRONYM_BY_CAT[category][obj_name]
scale, filename = scales[0]
data = h5py.File(ACRONYM_ROOT + "/" + filename, "r")
grasps = np.array(data["grasps/transforms"])
success = np.array(data["grasps/qualities/flex/object_in_gripper"])
offset = np.identity(4)
# (invert_T(get_prim_world_T_meters("/motion_controller_target")) @ get_prim_world_T_meters(self.franka.prim_path + "/panda_hand"))[:3, 3]
offset[2,3] = .06
grasps = grasps[success == 1] @ offset
else:
grasps = None
dataset_name = obj_filepath.replace(os.environ['MESHSETS_LOCAL_ROOT_DIR'], '')
dataset_name = dataset_name[1:dataset_name.find("/",1)]
converted_folder_name = os.environ["MESHSETS_LOCAL_ROOT_DIR"] + "/" + dataset_name + "/usd"
    out_filepath = converted_folder_name + "/" + obj_name + ".usd"
import pathlib
pathlib.Path(converted_folder_name).mkdir(parents=True, exist_ok=True)
pathlib.Path(converted_folder_name + "/materials").mkdir(parents=True, exist_ok=True)
if not os.path.isfile(out_filepath):
await convert(obj_filepath, out_filepath)
added = add_reference_to_stage(out_filepath, prim_path)
metadata = dataset.get_metadata(obj_filepath)
if scale is not None:
metadata["unit"] = scale
    # Wrap the referenced prim, attaching materials, colliders and grasp annotations.
wrapped = ShapeNetPrim(prim_path, metadata, name=name, translation=position, mass=0.03)
wrapped.grasp_annotations = grasps
return wrapped
| 8,763 | Python | 41.543689 | 450 | 0.65263 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/proposals.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import math
import time
from enum import Enum
from typing import List, Optional, Union
import carb
import numpy as np
import quaternion
from omni.isaac.core.prims.rigid_prim import RigidPrim
from srl.teleop.assistance.motion_commander import ApproachParams
from srl.teleop.assistance.suggestions import generate_candidate_grasps, generate_candidate_placements
from srl.teleop.assistance.transforms import cone_vectors, get_obj_poses, invert_T, make_rotation_matrix, normalize, orthogonalize, pack_Rp, pq2T, \
rotate_vec_by_quat, shortest_arc, transform_point
class InvalidReason(Enum):
VALID = 0
UNREACHABLE = 1
MOVING = 2
GROUND_COLLISION = 3
SCENE_COLLISION = 4
UNKNOWN = 5
class GroupedPoseProposalTable:
def __init__(self, poses: np.ndarray, owning_objects: List[RigidPrim], obj_poses: np.ndarray, pose_owners: np.ndarray, groups: Optional[np.ndarray] = None):
self._poses = poses
self._poses_world = None
if groups is None:
self._groups = np.zeros((poses.shape[0]))
else:
self._groups = groups
self.objects = owning_objects
self.obj_Ts = obj_poses
self.objects_dirty = np.full((len(obj_poses),), True, dtype=bool)
self._owners = pose_owners
self._configs = np.full((len(poses), 7), np.nan, dtype=float)
self._valid = np.full((len(poses)), InvalidReason.UNKNOWN.value, dtype=int)
def update_world_poses(self, updated_poses: np.ndarray):
if self._poses_world is None:
self._poses_world = np.empty_like(self._poses)
self._poses_world[:] = updated_poses
def update_world_poses_masked(self, mask: np.ndarray, updated_poses: np.ndarray):
if self._poses_world is None:
self._poses_world = np.empty_like(self._poses)
self._poses_world[mask] = updated_poses
def mask_by_owner(self, owner_id: int) -> np.ndarray:
return self._owners == owner_id
@property
def valid(self):
return self._valid == InvalidReason.VALID.value
@property
def proposable(self):
return self.valid
def invalidate(self, mask: np.ndarray, reason: InvalidReason):
self._valid[mask] = reason.value
# Ensure that consumers don't get stale IK solutions
self._configs[mask].fill(np.nan)
def invalidate_submask(self, mask: np.ndarray, submask: np.ndarray, reason: InvalidReason):
masked, = np.where(mask)
self._valid[masked[submask]] = reason.value
# Ensure that consumers don't get stale IK solutions
self._configs[masked[submask]].fill(np.nan)
def invalidate_all(self, reason: InvalidReason):
self._valid[:] = reason.value
self._configs.fill(np.nan)
def __len__(self):
return self._poses.shape[0]
def empty(self):
return self.__len__() == 0
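# Illustrative lifecycle sketch for the table above (names are examples):
#   mask = table.mask_by_owner(obj_id)             # poses attached to one object
#   table.invalidate(mask, InvalidReason.MOVING)   # object moved; cached results are stale
#   table.update_world_poses_masked(mask, new_world_Ts)
#   candidate_Ts = table._poses_world[table.proposable]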
class Proposal:
def __init__(self, identifier: int, table: GroupedPoseProposalTable) -> None:
self._table = table
self.identifier = identifier
@property
def valid(self):
return self._table._valid[self.identifier] == 0
@property
def T_obj(self):
return self._table._poses[self.identifier][:]
@property
def T_world(self):
return self._table._poses_world[self.identifier][:]
def mark_invalid(self, reason: InvalidReason):
self._table._valid[self.identifier] = reason.value
class FixedTargetProposal:
def __init__(self, target_T: np.ndarray):
self.target_T = target_T
self.joint_config = np.full(9, np.nan)
@property
def T_world(self):
return self.target_T
@property
def valid(self):
return True
"""
Helpful geometry references:
* Surfaces: https://en.wikipedia.org/wiki/Surface_(mathematics)
"""
class GraspProposal(Proposal):
"""Proposals are suggestions that have been posed to the user. They can become invalid due to kinematics, collision, etc,
and they can be in any of several interaction states.
"""
def __init__(self, identifier: int, table: GroupedPoseProposalTable) -> None:
super().__init__(identifier, table)
@property
def obj_T(self) -> RigidPrim:
return self._table.obj_Ts[self.obj_id]
@property
def obj_id(self) -> int:
return self._table._owners[self.identifier]
def map_velocity_input(self, position: np.ndarray, vel: np.ndarray):
if np.linalg.norm(vel) < 0.0001:
# Fixture is undefined for 0 vel
return vel
# Prefer straight line to goal
line_to_goal = self.T_world[:3,3] - position
D = np.array([line_to_goal]).T
span_D = D @ (np.linalg.pinv(D.T @ D) @ D.T)
goal_dist = np.linalg.norm(line_to_goal)
# Lower dist -> more attenuation of motion not allowed by fixture
attenuation = sigmoid(goal_dist, .35, 5.0)
return vel @ (span_D + attenuation * (np.identity(3) - span_D))
@property
def joint_config(self):
return self._table._configs[self.identifier][:]
@property
def valid(self):
return self._table._valid[self.identifier] == InvalidReason.VALID.value
def update_eff_goal(self, eff_T_world, joint_config):
self._table._poses_world[self.identifier] = eff_T_world
self._table._configs[self.identifier] = joint_config
self._table._valid[self.identifier] = InvalidReason.VALID.value
def get_eff_T(self):
"""
The target pose where the end effector should be for the grasp. Not guaranteed to be reachable.
"""
return self.obj_T.dot(self.T_obj)
def build_approach_grasp_sample_pattern(n_rotations=14, max_rotation=math.pi, n_tilts=12, n_standoffs=1, n_neighbors=18):
z_rotations = np.zeros((1 + n_rotations,4,4))
theta = np.empty((1 + n_rotations))
theta[0] = 0
theta[1: 1 + (n_rotations // 2)] = np.linspace(-max_rotation / 2, 0, n_rotations // 2, endpoint=True)
theta[1 + (n_rotations //2):] = np.linspace(max_rotation / 2, 0, n_rotations // 2, endpoint=True)
z_rotations[:,0,0] = np.cos(theta)
z_rotations[:,0,1] = -np.sin(theta)
z_rotations[:,1,0] = np.sin(theta)
z_rotations[:,1,1] = np.cos(theta)
z_rotations[:,2,2] = 1
z_rotations[:,3,3] = 1
if n_neighbors < 10:
angle = np.linspace(0, math.pi * 2, n_neighbors // 2, endpoint=False)
rad = np.array((0.0025,0.005))
xy_offsets = np.vstack([(0,0), (np.array([np.cos(angle), np.sin(angle)]).T[None] * rad[:, None, None]).reshape(-1, 2)])
else:
# Use a hexagonal pattern to pack points efficiently
angle = np.empty((n_neighbors + 1))
angle[0] = 0
angle[1:7] = np.linspace(0, math.pi * 2, 6, endpoint=False)
angle[7:] = np.linspace(0, math.pi * 2, 12, endpoint=False)
rad = np.empty((n_neighbors + 1))
rad[0] = 0
rad[1:7] = .005
rad[7:] = .0075
xy_offsets = np.array([np.cos(angle), np.sin(angle)]).T * rad[:, None]
normals = np.vstack([(0,0,1), cone_vectors((0.1, 0.2, 0.3), n_tilts //3).reshape((-1, 3))])
tilts_R = make_rotation_matrix(normals, np.full_like(normals, (1,0,0), dtype=float))
grasp_Ts = np.zeros((n_standoffs + 1, n_rotations + 1, n_neighbors + 1, n_tilts + 1, 4, 4))
grasp_Ts[..., :, :] = np.identity(4)
grasp_Ts[..., (0,1),3] = xy_offsets[None, None,:,None]
points_view = grasp_Ts[..., :, 3]
points_view[1:,..., 2] = (.0075 * np.mgrid[1:n_standoffs + 1])[:, None, None, None]
grasp_Ts[..., :, 3] = points_view
grasp_Ts[..., :3, :3] = tilts_R[None, None,None, :]
grasp_Ts[:] = grasp_Ts[:, 0, :, :][:, None, :, :] @ z_rotations[None,:,None, None]
return np.reshape(grasp_Ts, (-1, 4, 4))
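# With the default arguments this yields (1 + 1) * (14 + 1) * (18 + 1) * (12 + 1) = 7410
# candidate poses, computed once at import time and reused for every query below.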
SAMPLER_PATTERN = build_approach_grasp_sample_pattern()
class GraspNormalProposalTable():
def __init__(self, object: RigidPrim, approach_T: np.ndarray, point: np.ndarray, normal: np.ndarray) -> None:
self.object = object
self.point = point
self.normal = normal
ee_ax = approach_T[:3, 0]
ee_ay = approach_T[:3, 1]
proposed_face_R = np.array([ee_ax, ee_ay, -normal]).T
R = orthogonalize(proposed_face_R, prioritize=(2,0,1))
T = pack_Rp(R, point)
carb.profiler.begin(1, "buildnormaltable", active=True)
self._poses_world = T @ SAMPLER_PATTERN
carb.profiler.end(1, True)
self._valid = np.full((len(self._poses_world)), InvalidReason.UNKNOWN.value, dtype=int)
self._configs = np.full((len(self._poses_world), 7), np.nan, dtype=float)
@property
def grasp_Ts(self):
return self._poses_world
@property
def valid(self):
return self._valid == InvalidReason.VALID.value
@property
def proposable(self):
return self.valid
def invalidate(self, mask: np.ndarray, reason: InvalidReason):
self._valid[mask] = reason.value
# Ensure that consumers don't get stale IK solutions
self._configs[mask].fill(np.nan)
def invalidate_submask(self, mask: np.ndarray, submask: np.ndarray, reason: InvalidReason):
masked, = np.where(mask)
self._valid[masked[submask]] = reason.value
# Ensure that consumers don't get stale IK solutions
self._configs[masked[submask]].fill(np.nan)
class PlacementProposal(Proposal):
def __init__(self, identifier, table, support_obj, place_obj) -> None:
super().__init__(identifier, table)
self.support_obj = support_obj
self.place_obj = place_obj
def update_eff_goal(self, eff_T_world, joint_config):
self._table._poses_world[self.identifier] = eff_T_world
self._table._configs[self.identifier] = joint_config
self._table._valid[self.identifier] = InvalidReason.VALID.value
def get_placement_T(self):
"""
The target pose to place the object into (world frame). Not guaranteed to be reachable.
"""
support_T = pq2T(*self.support_obj.get_world_pose())
return support_T.dot(self.T_obj)
def get_support_normal(self):
# T_obj position is vector from the support centroid to the place centroid in the support frame
# Rotate it into the global frame
return normalize(pq2T(*self.support_obj.get_world_pose())[:3,:3] @ self.T_obj[:3, 3])
def sigmoid(x: Union[float, np.array], x_midpoint: float, steepness: float):
"""Maps numbers to [0,1], linearly near midpoint, then logarithmically at tails
"""
return 1. / (1. + np.exp(-steepness * (x - x_midpoint)))
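# e.g. sigmoid(0.35, 0.35, 5.0) == 0.5 exactly at the midpoint; inputs well below the
# midpoint approach 0, which the velocity fixtures above rely on to attenuate
# off-constraint motion as the distance to the goal shrinks.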
class PlanePlaneProposalTable:
def __init__(self, owning_objects: List[RigidPrim], obj_poses: np.ndarray, support_centroid: np.ndarray, support_normals: np.ndarray, facet_object_owner: np.ndarray, facet_boundaries: List[List[int]]):
self.owning_objects = owning_objects
self.support_centroids = support_centroid
self.support_normals = support_normals
self.facet_object_owner = facet_object_owner
self._object_poses = obj_poses.copy()
self._valid = np.full((len(support_centroid)), InvalidReason.UNKNOWN.value, dtype=int)
def update_object_poses(self, poses: np.ndarray):
self._object_poses[:] = poses
def get_centroids_world(self, mask=None):
if mask is None:
# No op mask
mask = ...
world_Ts = self._object_poses[self.facet_object_owner][mask] @ pack_Rp(np.identity(3), self.support_centroids[mask])
return world_Ts[...,:3, 3]
def get_normals_world(self, mask=None):
if mask is None:
# No op mask
mask = ...
result = self._object_poses[self.facet_object_owner][mask][...,:3,:3] @ self.support_normals[mask][..., None]
return result.squeeze()
class PlanePlaneProposal():
def __init__(self, table: PlanePlaneProposalTable, support_index: int, place_index: int) -> None:
self._table = table
self.support_index = support_index
self.place_index = place_index
self.trans_offset = None
self.rot_offset = None
self.T_world = None
# FIXME: Check for 0 dot product
self.support_a1 = (1,0,0)
self.support_a2 = np.cross(self.support_normal_world, (1,0,0))
D = np.array([self.support_a1, self.support_a2]).T
self.span_D = D @ (np.linalg.pinv(D.T @ D) @ D.T)
self._valid = InvalidReason.VALID
@property
def support_obj(self) -> RigidPrim:
return self._table.owning_objects[self._table.facet_object_owner[self.support_index]]
@property
def support_obj_T(self) -> RigidPrim:
return self._table._object_poses[self._table.facet_object_owner[self.support_index]]
@property
def support_normal(self) -> np.ndarray:
return self._table.support_normals[self.support_index]
@property
def support_normal_world(self) -> np.ndarray:
return self._table.get_normals_world(mask=self.support_index)
@property
def support_centroid(self) -> np.ndarray:
return self._table.support_centroids[self.support_index]
@property
def support_centroid_world(self) -> np.ndarray:
return self._table.get_centroids_world(mask=self.support_index)
@property
def place_obj(self) -> RigidPrim:
return self._table.owning_objects[self._table.facet_object_owner[self.place_index]]
@property
def place_obj_T(self) -> np.ndarray:
return self._table._object_poses[self._table.facet_object_owner[self.place_index]]
@property
def place_normal(self) -> np.ndarray:
return self._table.support_normals[self.place_index]
@property
def support_T(self) -> np.ndarray:
# We'll take the normal as the z axis of it's local coordinate space,
# create a shortest rotation to get the object z to match that z,
# then use that rotation to define x and y
assert False
@property
def place_normal_world(self) -> np.ndarray:
return self._table.get_normals_world(mask=self.place_index)
@property
def place_centroid(self) -> np.ndarray:
return self._table.support_centroids[self.place_index]
@property
def place_centroid_world(self) -> np.ndarray:
return self._table.get_centroids_world(mask=self.place_index)
@property
def support_p(self) -> np.ndarray:
return self._table.support_centroids[self.support_index]
def map_velocity_input(self, position: np.ndarray, vel: np.ndarray):
if np.linalg.norm(vel) < 0.0001:
# Fixture is undefined for 0 vel
return vel
        cur_p, cur_q = self.place_obj.get_world_pose()
        cur_p += rotate_vec_by_quat(self.place_centroid, quaternion.from_float_array(cur_q))
        # TODO: Make sure we should be using cur_p and not the position arg
        plane_dist = self.support_normal_world.dot(cur_p - self.support_centroid_world)
# Lower dist -> more attenuation of motion not allowed by fixture
attenuation = sigmoid(plane_dist, .35, 5.0)
return vel @ (self.span_D + attenuation * (np.identity(3) - self.span_D))
def project_control_constraint_plane(self, vector: np.ndarray):
        # Not implemented; placeholder for projecting a control vector onto the constraint plane.
assert False
def project_to_constraint(self, point_world, point_obj):
# We'll work in the frame of the support object
support_obj_T = self.support_obj_T
support_normal = self.support_normal
support_centroid = self.support_centroid
place_centroid_in_support = transform_point(point_world, invert_T(support_obj_T))
#viz_axis_named("support", cur_p, cur_q, scale=(.2,.2,.2))
from_v = place_centroid_in_support - support_centroid
amount_orthogonal = np.dot(support_normal, from_v)
proj_on_plane = place_centroid_in_support - amount_orthogonal * support_normal
        # Keep the placement point at a fixed offset above the plane, then map back to the world frame.
return transform_point(proj_on_plane + np.linalg.norm(point_obj) * support_normal, support_obj_T)
def project_current_to_solution(self):
# Where is the placement point on the plane right now?
current_in_plane = self.project_to_constraint(self.place_centroid_world, self.place_centroid)
self.trans_offset = current_in_plane
def update_proposal(self, trans: np.ndarray):
#trans_in_plane = project_control_constraint_plane(trans)
trans_in_plane = (trans[0], trans[1], 0)
self.trans_offset += trans_in_plane
def get_placement_T(self):
# Are the normals already parallel?
normal_dot = self.support_normal.dot(self.place_normal)
if normal_dot > .99999:
# Same direction -> 180 about arbitrary axis
alignment_rotation = quaternion.quaternion(0,0,1,0)
elif normal_dot < -.999999:
# Already exactly opposing -> identity quat
alignment_rotation = quaternion.quaternion(1,0,0,0)
else:
            # Shortest arc taking the place normal onto the opposing support normal
            a = np.cross(self.support_normal, self.place_normal)
            # For unit normals the scalar part is 1 + dot(place, -support) = 1 - normal_dot
            alignment_rotation = quaternion.quaternion(1 - normal_dot, *a).normalized()
placement_T_obj = rotate_vec_by_quat(self.place_centroid, alignment_rotation)
return pq2T(self.support_centroid + -placement_T_obj, alignment_rotation)
"""T = pack_Rp(axes_to_mat(self.place_normal, (0,0,1)), -placement_T_obj + self.current_offset)
return T"""
@property
def valid(self):
return self._valid
def mark_invalid(self, reason: InvalidReason):
self._valid = reason
def build_proposal_tables(collision_checker, objects, fixed_geometry, gripper_collision_mesh):
obj_Ts = get_obj_poses(objects)
fixed_Ts = get_obj_poses(fixed_geometry)
candidates_by_obj = [generate_candidate_grasps(obj) for obj in objects]
per_obj = []
owners = []
for candidates, (i, obj) in zip(candidates_by_obj, enumerate(objects)):
if len(candidates) == 0:
continue
counts = collision_checker.query(candidates, from_mesh=gripper_collision_mesh, to_mesh=obj.geom, render=False, query_name=f"{obj.name}_grasp_filter")
#viz_axis_named_Ts(obj.name, pq2T(*obj.get_world_pose()) @ candidates, (.2,.2,.2))
non_colliding = candidates[counts == 0]
# NOTE: No guarantee there will be any poses left...
per_obj.append(non_colliding)
owners.append(np.full((len(non_colliding)), i))
if len(per_obj) == 0:
per_obj = [np.empty((0, 4, 4))]
owners = [np.empty((0,), dtype=int)]
grasp_suggestions = GroupedPoseProposalTable(np.vstack(per_obj), None, obj_Ts, np.hstack(owners))
placement_suggestions = [None for _ in objects]
# Break placement poses into tables based on the object in the gripper
for place_i, to_place in enumerate(objects):
placement_suggestions[place_i] = [None for _ in objects]
per_obj = []
owners = []
for align_j, align_with in enumerate(objects):
if place_i == align_j:
continue
placements = generate_candidate_placements(to_place, align_with)
per_obj.append(placements)
owners.append(np.full((len(placements),), align_j))
if len(per_obj) == 0:
per_obj = [np.empty((0, 4, 4))]
owners = [np.empty((0,), dtype=int)]
placement_suggestions[place_i] = GroupedPoseProposalTable(np.vstack(per_obj), None, obj_Ts, np.hstack(owners))
"""if place_i == 1:
align_T = pq2T(*self.objects[align_j].get_world_pose())
for l, placement_T in enumerate(self.placement_suggestions[place_i][align_j]):
viz_axis_named_T(f"placement_{place_i}_{align_j}_{l}", align_T.dot(placement_T), scale=(0.4,0.4,0.4))"""
# Precompute all object support facets and their properties
centroids, normals, area, boundary = [], [], [], []
for obj in itertools.chain(objects, fixed_geometry):
if not hasattr(obj, 'geom'):
continue
support = collision_checker.get_support_surfaces(obj.geom)
centroids.append(support[0])
normals.append(support[1])
area.append(support[3])
boundary.append(support[4])
support_centroids = np.vstack(centroids)
support_normals = np.vstack(normals)
    facet_owners = [[i] * len(c) for i, c in enumerate(centroids)]
    facet_owners = np.fromiter(itertools.chain(*facet_owners), int)
plane_proposals = PlanePlaneProposalTable(objects, np.vstack((obj_Ts, fixed_Ts)), support_centroids, support_normals, facet_owners, boundary)
return grasp_suggestions, placement_suggestions, plane_proposals
def make_approach_params_for_proposal(proposal):
if isinstance(proposal, GraspProposal):
# Pull out the Z axis of the target
approach_axis = proposal.T_world[:3, 2]
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
elif isinstance(proposal, PlacementProposal):
approach_axis = -proposal.get_support_normal()
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
elif isinstance(proposal, PlanePlaneProposal):
approach_axis = -proposal.support_normal_world
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
else:
return None | 21,540 | Python | 37.673249 | 205 | 0.634912 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/spacemouse_demo.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import quaternion
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.ui import scene as sc
from omni.ui import color as cl
from srl.teleop.assistance.transforms import integrate_twist_stepwise, integrate_twist
class SpaceMouseManipulator(sc.Manipulator):
def __init__(self, grid=True, axis_colors=True, **kwargs):
super().__init__(**kwargs)
self.current_twist = np.zeros(6, dtype=float)
self.grid = grid
def on_build(self):
T = np.eye(4)
points = integrate_twist_stepwise(self.current_twist[:3], self.current_twist[3:], 1, 20)
point_delta = np.linalg.norm(points[0] - points[1]) * 20
#point_deltas = np.linalg.norm(np.diff(points, axis=0), axis=1)
target_T = integrate_twist(self.current_twist[:3], self.current_twist[3:], 1)
# axes
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
if self.grid:
t = 1
# Draw a ground grid
for v in np.linspace(-2, 2, 20):
sc.Line([v, -2, -1], [v, 2, -1], color=cl("#444444ff"), thickness=t)
sc.Line([-2, v, -1], [2, v, -1], color=cl("#444444ff"), thickness=t)
k = .25
t = 4
# Draw faint origin axis
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff000066"), thickness=t)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff0066"), thickness=t)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff66"), thickness=t)
opacity = max(point_delta, .2)
sc.Curve(
points.tolist(),
thicknesses=[4.0],
colors=[cl(opacity, opacity, opacity)],
curve_type=sc.Curve.CurveType.LINEAR,
)
with sc.Transform(transform=sc.Matrix44(*target_T.T.flatten())):
k = .5
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff0000"), thickness=t)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff00"), thickness=t)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff"), thickness=t)
def update(self, control):
trans, rot = control.xyz, control.rpy
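        # Swap roll/pitch and flip signs so the device axes match the world-frame
        # convention used by integrate_twist (an assumption about the spacemouse axis ordering).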
rot[[0,1]] = rot[[1,0]]
rot[0] *= -1
rot[2] *= -1
dori_world = quaternion.from_float_array(euler_angles_to_quat(rot))
self.current_twist[:3] = trans
self.current_twist[3:] = quaternion.as_rotation_vector(dori_world)
self.invalidate()
| 2,662 | Python | 37.594202 | 96 | 0.549587 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/viewport.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.kit.viewport.utility import create_viewport_window, get_num_viewports, get_viewport_from_window_name, disable_context_menu, disable_selection
from omni.kit.viewport.window import get_viewport_window_instances, ViewportWindow
import omni
from pxr import Sdf
from typing import Optional
def get_window_by_name(window_name: str) -> Optional[ViewportWindow]:
try:
from omni.kit.viewport.window import get_viewport_window_instances
# Get every ViewportWindow, regardless of UsdContext it is attached to
for window in get_viewport_window_instances(None):
if window.title == window_name:
return window
except ImportError:
pass
def get_realsense_viewport(camera_path: Sdf.Path):
num_viewports = get_num_viewports()
if num_viewports == 1:
viewport_window = create_viewport_window(camera_path=camera_path,)
else:
viewport_window = get_window_by_name("Viewport 1")
viewport_window.viewport_api.set_active_camera(camera_path)
return viewport_window
def configure_main_viewport(viewport_window):
viewport_window.viewport_widget.fill_frame = False
viewport_window.viewport_api.set_texture_resolution((1280,720))
def configure_realsense_viewport(viewport_window):
viewport_window.viewport_widget.fill_frame = False
viewport_window.viewport_api.set_texture_resolution((1280,720))
def disable_viewport_interaction(viewport_window):
# These are RAII-style handles which will keep the viewport configured this way until the window handle
# is destroyed.
return disable_selection(viewport_window, disable_click=True), disable_context_menu(viewport_window)
def layout_picture_in_picture(main_viewport, nested_viewport):
width = main_viewport.width / 3
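    # 16:9 view plus room for the window title bar (the 26 px chrome height is an assumption)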
height = 26 + (width * 9/16)
pos_x = main_viewport.width + main_viewport.position_x - width
pos_y = main_viewport.position_y
nested_viewport.setPosition(pos_x, pos_y)
nested_viewport.width = width
nested_viewport.height = height | 2,189 | Python | 38.107142 | 151 | 0.734582 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ui.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import omni.ui as ui
from enum import Enum
from omni.isaac.ui.ui_utils import add_separator, add_line_rect_flourish, get_style
from omni.kit.window.filepicker import FilePickerDialog
CONTROL_FRAMES = ["End-effector", "Mixed", "World"]
class ControlFrame(Enum):
END_EFFECTOR = 0
MIXED = 1
WORLD = 2
ASSISTANCE_MODES = ["Completion", "Virtual Fixture", "Forced Fixture", "Interactive Fixture"]
class AssistanceMode(Enum):
COMPLETION = 0
VIRTUAL_FIXTURE = 1
FORCED_FIXTURE = 2
INTERACTIVE_FIXTURE = 3
def add_overlay(viewport_window: ui.Window, ext_id: str):
with viewport_window.get_frame(ext_id + "_button_indicator_center"):
with ui.Placer(offset_x=ui.Percent(45), offset_y=ui.Percent(90)):
with ui.ZStack(width=ui.Percent(10), height=48):
center_bg = ui.Rectangle(name="bg", style={"background_color": 0x33000000, "border_radius": 8})
center_label = ui.Label("",name="center_label", alignment=ui.Alignment.CENTER, width=ui.Percent(100), height=ui.Percent(100), style={"color":0x66FFFFFF, "font_size":24})
with viewport_window.get_frame(ext_id + "_button_indicator_left"):
with ui.Placer(offset_x=ui.Percent(10), offset_y=ui.Percent(90)):
with ui.ZStack(width=ui.Percent(5), height=48):
left_bg = ui.Rectangle(name="bg2", style={"background_color": 0x33000000, "border_radius": 8})
left_label = ui.Label("", name="left_label", alignment=ui.Alignment.CENTER, width=ui.Percent(100), height=ui.Percent(100), style={"color":0x99FFFFFF, "font_size":16})
return (center_label, center_bg), (left_label, left_bg)
LABEL_WIDTH = 160
LABEL_WIDTH_LIGHT = 235
LABEL_HEIGHT = 18
HORIZONTAL_SPACING = 4
def str_builder(
label="",
type="stringfield",
default_val=" ",
tooltip="",
on_clicked_fn=None,
use_folder_picker=False,
read_only=False,
item_filter_fn=None,
bookmark_label=None,
bookmark_path=None,
folder_dialog_title="Select Output Folder",
folder_button_title="Select Folder",
):
"""Creates a Stylized Stringfield Widget
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
type (str, optional): Type of UI element. Defaults to "stringfield".
default_val (str, optional): Text to initialize in Stringfield. Defaults to " ".
tooltip (str, optional): Tooltip to display over the UI elements. Defaults to "".
use_folder_picker (bool, optional): Add a folder picker button to the right. Defaults to False.
read_only (bool, optional): Prevents editing. Defaults to False.
item_filter_fn (Callable, optional): filter function to pass to the FilePicker
bookmark_label (str, optional): bookmark label to pass to the FilePicker
bookmark_path (str, optional): bookmark path to pass to the FilePicker
Returns:
AbstractValueModel: model of Stringfield
"""
with ui.HStack():
ui.Label(label, width=LABEL_WIDTH, alignment=ui.Alignment.LEFT_CENTER, tooltip=tooltip)
str_field = ui.StringField(
name="StringField", width=ui.Fraction(1), height=0, alignment=ui.Alignment.LEFT_CENTER, read_only=read_only
).model
str_field.set_value(default_val)
if use_folder_picker:
def update_field(filename, path):
if filename == "":
val = path
elif filename[0] != "/" and path[-1] != "/":
val = path + "/" + filename
elif filename[0] == "/" and path[-1] == "/":
val = path + filename[1:]
else:
val = path + filename
str_field.set_value(val)
if on_clicked_fn:
on_clicked_fn(val)
def set_initial_path(picker):
input_path = str_field.get_value_as_string()
picker.set_current_directory(input_path)
# Doesn't work...
#picker.navigate_to(input_path)
add_folder_picker_icon(
on_click_fn=update_field,
on_open_fn=set_initial_path,
item_filter_fn=item_filter_fn,
bookmark_label=bookmark_label,
bookmark_path=bookmark_path,
dialog_title=folder_dialog_title,
button_title=folder_button_title,
)
else:
add_line_rect_flourish(False)
return str_field
def add_folder_picker_icon(
on_click_fn,
on_open_fn=None,
item_filter_fn=None,
bookmark_label=None,
bookmark_path=None,
dialog_title="Select Trajectory File",
button_title="Select File",
):
def open_file_picker():
def on_selected(filename, path):
on_click_fn(filename, path)
file_picker.hide()
def on_canceled(a, b):
file_picker.hide()
file_picker = FilePickerDialog(
dialog_title,
allow_multi_selection=False,
apply_button_label=button_title,
click_apply_handler=lambda a, b: on_selected(a, b),
click_cancel_handler=lambda a, b: on_canceled(a, b),
item_filter_fn=item_filter_fn,
enable_versioning_pane=False,
)
if bookmark_label and bookmark_path:
file_picker.toggle_bookmark_from_path(bookmark_label, bookmark_path, True)
if on_open_fn:
on_open_fn(file_picker)
with ui.Frame(width=0, tooltip=button_title):
ui.Button(
name="IconButton",
width=24,
height=24,
clicked_fn=open_file_picker,
style=get_style()["IconButton.Image::FolderPicker"],
alignment=ui.Alignment.RIGHT_TOP,
)
def multi_btn_builder(
label="", type="multi_button", text=None, tooltip=None, on_clicked_fn=None
):
"""Creates a Row of Stylized Buttons
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
type (str, optional): Type of UI element. Defaults to "multi_button".
        text (list, optional): List of text rendered on the UI elements; one button is created per entry.
tooltip (list, optional): List of tooltips to display over the UI elements. Defaults to ["", "", ""].
on_clicked_fn (list, optional): List of call-backs function when clicked. Defaults to [None, None].
Returns:
list(ui.Button): List of Buttons
"""
btns = []
count = len(text)
with ui.VStack():
ui.Label(label, width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip=tooltip[0])
ui.Spacer(height=5)
for i in range(count):
btn = ui.Button(
text[i].upper(),
name="Button",
width=ui.Fraction(1),
clicked_fn=on_clicked_fn[i],
tooltip=tooltip[i + 1],
style=get_style(),
alignment=ui.Alignment.LEFT_CENTER,
)
if i in [3, 6, 9]:
ui.Spacer(height=10)
btns.append(btn)
            if i < count - 1:
                ui.Spacer(height=5)
#add_line_rect_flourish()
return btns
from string import Template
class DeltaTemplate(Template):
delimiter = "%"
def strfdelta(tdelta, fmt):
d = {"D": tdelta.days}
hours, rem = divmod(tdelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
d["H"] = '{:02d}'.format(hours)
d["M"] = '{:02d}'.format(minutes)
d["S"] = '{:02d}'.format(seconds)
t = DeltaTemplate(fmt)
return t.substitute(**d)
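# Illustrative: strfdelta(timedelta(seconds=3725), "%H:%M:%S") -> "01:02:05"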
| 7,908 | Python | 35.615741 | 185 | 0.596232 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/logging.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import os
from omni.kit.widget.filebrowser.filesystem_model import FileSystemItem
import h5py
SPACEMOUSE_STATE = np.dtype([('trans', '3f'),
('rot', '3f'),
('buttons', 'i')])
POSE_DTYPE = np.dtype([('position', '3f'), ('orientation', '4f')
])
OBJECT_META_DTYPE = np.dtype([("name","S32")])
ROBOT_STATE_DTYPE = np.dtype([('eef_pose', POSE_DTYPE),
('eef_vel_lin', '3f'),
('eef_vel_ang', '3f'),
('joint_positions', '9f'),
('joint_velocities', '9f'),
('applied_joint_positions', '9f'),
('applied_joint_velocities', '9f'),
('target_pose', POSE_DTYPE)
])
UI_STATE_DTYPE = np.dtype([
('camera_pose', POSE_DTYPE),
('primary_camera', int),
('robot_ghost_joint_positions', '9f'),
('object_ghost_index', int),
('object_ghost_pose', POSE_DTYPE),
('ghost_is_snapped', bool)
])
CONTROLS_STATE_DTYPE = np.dtype([
('filtered', SPACEMOUSE_STATE),
('raw', SPACEMOUSE_STATE)
])
def get_scene_state_type(n_objects: int):
return np.dtype([('poses', POSE_DTYPE, (n_objects,))])
def get_stamped_frame_type(n_objects: int):
return np.dtype([('robot_state', ROBOT_STATE_DTYPE), ('scene_state', get_scene_state_type(n_objects)), ('controls_state', CONTROLS_STATE_DTYPE), ('ui_state', UI_STATE_DTYPE), ('step_index', 'i'), ('time', 'f')])
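# Example: allocating and filling a frame buffer for a 3-object scene (sketch):
#   frames = np.empty((n_frames,), dtype=get_stamped_frame_type(3))
#   frames[0]["robot_state"]["eef_pose"]["position"] = (0.3, 0.0, 0.4)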
def is_hdf5_file(item: FileSystemItem):
    # item.path is already lowercased here, so a single lowercase extension check suffices
    _, ext = os.path.splitext(item.path.lower())
    return ext == ".hdf5"
def is_folder(item: FileSystemItem) -> bool:
return item.is_folder
async def save_log(file_path, frames, metadata, done=lambda: None):
num_objects = len(metadata["objects"])
with h5py.File(file_path, 'w') as f:
f.attrs.update(metadata)
frames_data = np.empty((len(frames),), dtype=get_stamped_frame_type(num_objects))
for i, frame in enumerate(frames):
data = frame.data
frames_data[i]["robot_state"] = data["robot_state"]
frames_data[i]["scene_state"]["poses"] = data["scene_state"]
frames_data[i]["controls_state"] = data["controls_state"]
frames_data[i]["ui_state"] = data["ui_state"]
frames_data[i]["step_index"] = frame.current_time_step
frames_data[i]["time"] = frame.current_time
f.create_dataset('frames', data=frames_data, compression="gzip")
done()
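# save_log is a coroutine; a minimal invocation sketch (hypothetical values):
#   metadata = {"objects": np.array([("cube0",)], dtype=OBJECT_META_DTYPE)}
#   asyncio.ensure_future(save_log("session.hdf5", frames, metadata))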
| 2,717 | Python | 35.729729 | 215 | 0.566434 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/serializable_task.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Dict, Optional
import carb
import numpy as np
import omni.usd
from omni.isaac.core.objects import DynamicCylinder, DynamicCone, DynamicCuboid, VisualCuboid, FixedCuboid, GroundPlane
from omni.isaac.core.materials import VisualMaterial, PreviewSurface, PhysicsMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.ghost_object import make_ghost, GhostObject
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.tasks import COLORS
class SerializableTask(BaseTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
) -> None:
self._initial_scene_description = initial_scene_description
super().__init__(
name=name,
offset=offset,
)
def get_scene_description(self) -> str:
stage = omni.usd.get_context().get_stage()
source_layer = stage.GetRootLayer()
prim_path = f"/World/{self.name}"
export_layer = Sdf.Layer.CreateAnonymous(".usda")
paths_map = {}
Sdf.CreatePrimInLayer(export_layer, "/Root")
Sdf.CopySpec(source_layer, prim_path, export_layer, "/Root")
paths_map[prim_path] = "/Root"
from srl.teleop.assistance import DATA_DIR
for prim in export_layer.rootPrims:
update_reference_paths(prim, DATA_DIR, ".")
for source_path, target_path in paths_map.items():
update_property_paths(prim, source_path, target_path)
return export_layer.ExportToString()
def load_scene_description(self, scene_str: str):
stage = omni.usd.get_context().get_stage()
root_layer = stage.GetRootLayer()
import_layer = Sdf.Layer.CreateAnonymous(".usda")
import_layer.ImportFromString(scene_str)
path_stem = f"/World/{self.name}"
# NOTE: The target path _must_ already be an xform prim, or CopySpec below will create
# a typeless "over" primspec in this spot, which will cause everything in the tree to not render.
paths_map = {}
with Sdf.ChangeBlock():
Sdf.CreatePrimInLayer(root_layer, path_stem)
Sdf.CopySpec(import_layer, "/Root", root_layer, path_stem)
paths_map["/Root"] = path_stem
from srl.teleop.assistance import DATA_DIR
for created_path in paths_map.values():
prim = root_layer.GetPrimAtPath(created_path)
update_reference_paths(prim, ".", DATA_DIR)
for source_path, target_path in paths_map.items():
update_property_paths(prim, source_path, target_path)
stage.GetPrimAtPath(path_stem).SetTypeName("Scope")
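    # Note: get_scene_description and load_scene_description form a round trip;
    # asset paths under DATA_DIR are rewritten to "." on export and restored on
    # import, keeping the serialized USDA string machine-portable.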
def update_property_paths(prim_spec, old_path, new_path):
if not prim_spec:
return
for rel in prim_spec.relationships:
rel.targetPathList.explicitItems = [
path.ReplacePrefix(old_path, new_path) for path in rel.targetPathList.explicitItems
]
for attr in prim_spec.attributes:
attr.connectionPathList.explicitItems = [
path.ReplacePrefix(old_path, new_path) for path in attr.connectionPathList.explicitItems
]
for child in prim_spec.nameChildren:
update_property_paths(child, old_path, new_path)
def update_reference_paths(prim_spec, old_prefix, new_prefix):
if prim_spec.HasInfo(Sdf.PrimSpec.ReferencesKey):
op = prim_spec.GetInfo(Sdf.PrimSpec.ReferencesKey)
items = []
items = op.ApplyOperations(items)
prim_spec.ClearReferenceList()
new_items = []
for item in items:
if item.assetPath.startswith(old_prefix):
new_items.append(Sdf.Reference(
assetPath=item.assetPath.replace(old_prefix, new_prefix, 1),
primPath=item.primPath,
layerOffset=item.layerOffset,
customData=item.customData,
))
else:
new_items.append(item)
prim_spec.referenceList.Append(new_items[-1])
for child in prim_spec.nameChildren:
update_reference_paths(child, old_prefix, new_prefix) | 5,488 | Python | 37.654929 | 119 | 0.666545 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/stacking.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Dict, Optional
import carb
import numpy as np
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim, RigidContactView
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim, find_matching_prim_paths
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
from srl.teleop.assistance.transforms import get_obj_poses
class ContactDebounce:
def __init__(self) -> None:
self.last_change_timestamp = None
self.last_value = None
def update(self, contact_matrix, threshold=0.0001):
now = time.time()
non_zero_contact_forces = np.abs(contact_matrix) > threshold
if self.last_value is None:
self.last_value = non_zero_contact_forces
self.last_change_timestamp = np.zeros_like(self.last_value, dtype=float)
self.last_change_timestamp[:] = now
return self.last_value
changed_mask = non_zero_contact_forces ^ self.last_value
expired_mask = (now - self.last_change_timestamp) > 0.3
self.last_value[changed_mask & expired_mask] = non_zero_contact_forces[changed_mask & expired_mask]
self.last_change_timestamp[changed_mask & expired_mask] = now
return self.last_value
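# Usage sketch: feed the raw contact-force matrix each step and read back a
# debounced boolean mask (an entry flips at most once every ~0.3 s):
#   debounce = ContactDebounce()
#   stable = debounce.update(contact_view.get_contact_force_matrix())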
class StackingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "stacking",
n_cuboids=6,
varieties=1,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
max_duration=60 * 2,
repeat=False,
rng = None
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_cuboids = n_cuboids
self.varieties = varieties
self._done = False
self.robot = None
self._initial_scene_description = initial_scene_description
self.repeat = repeat
self.contact_debounce = ContactDebounce()
if rng is None:
rng = np.random.RandomState(0)
self._initial_random_state = rng.get_state()[1]
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, max_duration)
return
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_cuboids" : self.n_cuboids,
"varieties": self.varieties,
"seed": self._initial_random_state
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
else:
pass
UNIT = 0.032
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.ghosts_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._ghost_objects[name] = GhostObject(prim_path, name=name)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = RigidPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
for i, color in enumerate(COLORS):
material_raw_prim = add_reference_to_stage(f"{DATA_DIR}/cardboard.usda", f"{self.task_root}/materials/cardboard_color{i}", "Material")
raw_material = UsdShade.Material(material_raw_prim)
shader = UsdShade.Shader(get_prim_at_path(str(raw_material.GetPath()) + "/Shader"))
shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f).Set((color[0] * 2, color[1] * 2, color[2] * 2))
self._materials.append(VisualMaterial(material_raw_prim.GetName(), str(raw_material.GetPath()), raw_material, [shader], raw_material))
#self._materials.append((PreviewSurface(prim_path=f"{objects_path}/materials/color{i}", color=np.array(color))))
self._physics_material = PhysicsMaterial(
prim_path=f"{self.objects_path}/materials/physics",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sizes = [(UNIT, UNIT, UNIT), (UNIT, UNIT, UNIT * 2), (UNIT, UNIT * 2, UNIT * 2), (UNIT, UNIT, UNIT * 4), (UNIT * 2, UNIT * 2, UNIT * 4)]
for i in range(self.n_cuboids):
choice = i % self.varieties
obj_name = f"cuboid{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.3, .1), (0.5, .3, .1))
new_object = scene.add(
DynamicCuboid(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
size=1.0,
scale=sizes[choice],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
new_object._rigid_prim_view.set_sleep_thresholds(np.zeros(2))
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(new_object.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
self._ghost_objects[ghost_name] = ghost
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
self._table_contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", [self._scene_objects["table_top"].prim_path], name="table_contact_view", apply_rigid_body_api=False)
        # RigidContactView lacks the scene-registry interface Scene.add expects; patch in the missing attributes.
self._table_contact_view.name = self._table_contact_view._name
self._table_contact_view.is_valid = lambda: True
self._table_contact_view.post_reset = lambda: None
self._scene.add(self._table_contact_view)
self._objects_contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", find_matching_prim_paths(f"{self.task_objects_path}/cuboid*"), name="objects_contact_view", apply_rigid_body_api=False)
self._objects_contact_view.name = self._objects_contact_view._name
self._objects_contact_view.is_valid = lambda: True
self._objects_contact_view.post_reset = lambda: None
self._scene.add(self._objects_contact_view)
return
def cleanup(self) -> None:
return super().cleanup()
def rerandomize(self) -> None:
for name, object in self._task_objects.items():
object.set_world_pose(self.rng.uniform((.4, -.3, .1), (0.5, .3, .1)), R.random(random_state=self.rng).as_quat())
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
TimeLimitedTask.pre_step(self, time_step_index, simulation_time)
        contact_forces = self._objects_contact_view.get_contact_force_matrix()
        contacts = self.contact_debounce.update(contact_forces)
        Ts = get_obj_poses(self._task_objects.values())
        lifted = abs(Ts[0,2,3] - Ts[1,2,3]) > .025
        grasping = self.robot.gripper_contents is not None
        # Any mutual z forces between objects?
        in_contact = np.any(contacts[:,:,2])
if in_contact and lifted and not grasping:
if self.repeat:
self.rerandomize()
else:
pass
out_of_bounds = np.bitwise_or(Ts[:,:3, 3] > (1.1, .9, 1.5), Ts[:,:3, 3] < (-1.0, -.9, -.75))
if np.any(out_of_bounds):
self.rerandomize()
return
| 9,814 | Python | 44.022936 | 210 | 0.606582 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/lifting.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Dict, Optional
import carb
import numpy as np
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim, RigidContactView
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
from srl.teleop.assistance.transforms import get_obj_poses
class LiftingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "lifting",
n_cuboids=6,
varieties=1,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
max_duration=60 * 2,
repeat=False,
rng = None
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_cuboids = n_cuboids
self.varieties = varieties
self._done = False
self.robot = None
self._initial_scene_description = initial_scene_description
self.last_settled = 0
self.repeat = repeat
if rng is None:
rng = np.random.RandomState(0)
self._initial_random_state = rng.get_state()[1]
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, max_duration)
return
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_cuboids" : self.n_cuboids,
"varieties": self.varieties,
"seed": self._initial_random_state
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
else:
pass
UNIT = 0.032
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.ghosts_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._ghost_objects[name] = GhostObject(prim_path, name=name)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = RigidPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
for i, color in enumerate(COLORS):
material_raw_prim = add_reference_to_stage(f"{DATA_DIR}/cardboard.usda", f"{self.task_root}/materials/cardboard_color{i}", "Material")
raw_material = UsdShade.Material(material_raw_prim)
shader = UsdShade.Shader(get_prim_at_path(str(raw_material.GetPath()) + "/Shader"))
shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f).Set((color[0] * 2, color[1] * 2, color[2] * 2))
self._materials.append(VisualMaterial(material_raw_prim.GetName(), str(raw_material.GetPath()), raw_material, [shader], raw_material))
#self._materials.append((PreviewSurface(prim_path=f"{objects_path}/materials/color{i}", color=np.array(color))))
self._physics_material = PhysicsMaterial(
prim_path=f"{self.objects_path}/materials/physics",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sizes = [(UNIT, UNIT, UNIT), (UNIT, UNIT, UNIT * 2), (UNIT, UNIT * 2, UNIT * 2), (UNIT, UNIT, UNIT * 4), (UNIT * 2, UNIT * 2, UNIT * 4)]
for i in range(self.n_cuboids):
choice = i % self.varieties
obj_name = f"cuboid{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.3, .1), (0.5, .3, .1))
new_object = scene.add(
DynamicCuboid(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
size=1.0,
scale=sizes[choice],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
new_object._rigid_prim_view.set_sleep_thresholds(np.array((0,0)))
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(new_object.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
self._ghost_objects[ghost_name] = ghost
self._contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", [self._scene_objects["table_top"].prim_path], name="contact_monitor", apply_rigid_body_api=False)
            # RigidContactView lacks the scene-registry interface Scene.add expects; patch in the missing attributes.
self._contact_view.name = self._contact_view._name
self._contact_view.is_valid = lambda: True
self._contact_view.post_reset = lambda: None
self._scene.add(self._contact_view)
"""self._other_view = RigidContactView(self._scene_objects["table_top"].prim_path, [f"{self.task_objects_path}/cuboid*", ], name="contact_monitor2", apply_rigid_body_api=False)
self._other_view.name = self._other_view._name
self._other_view.is_valid = lambda: True
self._other_view.post_reset = lambda: None
self._scene.add(self._other_view)"""
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
return
def cleanup(self) -> None:
return super().cleanup()
def rerandomize(self) -> None:
for name, object in self._task_objects.items():
object.set_world_pose(self.rng.uniform((.4, -.3, .1), (0.5, .3, .1)), R.random(random_state=self.rng).as_quat())
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
TimeLimitedTask.pre_step(self, time_step_index, simulation_time)
Ts = get_obj_poses(self._task_objects.values())
lifted = abs(Ts[0,2,3]) > .025
        grasping = self.robot.gripper_contents is not None
if time_step_index - self.last_settled > 100 and lifted and grasping:
if self.repeat:
self.rerandomize()
self.last_settled = time_step_index
else:
pass
return
| 8,528 | Python | 42.963917 | 188 | 0.597444 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/__init__.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
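# NOTE: COLORS is rebound several times below; only the final assignment takes
# effect, and the earlier palettes appear to be kept for reference.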
COLORS = [(69,119,170),(102,204,238),(34,136,51), (204,187,168), (238,102,119), (170,51,119)]
BRIGHT_COLORS = [(0,119,187),(51,187,238),(0,153,136), (238,119,51), (204,51,17), (238,51,119)]
COLORS = [(0,1,1),(1,0,1),(1,1,0), (0,0,1), (0,1,0), (1,0,0)]
COLORS = [(0,.37,1),(.983,.13,.98),(.873,.24,0), (1,.1,.1), (0.276,.56,.1), (1,.1,.1), (0.2,0.2,0.2)]
CUBOID_FACE_COLORS = [(0,0,1), (0,0,1), (0,1,0), (0,1,0), (1,0,0), (1,0,0)] | 575 | Python | 46.999996 | 101 | 0.558261 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/table_task.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Dict, Optional
import carb
import numpy as np
import omni.usd
from omni.isaac.core.objects import FixedCuboid, GroundPlane
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
class TableTask(BaseTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "sorting",
offset: Optional[np.ndarray] = None,
) -> None:
self._task_objects = {}
self._scene_objects = {}
self._ghost_objects = {}
self._ghost_robots = {}
self._contact_view = None
self.robot = None
self._materials = []
self._physics_material = None
self._settings = carb.settings.get_settings()
# NOTE: Needed for shadows
self._settings.set("/rtx/directLighting/sampledLighting/enabled", True)
def add_groundplane(
self,
prim_path: str,
z_position: float = 0,
name="ground_plane",
static_friction: float = 0.5,
dynamic_friction: float = 0.5,
restitution: float = 0.8,
) -> None:
"""[summary]
Args:
z_position (float, optional): [description]. Defaults to 0.
name (str, optional): [description]. Defaults to "default_ground_plane".
prim_path (str, optional): [description]. Defaults to "/World/defaultGroundPlane".
static_friction (float, optional): [description]. Defaults to 0.5.
dynamic_friction (float, optional): [description]. Defaults to 0.5.
restitution (float, optional): [description]. Defaults to 0.8.
Returns:
[type]: [description]
"""
        if self.scene.object_exists(name=name):
            carb.log_info("ground plane already created with name {}.".format(name))
            return self.scene.get_object(name=name)
from srl.teleop.assistance import DATA_DIR
add_reference_to_stage(usd_path=f"{DATA_DIR}/ground_plane.usda", prim_path=prim_path)
physics_material = PhysicsMaterial(
prim_path=f"{prim_path}/materials/physics",
static_friction=static_friction,
dynamic_friction=dynamic_friction,
restitution=restitution,
)
plane = GroundPlane(prim_path=prim_path, name=name, z_position=z_position, physics_material=physics_material)
self.scene.add(plane)
return plane
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
self.task_root = f"/World/{self.name}"
self.objects_path = f"{self.task_root}/objects"
self.materials_path = f"{self.task_root}/materials"
self.task_objects_path = f"{self.objects_path}/task"
self.ghosts_path = f"{self.objects_path}/ghosts"
self.robots_path = f"{self.objects_path}/robots"
stage = omni.usd.get_context().get_stage()
stage.DefinePrim(self.objects_path, "Scope")
stage.DefinePrim(self.task_objects_path, "Scope")
stage.DefinePrim(self.ghosts_path, "Scope")
stage.DefinePrim(self.materials_path, "Scope")
stage.DefinePrim(self.robots_path, "Scope")
from srl.teleop.assistance import DATA_DIR
self.add_groundplane(z_position=-0.83, prim_path=f"{self.task_root}/ground_plane")
add_reference_to_stage(usd_path=DATA_DIR + "/table.usd", prim_path=f"{self.objects_path}/table")
add_reference_to_stage(usd_path=DATA_DIR + "/lighting.usda", prim_path=f"{self.task_root}/lights")
table = XFormPrim(f"{self.objects_path}/table")
table_top = FixedCuboid(f"{self.objects_path}/table/top/collider", name="table_top_collider")
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(table_top.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
table_top.set_collision_enabled(True)
table.set_world_pose((0.4, 0.0, -0.427), (1,0,0,1))
self._scene_objects["table_top"] = table_top
def add_robot(self):
"""[summary]
Returns:
Franka: [description]
"""
env_path = f"/World/{self.name}/robots"
contact_paths=[obj.prim_path for obj in self._task_objects.values()]
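        # contact_paths is computed above but not passed below, so per-object
        # contact monitoring on the robot appears to be disabled here.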
self.robot = self.scene.add(CameraFranka(prim_path=env_path + "/franka", name="franka", contact_paths=None))
def add_ghost_robots(self):
env_path = f"/World/{self.name}/robots"
for ghost_index in range(1):
ghost_name = f"ghost_franka{ghost_index}"
ghost_path = f"{env_path}/{ghost_name}"
ghost_robot = self.scene.add(GhostFranka(prim_path=ghost_path, name=ghost_name, material_path=f"/World/{self.name}/materials/ghost"))
self._ghost_robots[ghost_name] = ghost_robot
def get_ghost_objects(self) -> Dict[str, RigidPrim]:
return self._ghost_objects
def get_scene_objects(self) -> Dict[str, RigidPrim]:
return self._scene_objects
def get_observations(self) -> np.ndarray:
"""[summary]
Returns:
dict: [description]
"""
observations = np.empty((len(self._task_objects),), dtype=POSE_DTYPE)
for i, obj in enumerate(self._task_objects.values()):
observations[i] = obj.get_world_pose()
return observations
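    # The structured result can be indexed by field, e.g. obs["position"] is an
    # (N, 3) array and obs["orientation"] an (N, 4) array.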
def get_params(self) -> dict:
object_info = []
for obj in self._task_objects.values():
object_info.append((obj.name))
return {
"objects" : np.array(object_info, dtype=OBJECT_META_DTYPE),
"robot_name": self.robot.name,
"scene_description": self._initial_scene_description,
}
def set_object_poses(self, poses: np.ndarray):
with Sdf.ChangeBlock():
for i, obj in enumerate(self._task_objects.values()):
pose = poses[i]
obj.set_world_pose(*pose)
def post_reset(self) -> None:
for name, robot in self._ghost_robots.items():
robot.hide()
robot.gripper.open()
self.robot.set_joint_positions(np.array([-0.01561307, -1.2717055, -0.02706644, -2.859138, -0.01377442,
2.0233166, 0.7314064, 0.04, 0.04], dtype=np.float32))
self.robot.gripper.open()
return super().post_reset() | 7,454 | Python | 39.961538 | 145 | 0.632815 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/sorting.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from functools import partial
import math
from typing import Dict, Optional
import carb
import numpy as np
import omni.usd
from omni.isaac.core.objects import DynamicCylinder, DynamicCone, DynamicCuboid, VisualCuboid, FixedCuboid, GroundPlane
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
class SortingTask(TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "sorting",
n_cuboids=18,
n_cylinders=6,
n_cones=6,
varieties=1,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
rng = None
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_cuboids = n_cuboids
self.n_cylinders = n_cylinders
self.n_cones = n_cones
self._zones = {}
self._initial_scene_description = initial_scene_description
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
def get_params(self) -> dict:
object_info = []
for obj in self._task_objects.values():
object_info.append(obj.name)
return {
"n_cuboids" : self.n_cuboids,
"n_cylinders" : self.n_cylinders,
"n_cones" : self.n_cones,
"objects" : np.array(object_info, dtype=OBJECT_META_DTYPE),
"robot_name": self.robot.name,
"scene_description": self._initial_scene_description
}
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
else:
pass
UNIT = 0.032
zones_path = f"{self.objects_path}/zones"
stage = omni.usd.get_context().get_stage()
stage.DefinePrim(zones_path, "Scope")
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.ghosts_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._ghost_objects[name] = GhostObject(prim_path, name=name)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = RigidPrim(prim_path, name=name)
for prim in get_prim_at_path(zones_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._zones[name] = GeometryPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
for i, color in enumerate(COLORS):
material_raw_prim = add_reference_to_stage(f"{DATA_DIR}/cardboard.usda", f"{self.task_root}/materials/cardboard_color{i}", "Material")
raw_material = UsdShade.Material(material_raw_prim)
shader = UsdShade.Shader(get_prim_at_path(str(raw_material.GetPath()) + "/Shader"))
shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f).Set((color[0] * 2, color[1] * 2, color[2] * 2))
self._materials.append(VisualMaterial(material_raw_prim.GetName(), str(raw_material.GetPath()), raw_material, [shader], raw_material))
#self._materials.append((PreviewSurface(prim_path=f"{objects_path}/materials/color{i}", color=np.array(color))))
self._physics_material = PhysicsMaterial(
prim_path=f"{self.objects_path}/materials/physics",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sizes = [(UNIT, UNIT, UNIT), (UNIT, UNIT, UNIT * 2), (UNIT, UNIT * 2, UNIT * 2), (UNIT, UNIT, UNIT * 4), (UNIT * 2, UNIT * 2, UNIT * 4)]
for i in range(self.n_cuboids):
choice = i % len(sizes)
obj_name = f"cuboid{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.3, .2), (0.5, .3, .4))
new_object = scene.add(
DynamicCuboid(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
size=1.0,
scale=sizes[choice],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
"""new_object.geom.GetDisplayColorAttr().Set(cube_vert_colors)
UsdShade.MaterialBindingAPI(new_object.geom).UnbindAllBindings()
UsdGeom.Primvar(new_object.geom.GetDisplayColorAttr()).SetInterpolation("uniform")"""
"""viz = viz_axis(prim_path + "/axis", None, None)
viz.set_local_scale((20, 20, 20))"""
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(new_object.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
self._ghost_objects[ghost_name] = ghost
sizes = [(UNIT / 2., UNIT * 2), (UNIT / 2., UNIT * 4), (UNIT, UNIT * 2), (UNIT, UNIT * 4), (UNIT * 2, UNIT * 4)]
for i in range(self.n_cylinders):
choice = i % len(sizes)
obj_name = f"cylinder{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.3, .2), (0.5, .3, .4))
new_object = scene.add(
DynamicCylinder(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
radius=sizes[choice][0],
height=sizes[choice][1],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
# PhysX has custom collision implementations for cones and cylinders
new_object.prim.CreateAttribute(PhysxSchema.Tokens.physxCollisionCustomGeometry, Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
ghost.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
ghost.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
self._ghost_objects[ghost_name] = ghost
for i in range(self.n_cones):
choice = i % len(sizes)
obj_name = f"cone{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
name = f"cone{i}"
rand_pos = self.rng.uniform((.4, -.3, .2), (0.5, .3, .4))
new_object = scene.add(
DynamicCone(
                        name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
radius=sizes[choice][0],
height=sizes[choice][1],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
new_object.prim.CreateAttribute(PhysxSchema.Tokens.physxCollisionCustomGeometry, Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
ghost.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
ghost.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
self._ghost_objects[ghost_name] = ghost
#new_object.prim.SetInstanceable(True)
for i in range(len(sizes)):
choice = i % len(sizes)
prim_path = f"{zones_path}/zone{i}"
name = f"zone{i}"
zone = scene.add(
FixedCuboid(
name=name,
position=(.3, .3 + (i * -.15), .00125 / 2),
orientation=(1.,0.,0.,0.),
prim_path=prim_path,
size=1.0,
scale=(.1, .1, .00125),
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._zones[name] = zone
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
#delete_prim("/World/sorting/objects")
#self.load_scene_description(self._initial_scene_description)
self._scene_objects.update(self._zones)
def cleanup(self) -> None:
return super().cleanup()
def is_done(self):
return False | 12,525 | Python | 47.929687 | 150 | 0.559441 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/subset_stacking.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from functools import partial
import math
import time
from typing import Dict, Optional
import carb
import numpy as np
import omni.usd
from omni.isaac.core.objects import DynamicCuboid, DynamicCylinder
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim, RigidContactView
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim, find_matching_prim_paths
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
class SubsetStackingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "subset_stacking",
n_cuboids=36,
n_cylinders=0,
varieties=4,
n_stackable=3,
n_stacks=2,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
rng = None
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_cuboids = n_cuboids
self.n_cylinders = n_cylinders
self.n_stackable = n_stackable
self.n_stacks = n_stacks
self.varieties = varieties
self.robot = None
self._initial_scene_description = initial_scene_description
if rng is None:
rng = np.random.RandomState(0)
self._initial_random_state = rng.get_state()[1]
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, 60 * 7)
return
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_cuboids" : self.n_cuboids,
"n_cylinders": self.n_cylinders,
"n_stackable": self.n_stackable,
"n_stacks": self.n_stacks,
"varieties": self.varieties,
"seed": self._initial_random_state
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
else:
pass
UNIT = 0.032
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.ghosts_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._ghost_objects[name] = GhostObject(prim_path, name=name)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = RigidPrim(prim_path, name=name)
table_top = GeometryPrim(f"{self.objects_path}/table/top")
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
for i, color in enumerate(COLORS):
material_raw_prim = add_reference_to_stage(f"{DATA_DIR}/cardboard.usda", f"{self.task_root}/materials/cardboard_color{i}", "Material")
raw_material = UsdShade.Material(material_raw_prim)
shader = UsdShade.Shader(get_prim_at_path(str(raw_material.GetPath()) + "/Shader"))
shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f).Set((color[0] * 2, color[1] * 2, color[2] * 2))
self._materials.append(VisualMaterial(material_raw_prim.GetName(), str(raw_material.GetPath()), raw_material, [shader], raw_material))
#self._materials.append((PreviewSurface(prim_path=f"{objects_path}/materials/color{i}", color=np.array(color))))
self._physics_material = PhysicsMaterial(
prim_path=f"{self.objects_path}/materials/physics",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sizes = [(UNIT, UNIT, UNIT), (UNIT, UNIT, UNIT * 2), (UNIT, UNIT * 2, UNIT * 2), (UNIT, UNIT, UNIT * 4), (UNIT * 2, UNIT * 2, UNIT * 4)]
for i in range(self.n_cuboids):
choice = i % self.varieties
material = self._materials[-1]
if i < self.n_stackable * self.n_stacks:
material = self._materials[i // self.n_stackable]
obj_name = f"cuboid{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.35, .2), (0.5, .35, .4))
new_object = scene.add(
DynamicCuboid(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
size=1.0,
scale=sizes[choice],
visual_material=material,
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
new_object._rigid_prim_view.set_sleep_thresholds(np.zeros(2))
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(new_object.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
self._ghost_objects[ghost_name] = ghost
sizes = [(UNIT / 2., UNIT * 2), (UNIT / 2., UNIT * 4)]
for i in range(self.n_cylinders):
choice = i % len(sizes)
obj_name = f"cylinder{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.35, .2), (0.5, .35, .4))
new_object = scene.add(
DynamicCylinder(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
radius=sizes[choice][0],
height=sizes[choice][1],
visual_material=self._materials[-1],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
# PhysX has custom collision implementations for cones and cylinders
new_object.prim.CreateAttribute(PhysxSchema.Tokens.physxCollisionCustomGeometry, Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
new_object.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
ghost.prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool, True).Set(True)
ghost.prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int, True).Set(2)
self._ghost_objects[ghost_name] = ghost
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
"""self._objects_contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", find_matching_prim_paths(f"{self.task_objects_path}/cuboid*"), name="objects_contact_view", apply_rigid_body_api=False)
self._objects_contact_view.name = self._objects_contact_view._name
self._objects_contact_view.is_valid = lambda: True
self._objects_contact_view.post_reset = lambda: None
self._scene.add(self._objects_contact_view)"""
#delete_prim("/World/sorting/objects")
#self.load_scene_description(self._initial_scene_description)
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
TimeLimitedTask.pre_step(self, time_step_index, simulation_time)
| 10,043 | Python | 47.057416 | 217 | 0.590561 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/reaching.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Dict, Optional
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.core.prims import RigidPrim, XFormPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from srl.teleop.assistance.logging import OBJECT_META_DTYPE
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pack_Rp, pq2T, transform_dist
from omni.isaac.franka import KinematicsSolver
TARGET_POSES = [
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, -.2, .35]),
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, .2, .35]),
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, 0, .07]),
pack_Rp(make_rotation_matrix((0,0,-1), (.5,.5,0)), [.3, 0, .07]),
pack_Rp(make_rotation_matrix((0,.1,-1), (-.5,.5,0)), [.35, .10, .12]),
pack_Rp(make_rotation_matrix((1,0,-1), (-1,0,-1)), [.80, 0, .10])]
class ReachingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "reaching",
n_targets=6,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
rng = None,
max_duration=60 * 5
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_targets = n_targets
self._done = False
self._current_target = 0
self._current_target_T = None
self._scene_objects = {}
self._ghost_objects = {}
self._ghost_robots = {}
self.robot = None
self._initial_scene_description = initial_scene_description
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, max_duration=max_duration)
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_targets" : self.n_targets,
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = XFormPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
obj_name = f"target0"
prim_path = f"{self.task_objects_path}/{obj_name}"
target_p, target_q = T2pq(TARGET_POSES[0], as_float_array=True)
target_prim = VisualSphere(prim_path, name=obj_name, position=target_p, orientation=target_q, radius=0.005, color=np.array((1.,1.,1.)))
#target_prim = add_reference_to_stage(usd_path=DATA_DIR + "/axis.usda", prim_path=prim_path)
#target_prim = XFormPrim(str(target_prim.GetPath()), name=obj_name, position=target_p, orientation=target_q, scale=(0.3,0.3,0.3))
new_object = scene.add(
target_prim
)
self._task_objects[obj_name] = new_object
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
self.solver = KinematicsSolver(self.robot)
return
def cleanup(self) -> None:
return super().cleanup()
def post_reset(self) -> None:
self._current_target = 0
return super().post_reset()
def set_target(self, T):
pq = T2pq(T, as_float_array=True)
self._task_objects["target0"].set_world_pose(*pq)
actions, success = self.solver.compute_inverse_kinematics(
*pq
)
display_config = np.empty(9)
display_config[:7] = actions.joint_positions[:7]
# IK Doesn't solve for the fingers. Manually set open values
display_config[7] = 0.04
display_config[8] = 0.04
self._ghost_robots['ghost_franka0'].set_joint_positions(display_config)
self._ghost_robots['ghost_franka0'].show(gripper_only=True)
self._current_target_T = T
def pre_step(self, sim_step, sim_time):
TimeLimitedTask.pre_step(self, sim_step, sim_time)
if self._current_target_T is None:
self.set_target(TARGET_POSES[self._current_target])
eff_prim = XFormPrim(self.robot.prim_path + "/panda_hand/eff")
ee_p, ee_q = eff_prim.get_world_pose()
ee_T = pq2T(ee_p, ee_q)
if transform_dist(ee_T, self._current_target_T, .15) < .03:
# advance to next target
self._current_target = (self._current_target + 1) % len(TARGET_POSES)
self.set_target(TARGET_POSES[self._current_target])
| 5,982 | Python | 38.104575 | 147 | 0.608492 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/time_limited_task.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Optional
class TimeLimitedTask():
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
max_duration: Optional[int]
) -> None:
self.max_duration = max_duration
self._start_wallclock_stamp = None
self._done = False
def get_params(self) -> dict:
non_optional = self.max_duration if self.max_duration is not None else -1
return {
"max_duration": non_optional,
}
    @property
    def time_remaining(self):
        if not self.max_duration:
            return None
        if self._start_wallclock_stamp is None:
            # pre_step hasn't run yet, so the full budget remains
            return self.max_duration
        return self.max_duration - (time.time() - self._start_wallclock_stamp)
def is_done(self):
return self._done
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
now = time.time()
if self._start_wallclock_stamp is None:
self._start_wallclock_stamp = time.time()
if self.max_duration and now - self._start_wallclock_stamp > self.max_duration:
self._done = True
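# Cooperating tasks invoke this mixin's hook explicitly, e.g. (sketch):
#   class MyTask(TimeLimitedTask, TableTask):
#       def pre_step(self, step, sim_time):
#           TimeLimitedTask.pre_step(self, step, sim_time)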
| 1,512 | Python | 29.87755 | 94 | 0.609127 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/motion.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.motion_commander import MotionCommand, PlannedMoveCommand
from srl.teleop.assistance.transforms import T2pq, pq2T, transform_dist
from omni.isaac.cortex.df import DfAction, DfSetLockState, DfStateMachineDecider, DfStateSequence
import numpy as np
import quaternion
class PullTowardConfig(DfAction):
def enter(self):
pass
def step(self):
ctx = self.context
joint_config = self.params
ctx.tools.commander.set_command(PlannedMoveCommand(joint_config))
def exit(self):
pass
class SetUserTarget(DfAction):
def step(self):
ctx = self.context
new_target = self.params
ctx.tools.commander.set_command(MotionCommand(*new_target))
current_target_pose = ctx.tools.commander.target_prim.get_world_pose()
error = transform_dist(pq2T(*current_target_pose), pq2T(*new_target), .15)
if error < .02:
return None
else:
return self
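    # Under the cortex df state-machine protocol, returning self keeps this
    # state active until the target prim is within ~2 cm of the command (with
    # rotation folded in via transform_dist's 0.15 weighting); returning None
    # signals completion.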
class Reset(DfStateMachineDecider):
def __init__(self):
# This behavior uses the locking feature of the decision framework to run a state machine
# sequence as an atomic unit.
super().__init__(
DfStateSequence(
[
DfSetLockState(set_locked_to=True, decider=self),
SetUserTarget(),
DfSetLockState(set_locked_to=False, decider=self),
]
)
)
self.is_locked = False
| 1,648 | Python | 30.113207 | 97 | 0.64017 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/select.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import numpy as np
from omni.isaac.cortex.df import DfAction, DfDecision, DfDecider, DfLogicalState
from srl.teleop.assistance.behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.control import ControlContext
from srl.teleop.assistance.proposals import GraspNormalProposalTable, GroupedPoseProposalTable, InvalidReason, PlanePlaneProposal, GraspProposal, \
PlacementProposal
from srl.teleop.assistance.suggestions import check_grasp_orientation_similarity
from srl.teleop.assistance.transforms import R_to_angle, orthogonalize, pack_Rp, transform_dist, unpack_T
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pq2T, invert_T, normalized
import time
import carb
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.viz import viz_axis_named_T, viz_axis_named_Ts
class SelectionContext(DfLogicalState):
def __init__(self, tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, use_surrogates: bool, use_snapping: bool):
super().__init__()
self.tools = tools
self.scene_context = scene_context
self.control_context = control_context
self.grasp_distribution = None
self.placement_distribution = None
self.plane_distribution = None
self.current_grasp_proposal = None
self.current_placement_proposal = None
self.cursor_ray = None
self.use_surrogates = use_surrogates
self.use_snapping = use_snapping
self.scene_mesh_dirty = False
self.time_at_last_placement_update = None
self.fixed_proposal = None
self.monitors = [
SelectionContext.monitor_grasp_proposals,
SelectionContext.monitor_placement_proposals
]
def has_grasp_proposal(self):
return self.current_grasp_proposal is not None
@property
def suggestion_is_snap(self):
if self.current_grasp_proposal and isinstance(self.current_grasp_proposal._table, GroupedPoseProposalTable):
return True
return False
def has_placement_proposal(self):
return self.current_placement_proposal is not None
def get_current_proposal(self):
if self.fixed_proposal is not None:
return self.fixed_proposal
if self.has_grasp_proposal():
return self.current_grasp_proposal
elif self.has_placement_proposal():
return self.current_placement_proposal
def reset_placement_proposal(self):
self.current_placement_proposal = None
def reset_grasp_proposal(self):
self.current_grasp_proposal = None
def monitor_grasp_proposals(self):
scene_ctx = self.scene_context
self.scene_mesh_dirty |= scene_ctx.scene_mesh_changed
if self.tools.grasp_table:
self.tools.grasp_table.objects_dirty |= scene_ctx.moving_objects
else:
return
if scene_ctx.object_in_gripper is not None:
return
# Wait until we have an initial collision env
if scene_ctx.scene_mesh is None:
return
table = self.tools.grasp_table
dirty_mask = np.full_like(table._owners, False, dtype=bool)
moving_mask = np.full_like(table._owners, False, dtype=bool)
in_gripper_mask = np.full_like(table._owners, False, dtype=bool)
for i, (dirty, moving) in enumerate(zip(table.objects_dirty, scene_ctx.moving_objects)):
mask = table.mask_by_owner(i)
if dirty:
dirty_mask |= mask
if moving:
moving_mask |= mask
if i == scene_ctx.object_in_gripper_i:
in_gripper_mask = mask
if dirty and not moving:
# We're only going to update this object if it isn't moving!
table.objects_dirty[i] = False
# This object moved! That means any cached IK solves are no longer valid. Clear them out
table._configs[dirty_mask] = np.nan
table.invalidate(moving_mask, InvalidReason.MOVING)
check_mask = dirty_mask & ~moving_mask & ~in_gripper_mask
if check_mask.sum() == 0 and self.scene_mesh_dirty:
check_mask = np.full_like(table._owners, True, dtype=bool)
self.scene_mesh_dirty = False
candidate_Ts_world = scene_ctx.obj_Ts[table._owners[check_mask]] @ table._poses[check_mask]
dists = np.linalg.norm(candidate_Ts_world[:,:3,3], axis=1)
sideness = np.linalg.norm(candidate_Ts_world[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
# Start by assuming the suggestion is valid
table._valid[check_mask] = InvalidReason.VALID.value
table.invalidate_submask(check_mask, dists > 1.0, InvalidReason.UNREACHABLE)
# No side grasps beneath 30cm
table.invalidate_submask(check_mask, (sideness > .6) & (candidate_Ts_world[:,2,3] < .3), InvalidReason.UNREACHABLE)
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask = np.zeros(check_mask.sum(), dtype=bool)
proposable_checked_mask[proposable_check_indices] = True
world_col_res = self.tools.geometry_scene.query(candidate_Ts_world[proposable_check_indices], from_mesh=self.tools.gripper_collision_mesh, to_mesh=scene_ctx.scene_mesh, render=False, query_name=f"grasp_scene")
table.invalidate_submask(proposable_checked_mask, world_col_res != 0, InvalidReason.SCENE_COLLISION)
table.update_world_poses_masked(check_mask, candidate_Ts_world)
def monitor_placement_proposals(self):
now = time.time()
scene_ctx = self.scene_context
if self.tools.placement_table:
for table in self.tools.placement_table:
table.objects_dirty |= scene_ctx.moving_objects
if scene_ctx.object_in_gripper is None:
return
obj_to_ee_T = invert_T(scene_ctx.ee_to_obj_T)
# Check whether any current proposals became invalid
gripper_obj_i = scene_ctx.object_in_gripper_i
gripper_obj = scene_ctx.object_in_gripper
# We rate limit this to avoid jumpiness, and reduce CPU burden
if self.time_at_last_placement_update is None or (now -
self.time_at_last_placement_update) > 1.:
table = self.tools.placement_table[gripper_obj_i]
moving_mask = np.full_like(table._owners, False, dtype=bool)
in_gripper_mask = np.full_like(table._owners, False, dtype=bool)
for i, moving in enumerate(scene_ctx.moving_objects):
mask = table.mask_by_owner(i)
if moving:
moving_mask |= mask
if i == gripper_obj_i:
in_gripper_mask = mask
table.objects_dirty[i] = False
# Give a heads up that we can't vouch for proposal quality while the object is moving
table.invalidate(moving_mask, InvalidReason.MOVING)
check_mask = ~moving_mask & ~in_gripper_mask
support_T = scene_ctx.obj_Ts
candidate_Ts = table._poses[check_mask] #wrt to the support obj
ee_Ts_support = candidate_Ts @ obj_to_ee_T
world_Ts = support_T[table._owners[check_mask]] @ ee_Ts_support
placement_Ts = world_Ts @ invert_T(obj_to_ee_T)
dists = np.linalg.norm(world_Ts[:,:3,3], axis=1)
sideness = np.linalg.norm(world_Ts[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
is_top_grasp = check_grasp_orientation_similarity(world_Ts, axis_z_filter=np.array((0.,0.,-1.)), axis_z_filter_thresh=.3)
# Start by assuming the suggestion is valid
table._valid[:] = InvalidReason.VALID.value
table.invalidate_submask(check_mask, dists > 1.0, InvalidReason.UNREACHABLE)
table.invalidate_submask(check_mask, (sideness > .6) & (world_Ts[:,2,3] < .3), InvalidReason.UNREACHABLE)
#suggestions_table.invalidate_submask(check_mask, ~is_top_grasp, InvalidReason.UNREACHABLE)
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask = np.zeros(check_mask.sum(), dtype=bool)
proposable_checked_mask[proposable_check_indices] = True
# Would the gripper collide with the support object? Happens often with side alignments
gripper_collisions = self.tools.geometry_scene.query(world_Ts[proposable_check_indices], from_mesh=self.tools.gripper_collision_mesh, to_mesh=scene_ctx.scene_mesh)
table.invalidate_submask(proposable_checked_mask, gripper_collisions != 0, InvalidReason.SCENE_COLLISION)
# Shrink the gripper object mesh back a bit to see if the volume where it needs to go is roughly empty
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask[:] = False
proposable_checked_mask[proposable_check_indices] = True
scene_collisions = scene_ctx.tools.geometry_scene.query(placement_Ts[proposable_check_indices], gripper_obj.prim, scene_ctx.scene_mesh, from_mesh_scale=0.95, query_name="place")
table.invalidate_submask(proposable_checked_mask, scene_collisions != 0, InvalidReason.SCENE_COLLISION)
table.update_world_poses_masked(check_mask, world_Ts)
self.time_at_last_placement_update = now
class SelectDispatch(DfDecider):
"""
Responsible for deciding whether to update the current suggestion
"""
def enter(self):
self.add_child("select_grasp_suggestion", SelectGraspProposal())
self.add_child("select_placement_suggestion", SelectPlacementProposal())
self.add_child("select_grasp_normal_suggestion", SelectGraspNormalProposal())
self.add_child("select_placement_plane_suggestion", SelectPlacementPlaneProposal())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
control_ctx = self.context.control_context
obj_in_gripper = scene_ctx.object_in_gripper
if len(scene_ctx.objects) == 0 or not control_ctx.user_gave_motion or control_ctx.assistance_in_use:
# No objects to provide assistance for
# If user isn't driving, we won't change the selection. Makes it easy to "rest" the system
if control_ctx.assistance_in_use:
# If user is opting into assistance, don't change the selection out from under them, and hide the cursor
ctx.cursor_ray = None
return DfDecision("do_nothing")
elif not scene_ctx.should_suggest_placements:
return DfDecision("do_nothing")
elif obj_in_gripper is not None:
ctx.reset_grasp_proposal()
if ctx.use_surrogates:
return DfDecision("select_placement_plane_suggestion", (obj_in_gripper))
else:
table = scene_ctx.tools.placement_table[scene_ctx.object_in_gripper_i]
if table and not table.empty():
return DfDecision("select_placement_suggestion", (obj_in_gripper, table))
elif scene_ctx.should_suggest_grasps:
ctx.reset_placement_proposal()
if ctx.use_surrogates:
return DfDecision("select_grasp_normal_suggestion")
else:
grasp_proposals = ctx.tools.grasp_table
if grasp_proposals and not grasp_proposals.empty():
return DfDecision("select_grasp_suggestion", (ctx.tools.grasp_table))
return DfDecision("do_nothing")
class SelectPlacementPlaneProposal(DfAction):
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, AVAILABLE_DOT_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
ctx.current_placement_proposal = None
gripper_obj = self.params
gripper_obj_i = scene_ctx.objects.index(gripper_obj)
gripper_obj_T = pq2T(*gripper_obj.get_world_pose())
plane_table = scene_ctx.tools.plane_table
scene_ctx.tools.plane_table._object_poses[gripper_obj_i] = gripper_obj_T
if ctx.use_snapping and ctx.tools.placement_table:
snaps_table = ctx.tools.placement_table[gripper_obj_i]
if snaps_table._poses_world is not None:
ctx.placement_distribution = np.full(len(snaps_table), AVAILABLE_DOT_COLOR_KEY)
ctx.placement_distribution[~snaps_table.proposable] = UNAVAILABLE_COLOR_KEY
else:
ctx.placement_distribution = None
elif not ctx.use_snapping:
snaps_table = None
ctx.placement_distribution = None
# Support geometry is in object frame
# Mask to only look at the object we're holding
object_mask = np.empty((len(plane_table.facet_object_owner), 3), dtype=bool)
object_mask[:] = (plane_table.facet_object_owner != gripper_obj_i)[:, None]
support_normals = np.ma.masked_where(object_mask, plane_table.support_normals, copy=False)
#support_centroids = ma.masked_where(object_mask, self.tools.plane_table.support_centroids, copy=False)
# Figure out which way the Z axis of the end effector is pointing in the object frame
ee_dir_in_obj = scene_ctx.ee_to_obj_T[:3,:3].T[:,2]
scores = support_normals.dot(ee_dir_in_obj)
closest_to_normal = scores.argmax()
in_gripper_support_face = closest_to_normal
ee_p, ee_q = ctx.tools.commander.get_fk_pq()
in_gripper_support_face_i = in_gripper_support_face
in_gripper_support_centroid = plane_table.get_centroids_world(in_gripper_support_face_i)
in_gripper_support_normal_world = plane_table.get_normals_world(in_gripper_support_face_i)
hit_path, hit_pos, _, hit_dist = ctx.tools.ray_cast(in_gripper_support_centroid, in_gripper_support_normal_world, ignore_obj_handler=lambda path: ctx.tools.should_ignore_in_raycast(path, gripper_obj.prim_path))
ctx.cursor_ray = in_gripper_support_centroid, in_gripper_support_normal_world, hit_dist
dists = np.linalg.norm(plane_table.get_centroids_world() - ee_p, axis=1)
dists[plane_table._valid != InvalidReason.VALID.value] = float('inf')
dists[plane_table.facet_object_owner == gripper_obj_i] = float('inf')
if hit_path:
hit_obj = None
for i, obj in enumerate(itertools.chain(scene_ctx.objects, scene_ctx.tools.scene_objects.values())):
if obj.prim_path == hit_path:
hit_obj = obj
hit_obj_i = i
#print(hit_obj)
break
if hit_obj:
# Take the object we hit by default
dists[plane_table.facet_object_owner != hit_obj_i] = float('inf')
closest_i = np.argmin(dists)
if dists[closest_i] == float("inf") or hit_pos is None:
# No valid plane
ctx.current_placement_proposal = None
return
plane_table.update_object_poses(np.vstack((scene_ctx.obj_Ts, scene_ctx.fixed_Ts)))
if ctx.current_placement_proposal is None or (isinstance(ctx.current_placement_proposal, PlanePlaneProposal) and ctx.current_placement_proposal.place_obj != gripper_obj):
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
elif ctx.current_placement_proposal and isinstance(ctx.current_placement_proposal, PlanePlaneProposal):
proposal = ctx.current_placement_proposal
if proposal.support_index != closest_i:
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
else:
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
# Alternative point solution from projecting straight down
#current_in_plane_p = proposal.project_to_constraint(proposal.place_centroid_world, proposal.place_centroid)
current_in_plane_p = proposal.project_to_constraint(hit_pos, proposal.place_centroid)
#proposal.T_world = proposal.support_obj_T @ proposal.get_placement_T() @ invert_T(assist_ctx.ee_to_obj_T)
#viz_axis_named_T("placement", proposal.get_placement_T(), (.2,.2,.2))
ee_T = ctx.tools.commander.get_fk_T()
ee_ax = ee_T[:3, 0]
ee_ay = ee_T[:3,1]
# Try to project X and Y axes onto the placement plane
# NOTE: This assumes that the robot is at (0,0,0)
vec_to_base = -proposal.support_centroid_world
# Strategy: Project the end effector X and Y axes to be orthogonal to the current placement normal, and then again
# to be orthogonal to the support normal. That leaves two fully specified rotations which we can
# bring into alignment, minimizing the amount of twisting that needs to happen
# Define a new world rotation: z out of the placement surface, other two axes as projections of gripper axes
proposed_face_R = np.array([ee_ax, ee_ay, proposal.place_normal_world]).T
try:
face_R = orthogonalize(proposed_face_R, prioritize=(2,0,1))
except np.linalg.LinAlgError as e:
face_R = make_rotation_matrix(proposal.place_normal_world, vec_to_base)
#viz_axis_named_Rp("on_obj", face_R, proposal.place_centroid_world, scale=(.2,.2,.2))
proposed_solution_R = np.array([ee_ax, ee_ay, -proposal.support_normal_world]).T
try:
solution_R = orthogonalize(proposed_solution_R, prioritize=(2,0,1))
except np.linalg.LinAlgError as e:
solution_R = make_rotation_matrix(-proposal.support_normal_world, vec_to_base)
#viz_axis_named_Rp("proj_sol", solution_R, current_in_plane_p, scale=(.2,.2,.2))
# Subtract out the original object orientation, leaving just the rotation that takes us from the object to the new frame
obj_to_sol_R = gripper_obj_T[:3,:3].T @ face_R
proposal.T_world = pack_Rp(solution_R @ obj_to_sol_R.T, current_in_plane_p) @ invert_T(scene_ctx.ee_to_obj_T)
if ctx.use_snapping and snaps_table and snaps_table._poses_world is not None:
snap_Ts = snaps_table._poses_world
snap_scores = transform_dist(snap_Ts, proposal.T_world, R_weight=.15)
snap_scores[~snaps_table.proposable] = float('inf')
closest_point_snap_i = np.argmin(snap_scores)
if snap_scores[closest_point_snap_i] < 0.05:
ctx.placement_distribution[:] = AVAILABLE_COLOR_KEY
ctx.placement_distribution[~snaps_table.proposable] = UNAVAILABLE_COLOR_KEY
ctx.placement_distribution[closest_point_snap_i] = SNAPPED_COLOR_KEY
proposal = PlacementProposal(closest_point_snap_i, snaps_table, scene_ctx.objects[snaps_table._owners[closest_point_snap_i]], gripper_obj)
offset_T = proposal.T_world.copy()
offset_T = offset_T @ scene_ctx.ee_to_obj_T
offset_T[2,3] += 0.005
collisions = scene_ctx.tools.geometry_scene.query(offset_T[None], gripper_obj.prim, scene_ctx.scene_mesh, render=False, query_name="place")
if collisions[0] > 0:
if isinstance(proposal, PlacementProposal):
proposal.mark_invalid(InvalidReason.SCENE_COLLISION)
return
#viz_axis_named_T("final", proposal.T_world, scale=(.15,.15,.15))
ctx.current_placement_proposal = proposal
return
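# Hedged sketch (not from the original file): the minimally-twisting frame
# construction used in SelectPlacementPlaneProposal.step above, isolated. It
# assumes orthogonalize() re-orthonormalizes the columns in the given priority
# order and raises LinAlgError on degenerate input, as the step() code implies.
def _example_min_twist_frame(ee_R, plane_normal_world, fallback_dir):
    # Columns: projected gripper x, projected gripper y, plane normal as z.
    proposed = np.array([ee_R[:, 0], ee_R[:, 1], plane_normal_world]).T
    try:
        return orthogonalize(proposed, prioritize=(2, 0, 1))
    except np.linalg.LinAlgError:
        # Degenerate projection: fall back to an arbitrary but valid frame.
        return make_rotation_matrix(plane_normal_world, fallback_dir)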
class SelectPlacementProposal(DfAction):
def __init__(self):
self.start_T = None
self.start_T_stamp = None
self.memory = None
self.prior = None
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, AVAILABLE_DOT_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
gripper_obj, table = self.params
Ts = table._poses_world
if Ts is None:
return
if self.memory is None or len(self.memory) != len(Ts):
self.memory = np.zeros((len(Ts)), dtype=float)
if self.prior is None or len(self.prior) != len(Ts):
self.prior = np.zeros((len(Ts)), dtype=float)
mask = table.proposable
ee_p, ee_q = ctx.tools.commander.get_fk_pq()
ee_T = pq2T(ee_p, ee_q)
now = time.time()
if self.start_T_stamp is None or now - self.start_T_stamp > 2.:
self.start_T = ee_T
self.start_T_stamp = now
pairwise_dist = transform_dist(ee_T, Ts[mask], .15)
self.prior[mask] = np.exp(-pairwise_dist)
self.prior[mask] /= self.prior[mask].sum()
self.prior[~mask] = 0
s_to_u_cost = approx_traj_cost(self.start_T, ee_T)
u_to_g_costs = approx_traj_cost(ee_T, Ts[mask])
s_to_g_costs = approx_traj_cost(self.start_T, Ts[mask])
# Eq. 9 in Formalizing Assistive Teleoperation, applicable because
# approx_traj_cost is a quadratic-style cost
self.memory[mask] = (np.exp(-s_to_u_cost - u_to_g_costs) / np.exp(-s_to_g_costs))
self.memory[~mask] = 0
if self.context.placement_distribution is None or len(self.context.placement_distribution) != len(Ts):
ctx.placement_distribution = np.ones(len(Ts))
#ctx.tools.viewport_scene.manipulator.set_grasp_distribution(ctx.grasp_distribution)
ctx.placement_distribution[:] = AVAILABLE_DOT_COLOR_KEY
ctx.placement_distribution[~table.proposable] = UNAVAILABLE_COLOR_KEY
placement_scores = self.memory * self.prior
best_i = np.argmax(placement_scores)
if placement_scores[best_i] == float("-inf"):
ctx.current_placement_proposal = None
return
ctx.placement_distribution[best_i] = SNAPPED_COLOR_KEY
support_obj = scene_ctx.objects[table._owners[best_i]]
current_prop = ctx.current_placement_proposal
if current_prop:
if current_prop.identifier == best_i and current_prop.support_obj == support_obj:
return
ctx.current_placement_proposal = PlacementProposal(best_i, table, support_obj, gripper_obj)
def approx_traj_cost(T1, T2, R_weight=.1):
# eq 7 from 10.1007/978-3-319-33714-2_10, squared
R1_inv = np.swapaxes(T1[...,:3,:3], -1, -2)
R2 = T2[...,:3,:3]
return np.linalg.norm(T2[..., :3, 3] - T1[...,:3,3], axis=-1) + (2 * R_weight ** 2 * (1 - (np.trace(R1_inv @ R2, axis1=-1, axis2=-2) / 3)))
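# Hedged sketch (not from the original file): the "memory * prior" goal scoring
# used by SelectGraspProposal and SelectPlacementProposal above, collected in
# one place. Argument names are illustrative; goal_Ts is an (N, 4, 4) stack of
# candidate world poses.
def _example_intent_posterior(start_T, ee_T, goal_Ts):
    # Trajectory-consistency term (the Eq. 9 ratio): how well the motion from
    # the start pose through the current pose agrees with each candidate goal.
    memory = np.exp(-approx_traj_cost(start_T, ee_T) - approx_traj_cost(ee_T, goal_Ts))
    memory /= np.exp(-approx_traj_cost(start_T, goal_Ts))
    # Proximity prior over candidates, normalized to sum to one.
    prior = np.exp(-transform_dist(ee_T, goal_Ts, .15))
    prior /= prior.sum()
    return memory * prior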
class SelectGraspProposal(DfAction):
def __init__(self):
self.memory = None
self.prior = None
self.start_T = None
self.start_T_stamp = None
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
table = self.params
scene_ctx = self.context.scene_context
Ts = table._poses_world
if Ts is None:
return
if self.memory is None:
self.memory = np.ones((len(Ts)), dtype=float)
if self.prior is None:
self.prior = np.ones((len(Ts)), dtype=float)
# viz_axis_named_Ts("grasp_props", Ts)
ee_T = scene_ctx.tools.commander.get_fk_T()
now = time.time()
if self.start_T_stamp is None or now - self.start_T_stamp > 2.:
self.start_T = ee_T
self.memory[:] = 1
self.start_T_stamp = now
#
pairwise_dist = transform_dist(ee_T, Ts, .15)
s_to_u_cost = approx_traj_cost(self.start_T, ee_T)
u_to_g_costs = approx_traj_cost(ee_T, Ts)
s_to_g_costs = approx_traj_cost(self.start_T, Ts)
# Eq. 9 in Formalizing Assistive Teleoperation, applicable because
# approx_traj_cost is a quadratic-style cost
self.memory[:] = (np.exp(-s_to_u_cost - u_to_g_costs) / np.exp(-s_to_g_costs))
if ctx.grasp_distribution is None:
ctx.grasp_distribution = np.ones_like(self.prior)
self.prior[:] = np.exp(-pairwise_dist)
self.prior[:] /= self.prior[:].sum()
scores = self.memory * self.prior
scores[~table.proposable] = float("-inf")
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~table.proposable] = UNAVAILABLE_COLOR_KEY
# Pick the max
best_i = np.argmax(scores)
#print(i, highest_prob)
if scores[best_i] == float("-inf"):
ctx.current_grasp_proposal = None
return
ctx.grasp_distribution[best_i] = SNAPPED_COLOR_KEY
current_prop = ctx.current_grasp_proposal
# Don't override accepted proposals
if current_prop is not None:
if best_i != current_prop.identifier:
#viz_axis_named_T("cur_grasp_prop", grasp_proposals[i].T_world)
ctx.current_grasp_proposal = GraspProposal(best_i, table)
else:
# No current proposal to take care of
ctx.current_grasp_proposal = GraspProposal(best_i, table)
class SelectGraspNormalProposal(DfAction):
def get_cursor_T(self, body, point, normal, distance):
scene_ctx = self.context.scene_context
if not body:
return None
target_obj = None
target_obj_i = None
for i, obj in enumerate(scene_ctx.objects):
if obj.prim_path == body:
target_obj = obj
target_obj_i = i
break
if target_obj is None:
return None
ee_T = scene_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
carb.profiler.begin(1, "select_grasp_normal(make_table)", active=True)
table = GraspNormalProposalTable(target_obj, ee_T, point, normal)
table._valid[:] = InvalidReason.VALID.value
sideness = np.linalg.norm(table.grasp_Ts[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
# No side grasps beneath 30cm
table.invalidate((sideness > .6) & (table.grasp_Ts[:,2,3] < .3), InvalidReason.UNREACHABLE)
carb.profiler.end(1, True)
if scene_ctx.scene_mesh is None:
return
initial_check_mask = table.proposable
with profile("initial_collision_check"):
collisions, contact_points = scene_ctx.tools.geometry_scene.query_grasp_contacts(table.grasp_Ts[initial_check_mask], scene_ctx.tools.gripper_collision_mesh, scene_ctx.scene_mesh, render=False, query_name="normal")
table.invalidate_submask(table.proposable, collisions > 0, InvalidReason.SCENE_COLLISION)
"""left_T = table.grasp_Ts[best_i].copy()
right_T = table.grasp_Ts[best_i].copy()
left_T[:3, 3] += left_T[:3, 1] * (.04 - contact_points[best_i, 0])
right_T[:3, 3] -= right_T[:3, 1] * (.04 - contact_points[best_i, 1])
viz_axis_named_T("left_t", left_T, scale=(.2,.2,.2))
viz_axis_named_T("right_t", right_T,scale=(.2,.2,.2))"""
#viz_axis_named_T("old", table.grasp_Ts[0], scale=(.1,.1,.1))
if table.proposable.sum() == 0:
return None
collision_free_mask = collisions == 0
left_shift_amount = (.04 - contact_points[collision_free_mask,1]) - (.04 - contact_points[collision_free_mask, 0]) / 2
recheck_ind = np.where(initial_check_mask)[0][collision_free_mask]
to_check_again = table.grasp_Ts[recheck_ind].copy()
to_check_again[:, :3, 3] -= to_check_again[:, :3, 1] * left_shift_amount[:, None]
#viz_axis_named_T("new", table.grasp_Ts[0], scale=(.1,.1,.1))
with profile("collision_check_post_adjust"):
new_collisions = scene_ctx.tools.geometry_scene.query(to_check_again, scene_ctx.tools.gripper_collision_mesh, scene_ctx.scene_mesh, render=False, query_name="normal")
successfully_moved_ind = recheck_ind[new_collisions == 0]
table.grasp_Ts[successfully_moved_ind] = to_check_again[new_collisions == 0]
carb.profiler.begin(1, "select_grasp_normal(calcs)", active=True)
rot_to_grasp = ee_R.T @ table.grasp_Ts[table.proposable, :3, :3]
rot_diff = R_to_angle(rot_to_grasp)
# Show equally good (wrt z axis rotation) grasps
#viz_axis_named_Ts("best_rots", table.grasp_Ts[best_rot_i], scale=(0.01, 0.01, 0.01))
best_rot_i = np.where(rot_diff == rot_diff.min())[0]
standoff_subset = contact_points[collision_free_mask][best_rot_i, 2]
best_subset_standoff_i = np.where(standoff_subset == standoff_subset.min())[0]
best_i = np.where(table.proposable)[0][best_rot_i[best_subset_standoff_i][0]]
carb.profiler.end(1, True)
if not table.valid[best_i]:
return None
return best_i, table
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
ee_T = scene_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
# Where is the tip of the gripper pointing
ee_az = ee_T[:3, 2]
snaps = ctx.tools.grasp_table
if ctx.grasp_distribution is None and ctx.use_snapping and snaps._poses_world is not None:
ctx.grasp_distribution = np.full(len(snaps), -4.)
ctx.grasp_distribution[~snaps.proposable] = float('-inf')
elif not ctx.use_snapping:
ctx.grasp_distribution = None
if snaps._poses_world is not None:
snap_Ts = snaps._poses_world
else:
snap_Ts = np.empty((0,4,4))
disp_to_snap = snap_Ts[:, :3, 3] - ee_p
dist_to_snap = np.linalg.norm(disp_to_snap, axis=1)
dir_to_snap = disp_to_snap / np.expand_dims(dist_to_snap, axis=1)
# Angle between z axis of gripper (point dir) and each grasp position
point_dir_scores = np.arccos(dir_to_snap.dot(ee_az))
body, point, normal, distance = ctx.tools.ray_cast(ee_p, ee_az, ignore_obj_handler=ctx.tools.should_ignore_in_raycast)
target_obj = None
target_obj_i = None
for i, obj in enumerate(scene_ctx.objects):
if obj.prim_path == body:
target_obj = obj
target_obj_i = i
break
ctx.cursor_ray = ee_p, ee_az, distance
cursor_results = self.get_cursor_T(body, point, normal, distance)
cone_cutoff = .2
if cursor_results:
cursor_i, table = cursor_results
cursor_T = table.grasp_Ts[cursor_i]
if snaps._poses_world is None or not ctx.use_snapping:
# The snaps haven't loaded yet
ctx.current_grasp_proposal = GraspProposal(cursor_i, table)
return
#viz_axis_named_T("cursor_T", cursor_T)
snap_scores = transform_dist(snap_Ts, cursor_T, R_weight=.15)
snap_scores[~snaps.proposable] = float('inf')
closest_snap_i = np.argmin(snap_scores)
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[snaps._owners == target_obj_i] = SNAPPABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
if snap_scores[closest_snap_i] < 0.05:
ctx.grasp_distribution[closest_snap_i] = SNAPPED_COLOR_KEY
ctx.current_grasp_proposal = GraspProposal(closest_snap_i, snaps)
else:
ctx.current_grasp_proposal = GraspProposal(cursor_i, table)
elif ctx.use_snapping and target_obj is None and snaps._poses_world is not None:
# Missed the object (so no cursor results). Try to provide a snap
snap_scores = transform_dist(snap_Ts, ee_T, .15)
# Only select amongst those we are pointing at
snap_scores[point_dir_scores > cone_cutoff] = float('inf')
snap_scores[~snaps.proposable] = float('inf')
closest_snap_i = np.argmin(snap_scores)
ctx.grasp_distribution[point_dir_scores <= cone_cutoff] = SNAPPABLE_COLOR_KEY
ctx.grasp_distribution[point_dir_scores > cone_cutoff] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
if snap_scores[closest_snap_i] == float('inf'):
ctx.current_grasp_proposal = None
else:
ctx.grasp_distribution[closest_snap_i] = SNAPPED_COLOR_KEY
ctx.current_grasp_proposal = GraspProposal(closest_snap_i, snaps)
else:
# Keep the old proposal if it's close enough to the current collision point
if ctx.current_grasp_proposal and isinstance(ctx.current_grasp_proposal._table, GraspNormalProposalTable) and np.linalg.norm(point - ctx.current_grasp_proposal._table.point) < 0.1:
pass
else:
ctx.current_grasp_proposal = None
if ctx.grasp_distribution is not None:
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
| 33,511 | Python | 47.357864 | 225 | 0.630778 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/control.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Callable
import numpy as np
from omni.isaac.cortex.df import DfAction, DfDecider, DfDecision, DfLogicalState
from srl.teleop.assistance.proposals import PlacementProposal, make_approach_params_for_proposal, sigmoid
from .scene import ContextTools, SceneContext
from .motion import PullTowardConfig, Reset
from srl.teleop.assistance.motion_commander import VelocityMotionCommand, MotionCommand, calc_shifted_approach_target
from srl.teleop.assistance.transforms import T2pq, invert_T, normalized, pq2T, R_to_rot_vector
from srl.teleop.assistance.ui import AssistanceMode, ControlFrame
from srl.spacemouse.buttons import SpaceMouseButtonDebouncer, DEVICE_BUTTON_STRUCT_INDICES
from srl.spacemouse.spacemouse import SpaceMouse
from omni.isaac.core.utils.rotations import euler_angles_to_quat
import quaternion
class ControlContext(DfLogicalState):
CONTROL_MAPPING = {
# Pro Mouse
"CTRL": "ASSIST",
"ALT": "ASSIST",
"ESC": "ASSIST",
"SHIFT": "GRIPPER",
"ROLL CLOCKWISE": "SWAP VIEW",
"F": "SWAP VIEW",
"T": "SWAP VIEW",
"R": "SWAP VIEW",
"ROTATION": "SWAP VIEW",
"FIT": "HOME",
"MENU": "HOME",
# 2 Button Mouse
"LEFT": "GRIPPER",
"RIGHT": "ASSIST"
}
COMMAND_TO_BUTTONS = {}
def __init__(self, tools: ContextTools, spacemouse: SpaceMouse, control_frame: ControlFrame, assistance_mode: AssistanceMode, scene_context: SceneContext, avoid_obstacles: bool):
super().__init__()
for k, v in ControlContext.CONTROL_MAPPING.items():
ControlContext.COMMAND_TO_BUTTONS[v] = ControlContext.COMMAND_TO_BUTTONS.get(v, []) + [k]
self.tools = tools
self.command = None
self.button_command_names = ("GRIPPER", None, "ASSIST", None, None, "HOME", "SWAP VIEW")
self.button_command = False, False, False, False, False, False, None
self.spacemouse = spacemouse
self.spacemouse_debouncer = SpaceMouseButtonDebouncer(DEVICE_BUTTON_STRUCT_INDICES[self.spacemouse.name], {"SHIFT", "LEFT", "RIGHT"}, False, 0.3)
self.scene_context = scene_context
self.gripper_opened = np.sum(tools.robot.gripper.get_joint_positions()) > .05
self.monitors = [
ControlContext.monitor_control_received,
]
self.assistance_in_use = False
self.user_gave_motion = False
self.avoid_obstacles = avoid_obstacles
self.current_command_text = ""
self.control_frame = control_frame
self.assistance_mode = assistance_mode
# Needs to be provided after construction
self.selection_context = None
def monitor_control_received(self):
control = self.spacemouse.get_controller_state()
if control is None:
return
self.command = None
stamp, trans, rot, raw_buttons = control
buttons = self.spacemouse_debouncer.update(raw_buttons)
self.update_current_command_text(buttons)
def buttons_mapped(command):
value = False
for button_name in ControlContext.COMMAND_TO_BUTTONS[command]:
value |= buttons[button_name]
return value
values = []
for i, command_name in enumerate(self.button_command_names):
if isinstance(command_name, tuple):
hit = False
for sub_control in command_name:
if buttons_mapped(sub_control):
values.append(sub_control)
hit = True
if not hit:
values.append(None)
elif command_name is None:
values.append(False)
else:
values.append(buttons_mapped(command_name))
self.button_command = tuple(values)
if not np.allclose(np.hstack((trans, rot)), np.array([0,0,0,0,0,0]), atol=1e-4):
self.command = trans, rot
else:
self.command = None
def control_to_twist(self, trans, rot):
step = self.tools.world.get_physics_dt()
# Normalize control by sim step size so increasing sim frequency doesn't make controller more sensitive
trans = np.array(trans)
rot = np.array(rot)
trans *= step
rot *= step
# Flip X and Y to match sim
trans[[0,1]] = trans[[1,0]]
trans[1] *= -1
dori_world = quaternion.from_float_array(euler_angles_to_quat(rot))
return trans, quaternion.as_rotation_vector(dori_world)
def update_current_command_text(self, buttons):
if buttons.value == 0:
# Nothing is being pressed right now
self.current_command_text = ""
else:
active_controls = set()
for button_name, command_name in ControlContext.CONTROL_MAPPING.items():
if buttons[button_name]:
active_controls.add(command_name)
self.current_command_text = " ".join(list(active_controls))
def get_control_frames(self, frame_preference: ControlFrame):
perm_rot = np.identity(3)
perm_rot[:, 0] *= -1
if frame_preference is ControlFrame.END_EFFECTOR:
perm = np.identity(3)
perm[:, 0] *= -1
perm[:, 2] *= -1
return perm, perm_rot
elif frame_preference is ControlFrame.MIXED:
ee_R = self.tools.commander.get_fk_R()
return ee_R.T, perm_rot
elif frame_preference is ControlFrame.WORLD:
ee_R = self.tools.commander.get_fk_R()
camera_rotated_R = ee_R.T.copy()
camera_rotated_R[:, 0] *= -1
camera_rotated_R[:, 1] *= -1
perm_rot = np.identity(3)
perm_rot[:, 1] *= 1
perm_rot[:, 2] *= -1
return camera_rotated_R, camera_rotated_R @ perm_rot
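# Hedged sketch (not from the original file): the CONTROL_MAPPING inversion that
# ControlContext.__init__ builds into COMMAND_TO_BUTTONS, as a standalone helper.
def _example_invert_mapping(control_mapping):
    command_to_buttons = {}
    for button, command in control_mapping.items():
        command_to_buttons.setdefault(command, []).append(button)
    # e.g. {"ASSIST": ["CTRL", "ALT", "ESC"], "GRIPPER": ["SHIFT", "LEFT"], ...}
    return command_to_buttons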
class ControlDispatch(DfDecider):
def __init__(self, view_change_callback: Callable):
super().__init__()
self.view_change_callback = view_change_callback
def enter(self):
self.add_child("reset", Reset())
self.add_child("pull_toward_config", PullTowardConfig())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
selection_ctx = self.context.selection_context
robot = ctx.tools.robot
ctx.assistance_in_use = False
ctx.user_gave_motion = False
gripper, cancel, pull, reset, bypass, modifier1, view_change = ctx.button_command
# Gripper and view change should apply no matter what other buttons are currently being held
if gripper:
# Have we already tried to open? If so, interpret as request to close
if ctx.gripper_opened:
robot.gripper.close()
else:
robot.gripper.open()
# User expressed intent to close, and we tried
ctx.gripper_opened = not ctx.gripper_opened
if view_change is not None and self.view_change_callback is not None:
self.view_change_callback(view_change)
current_proposal = selection_ctx.get_current_proposal()
if modifier1:
# Pull back to home config
return DfDecision("pull_toward_config", (robot.HOME_CONFIG))
# When we're driving the robot, repel from objects
if ctx.command is not None and ctx.avoid_obstacles:
scene_ctx.disable_near_obstacles()
else:
scene_ctx.disable_all_obstacles()
if ctx.command is not None:
if current_proposal and not bypass and \
(ctx.assistance_mode == AssistanceMode.FORCED_FIXTURE or ctx.assistance_mode == AssistanceMode.VIRTUAL_FIXTURE):
# Interface is in a mode where we're going to limit their velocities
trans, rot = ctx.command
trans = current_proposal.map_velocity_input(ctx.tools.commander.get_current_p(), trans)
else:
trans, rot = ctx.command
if ctx.assistance_mode == AssistanceMode.FORCED_FIXTURE and current_proposal:
# TODO: Move this forcing into the map_velocity_input implementation and make amount of forcing a float param
pose_T = current_proposal.T_world
pose = T2pq(pose_T)
# FIXME: no effect until I can enhance the motion command interface
frame_trans, frame_rot = ctx.get_control_frames(ctx.control_frame)
linear_vel, angular_vel = ctx.control_to_twist(trans, rot)
approach_params = None
# Shape control towards the suggestion if the user is holding that button
if pull and current_proposal:
ctx.assistance_in_use = True
prop_T = current_proposal.T_world
ee_T = ctx.tools.commander.get_fk_T()
approach_params = make_approach_params_for_proposal(current_proposal)
if approach_params:
offset_T = prop_T.copy()
offset_T[:3, 3] = calc_shifted_approach_target(prop_T, ee_T, approach_params)
else:
offset_T = prop_T
target_T = invert_T(ee_T) @ offset_T
dist_to_prop = np.linalg.norm(target_T[:3,3])
lin_to_prop = normalized(target_T[:3,3]) * np.linalg.norm(linear_vel) #min(dist_to_prop, 1/20, np.linalg.norm(linear_vel))
aa_to_prop = R_to_rot_vector(target_T[:3,:3])
theta_to_prop = np.linalg.norm(aa_to_prop)
aa_to_prop = normalized(aa_to_prop) * np.linalg.norm(angular_vel) #min(theta_to_prop, 1/20, np.linalg.norm(angular_vel))
alpha = sigmoid(-dist_to_prop, -.3, 5)
#viz_axis_named_T("twist", ee_T @ integrate_twist(lin_to_prop, aa_to_prop, 1))
#linear_vel = (1 - alpha) * linear_vel + (alpha * (lin_to_prop @ frame_trans))
#angular_vel = (1 - alpha) * angular_vel + (alpha * (aa_to_prop @ frame_rot))
linear_vel = linear_vel + (alpha * (lin_to_prop @ frame_trans))
angular_vel = angular_vel + (alpha * (aa_to_prop @ frame_rot))
ctx.tools.commander.set_command(
VelocityMotionCommand(
linear_vel,
angular_vel,
frame_trans,
frame_rot
)
)
if not pull:
# We only consider updating the proposals if the user is moving the robot.
# But if they're asking to be pulled, we won't pull the current suggestion out from under them.
# This makes the system easy to "put to rest": the user simply takes their hands off the controls.
ctx.user_gave_motion = True
return DfDecision("do_nothing")
elif pull:
# No command, just pull toward the current target
current_proposal = selection_ctx.get_current_proposal()
if current_proposal is not None:
ctx.assistance_in_use = True
approach_params = make_approach_params_for_proposal(current_proposal)
# current_proposal.T_obj @ invert_T(pq2T(*scene_ctx.object_in_gripper.get_world_pose()))
"""if isinstance(current_proposal, PlacementProposal):
# For some reason placements are sometimes slightly offset at the end of the pull. It seems
# to be a controller issue...
ee_delta = current_proposal.T_world @ invert_T(ctx.tools.commander.get_eef_T())
obj_delta = current_proposal.get_placement_T() @ invert_T(pq2T(*scene_ctx.object_in_gripper.get_world_pose()))
offsets = np.linalg.norm(ee_delta[:3,3]), np.linalg.norm(obj_delta[:3,3])"""
ctx.tools.commander.set_command(MotionCommand(
*T2pq(current_proposal.T_world),
approach_params=approach_params
))
else:
ctx.tools.commander.set_command(
VelocityMotionCommand(
np.array((0, 0, 0)),
np.array((0, 0, 0))
)
)
else:
ctx.tools.commander.set_command(
VelocityMotionCommand(
np.array((0,0,0)),
np.array((0,0,0))
)
)
return DfDecision("do_nothing")
| 12,838 | Python | 41.939799 | 182 | 0.584281 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/scene.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import time
from typing import Dict, List
import carb
import numpy as np
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.cortex.dfb import DfLogicalState
from omni.isaac.debug_draw import _debug_draw
from omni.isaac.franka import KinematicsSolver
from omni.physx import get_physx_scene_query_interface
from srl.teleop.assistance.check_collision import WarpGeometeryScene
from srl.teleop.assistance.motion_commander import MotionCommander
from srl.teleop.assistance.proposals import InvalidReason, PlanePlaneProposalTable, GroupedPoseProposalTable
from srl.teleop.assistance.transforms import get_obj_poses, invert_T, pq2T, transform_dist, FrameVelocityEstimator
from srl.teleop.assistance.scene import AssistanceManipulator
class ContextTools:
def __init__(self, world,
viewport_manipulator: AssistanceManipulator,
objects: Dict[str, RigidPrim],
scene_objects: Dict[str, RigidPrim],
obstacles,
object_ghosts: List[RigidPrim],
robot,
robot_ghosts,
commander: MotionCommander,
grasp_table: GroupedPoseProposalTable,
placement_table: GroupedPoseProposalTable,
plane_table: PlanePlaneProposalTable,
geometry_scene: WarpGeometeryScene,
gripper_collision_mesh):
self.world = world
self.viewport_manipulator = viewport_manipulator
self.objects = objects
self.scene_objects = scene_objects
self.obstacles = obstacles
self.object_ghosts = object_ghosts
self.robot_ghosts = robot_ghosts
self.robot = robot
self.commander = commander
self.solver = KinematicsSolver(self.robot)
self.grasp_table = grasp_table
self.placement_table = placement_table
self.plane_table = plane_table
self.geometry_scene = geometry_scene
self.gripper_collision_mesh = gripper_collision_mesh
self.draw = _debug_draw.acquire_debug_draw_interface()
self.physx_query_interface = get_physx_scene_query_interface()
self._obj_paths_set = set([obj.prim_path for obj in self.objects.values()])
self._raycastable_paths_set = set([obj.prim_path for obj in self.scene_objects.values()]).union(self._obj_paths_set)
self.robot.set_contact_path_filter(lambda path: str(path) in self._obj_paths_set)
def ray_cast(self, position, direction, max_dist=10, offset=np.array((0,0,0)), ignore_obj_handler=lambda x: False):
origin = (position[0], position[1], position[2])
ray_dir = (direction[0], direction[1], direction[2])
last_hit = None
last_hit_dist = float("inf")
def report_all_hits(hit):
if ignore_obj_handler(hit.rigid_body):
return True
nonlocal last_hit
nonlocal last_hit_dist
if hit.distance < last_hit_dist:
last_hit_dist = hit.distance
last_hit = hit
return True
self.physx_query_interface.raycast_all(origin, ray_dir, max_dist, report_all_hits)
if last_hit:
distance = last_hit.distance
return last_hit.rigid_body, np.array(last_hit.position), np.array(last_hit.normal), distance
return None, None, None, 10000.0
def should_ignore_in_raycast(self, path, also_ignore=None):
if also_ignore and path == also_ignore:
return True
if path not in self._raycastable_paths_set:
return True
return False
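# Hedged usage sketch (not from the original file): casting a ray out of the
# gripper the way the selection behaviors do; `tools` is a ContextTools instance.
def _example_gripper_ray(tools):
    ee_T = tools.commander.get_fk_T()
    origin, direction = ee_T[:3, 3], ee_T[:3, 2]  # gripper tip position, pointing direction
    body, point, normal, dist = tools.ray_cast(
        origin, direction, ignore_obj_handler=tools.should_ignore_in_raycast)
    return body, point, normal, dist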
class SceneContext(DfLogicalState):
def __init__(self, tools: ContextTools, should_suggest_grasps, should_suggest_placements):
super().__init__()
self.tools = tools
self.objects = []
for _, obj in self.tools.objects.items():
self.objects.append(obj)
self.obstacle_enabled = {}
for obs in itertools.chain(self.tools.objects.values(), self.tools.scene_objects.values()):
try:
self.tools.commander.add_obstacle(obs)
self.obstacle_enabled[obs.name] = True
except:
pass
self.disable_all_obstacles()
self.should_suggest_grasps = should_suggest_grasps
self.should_suggest_placements = should_suggest_placements
self.obj_Ts = get_obj_poses(self.objects)
self.fixed_Ts = get_obj_poses(list(self.tools.scene_objects.values()))
self.scene_mesh_object_dirty = np.full((len(self.objects),), False, dtype=bool)
self.scene_mesh_changed = False
# Conservative initialization. Takes us a sim step to be able to see what's actually moving
self.moving_objects = np.full((len(self.objects),), True, dtype=bool)
self.last_movement_stamps = np.array([time.time() for _ in range(len(self.objects))])
self.object_gripper_rel_T_trackers = FrameVelocityEstimator(tools.world.get_physics_dt())
self.object_in_gripper = None
self.object_in_gripper_i = None
self.ee_vel_tracker = FrameVelocityEstimator(tools.world.get_physics_dt())
self.ee_to_obj_T = None
self.scene_mesh = None
self.last_scene_mesh_update = time.time()
self.monitors = [
SceneContext.monitor_object_movement,
SceneContext.monitor_object_in_gripper,
SceneContext.monitor_scene_mesh,
SceneContext.monitor_plane_table,
SceneContext.monitor_relative_object_dist_vel,
]
def get_obj_relative_metrics(self):
# These can be used to make heuristic decisions about which object the user is trying to interact with
metrics = []
assert False, "unused: relative object tracking is batched in monitor_relative_object_dist_vel"
for _, obj_tracker in enumerate(self.object_gripper_rel_T_trackers):
T = obj_tracker.T_prev
# Check displacement
dist = np.linalg.norm(T[:3, 3])
vel = obj_tracker.T_vel[:3, 3]
metrics.append((dist,vel))
return metrics
def monitor_object_movement(self):
obj_poses = get_obj_poses(self.objects)
now = time.time()
dists = transform_dist(obj_poses, self.obj_Ts, .15)
time_deltas = now - self.last_movement_stamps
close_mask = dists < 0.005
last_move_timedout_mask = time_deltas > .3
self.moving_objects[close_mask & last_move_timedout_mask] = False
self.moving_objects[~close_mask] = True
self.obj_Ts[~close_mask] = obj_poses[~close_mask]
self.last_movement_stamps[~close_mask] = now
def monitor_scene_mesh(self):
self.scene_mesh_changed = False
self.scene_mesh_object_dirty |= self.moving_objects
except_gripper_obj_mask = np.full((len(self.objects)), True)
if self.object_in_gripper:
except_gripper_obj_mask[self.object_in_gripper_i] = False
if np.any(self.scene_mesh_object_dirty[except_gripper_obj_mask]) and not any(self.moving_objects) and (time.time() - self.last_scene_mesh_update) > 1.5:
obj_poses = get_obj_poses(self.objects)
self.last_scene_mesh_update = time.time()
carb.profiler.begin(1, "make_scene_mesh", active=True)
to_combine = []
to_combine_xforms = []
for obj, xform in itertools.chain(zip(self.objects, obj_poses), zip(self.tools.scene_objects.values(), self.fixed_Ts)):
if self.object_in_gripper == obj:
continue
if not hasattr(obj, 'geom'):
continue
to_combine.append(obj.geom)
to_combine_xforms.append(xform)
self.scene_mesh = self.tools.geometry_scene.combine_geometries_to_mesh(to_combine, to_combine_xforms)
carb.profiler.end(1, True)
self.scene_mesh_object_dirty[except_gripper_obj_mask] = False
# Let scene mesh consumers know they need to revalidate
self.scene_mesh_changed = True
def monitor_plane_table(self):
if not self.tools.plane_table:
return
self.tools.plane_table.update_object_poses(np.vstack((self.obj_Ts, self.fixed_Ts)))
# Let's see which facets of the object look good for placement now
# Support geometry is in object frame
self.tools.plane_table._valid[:] = InvalidReason.VALID.value
support_normals = self.tools.plane_table.get_normals_world()
scores = np.arccos(support_normals.dot((0,0,1)))
self.tools.plane_table._valid[scores > 0.25] = InvalidReason.UNREACHABLE.value
#self.tools.viewport_manipulator.manipulator.invalidate()
def monitor_relative_object_dist_vel(self):
eef_T = self.tools.commander.get_fk_T()
in_gripper_frame = invert_T(eef_T) @ self.obj_Ts
self.object_gripper_rel_T_trackers.update(in_gripper_frame)
self.ee_vel_tracker.update(eef_T)
def monitor_object_in_gripper(self):
path_in_hand = self.tools.robot.gripper_contents
for i, obj in enumerate(self.objects):
if obj.prim_path != path_in_hand:
continue
if self.object_in_gripper != obj:
# Gripper object changed, force the scene mesh to regenerate
self.scene_mesh_object_dirty[:] = True
self.object_in_gripper = obj
self.object_in_gripper_i = i
break
else:
self.object_in_gripper = None
self.object_in_gripper_i = None
return
in_gripper_pos, in_gripper_rot = self.object_in_gripper.get_world_pose()
ee_T = self.tools.commander.get_eef_T()
#viz_axis_named_T("ee_T", ee_T)
gripper_obj_T = pq2T(in_gripper_pos, in_gripper_rot)
# "subtract" out the part of the transform that goes to the ee, leaving relative transform
ee_to_obj_T = invert_T(ee_T).dot(gripper_obj_T)
self.ee_to_obj_T = ee_to_obj_T
def disable_near_obstacles(self):
ee_T = self.tools.commander.get_fk_T()
ee_p = ee_T[:3, 3]
ee_point_dir = ee_T[:3, 2]
obj_centroids = self.obj_Ts[:, :3,3]
# Displacement to each grasp (in world frame)
disp_to_grasp = obj_centroids - ee_p
dist_to_grasp = np.linalg.norm(disp_to_grasp, axis=1)
dir_to_obj = disp_to_grasp / dist_to_grasp[:, None]
# Angle between z axis of gripper (point dir) and each grasp position
point_dir_scores = dir_to_obj.dot(ee_point_dir)
should_disable_collision = ((dist_to_grasp < 0.25) & (point_dir_scores > 0.3)) | (dist_to_grasp < 0.05)
for i, should_disable in enumerate(should_disable_collision):
obj = self.objects[i]
if obj.name not in self.obstacle_enabled:
continue
active = self.obstacle_enabled[obj.name]
if should_disable and active:
self.tools.commander.disable_obstacle(self.objects[i])
self.obstacle_enabled[obj.name] = False
elif not should_disable and not active:
self.tools.commander.enable_obstacle(self.objects[i])
self.obstacle_enabled[obj.name] = True
def disable_all_obstacles(self):
for obj in self.objects:
if obj.name not in self.obstacle_enabled:
continue
active = self.obstacle_enabled[obj.name]
if active:
self.tools.commander.disable_obstacle(obj)
self.obstacle_enabled[obj.name] = False
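# Hedged sketch (not from the original file): the settle/move hysteresis from
# SceneContext.monitor_object_movement, reduced to a single object. An object
# only counts as settled after staying within 5mm (weighted pose distance,
# rotation weight .15) of its last recorded pose for at least 0.3s.
def _example_is_moving(prev_moving, last_T, current_T, seconds_since_last_move):
    close = transform_dist(current_T, last_T, .15) < 0.005
    if not close:
        return True  # displaced: moving, and the reference pose would reset
    if seconds_since_last_move > .3:
        return False  # held still long enough: settled
    return prev_moving  # within the dwell window: keep the previous state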
| 11,573 | Python | 43.344827 | 160 | 0.634494 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/network.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.isaac.cortex.dfb import DfNetwork
from srl.spacemouse.spacemouse import SpaceMouse
from ..ui import AssistanceMode, ControlFrame
from .scene import ContextTools, SceneContext
from .control import ControlDispatch, ControlContext
from .display import DispatchDisplay, DisplayContext
from .select import SelectDispatch, SelectionContext
from typing import Callable
def build_suggestion_display_behavior(tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, selection_context: SelectionContext, label):
return DfNetwork(root=DispatchDisplay(), context=DisplayContext(tools, scene_context, control_context, selection_context, label))
def build_control_behavior(tools: ContextTools,
spacemouse: SpaceMouse,
control_frame: ControlFrame,
scene_context: SceneContext,
assistance_mode: AssistanceMode,
view_change_callback: Callable,
avoid_obstacles: bool):
return DfNetwork(root=ControlDispatch(view_change_callback), context=ControlContext(tools, spacemouse, control_frame, assistance_mode, scene_context, avoid_obstacles))
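# Hedged usage sketch (not from the original file): one plausible way the three
# networks are wired together. It assumes DfNetwork exposes the context it was
# constructed with as `.context`; the enum choices below are illustrative only.
def _example_wire_networks(tools, spacemouse, label, view_change_callback):
    scene = SceneContext(tools, should_suggest_grasps=True, should_suggest_placements=True)
    control = build_control_behavior(tools, spacemouse, ControlFrame.MIXED, scene,
                                     AssistanceMode.VIRTUAL_FIXTURE, view_change_callback,
                                     avoid_obstacles=True)
    selection = build_suggestion_selection_behavior(tools, scene, control.context,
                                                    use_surrogates=True, use_snapping=True)
    # ControlContext notes its selection_context "needs to be provided after construction".
    control.context.selection_context = selection.context
    display = build_suggestion_display_behavior(tools, scene, control.context,
                                                selection.context, label)
    return control, selection, display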
def build_suggestion_selection_behavior(tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, use_surrogates: bool, use_snapping: bool):
return DfNetwork(root=SelectDispatch(), context=SelectionContext(tools, scene_context, control_context, use_surrogates, use_snapping))
| 1,680 | Python | 53.225805 | 171 | 0.738095 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/display.py | # Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.control import ControlContext
from srl.teleop.assistance.behavior.select import SelectionContext
from srl.teleop.assistance.proposals import InvalidReason, PlanePlaneProposal
from srl.teleop.assistance.transforms import invert_T, transform_dist, unpack_T
from omni.isaac.cortex.df import DfAction, DfDecider, DfDecision, DfLogicalState
import numpy as np
import quaternion
import carb
from srl.teleop.assistance.transforms import T2pq, integrate_twist_stepwise, normalized
from omni.isaac.debug_draw import _debug_draw
import time
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from ..profiling import profile
GRASP_FORK_START_POINTS = np.array(
[[0, -.04, -.04, 1], [0, 0, -.08, 1], [0, -.04, -.04, 1], [0, .04, -.04, 1], [0, 0, -.08, 1]])
GRASP_FORK_END_POINTS = np.array([[0, .04, -.04, 1], [0, 0, -.04, 1], [0., -.04, 0, 1], [0, .04, 0, 1], [0, 0, -.09, 1]])
AXIS_MARKER_STARTS = np.array([[0,0,0, 1], [0,0,0, 1], [0,0,0, 1]])
AXIS_MARKER_ENDS = np.array([[.05,0,0, 1], [0,.05,0, 1], [0,0,.05, 1]])
SNAPPED_COLOR_KEY = np.log(1.0)
SNAPPABLE_COLOR_KEY = np.log(0.6)
AVAILABLE_COLOR_KEY = np.log(0.1)
AVAILABLE_DOT_COLOR_KEY = np.log(0.2)
UNAVAILABLE_COLOR_KEY = float("-inf")
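# Hedged note (not from the original file): the color keys above are stored in
# log space so they can be mixed into arrays of log-probability scores and sent
# through the same exp() before colormapping. A minimal sketch of the mapping:
def _example_color_key_intensity():
    keys = np.array([SNAPPED_COLOR_KEY, SNAPPABLE_COLOR_KEY,
                     AVAILABLE_COLOR_KEY, UNAVAILABLE_COLOR_KEY])
    # exp() recovers the colormap input; -inf maps to 0.0, the dimmest, most
    # transparent end of the hot_alpha colormap built in DispatchDisplay.
    return np.exp(keys)  # -> array([1.0, 0.6, 0.1, 0.0])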
class DisplayContext(DfLogicalState):
def __init__(self, tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, selection_context: SelectionContext, label):
super().__init__()
self.tools = tools
self.scene_context = scene_context
self.control_context = control_context
self.selection_context = selection_context
self.label = label
def get_current_robot_ghost_joint_positions(self) -> np.ndarray:
if not self.tools.robot_ghosts[0].visible:
return np.full((9,), np.NaN)
return self.tools.robot_ghosts[0].get_joint_positions()
def get_current_object_ghost_index_and_pose(self):
for i, obj in enumerate(self.tools.object_ghosts.values()):
if obj.visible:
return i, obj.get_world_pose()
return -1, (np.full((3,), np.NaN), np.full((4,), np.NaN))
class DispatchDisplay(DfDecider):
def __init__(self):
super().__init__()
self.draw = _debug_draw.acquire_debug_draw_interface()
ncolors = 256
color_array = cm.hot(np.linspace(0.,1., ncolors))
# change alpha values
color_array[:,-1] = np.linspace(0.05,0.7,ncolors)
# create a colormap object
self.cm = LinearSegmentedColormap.from_list(name='hot_alpha',colors=color_array)
self.axes_prim = None
self._last_non_empty_command_text = None
self._last_non_empty_command_stamp = -1
def enter(self):
self.add_child("show_grasp", GraspSuggestionDisplayDispatch())
self.add_child("show_placement", PlacementSuggestionDisplayDispatch())
self.add_child("show_plane_placement", PlaneSuggestionDisplayDispatch())
self.add_child("do_nothing", DfAction())
#UsdGeom.Imageable(self.context.tools.commander.target_prim.prim).MakeInvisible()
def draw_cursor_ray(self):
scene_ctx = self.context.scene_context
selection_ctx = self.context.selection_context
if not selection_ctx.cursor_ray:
ee_T = selection_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
# Where is the tip of the gripper pointing
ee_az = ee_T[:3, 2]
gripper_obj_path = scene_ctx.object_in_gripper.prim_path if scene_ctx.object_in_gripper else None
dir = ee_az
origin = ee_p
body, point, normal, dist = selection_ctx.tools.ray_cast(ee_p, ee_az, ignore_obj_handler=lambda path: self.context.tools.should_ignore_in_raycast(path, gripper_obj_path))
else:
origin, dir, dist = selection_ctx.cursor_ray
hit_pos = np.array(origin) + np.array(dir) * dist
self.draw.draw_lines([np.array(origin)], [hit_pos], [(.2,.2,.2, .3)], [4])
self.draw.draw_points([hit_pos], [(1, 1, 1, .6)], [16])
def draw_control_trajectory(self, v, w, v_frame, w_frame):
v_goal = v_frame @ v
w_goal = w_frame @ w
points = integrate_twist_stepwise(v_goal* 3, w_goal * 12, 2, 10)
v_dir = normalized(v_goal)
w_dir = normalized(w_goal)
twist_true = self.context.scene_context.ee_vel_tracker.get_twist()
if twist_true is None:
return
v_true = twist_true[:3]
w_true = twist_true[3:]
v_true_dir = normalized(v_true)
w_true_dir = normalized(w_true)
v_agreement = v_true_dir.dot(v_dir)
w_agreement = w_true_dir.dot(w_dir)
disagreement = 0
if not (np.allclose(v_dir, (0,0,0)) or np.allclose(v_true, (0,0,0))):
disagreement += 1 - np.abs(v_agreement)
else:
# No v goal, disagreement is just magnitude of vel
disagreement += np.linalg.norm(v_true) + np.linalg.norm(v_goal)
if not (np.allclose(w_dir, (0,0,0)) or np.allclose(w_true, (0,0,0))):
disagreement += 1 - np.abs(w_agreement)
else:
disagreement += np.linalg.norm(w_true) + np.linalg.norm(w_goal)
points_h = np.empty((len(points), 4))
points_h[:, :3] = points
points_h[:, 3] = 1
points = (self.context.tools.commander.get_eef_T() @ points_h.T).T[:, :3]
self.draw.draw_lines_spline(points.tolist(), (1.,1. - disagreement,1. - disagreement,.5), 5, False)
def update_command_text_overlay(self, label_models, new_text):
label, bg = label_models
orig_style = label.style
orig_bg_style = bg.style
if new_text == "":
# No command right now. Dim the text, but don't clear it
# until a few seconds have passed
orig_style["color"] = 0x66FFFFFF
if time.time() - self._last_non_empty_command_stamp > 3.0:
self._last_non_empty_command_text = ""
orig_bg_style["background_color"] = 0x22000000
elif new_text != self._last_non_empty_command_text:
self._last_non_empty_command_stamp = time.time()
self._last_non_empty_command_text = new_text
orig_style["color"] = 0xFFFFFFFF
orig_bg_style["background_color"] = 0x33000000
else:
self._last_non_empty_command_stamp = time.time()
label.text = self._last_non_empty_command_text
label.set_style(orig_style)
bg.set_style(orig_bg_style)
def draw_grasp_candidate_distribution(self, Ts, dist, standardize=True):
if dist is None:
return
score_probs = np.exp(dist)
max_prob = np.max(np.abs(score_probs), axis=0)
if max_prob == 0:
return
non_zero_mask = score_probs != 0
if standardize:
pass  # standardization of the scores is not implemented; probabilities are used as-is
n_grasps = sum(non_zero_mask)
n_points = len(GRASP_FORK_START_POINTS)
starts = Ts[non_zero_mask][:,None] @ GRASP_FORK_START_POINTS[None,:, :, None]
starts = np.reshape(starts, (-1, 4))[..., :3]
ends = Ts[non_zero_mask][:,None] @ GRASP_FORK_END_POINTS[None,:, :, None]
ends = np.reshape(ends, (-1, 4))[..., :3]
colors = self.cm(score_probs[non_zero_mask])
colors = np.repeat(colors, n_points, axis=0)
sizes = np.full(n_grasps * n_points, 3)
with profile("draw_call", True):
self.context.tools.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
def draw_grasp_candidate_distribution_aggregated(self, Ts, dist, max_aggregation=True):
if dist is None or Ts is None:
return
if np.max(dist) == float("-inf"):
return
nonzero_mask = dist > float("-inf")
aggregated_Ts = Ts[nonzero_mask].copy()
# Going to aggregate grasps that only differ by flip of the palm
aggregated_Ts[:, :,(0,1)] = np.abs(aggregated_Ts[:, :,(0,1)])
aggregated_Ts[:,:,(0,1,2)] = aggregated_Ts[:,:,(0,1,2)].round(1)
# Round position to 1cm
aggregated_Ts[:,:,3] = aggregated_Ts[:,:,3].round(2)
if max_aggregation:
# Sort before unique to ensure that unique values are in contiguous blocks
sorted_indices = np.lexsort((aggregated_Ts[:,0,3], aggregated_Ts[:,2,3], aggregated_Ts[:,3,3]))
unique,unique_index, unique_inv_ind, unique_counts = np.unique(aggregated_Ts[sorted_indices[:,None], :3, np.array((0,2,3))[None,:]], return_index=True, return_inverse=True, return_counts=True, axis=0)
# unique_counts gives the number of repetitions of each returned unique item,
# but we want those counts ordered to match our original lexsorted input
sorted_unique_inv_ind = unique_inv_ind[np.sort(unique_index)]
# Take a max over the contiguous blocks
slice_indices = np.empty(len(unique_counts), dtype=int)
slice_indices[0] = 0
slice_indices[1:] = unique_counts[sorted_unique_inv_ind].cumsum()[:-1]
score_probs = np.maximum.reduceat(np.exp(dist[nonzero_mask][sorted_indices]),slice_indices)
unique_Ts = Ts[nonzero_mask][sorted_indices][np.sort(unique_index)]
else:
unique, unique_index, unique_inv_ind = np.unique(aggregated_Ts[:, :3, np.array((0,2,3))], return_index=True, return_inverse=True, axis=0)
score_probs = np.zeros(len(unique), dtype=float)
# Obscure but useful capability explained here:
# https://stackoverflow.com/questions/55735716/how-to-sum-up-for-each-distinct-value-c-in-array-x-all-elements-yi-where-xi
np.add.at(score_probs, unique_inv_ind, np.exp(dist[nonzero_mask]))
unique_Ts = Ts[nonzero_mask][unique_index]
n_grasps = len(unique_Ts)
n_points = len(GRASP_FORK_START_POINTS)
starts = unique_Ts[:,None] @ GRASP_FORK_START_POINTS[None,:, :, None]
starts = np.reshape(starts, (-1, 4))[..., :3]
ends = unique_Ts[:,None] @ GRASP_FORK_END_POINTS[None,:, :, None]
ends = np.reshape(ends, (-1, 4))[..., :3]
colors = self.cm(score_probs)
colors = np.repeat(colors, n_points, axis=0)
sizes = np.full(n_grasps * n_points, 4)
with profile("draw_call", True):
self.context.tools.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
def draw_placement_distribution_aggregated(self, Ts, dist, max_aggregation=True):
if dist is None or Ts is None:
return
if np.max(dist) == float("-inf"):
return
nonzero_mask = dist > float("-inf")
aggregated_Ts = Ts[nonzero_mask].copy() @ self.context.scene_context.ee_to_obj_T
# Round position to 1cm
aggregated_Ts[:,:,3] = aggregated_Ts[:,:,3].round(2)
if max_aggregation:
sorted_indices = np.lexsort((aggregated_Ts[:,0,3], aggregated_Ts[:,1,3], aggregated_Ts[:,2,3]))
unique,unique_index, unique_inv_ind, unique_counts = np.unique(aggregated_Ts[sorted_indices, :3, 3], return_index=True, return_inverse=True, return_counts=True, axis=0)
# unique_counts gives the number of repetitions of each returned unique item,
# but we want those counts ordered to match our original lexsorted input
sorted_unique_inv_ind = unique_inv_ind[np.sort(unique_index)]
slice_indices = np.empty((len(unique_counts + 1)), dtype=int)
slice_indices[0] = 0
slice_indices[1:] = unique_counts[sorted_unique_inv_ind].cumsum()[:-1]
score_probs = np.maximum.reduceat(np.exp(dist[nonzero_mask][sorted_indices]),slice_indices)
unique_Ts = Ts[nonzero_mask][sorted_indices][np.sort(unique_index)]
else:
unique,unique_index, unique_inv_ind = np.unique(aggregated_Ts[sorted_indices :3, 3], return_index=True, return_inverse=True, axis=0)
score_probs = np.zeros(len(unique), dtype=float)
# Obscure but useful capability explained here:
# https://stackoverflow.com/questions/55735716/how-to-sum-up-for-each-distinct-value-c-in-array-x-all-elements-yi-where-xi
np.add.at(score_probs, unique_inv_ind, np.exp(dist[nonzero_mask]))
unique_Ts = Ts[nonzero_mask][unique_index]
        n_grasps = len(unique_Ts)
        # Cap the number of drawn points at 200
        points = unique_Ts[:200, :3, 3]
        colors = np.array(self.cm(score_probs)[:200])
        sizes = np.full(len(points), 12)
with profile("draw_call", True):
self.context.tools.draw.draw_points(points.tolist(), colors.tolist(), sizes.tolist())
def draw_motion_target_axis(self, T):
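        """Draw RGB axis lines and a gray point at the origin of the 4x4 pose T."""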
starts = np.squeeze(T @ AXIS_MARKER_STARTS[:,:,None])
ends = np.squeeze(T @ AXIS_MARKER_ENDS[:,:,None])
colors = np.array([[1,0,0, .8], [0,1,0, .8], [0,0,1,.8]])
sizes = np.full(len(AXIS_MARKER_STARTS), 10)
self.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
self.draw.draw_points([T[:3,3].tolist()], [[0.3,0.3,0.3,.8]], [16])
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
control_ctx = self.context.control_context
selection_ctx = self.context.selection_context
self.update_command_text_overlay(ctx.label, control_ctx.current_command_text)
self.draw_cursor_ray()
#self.draw_motion_target_axis(pq2T(*ctx.tools.commander.target_prim.get_world_pose()))
placement_proposal = selection_ctx.current_placement_proposal
is_plane_proposal = isinstance(placement_proposal, PlanePlaneProposal)
if control_ctx.user_gave_motion:
trans, rot = control_ctx.command
frame_trans, frame_rot = control_ctx.get_control_frames(control_ctx.control_frame)
linear_vel, angular_vel = control_ctx.control_to_twist(trans, rot)
self.draw_control_trajectory(linear_vel, angular_vel, frame_trans, frame_rot)
if scene_ctx.object_in_gripper is not None:
if scene_ctx.should_suggest_placements:
with profile("viz_placement_dist", True):
props = scene_ctx.tools.placement_table[scene_ctx.object_in_gripper_i]
self.draw_placement_distribution_aggregated(props._poses_world, selection_ctx.placement_distribution, max_aggregation=True)
if placement_proposal is not None:
if is_plane_proposal:
return DfDecision("show_plane_placement", placement_proposal)
else:
return DfDecision("show_placement", placement_proposal)
# There's something in the gripper but no proposal yet.
else:
if scene_ctx.should_suggest_grasps:
with profile("viz_dist", True):
self.draw_grasp_candidate_distribution_aggregated(ctx.tools.grasp_table._poses_world, selection_ctx.grasp_distribution, max_aggregation=True)
return DfDecision("show_grasp")
return DfDecision("do_nothing")
class DisplayGripperSuggestionGhost(DfAction):
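    """Shows a gripper-only ghost at a suggested joint configuration with the given color and opacity."""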
def enter(self):
self.currently_showing = None, None
def step(self):
ghost, display_config, color, opacity = self.params
_, current_config = self.currently_showing
if current_config is None or not np.allclose(display_config, current_config):
ghost.set_joint_positions(display_config)
ghost.show(gripper_only=True)
self.currently_showing = ghost, display_config
ghost.set_color(color, opacity)
def exit(self):
ghost, _, _, _ = self.params
ghost.hide()
class DisplayObjectSuggestionGhost(DfAction):
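    """Shows a ghost of an object at a suggested world pose with the given color and opacity."""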
def __init__(self):
self._currently_showing = (None, None)
def enter(self):
pass
def step(self):
ghost, T, color, opacity = self.params
self.set_currently_showing(ghost, T)
ghost.set_color(color, opacity)
def exit(self):
self.set_currently_showing(None, None)
def set_currently_showing(self, ghost, T):
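        """Show `ghost` at pose `T`, hiding any previously shown ghost; no-op when called with the same pose."""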
to_show = (ghost, T)
current = self._currently_showing
if to_show == (None, None):
if current != (None, None):
current[0].hide()
else:
# We're trying to show something
if current != (None, None):
# Are we setting the same values as we're currently showing?
if ghost == current[0] and transform_dist(T, current[1], 0.15) < 0.005:
# Idempotent
return
elif ghost != current[0]:
# We're setting a different object so hide the old one
current[0].hide()
p, q = T2pq(T)
ghost.set_world_pose(p, quaternion.as_float_array(q))
ghost.show()
self._currently_showing = to_show
class GraspSuggestionDisplayDispatch(DfDecider):
"""
Governs rendering of an existing grasp proposal
"""
def enter(self):
self.add_child("display_grasp_suggestion", DisplayGripperSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
selection_ctx = self.context.selection_context
proposal = selection_ctx.current_grasp_proposal
if proposal is None or not proposal.valid:
return DfDecision("do_nothing")
T = proposal.T_world
if np.any(np.isnan(proposal.joint_config)):
carb.profiler.begin(1, "grasp_display.ik", active=True)
p,q = T2pq(T)
actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=p,
target_orientation=quaternion.as_float_array(q),
)
carb.profiler.end(1, True)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")
else:
proposal.update_eff_goal(T, actions.joint_positions.astype(float)[:-2])
display_config = np.empty(9)
display_config[:7] = proposal.joint_config
        # IK doesn't solve for the fingers, so manually set open values
display_config[7] = 0.04
display_config[8] = 0.04
# First time showing this one?
color = "white"
return DfDecision("display_grasp_suggestion", (ctx.tools.robot_ghosts[0], display_config, color, .4))
class PlacementSuggestionDisplayDispatch(DfDecider):
"""
    Governs rendering of an existing placement proposal
"""
def enter(self):
self.add_child("display_placement_suggestion", DisplayObjectSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
proposal = self.params
placement_T = proposal.get_placement_T()
eff_T_goal = placement_T @ invert_T(scene_ctx.ee_to_obj_T)
eff_pq = T2pq(eff_T_goal)
actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=eff_pq[0],
target_orientation=quaternion.as_float_array(eff_pq[1]),
)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")
eef_T = ctx.tools.commander.get_fk_T()
dist_to_placement = transform_dist(eef_T, eff_T_goal, 0.15)
if dist_to_placement < .02:
return DfDecision("do_nothing")
object_ghost = ctx.tools.object_ghosts[scene_ctx.object_in_gripper.name + "_ghost"]
color = "white"
return DfDecision("display_placement_suggestion", (object_ghost, placement_T, color, .4))
class PlaneSuggestionDisplayDispatch(DfDecider):
"""
    Governs rendering of an existing plane placement proposal
"""
def enter(self):
self.add_child("display_placement_suggestion", DisplayObjectSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
proposal = self.params
eff_T_goal = proposal.T_world
placement_T = eff_T_goal @ scene_ctx.ee_to_obj_T
eef_T = ctx.tools.commander.get_eef_T()
dist_to_placement = transform_dist(eef_T, eff_T_goal, 0.15)
eff_pq = T2pq(eff_T_goal)
"""actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=eff_pq[0],
target_orientation=quaternion.as_float_array(eff_pq[1]),
)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")"""
if dist_to_placement < .02:
return DfDecision("do_nothing")
object_ghost = ctx.tools.object_ghosts[scene_ctx.object_in_gripper.name + "_ghost"]
color = "white"
return DfDecision("display_placement_suggestion", (object_ghost, placement_T, color, .2))
| 21,459 | Python | 44.952891 | 212 | 0.613356 |
NVlabs/fast-explicit-teleop/config/extension.toml | [core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "SRL Teleop Assist"
description = "Extension for Fast Explicit-Input Assistance for Teleoperation in Clutter"
authors = ["NVIDIA"]
repository = ""
keywords = ["isaac", "teleoperation", "manipulation"]
changelog = "docs/CHANGELOG.md"
readme = "README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
writeTarget.kit = true
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.quicklayout" = {}
"omni.usd" = []
"omni.ui.scene" = {}
"omni.kit.viewport.utility" = {}
"omni.physx" = {}
"omni.isaac.dynamic_control" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
"omni.isaac.cortex" = {}
"omni.isaac.franka" = {}
"omni.isaac.motion_generation" = {}
"srl.spacemouse" = {}
[[python.module]]
name = "srl.teleop.base_sample"
[[python.module]]
name = "srl.teleop.assistance"
[[python.module]]
name = "srl.teleop.analysis"
[[test]]
timeout = 960
[python.pipapi]
requirements = ["numpy", "rtree", "scipy", "trimesh", "h5py"]
use_online_index = true | 1,047 | TOML | 20.387755 | 89 | 0.671442 |
NVlabs/fast-explicit-teleop/docs/CHANGELOG.md | **********
CHANGELOG
**********
| 34 | Markdown | 4.833333 | 10 | 0.264706 |
Kaedim/omniverse-extension/README.md | # Kaedim Omniverse Extension
This project is the Kaedim Omniverse plugin.
It currently supports login functionality and file import.
Documentation on how to use the plugin is linked here: https://docs.google.com/document/d/1fqclE2bfRH_RgXyCNmCKX2DZF5y6ZT9OYANDNyyiY5M/edit?usp=sharing | 286 | Markdown | 39.999994 | 151 | 0.835664 |
Kaedim/omniverse-extension/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
Kaedim/omniverse-extension/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
Kaedim/omniverse-extension/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
Kaedim/omniverse-extension/exts/kaedim.extension/kaedim/extension/extension.py | import omni.ext
import omni.ui as ui
import json
import os
import omni.kit.commands
import omni.usd
from pxr import Sdf
from urllib.request import Request, urlretrieve, urlopen
from urllib.error import URLError
import http.client
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class KaedimExtensionExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
    def load_credentials(self):
        filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'credentials.json')
        # Default to empty credentials so the attributes always exist,
        # even when credentials.json has not been created yet
        self.devID = None
        self.apiKey = None
        self.refreshToken = None
        self.jwt = None
        if os.path.isfile(filepath):
            # File exists, read the stored credentials from it
            with open(filepath, 'r') as file:
                data = json.load(file)
                self.devID = data.get('devID', None)
                self.apiKey = data.get('apiKey', None)
                self.refreshToken = data.get('refreshToken', None)
                self.jwt = data.get('jwt', None)
def update_json_file(self, kv_pairs):
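        """Merge the given key/value pairs into credentials.json, creating the file if it does not exist."""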
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'credentials.json')
data = {}
if os.path.isfile(filepath):
with open(filepath, 'r') as f:
data = json.load(f)
# Replace existing keys with new values, or add new keys
for key, value in kv_pairs.items():
data[key] = value
with open(filepath, 'w') as f:
json.dump(data, f, indent=4)
def login(self, devID, apiKey):
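        """Register a webhook with the Kaedim API and store the returned JWT; returns True on success."""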
conn = http.client.HTTPSConnection("api.kaedim3d.com")
payload = json.dumps({
"devID": devID,
"destination": "https://nvidia.kaedim3d.com/hook"
})
headers = {
'X-API-Key': apiKey,
'Content-Type': 'application/json'
}
conn.request("POST", "/api/v1/registerHook", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
data = json.loads(data)
if data["status"] == "success":
self.jwt = data["jwt"]
return True
return False
def refresh_jwt(self, devID, apiKey, rToken):
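        """Request a fresh JWT from the Kaedim API using the given refresh token; returns the JWT or None."""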
print(rToken)
payload = json.dumps({
"devID": devID
})
headers = {
'X-API-Key': apiKey,
'refresh-token': rToken,
'Content-Type': 'application/json'
}
conn = http.client.HTTPSConnection("api.kaedim3d.com")
conn.request("POST", "/api/v1/refreshJWT", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
data = json.loads(data)
if data["status"] == "success":
return data["jwt"]
return None
def login_panel(self, ext_id):
with self._window.frame:
devID = ui.SimpleStringModel()
apiKey = ui.SimpleStringModel()
rToken = ui.SimpleStringModel()
def on_connect():
jwt = ''
res = self.login(devID.as_string, apiKey.as_string)
if res:
label.text = 'Successfully logged in'
jwt = self.refresh_jwt(devID.as_string, apiKey.as_string, rToken.as_string)
if res and jwt is not None:
credentials = {
"devID" : devID.as_string,
"apiKey": apiKey.as_string,
"refreshToken": rToken.as_string,
"jwt": jwt
}
self.devID = devID.as_string
self.apiKey = apiKey.as_string
self.refreshToken = rToken.as_string
self.jwt = jwt
self.update_json_file(credentials)
self.load_ui(ext_id)
label.text = 'Successfully logged in'
else:
                    label.text = 'Oops! Something went wrong. Please try again.'
with ui.VStack():
ui.Label("Please enter your credentials:")
ui.Spacer(height=10)
ui.Label("DevID:")
ui.Spacer(height=5)
ui.StringField(model=devID, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=10)
ui.Label("Api-Key:")
ui.Spacer(height=5)
ui.StringField(model=apiKey, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=10)
ui.Label("Refresh-Token:")
ui.Spacer(height=5)
ui.StringField(model=rToken, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=5)
label = ui.Label("")
ui.Spacer(height=10)
ui.Button("Conect", clicked_fn=on_connect)
def asset_library(self, ext_id):
def import_asset():
asset = self.selected_asset
if not asset or asset is None: return
valid_iterations = [i for i in asset['iterations'] if i['status'] == 'completed' or i['status']=='uploaded']
latest_version = max(valid_iterations, key=lambda x: x['iterationID'])
results = latest_version['results']
name = asset['image_tags'][0]
requestID = asset['requestID']
            if isinstance(results, dict):
file_path = check_and_download_file(requestID, results['obj'], 'obj')
omni.kit.commands.execute("CreateReference",
path_to=Sdf.Path("/World/"+name), # Prim path for where to create the reference
asset_path=file_path, # The file path to reference. Relative paths are accepted too.
usd_context=omni.usd.get_context()
)
def fetch_assets():
conn = http.client.HTTPSConnection("api.kaedim3d.com")
print('data', self.devID, self.apiKey, self.jwt)
payload = json.dumps({
"devID": self.devID
})
headers = {
'X-API-Key': self.apiKey,
'Authorization': self.jwt,
'Content-Type': 'application/json'
}
conn.request("GET", "/api/v1/fetchAll/", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
            refreshed = False
            if res.status == 401:
                # JWT expired: refresh it and retry the request once
                self.jwt = self.refresh_jwt(self.devID, self.apiKey, self.refreshToken)
                headers["Authorization"] = self.jwt
                conn.request("GET", "/api/v1/fetchAll/", payload, headers)
                res = conn.getresponse()
                data = res.read()
                refreshed = True
            if res.status == 200:
                if refreshed:
                    # Persist the refreshed token so the next session can reuse it
                    credentials = {"jwt": self.jwt}
                    self.update_json_file(credentials)
data = json.loads(data)
assets = data["assets"]
asset_library_ui(assets)
if len(assets) <= 0:
print('No assets')
else:
print('Ok')
else:
print('Error')
def check_and_download_file(filename, url, filetype):
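            """Return a local path for the file, downloading it from `url` into the extension's data folder if not cached."""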
# Make sure the folder path exists
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(ext_id)
folder_path = ext_path + "/data"
if not os.path.exists(folder_path):
print(f"The folder {folder_path} does not exist.")
return
file_path = os.path.join(folder_path, filename + '.' + filetype)
# # Check if the file exists
if not os.path.isfile(file_path):
# Download and save the file from the url
try:
urlretrieve(url, file_path)
# print(f"File downloaded and saved as {filename} in the folder {folder_path}.")
except Exception as e:
print(f"Error occurred while downloading the file: {e}")
return file_path
def select_asset(asset):
self.selected_asset = asset
def isCompleted(asset):
completedIterations = [i for i in asset['iterations'] if i['status']=='completed' or i['status']=='uploaded']
return len(completedIterations) > 0
def logout():
emptyCredentials = {'devID':'','apiKey':'','jwt':'','refreshToken':''}
self.update_json_file(emptyCredentials)
self.login_panel(ext_id)
def asset_library_ui(assets):
self.selected_asset = None
with self._window.frame:
with ui.VStack():
with ui.HStack(height=20):
ui.Button('Refresh', height=20, clicked_fn=fetch_assets)
ui.Button('Logout', height=20, clicked_fn=logout)
with ui.ScrollingFrame():
with ui.Grid(ui.Direction(2), column_width=120, row_height=120):
for asset in assets:
url = asset['image'][0]
source_url = check_and_download_file(asset['requestID'], url, 'png')
name = asset['image_tags'][0]
completed = isCompleted(asset)
if not completed: name = name + '\n' + asset['iterations'][len(asset['iterations'])-1]['status']
ui.Button(name, enabled=completed, image_url=source_url, clicked_fn=lambda asset=asset: select_asset(asset))
ui.Button('Import', height=20, clicked_fn=import_asset)
fetch_assets()
def load_ui(self, ext_id):
with self._window.frame:
ui.Button('Load assets', clicked_fn=lambda ext_id=ext_id: self.asset_library(ext_id))
def on_startup(self, ext_id):
self._window = ui.Window("Kaedim Extension", width=300, height=300)
self.jwt = ''
self.load_credentials()
if not self.devID or not self.apiKey or not self.refreshToken:
self.login_panel(ext_id)
else:
print('User already logged in', self.devID)
self.asset_library(ext_id)
def on_shutdown(self):
print("kaedim extension shutdown")
return
| 10,793 | Python | 40.675676 | 140 | 0.523765 |
Kaedim/omniverse-extension/exts/kaedim.extension/kaedim/extension/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
Kaedim/omniverse-extension/exts/kaedim.extension/kaedim/extension/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
Kaedim/omniverse-extension/exts/kaedim.extension/kaedim/extension/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import kaedim.extension
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = kaedim.extension.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,668 | Python | 34.510638 | 142 | 0.682254 |
Kaedim/omniverse-extension/exts/kaedim.extension/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.1"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Kaedim"]
# The title and description fields are primarily for displaying extension info in UI
title = "Kaedim Extension"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "omni.kit.kaedim_importer"
# Keywords for the extension
keywords = ["kit", "example"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/icon.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import kaedim.extension".
[[python.module]]
name = "kaedim.extension"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,588 | TOML | 32.104166 | 118 | 0.746222 |
Kaedim/omniverse-extension/exts/kaedim.extension/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of kaedim omniverse extension with Login and Asset Library functionalities
| 214 | Markdown | 22.888886 | 92 | 0.738318 |
Kaedim/omniverse-extension/exts/kaedim.extension/docs/README.md | # Kaedim Omniverse Extension [kaedim.extension]
This project is the Kaedim Omniverse plugin.
It currently supports login functionality and file import.
Documentation on how to use the plugin is linked here: https://docs.google.com/document/d/1fqclE2bfRH_RgXyCNmCKX2DZF5y6ZT9OYANDNyyiY5M/edit?usp=sharing
| 304 | Markdown | 37.124995 | 151 | 0.828947 |
Kaedim/omniverse-extension/exts/kaedim.extension/docs/index.rst | kaedim.extension
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: kaedim.extension
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 333 | reStructuredText | 14.904761 | 43 | 0.618619 |
SoulVisionCreations/avataar_omniverse_ext/README.md | # Omniverse Extension
This extension enables users to seamlessly integrate 3D objects created using Avataar Incarnate App on the Omniverse platform.
## Install & Enable the Avataar Extension
https://github.com/SoulVisionCreations/avataar_omniverse_ext/assets/117340215/4f632c56-9236-4a68-b9b1-be225e381f77
## Instructions to add this Extension
To add this extension to your Omniverse app:
1. Go into: `Extension Manager` -> `Hamburger Icon` -> `Settings` -> `Extension Search Path`
2. Add this as a search path: `git://github.com/SoulVisionCreations/avataar_omniverse_ext.git?branch=master&dir=exts`
Alternatively:
1. Download or Clone the extension, unzip the file if downloaded
2. Copy the `exts` folder path within the extension folder
- i.e. `/home/.../avataar_omniverse_ext/exts` (Linux) or `C:/.../avataar_omniverse_ext/ext` (Windows)
3. Go into: `Extension Manager` -> `Hamburger Icon` -> `Settings` -> `Extension Search Path`
4. Add the `exts` folder path as a search path
Make sure no filter is enabled and in both cases you should be able to find the new extension in the `Third Party` tab list.
## Usage
https://github.com/SoulVisionCreations/avataar_omniverse_ext/assets/117340215/c3bf6f78-14c8-4543-ae00-833b643b12d7
| 1,241 | Markdown | 48.679998 | 126 | 0.768735 |
SoulVisionCreations/avataar_omniverse_ext/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
SoulVisionCreations/avataar_omniverse_ext/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
SoulVisionCreations/avataar_omniverse_ext/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/extension.py | import asyncio
import os
from os.path import abspath
from inspect import getsourcefile
from pathlib import Path
import shutil
import urllib.request
import zipfile
import carb
import omni.ext
import omni.ui as ui
import omni.kit.asset_converter
import omni.usd
from pxr import Sdf
import warnings
import usdrt
import time
warnings.filterwarnings("ignore")
class IncarnateOmniverseExtension(omni.ext.IExt):
def progress_callback(self, current_step: int, total: int):
print(f"{current_step} of {total}")
async def convert_asset_to_usd(self, input_asset: str, output_usd: str):
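        """Convert a mesh asset (e.g. OBJ) to USD with omni.kit.asset_converter and wait for completion."""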
converter_context = omni.kit.asset_converter.AssetConverterContext()
# ... (other configurations)
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(input_asset, output_usd, self.progress_callback, converter_context)
success = await task.wait_until_finished()
if not success:
carb.log_error(task.get_status(), task.get_detailed_error())
print("converting done")
async def on_click_async(self, input_url):
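        """Download the mesh archive from the entered URL, convert it to USD, and add it to the stage as a payload."""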
        url = input_url.model.get_value_as_string()
        zip_name = url.split("/")[-1]
        object_id = zip_name[:-4]  # archive name without the ".zip" suffix
        newfolder = os.path.join(self.download_path, object_id)
        os.makedirs(newfolder, exist_ok=True)
        # Download the archive and unpack it in place
        self.destination_path = os.path.join(newfolder, zip_name)
        with urllib.request.urlopen(url) as response, open(self.destination_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        with zipfile.ZipFile(self.destination_path, 'r') as zip_ref:
            zip_ref.extractall(newfolder)
        # Convert the extracted OBJ to USD
        await self.convert_asset_to_usd(os.path.join(newfolder, 'obj', object_id + ".obj"), os.path.join(newfolder, object_id + ".usd"))
        asset_path = os.path.join(self.download_path, object_id, f'{object_id}.usd')
omni.kit.commands.execute(
'CreatePayloadCommand',
usd_context=omni.usd.get_context(),
path_to=Sdf.Path(f'/World/{object_id}'),
asset_path=asset_path,
instanceable=False)
self.objects.append(object_id)
def on_startup(self, ext_id):
print("[incarnate.omniverse] incarnate omniverse startup")
self.objects = []
self._count = 0
cur_path = Path(abspath(getsourcefile(lambda:1)))
self.currentdirectory = str(cur_path.parent.absolute())
self.download_path = os.path.join(self.currentdirectory,"downloads")
os.makedirs(self.download_path, exist_ok=True)
self._window = ui.Window("Incarnate Avataar Extension", width=300, height=200)
with self._window.frame:
with ui.VStack():
ui.Label("Enter mesh link from Avataar Creator")
self.user_profile = os.path.expanduser("~")
input_f = ui.StringField()
ui.Button("Import and View", clicked_fn=lambda: asyncio.ensure_future(self.on_click_async(input_f)))
def on_shutdown(self):
omni.kit.commands.execute(
'DeletePrimsCommand',
paths=[Sdf.Path(f'/World/{object_id}') for object_id in self.objects])
objs = os.listdir(self.download_path)
for obj in objs:
            try:
                shutil.rmtree(os.path.join(self.download_path, obj))
            except OSError:
                print(f"Unable to delete {obj}")
print("[incarnate.omniverse] incarnate omniverse shutdown")
| 3,677 | Python | 39.417582 | 231 | 0.643731 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/assetconverter.py |
import carb
import omni
import omni.kit.asset_converter
# Progress of processing.
def progress_callback(current_step: int, total: int):
# Show progress
print(f"{current_step} of {total}")
# Convert asset file(obj/fbx/glTF, etc) to usd.
async def convert_asset_to_usd(input_asset: str, output_usd: str):
# Input options are defaults.
converter_context = omni.kit.asset_converter.AssetConverterContext()
converter_context.ignore_materials = False
converter_context.ignore_camera = False
converter_context.ignore_animations = False
converter_context.ignore_light = False
converter_context.export_preview_surface = False
converter_context.use_meter_as_world_unit = False
converter_context.create_world_as_default_root_prim = True
converter_context.embed_textures = True
converter_context.convert_fbx_to_y_up = False
converter_context.convert_fbx_to_z_up = False
converter_context.merge_all_meshes = False
converter_context.use_double_precision_to_usd_transform_op = False
converter_context.ignore_pivots = False
converter_context.keep_all_materials = True
converter_context.smooth_normals = True
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(input_asset, output_usd, progress_callback, converter_context)
# Wait for completion.
success = await task.wait_until_finished()
if not success:
carb.log_error(task.get_status(), task.get_detailed_error())
print("converting done") | 1,501 | Python | 39.594594 | 103 | 0.746835 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import incarnate.omniverse
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = incarnate.omniverse.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,674 | Python | 34.638297 | 142 | 0.683393 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Avataar"]
# The title and description fields are primarily for displaying extension info in UI
title = "incarnate avataar extension"
description="A python extension to view to view object captures made via incarnate onto Omniverse"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Capture App"
# Keywords for the extension
keywords = ["capture app", "incarnate", "mesh"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/new_icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import incarnate.omniverse".
[[python.module]]
name = "incarnate.omniverse"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,621 | TOML | 32.791666 | 118 | 0.74892 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |