file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---
KallPap/FRL-SHAC-Extension/dflex/tests/test_beam.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import dflex as df

from pxr import Usd, UsdGeom, Gf


class Beam:

    sim_duration = 3.0       # seconds
    sim_substeps = 32
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)

    sim_time = 0.0

    train_iters = 64
    train_rate = 1.0
    def __init__(self, device='cpu'):

        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        builder.add_soft_grid(pos=(0.0, 0.0, 0.0),
                              rot=df.quat_identity(),
                              vel=(0.0, 0.0, 0.0),
                              dim_x=20,
                              dim_y=2,
                              dim_z=2,
                              cell_x=0.1,
                              cell_y=0.1,
                              cell_z=0.1,
                              density=10.0,
                              k_mu=1000.0,
                              k_lambda=1000.0,
                              k_damp=5.0,
                              fix_left=True,
                              fix_right=True)

        self.model = builder.finalize(device)

        # disable triangle dynamics (just used for rendering)
        self.model.tri_ke = 0.0
        self.model.tri_ka = 0.0
        self.model.tri_kd = 0.0
        self.model.tri_kb = 0.0

        self.model.particle_radius = 0.05
        self.model.ground = False

        self.target = torch.tensor(-0.5).to(device)
        self.material = torch.tensor((100.0, 50.0, 5.0), requires_grad=True, device=device)
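        # material parameters to optimize; given how they are broadcast into
        # model.tet_materials in loss(), these are presumably (k_mu, k_lambda,
        # k_damp), matching the add_soft_grid arguments above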
        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/beam.usd")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()
    def loss(self, render=True):

        #-----------------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # clamp material params to reasonable range
            mat_min = torch.tensor((1.e+1, 1.e+1, 5.0), device=self.model.adapter)
            mat_max = torch.tensor((1.e+5, 1.e+5, 5.0), device=self.model.adapter)
            mat_val = torch.max(torch.min(mat_max, self.material), mat_min)

            # broadcast stiffness params to all tets
            self.model.tet_materials = mat_val.expand((self.model.tet_count, 3)).contiguous()

            # forward dynamics
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                self.sim_time += self.sim_dt

            # render
            with df.ScopedTimer("render", False):
                if (self.stage and render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            # loss
            with df.ScopedTimer("loss", False):
                com_loss = torch.mean(self.state.particle_q, 0)

                # minimize y
                loss = loss - torch.norm(com_loss[1] - self.target)

        return loss
    def run(self):

        with torch.no_grad():
            l = self.loss()

        if (self.stage):
            self.stage.Save()
    def train(self, mode='gd'):

        # param to train
        self.step_count = 0

        render_freq = 1

        optimizer = None

        params = [
            self.material,
        ]

        def closure():
            if optimizer:
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            # with torch.autograd.detect_anomaly():
            with df.ScopedTimer("forward"):
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                l.backward()

            print(self.material)
            print(self.material.grad)
            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except Exception:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        for param in params:
                            param -= self.train_rate * param.grad
                            param.grad.zero_()   # reset accumulated gradients between iterations
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.5, nesterov=True)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        try:
            if (self.stage):
                self.stage.Save()
        except Exception:
            print("USD save error")

    def save(self, file):
        torch.save(self.network, file)

    def load(self, file):
        self.network = torch.load(file)
        self.network.eval()
#---------
beam = Beam(device='cpu')
#beam.run()
#beam.train('lbfgs')
beam.train('gd')
| 6,623 | Python | 28.704036 | 141 | 0.495697 |
KallPap/FRL-SHAC-Extension/dflex/tests/test_rigid_bounce.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import dflex as df

import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)

from pxr import Usd, UsdGeom, Gf

import test_util


class RigidBounce:

    frame_dt = 1.0/60.0

    episode_duration = 2.0      # seconds
    episode_frames = int(episode_duration/frame_dt)

    sim_substeps = 16
    sim_dt = frame_dt / sim_substeps
    sim_steps = int(episode_duration / sim_dt)

    sim_time = 0.0

    train_iters = 128
    train_rate = 0.01

    ground = True

    name = "rigid_bounce"
    def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):

        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        builder.add_articulation()

        # add sphere
        link = builder.add_link(-1, df.transform((0.0, 0.0, 0.0), df.quat_identity()), (0,0,0), df.JOINT_FREE)
        shape = builder.add_shape_sphere(
            link,
            (0.0, 0.0, 0.0),
            df.quat_identity(),
            radius=0.1,
            ke=1.e+4,
            kd=10.0,
            kf=1.e+2,
            mu=0.25)

        builder.joint_q[1] = 1.0

        #v_s = df.get_body_twist((0.0, 0.0, 0.0), (1.0, -1.0, 0.0), builder.joint_q[0:3])

        w_m = (0.0, 0.0, 3.0)           # angular velocity (expressed in world space)
        v_m = (0.0, 0.0, 0.0)           # linear velocity at center of mass (expressed in world space)
        p_m = builder.joint_q[0:3]      # position of the center of mass (expressed in world space)

        # set body0 twist
        builder.joint_qd[0:6] = df.get_body_twist(w_m, v_m, p_m)

        # get decomposed velocities
        print(df.get_body_angular_velocity(builder.joint_qd[0:6]))
        print(df.get_body_linear_velocity(builder.joint_qd[0:6], p_m))

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)

        # initial velocity
        #self.model.joint_qd[3] = 0.5
        #self.model.joint_qd[4] = -0.5
        #self.model.joint_qd[2] = 1.0

        self.model.joint_qd.requires_grad_()
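        # joint_qd (the spatial twist set above) is the trainable quantity:
        # train() below optimizes the body's initial velocity directly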
        self.target = torch.tensor((1.0, 1.0, 0.0), dtype=torch.float32, device=adapter)

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

            self.renderer.add_sphere(self.target.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        self.target = torch.tensor(x, device='cpu')
        self.renderer.add_sphere(self.target.tolist(), 0.1, name)
    def loss(self, render=True):

        #---------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        if (self.model.ground):
            self.model.collide(self.state)

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for f in range(0, self.episode_frames):

            # df.config.no_grad = True
            #df.config.verify_fp = True

            # simulate
            with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
                for i in range(0, self.sim_substeps):
                    self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                    self.sim_time += self.sim_dt

            # render
            with df.ScopedTimer("render", False):
                if (self.render):
                    self.render_time += self.frame_dt
                    self.renderer.update(self.state, self.render_time)

                    try:
                        self.stage.Save()
                    except Exception:
                        print("USD save error")

        #loss = loss + torch.dot(self.state.joint_qd[3:6], self.state.joint_qd[3:6])*self.balance_penalty*discount
        pos = self.state.joint_q[0:3]

        loss = torch.norm(pos - self.target)
        return loss
    def run(self):

        df.config.no_grad = True

        #with torch.no_grad():
        l = self.loss()
    def verify(self, eps=1.e-4):

        frame = 60

        params = self.model.joint_qd
        n = len(params)

        # evaluate analytic gradient
        l = self.loss(render=False)
        l.backward()

        # evaluate numeric gradient
        grad_analytic = self.model.joint_qd.grad.tolist()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(n):
                mid = params[i].item()

                params[i] = mid - eps
                left = self.loss(render=False)

                params[i] = mid + eps
                right = self.loss(render=False)

                # reset
                params[i] = mid

                # numeric grad
                grad_numeric[i] = (right-left)/(2.0*eps)
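                # i.e. the two-sided (central) difference estimate
                # f'(x) ~= (f(x+eps) - f(x-eps)) / (2*eps)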
        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))
    def train(self, mode='gd'):

        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        render_freq = 1

        optimizer = None

        params = [self.model.joint_qd]

        def closure():

            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print("vel: " + str(params[0]))
            print("grad: " + str(params[0].grad))
            print("--------")

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            # save best trajectory
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    optimizer.step(closure)

        # final save
        try:
            if (self.render):
                self.stage.Save()
        except Exception:
            print("USD save error")

    def save(self):
        torch.save(self.model.joint_qd, "outputs/" + self.name + ".pt")

    def load(self):
        self.model.joint_qd = torch.load("outputs/" + self.name + ".pt")
#---------
robot = RigidBounce(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.check_grad = True
#df.config.no_grad = True
robot.run()
#df.config.verify_fp = True
#robot.load()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e-3)
| 8,881 | Python | 27.196825 | 118 | 0.516158 |
KallPap/FRL-SHAC-Extension/dflex/tests/test_rigid_slide.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import dflex as df

import tinyobjloader
import numpy as np

from pxr import Usd, UsdGeom, Gf


class RigidSlide:

    sim_duration = 3.0       # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)

    sim_time = 0.0

    train_iters = 64
    train_rate = 0.1

    discount_scale = 1.0
    discount_factor = 0.5
    def __init__(self, adapter='cpu'):

        torch.manual_seed(42)

        # load mesh
        usd = Usd.Stage.Open("assets/suzanne.usda")
        geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Suzanne/Suzanne"))

        points = geom.GetPointsAttr().Get()
        indices = geom.GetFaceVertexIndicesAttr().Get()
        counts = geom.GetFaceVertexCountsAttr().Get()

        builder = df.sim.ModelBuilder()

        mesh = df.sim.Mesh(points, indices)

        articulation = builder.add_articulation()

        rigid = builder.add_link(
            parent=-1,
            X_pj=df.transform((0.0, 0.0, 0.0), df.quat_identity()),
            axis=(0.0, 0.0, 0.0),
            type=df.JOINT_FREE)

        ke = 1.e+4
        kd = 1.e+3
        kf = 1.e+3
        mu = 0.5

        # shape = builder.add_shape_mesh(
        #     rigid,
        #     mesh=mesh,
        #     scale=(0.2, 0.2, 0.2),
        #     density=1000.0,
        #     ke=1.e+4,
        #     kd=1000.0,
        #     kf=1000.0,
        #     mu=0.75)

        radius = 0.1

        #shape = builder.add_shape_sphere(rigid, pos=(0.0, 0.0, 0.0), ke=ke, kd=kd, kf=kf, mu=mu, radius=radius)
        #shape = builder.add_shape_capsule(rigid, pos=(0.0, 0.0, 0.0), radius=radius, half_width=0.5)
        shape = builder.add_shape_box(rigid, pos=(0.0, 0.0, 0.0), hx=radius, hy=radius, hz=radius, ke=ke, kd=kd, kf=kf, mu=mu)

        builder.joint_q[1] = radius

        self.model = builder.finalize(adapter)
        self.model.joint_qd.requires_grad = True

        self.vel = torch.tensor((1.0, 0.0, 0.0), dtype=torch.float32, device=adapter, requires_grad=True)
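        # self.vel is the trainable initial linear velocity; loss() below
        # writes it into the free joint's twist (joint_qd) at the start of
        # every rollout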
        self.target = torch.tensor((3.0, 0.2, 0.0), device=adapter)

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/rigid_slide.usda")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

            self.renderer.add_sphere(self.target.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()
    def loss(self, render=True):

        #---------------
        # run simulation

        # construct contacts once at startup
        self.model.joint_qd = torch.cat((torch.tensor((0.0, 0.0, 0.0), dtype=torch.float32, device=self.model.adapter), self.vel))

        self.sim_time = 0.0
        self.state = self.model.state()

        self.model.collide(self.state)

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # forward dynamics
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                self.sim_time += self.sim_dt

            # render
            with df.ScopedTimer("render", False):
                if (self.stage and render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            #com = self.state.joint_q[0:3]
            com = self.state.body_X_sm[0, 0:3]
            loss = loss + torch.norm(com - self.target)

        return loss
    def run(self):

        #with torch.no_grad():
        l = self.loss()

        if (self.stage):
            self.stage.Save()
    def train(self, mode='gd'):

        # param to train
        self.step_count = 0

        render_freq = 1

        optimizer = None

        params = [self.vel]

        def closure():
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print("vel: " + str(params[0]))
            print("grad: " + str(params[0].grad))
            print("--------")

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except Exception:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        try:
            if (self.stage):
                self.stage.Save()
        except Exception:
            print("USD save error")

    def save(self, file):
        torch.save(self.network, file)

    def load(self, file):
        self.network = torch.load(file)
        self.network.eval()
#---------
rigid = RigidSlide(adapter='cpu')
#rigid.run()
rigid.train('adam')
| 7,018 | Python | 28.124481 | 141 | 0.526503 |
KallPap/FRL-SHAC-Extension/dflex/tests/test_snu_mlp.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys

import torch.nn as nn
import torch.nn.functional as F

from torch.utils.tensorboard import SummaryWriter

# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import dflex as df

import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)

from pxr import Usd, UsdGeom, Gf

import test_util
class MultiLayerPerceptron(nn.Module):

    def __init__(self, n_in, n_out, n_hd, adapter, inference=False):
        super(MultiLayerPerceptron, self).__init__()

        self.n_in = n_in
        self.n_out = n_out
        self.n_hd = n_hd

        #self.ll = nn.Linear(n_in, n_out)
        self.fc1 = nn.Linear(n_in, n_hd).to(adapter)
        self.fc2 = nn.Linear(n_hd, n_hd).to(adapter)
        self.fc3 = nn.Linear(n_hd, n_out).to(adapter)

        self.bn1 = nn.LayerNorm(n_in, elementwise_affine=False).to(adapter)
        self.bn2 = nn.LayerNorm(n_hd, elementwise_affine=False).to(adapter)
        self.bn3 = nn.LayerNorm(n_out, elementwise_affine=False).to(adapter)
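        # note: bn1 (the input-width LayerNorm) is constructed but never
        # applied in forward() below; both hidden layers are normalized
        # with bn2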
    def forward(self, x: torch.Tensor):
        x = F.leaky_relu(self.bn2(self.fc1(x)))
        x = F.leaky_relu(self.bn2(self.fc2(x)))
        x = torch.tanh(self.bn3(self.fc3(x)) - 2.0)
        return x
class HumanoidSNU:

    train_iters = 100000000
    train_rate = 0.001

    train_size = 128
    train_batch_size = 4
    train_batch_iters = 128
    train_batch_count = int(train_size/train_batch_size)

    train_data = None

    ground = True

    name = "humanoid_snu_lower"

    regularization = 1.e-3

    inference = False
    initial_y = 1.0
    def __init__(self, depth=1, mode='numpy', render=True, sim_duration=1.0, adapter='cpu', inference=False):

        self.sim_duration = sim_duration      # seconds
        self.sim_substeps = 16
        self.sim_dt = (1.0 / 60.0) / self.sim_substeps
        self.sim_steps = int(self.sim_duration / self.sim_dt)

        self.sim_time = 0.0

        torch.manual_seed(41)
        np.random.seed(41)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        self.filter = {}

        if self.name == "humanoid_snu_arm":
            self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
            self.ground = False

        if self.name == "humanoid_snu_neck":
            self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL" }
            self.ground = False

        if self.name == "humanoid_snu_lower":
            self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL" }
            self.ground = True
            self.initial_y = 1.0

        if self.name == "humanoid_snu":
            self.filter = {}
            self.ground = True

        self.skeletons = []
        self.inference = inference

        # if (self.inference):
        #     self.train_batch_size = 1

        for i in range(self.train_batch_size):

            skeleton = test_util.Skeleton("assets/snu/arm.xml", "assets/snu/muscle284.xml", builder, self.filter)

            # set initial position 1m off the ground
            builder.joint_q[skeleton.coord_start + 0] = i*1.5
            builder.joint_q[skeleton.coord_start + 1] = self.initial_y

            # offset on z-axis
            #builder.joint_q[skeleton.coord_start + 2] = 10.0

            # initial velocity
            #builder.joint_qd[skeleton.dof_start + 5] = 3.0

            self.skeletons.append(skeleton)

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
        #self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        #self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
        #self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)

        self.network = MultiLayerPerceptron(3, len(self.skeletons[0].muscles), 128, adapter)

        self.model.joint_q.requires_grad = True
        self.model.joint_qd.requires_grad = True
        self.model.muscle_activation.requires_grad = True

        self.target_penalty = 1.0
        self.velocity_penalty = 0.1
        self.action_penalty = 0.0

        self.muscle_strength = 40.0

        self.discount_scale = 2.0
        self.discount_factor = 1.0

        # generate training data
        targets = []
        for i in range(self.train_size):

            # generate a random point in -1, 1 away from the head
            t = np.random.rand(2)*2.0 - 1.0
            t[1] += 0.5

            targets.append((t[0], t[1] + 0.5, 1.0))
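            # note: the y-coordinate is shifted by 0.5 twice (once above, once
            # here), so targets span roughly x in [-1, 1], y in [0, 2], z = 1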
        self.train_data = torch.tensor(targets, dtype=torch.float32, device=self.adapter)

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0
        else:
            self.renderer = None

        self.set_target(torch.tensor((0.75, 0.4, 0.5), dtype=torch.float32, device=self.adapter), "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):

        self.target = x

        if (self.renderer):
            self.renderer.add_sphere(self.target.tolist(), 0.05, name, self.render_time)
    def loss(self):

        #---------------
        # run simulation
        self.sim_time = 0.0

        # initial state
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        # apply actions
        #self.model.muscle_activation = self.activations[0]*self.muscle_strength

        # compute activations for each target in the batch
        targets = self.train_data[0:self.train_batch_size]
        activations = torch.flatten(self.network(targets))

        self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength

        # one time collision
        self.model.collide(self.state)

        for i in range(self.sim_steps):

            # apply random actions per-frame
            #self.model.muscle_activation = (activations*0.5 + 0.5 + torch.rand_like(activations, dtype=torch.float32, device=self.model.adapter))*self.muscle_strength

            # simulate
            with df.ScopedTimer("fd", detailed=False, active=False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            #if self.inference:
                #x = math.cos(self.sim_time*0.5)*0.5
                #y = math.sin(self.sim_time*0.5)*0.5

                # t = self.sim_time*0.5
                # x = math.sin(t)*0.5
                # y = math.sin(t)*math.cos(t)*0.5

                # self.set_target(torch.tensor((x, y + 0.5, 1.0), dtype=torch.float32, device=self.adapter), "target")
                # activations = self.network(self.target)
                # self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength

            # render
            with df.ScopedTimer("render", False):

                if (self.render and (i % self.sim_substeps == 0)):

                    with torch.no_grad():

                        muscle_start = 0
                        skel_index = 0

                        for s in self.skeletons:

                            for mesh, link in s.mesh_map.items():

                                if link != -1:
                                    X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())

                                    #self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
                                    self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)

                            for m in range(len(s.muscles)):   #self.model.muscle_count

                                start = self.model.muscle_start[muscle_start + m].item()
                                end = self.model.muscle_start[muscle_start + m + 1].item()

                                points = []

                                for w in range(start, end):

                                    link = self.model.muscle_links[w].item()
                                    point = self.model.muscle_points[w].cpu().numpy()

                                    X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())

                                    points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))

                                self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)

                            muscle_start += len(s.muscles)
                            skel_index += 1

                        # render scene
                        self.render_time += self.sim_dt * self.sim_substeps
                        self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

            # loss
            if self.name == "humanoid_snu_arm":

                hand_pos = self.state.body_X_sc[self.node_map["HandR"]][0:3]

                discount_time = self.sim_time
                discount = math.pow(self.discount_factor, discount_time*self.discount_scale)

                # loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
                #                torch.norm(self.state.joint_qd)*self.velocity_penalty +
                #                torch.norm(self.model.muscle_activation)*self.action_penalty)*discount

                #loss = loss + torch.norm(self.state.joint_qd)
                loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty

            if self.name == "humanoid_snu_neck":

                # rotate a vector
                def transform_vector_torch(t, x):
                    axis = t[3:6]
                    w = t[6]

                    return x * (2.0*w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0

                forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
                up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)

                for i in range(self.train_batch_size):

                    skel = self.skeletons[i]

                    head_pos = self.state.body_X_sc[skel.node_map["Head"]][0:3]

                    head_forward = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], forward_dir)
                    head_up = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], up_dir)

                    target_dir = self.train_data[i] - head_pos

                    loss_forward = torch.dot(head_forward, target_dir)*self.target_penalty
                    loss_up = torch.dot(head_up, up_dir)*self.target_penalty*0.5
                    loss_penalty = torch.dot(activations, activations)*self.action_penalty

                    loss = loss - loss_forward - loss_up + loss_penalty

                    #self.writer.add_scalar("loss_forward", loss_forward.item(), self.step_count)
                    #self.writer.add_scalar("loss_up", loss_up.item(), self.step_count)
                    #self.writer.add_scalar("loss_penalty", loss_penalty.item(), self.step_count)

        return loss
    def run(self):

        df.config.no_grad = True
        self.inference = True

        with torch.no_grad():
            l = self.loss()

        if (self.render):
            self.stage.Save()
    def verify(self, eps=1.e-4):

        params = self.actions
        n = 1   #len(params)

        self.render = False

        # evaluate analytic gradient
        l = self.loss()
        l.backward()

        # evaluate numeric gradient
        grad_analytic = params.grad.cpu().numpy()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(1):
                mid = params[0][i].item()

                params[0][i] = mid - eps
                left = self.loss()

                params[0][i] = mid + eps
                right = self.loss()

                # reset
                params[0][i] = mid

                # numeric grad
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))
    def train(self, mode='gd'):

        self.writer = SummaryWriter()
        self.writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})

        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        optimizer = None
        scheduler = None

        params = self.network.parameters()   #[self.activations]

        def closure():

            batch = int(self.step_count/self.train_batch_iters) % self.train_batch_count
            print("Batch: " + str(batch) + " Iter: " + str(self.step_count % self.train_batch_iters))

            if (optimizer):
                optimizer.zero_grad()

            # compute loss on all examples
            with df.ScopedTimer("forward"):   #, detailed=True):
                l = self.loss()

            # compute gradient
            with df.ScopedTimer("backward"):   #, detailed=True):
                l.backward()

            # batch stats
            self.writer.add_scalar("loss_batch", l.item(), self.step_count)
            self.writer.flush()

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    self.stage.Save()
                except Exception:
                    print("USD save error")

            # save network
            if (l < self.best_loss):
                self.save()
                self.best_loss = l

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)   #, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    last_LR = 1e-5
                    init_LR = 1e-3
                    decay_LR_steps = 2000
                    gamma = math.exp(math.log(last_LR/init_LR)/decay_LR_steps)

                    optimizer = torch.optim.Adam(params, lr=self.train_rate, weight_decay=1e-5)
                    #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))

                    if optimizer:
                        optimizer.step(closure)

                    if scheduler:
                        scheduler.step()

        # final save
        try:
            self.stage.Save()
        except Exception:
            print("USD save error")

    def save(self):
        torch.save(self.network, "outputs/" + self.name + ".pt")

    def load(self, suffix=""):
        self.network = torch.load("outputs/" + self.name + suffix + ".pt")

        if self.inference:
            self.network.eval()
        else:
            self.network.train()
#---------
#env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda')
#env.train(mode='adam')
env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda', inference=True)
#env.load()
env.run()
| 17,357 | Python | 32.445087 | 235 | 0.526358 |
KallPap/FRL-SHAC-Extension/dflex/tests/test_allegro.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys

# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import dflex as df

import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)

from pxr import Usd, UsdGeom, Gf

import test_util


class Robot:

    sim_duration = 4.0       # seconds
    sim_substeps = 64
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)

    sim_time = 0.0

    train_iters = 128
    train_rate = 10.0

    ground = False

    name = "allegro"

    regularization = 1.e-3

    env_count = 1
    env_dofs = 2
    def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):

        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        # allegro
        for i in range(self.env_count):
            test_util.urdf_load(
                builder,
                #"assets/franka_description/robots/franka_panda.urdf",
                "assets/allegro_hand_description/allegro_hand_description_right.urdf",
                df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi*0.5)),
                floating=False,
                limit_ke=0.0,    #1.e+3,
                limit_kd=0.0)    #1.e+2

        # set fingers to mid-range of their limits
        for i in range(len(builder.joint_q_start)):

            if (builder.joint_type[i] == df.JOINT_REVOLUTE):
                dof = builder.joint_q_start[i]
                mid = (builder.joint_limit_lower[dof] + builder.joint_limit_upper[dof])*0.5

                builder.joint_q[dof] = mid
                builder.joint_target[dof] = mid

                builder.joint_target_kd[i] = 0.02
                builder.joint_target_ke[i] = 1.0

        solid = False

        # create FEM block
        if (solid):
            builder.add_soft_grid(
                pos=(-0.05, 0.2, 0.0),
                rot=(0.0, 0.0, 0.0, 1.0),
                vel=(0.0, 0.0, 0.0),
                dim_x=10,
                dim_y=5,
                dim_z=5,
                cell_x=0.01,
                cell_y=0.01,
                cell_z=0.01,
                density=1000.0,
                k_mu=500.0,
                k_lambda=1000.0,
                k_damp=1.0)
        else:
            builder.add_cloth_grid(
                pos=(-0.1, 0.2, -0.1),
                rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi*0.5),
                vel=(0.0, 0.0, 0.0),
                dim_x=20,
                dim_y=20,
                cell_x=0.01,
                cell_y=0.01,
                mass=0.0125)

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
        #self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        self.model.contact_ke = 1.e+3
        self.model.contact_kd = 2.0
        self.model.contact_kf = 0.1
        self.model.contact_mu = 0.5

        self.model.particle_radius = 0.01

        if (solid):
            self.model.tri_ke = 0.0
            self.model.tri_ka = 0.0
            self.model.tri_kd = 0.0
            self.model.tri_kb = 0.0
        else:
            self.model.tri_ke = 100.0
            self.model.tri_ka = 100.0
            self.model.tri_kd = 1.0
            self.model.tri_kb = 0.0

            self.model.edge_ke = 0.01
            self.model.edge_kd = 0.001

        self.model.joint_q.requires_grad_()
        self.model.joint_qd.requires_grad_()

        self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
        #self.actions = torch.zeros(1, device=adapter, requires_grad=True)
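        # self.actions (one control value per env per sim step) is the tensor
        # that train() optimizes; note that loss() below never applies it to
        # the model, so as written the rollout does not depend on it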
        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        self.target = torch.tensor(x, device='cpu')
        self.renderer.add_sphere(self.target.tolist(), 0.1, name)
    def loss(self):

        #---------------
        # run simulation
        self.sim_time = 0.0

        # initial state
        self.state = self.model.state()

        if (self.render):
            traj = []
            for e in range(self.env_count):
                traj.append([])

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # simulate
            with df.ScopedTimer("fd", detailed=False, active=False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            # render
            with df.ScopedTimer("render", False):

                if (self.render and (i % self.sim_substeps == 0)):

                    with torch.no_grad():

                        # draw end effector tracer
                        # for e in range(self.env_count):
                        #     X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
                        #     traj[e].append((X_pole[0], X_pole[1], X_pole[2]))

                        #     # render trajectory
                        #     self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))

                        # render scene
                        self.render_time += self.sim_dt * self.sim_substeps
                        self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

        return loss
    def run(self):

        l = self.loss()

        if (self.render):
            self.stage.Save()

    def verify(self, eps=1.e-4):

        params = self.actions
        n = 1   #len(params)

        self.render = False

        # evaluate analytic gradient
        l = self.loss()
        l.backward()

        # evaluate numeric gradient
        grad_analytic = params.grad.cpu().numpy()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(1):
                mid = params[0][i].item()

                params[0][i] = mid - eps
                left = self.loss()

                params[0][i] = mid + eps
                right = self.loss()

                # reset
                params[0][i] = mid

                # numeric grad
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))
    def train(self, mode='gd'):

        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        render_freq = 1

        optimizer = None

        params = [self.actions]

        def closure():

            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss()

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            # for e in range(self.env_count):
            #     print(self.actions.grad[e][0:20])

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except Exception:
                    print("USD save error")

            # save best trajectory
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    optimizer.step(closure)

        # final save
        try:
            if (self.render):
                self.stage.Save()
        except Exception:
            print("USD save error")

    def save(self):
        torch.save(self.actions, "outputs/" + self.name + ".pt")

    def load(self):
        self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
robot.run()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 10,608 | Python | 27.986339 | 165 | 0.488028 |
KallPap/FRL-SHAC-Extension/dflex/tests/test_adjoint.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
| 626 | Python | 25.124999 | 82 | 0.785942 |
KallPap/FRL-SHAC-Extension/dflex/tests/assets/humanoid.xml

<!-- ======================================================
This file is part of MuJoCo.
Copyright 2009-2015 Roboti LLC.
Model :: Humanoid
Mujoco :: Advanced physics simulation engine
Source : www.roboti.us
Version : 1.31
Released : 23Apr16
Author :: Vikash Kumar
Contacts : [email protected]
Last edits : 30Apr'16, 30Nov'15, 26Sept'15
====================================================== -->
<mujoco model='humanoid (v1.31)'>
<compiler inertiafromgeom='true' angle='degree'/>
<default>
<joint limited='true' damping='1' armature='0' />
<geom contype='1' conaffinity='1' condim='1' rgba='0.8 0.6 .4 1'
margin="0.001" solref=".02 1" solimp=".8 .8 .01" material="geom"/>
<motor ctrlrange='-.4 .4' ctrllimited='true'/>
</default>
<option timestep='0.002' iterations="50" solver="PGS">
<flag energy="enable"/>
</option>
<size nkey='5'/>
<visual>
<map fogstart="3" fogend="5" force="0.1"/>
<quality shadowsize="2048"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" width="100" height="100" rgb1=".4 .6 .8"
rgb2="0 0 0"/>
<texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278"
rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
<texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2"
width="100" height="100"/>
<material name='MatPlane' reflectance='0.5' texture="texplane" texrepeat="1 1" texuniform="true"/>
<material name='geom' texture="texgeom" texuniform="true"/>
</asset>
<worldbody>
<geom name='floor' pos='0 0 0' size='10 10 0.125' type='plane' material="MatPlane" condim='3'/>
<body name='torso' pos='0 0 1.4'>
<light mode='trackcom' directional='false' diffuse='.8 .8 .8' specular='0.3 0.3 0.3' pos='0 0 4.0' dir='0 0 -1'/>
<joint name='root' type='free' pos='0 0 0' limited='false' damping='0' armature='0' stiffness='0'/>
<geom name='torso1' type='capsule' fromto='0 -.07 0 0 .07 0' size='0.07' />
<geom name='head' type='sphere' pos='0 0 .19' size='.09'/>
<geom name='uwaist' type='capsule' fromto='-.01 -.06 -.12 -.01 .06 -.12' size='0.06'/>
<body name='lwaist' pos='-.01 0 -0.260' quat='1.000 0 -0.002 0' >
<geom name='lwaist' type='capsule' fromto='0 -.06 0 0 .06 0' size='0.06' />
<joint name='abdomen_z' type='hinge' pos='0 0 0.065' axis='0 0 1' range='-45 45' damping='5' stiffness='20' armature='0.02' />
<joint name='abdomen_y' type='hinge' pos='0 0 0.065' axis='0 1 0' range='-75 30' damping='5' stiffness='10' armature='0.02' />
<body name='pelvis' pos='0 0 -0.165' quat='1.000 0 -0.002 0' >
<joint name='abdomen_x' type='hinge' pos='0 0 0.1' axis='1 0 0' range='-35 35' damping='5' stiffness='10' armature='0.02' />
<geom name='butt' type='capsule' fromto='-.02 -.07 0 -.02 .07 0' size='0.09' />
<body name='right_thigh' pos='0 -0.1 -0.04' >
<joint name='right_hip_x' type='hinge' pos='0 0 0' axis='1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_z' type='hinge' pos='0 0 0' axis='0 0 1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='right_thigh1' type='capsule' fromto='0 0 0 0 0.01 -.34' size='0.06' />
<body name='right_shin' pos='0 0.01 -0.403' >
<joint name='right_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='right_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='right_foot' pos='0 0 -.39' >
<joint name='right_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='right_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='right_foot_cap1' type='capsule' fromto='-.07 -0.02 0 0.14 -0.04 0' size='0.027' />
<geom name='right_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 0.02 0' size='0.027' />
</body>
</body>
</body>
<body name='left_thigh' pos='0 0.1 -0.04' >
<joint name='left_hip_x' type='hinge' pos='0 0 0' axis='-1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_z' type='hinge' pos='0 0 0' axis='0 0 -1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='left_thigh1' type='capsule' fromto='0 0 0 0 -0.01 -.34' size='0.06' />
<body name='left_shin' pos='0 -0.01 -0.403' >
<joint name='left_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='left_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='left_foot' pos='0 0 -.39' >
<joint name='left_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='left_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='left_foot_cap1' type='capsule' fromto='-.07 0.02 0 0.14 0.04 0' size='0.027' />
<geom name='left_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 -0.02 0' size='0.027' />
</body>
</body>
</body>
</body>
</body>
<body name='right_upper_arm' pos='0 -0.17 0.06' >
<joint name='right_shoulder1' type='hinge' pos='0 0 0' axis='2 1 1' range='-85 60' stiffness='1' armature='0.0068' />
<joint name='right_shoulder2' type='hinge' pos='0 0 0' axis='0 -1 1' range='-85 60' stiffness='1' armature='0.0051' />
<geom name='right_uarm1' type='capsule' fromto='0 0 0 .16 -.16 -.16' size='0.04 0.16' />
<body name='right_lower_arm' pos='.18 -.18 -.18' >
<joint name='right_elbow' type='hinge' pos='0 0 0' axis='0 -1 1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='right_larm' type='capsule' fromto='0.01 0.01 0.01 .17 .17 .17' size='0.031' />
<geom name='right_hand' type='sphere' pos='.18 .18 .18' size='0.04'/>
</body>
</body>
<body name='left_upper_arm' pos='0 0.17 0.06' >
<joint name='left_shoulder1' type='hinge' pos='0 0 0' axis='2 -1 1' range='-60 85' stiffness='1' armature='0.0068' />
<joint name='left_shoulder2' type='hinge' pos='0 0 0' axis='0 1 1' range='-60 85' stiffness='1' armature='0.0051' />
<geom name='left_uarm1' type='capsule' fromto='0 0 0 .16 .16 -.16' size='0.04 0.16' />
<body name='left_lower_arm' pos='.18 .18 -.18' >
<joint name='left_elbow' type='hinge' pos='0 0 0' axis='0 -1 -1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='left_larm' type='capsule' fromto='0.01 -0.01 0.01 .17 -.17 .17' size='0.031' />
<geom name='left_hand' type='sphere' pos='.18 -.18 .18' size='0.04'/>
</body>
</body>
</body>
</worldbody>
<tendon>
<fixed name='left_hipknee'>
<joint joint='left_hip_y' coef='-1'/>
<joint joint='left_knee' coef='1'/>
</fixed>
<fixed name='right_hipknee'>
<joint joint='right_hip_y' coef='-1'/>
<joint joint='right_knee' coef='1'/>
</fixed>
</tendon>
<keyframe>
<key qpos='-0.0233227 0.00247283 0.0784829 0.728141 0.00223397 -0.685422 -0.00181805 -0.000580139 -0.245119 0.0329713 -0.0461148 0.0354257 0.252234 -0.0347763 -0.4663 -0.0313013 0.0285638 0.0147285 0.264063 -0.0346441 -0.559198 0.021724 -0.0333332 -0.718563 0.872778 0.000260393 0.733088 0.872748' />
<key qpos='0.0168601 -0.00192002 0.127167 0.762693 0.00191588 0.646754 -0.00210291 -0.000199049 0.0573113 -4.05731e-005 0.0134177 -0.00468944 0.0985945 -0.282695 -0.0469067 0.00874203 0.0263262 -0.00295056 0.0984851 -0.282098 -0.044293 0.00475795 0.127371 -0.42895 0.882402 -0.0980573 0.428506 0.88193' />
<key qpos='0.000471586 0.0317577 0.210587 0.758805 -0.583984 0.254155 0.136322 -0.0811633 0.0870309 -0.0935227 0.0904958 -0.0278004 -0.00978614 -0.359193 0.139761 -0.240168 0.060149 0.237062 -0.00622109 -0.252598 -0.00376874 -0.160597 0.25253 -0.278634 0.834376 -0.990444 -0.169065 0.652876' />
<key qpos='-0.0602175 0.048078 0.194579 -0.377418 -0.119412 -0.675073 -0.622553 0.139093 0.0710746 -0.0506027 0.0863461 0.196165 -0.0276685 -0.521954 -0.267784 0.179051 0.0371897 0.0560134 -0.032595 -0.0480022 0.0357436 0.108502 0.963806 0.157805 0.873092 -1.01145 -0.796409 0.24736' />
</keyframe>
<actuator>
<motor name='abdomen_y' gear='200' joint='abdomen_y' />
<motor name='abdomen_z' gear='200' joint='abdomen_z' />
<motor name='abdomen_x' gear='200' joint='abdomen_x' />
<motor name='right_hip_x' gear='200' joint='right_hip_x' />
<motor name='right_hip_z' gear='200' joint='right_hip_z' />
<motor name='right_hip_y' gear='600' joint='right_hip_y' />
<motor name='right_knee' gear='400' joint='right_knee' />
<motor name='right_ankle_x' gear='100' joint='right_ankle_x' />
<motor name='right_ankle_y' gear='100' joint='right_ankle_y' />
<motor name='left_hip_x' gear='200' joint='left_hip_x' />
<motor name='left_hip_z' gear='200' joint='left_hip_z' />
<motor name='left_hip_y' gear='600' joint='left_hip_y' />
<motor name='left_knee' gear='400' joint='left_knee' />
<motor name='left_ankle_x' gear='100' joint='left_ankle_x' />
<motor name='left_ankle_y' gear='100' joint='left_ankle_y' />
<motor name='right_shoulder1' gear='100' joint='right_shoulder1' />
<motor name='right_shoulder2' gear='100' joint='right_shoulder2' />
<motor name='right_elbow' gear='200' joint='right_elbow' />
<motor name='left_shoulder1' gear='100' joint='left_shoulder1' />
<motor name='left_shoulder2' gear='100' joint='left_shoulder2' />
<motor name='left_elbow' gear='200' joint='left_elbow' />
</actuator>
</mujoco>
| 11,517 | XML | 68.385542 | 314 | 0.528784 |
KallPap/FRL-SHAC-Extension/dflex/tests/assets/ant.xml

<mujoco model="ant">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.001" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<worldbody>
<body name="torso" pos="0 0 0.75">
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="150"/>
</actuator>
</mujoco> | 4,043 | XML | 61.215384 | 125 | 0.550829 |
KallPap/FRL-SHAC-Extension/dflex/docs/index.rst

Welcome to dFlex's documentation!
==================================

dFlex is a differentiable multiphysics engine for PyTorch. It is written entirely in Python and supports reverse-mode differentiation w.r.t. any simulation input.

It includes a USD-based visualization module (:class:`dflex.render`), which can generate time-sampled USD files, or update an existing stage on-the-fly.

Prerequisites
-------------

* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)

Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once they are downloaded, follow the instructions there to add them to your PYTHONPATH environment variable.
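For example (a sketch only -- the exact path depends on where you extracted the USD package):

.. code-block::

    export PYTHONPATH=$PYTHONPATH:/path/to/usd/lib/python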
.. toctree::
   :maxdepth: 3
   :caption: Contents:

   modules/model
   modules/sim
   modules/render

Quick Start
-----------------

First ensure that the package is installed in your local Python environment (use the -e option if you will be doing development):

.. code-block::

    pip install -e dflex

Then, to use the engine you can import the simulation module as follows:

.. code-block::

    import dflex

To build physical models there is a helper class available in :class:`dflex.model.ModelBuilder`. This can be used to create models programmatically from Python. For example, to create a chain of particles:
.. code-block::

    builder = dflex.model.ModelBuilder()

    # anchor point (zero mass)
    builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)

    # build chain
    for i in range(1, 10):
        builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
        builder.add_spring(i - 1, i, 1.e+3, 0.0, 0)

    # add ground plane
    builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)

Once you have built your model you must convert it to a finalized PyTorch simulation data structure using :func:`dflex.model.ModelBuilder.finalize()`:

.. code-block::

    model = builder.finalize('cpu')

The model object represents static (non-time-varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.
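The time-varying simulation state is then created from the model (the same pattern used throughout the dFlex test scripts):

.. code-block::

    state = model.state()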
Time Stepping
-------------

To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers semi-implicit integration (fully implicit integration is planned) via the :class:`dflex.sim.SemiImplicitIntegrator` class, as follows:

.. code-block::

    sim_dt = 1.0/60.0
    sim_steps = 100

    integrator = dflex.sim.SemiImplicitIntegrator()

    for i in range(0, sim_steps):
        state = integrator.forward(model, state, sim_dt)
Rendering
---------

To visualize the scene dFlex supports a USD-based update via the :class:`dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage, and the physical model.

.. code-block::

    import dflex.render

    stage = Usd.Stage.CreateNew("test.usda")

    renderer = dflex.render.UsdRenderer(model, stage)
    renderer.draw_points = True
    renderer.draw_springs = True
    renderer.draw_shapes = True

Each frame the renderer should be updated with the current model state and the current elapsed simulation time:

.. code-block::

    renderer.update(state, sim_time)

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 3,311 | reStructuredText | 27.8 | 228 | 0.700393 |
KallPap/FRL-SHAC-Extension/dflex/docs/conf.py

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../dflex'))
# -- Project information -----------------------------------------------------
project = 'dFlex'
copyright = '2020, NVIDIA'
author = 'NVIDIA'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    # 'sphinx.ext.autosummary',
    'sphinx.ext.todo',
    'autodocsumm',
]
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = 'description'
# document class *and* __init__ methods
autoclass_content = 'both'
todo_include_todos = True
intersphinx_mapping = {
    'python': ("https://docs.python.org/3", None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'PyTorch': ('http://pytorch.org/docs/master/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| 2,515 | Python | 32.105263 | 81 | 0.659245 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/sim.rst | dflex.sim
===========
.. currentmodule:: dflex.sim
.. toctree::
:maxdepth: 2
.. automodule:: dflex.sim
:members:
:undoc-members:
:show-inheritance:
| 171 | reStructuredText | 12.230768 | 28 | 0.567251 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/model.rst | dflex.model
===========
.. currentmodule:: dflex.model
.. toctree::
:maxdepth: 2
model.modelbuilder
model.model
model.state
| 151 | reStructuredText | 10.692307 | 30 | 0.569536 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/model.model.rst | dflex.model.Model
========================
.. autoclasssumm:: dflex.model.Model
.. autoclass:: dflex.model.Model
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/render.rst | dflex.render
============
.. currentmodule:: dflex.render
.. toctree::
:maxdepth: 2
.. automodule:: dflex.render
:members:
:undoc-members:
:show-inheritance:
| 178 | reStructuredText | 11.785713 | 31 | 0.595506 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/model.state.rst | dflex.model.State
========================
.. autoclasssumm:: dflex.model.State
.. autoclass:: dflex.model.State
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
KallPap/FRL-SHAC-Extension/dflex/docs/modules/model.modelbuilder.rst | dflex.model.ModelBuilder
========================
.. autoclasssumm:: dflex.model.ModelBuilder
.. autoclass:: dflex.model.ModelBuilder
:members:
:undoc-members:
:show-inheritance:
| 194 | reStructuredText | 16.727271 | 43 | 0.628866 |
KallPap/FRL-SHAC-Extension/utils/common.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
# if there's overlap between args_list and commandline input, use commandline input
def solve_argv_conflict(args_list):
arguments_to_be_removed = []
arguments_size = []
for argv in sys.argv[1:]:
if argv.startswith('-'):
size_count = 1
for i, args in enumerate(args_list):
if args == argv:
arguments_to_be_removed.append(args)
for more_args in args_list[i+1:]:
if not more_args.startswith('-'):
size_count += 1
else:
break
arguments_size.append(size_count)
break
for args, size in zip(arguments_to_be_removed, arguments_size):
args_index = args_list.index(args)
for _ in range(size):
args_list.pop(args_index)
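# example (hypothetical): with sys.argv = ['train.py', '--seed', '7'],
# solve_argv_conflict(['--seed', '0', '--logdir', 'logs']) removes '--seed 0'
# from the list, so the command-line value wins when both are later concatenated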
def print_error(*message):
print('\033[91m', 'ERROR ', *message, '\033[0m')
raise RuntimeError
def print_ok(*message):
print('\033[92m', *message, '\033[0m')
def print_warning(*message):
print('\033[93m', *message, '\033[0m')
def print_info(*message):
print('\033[96m', *message, '\033[0m')
from datetime import datetime
def get_time_stamp():
now = datetime.now()
year = now.strftime('%Y')
month = now.strftime('%m')
day = now.strftime('%d')
hour = now.strftime('%H')
minute = now.strftime('%M')
second = now.strftime('%S')
return '{}-{}-{}-{}-{}-{}'.format(month, day, year, hour, minute, second)
import argparse
def parse_model_args(model_args_path):
fp = open(model_args_path, 'r')
model_args = eval(fp.read())
model_args = argparse.Namespace(**model_args)
return model_args
import torch
import numpy as np
import random
import os
def seeding(seed=0, torch_deterministic=False):
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed | 2,965 | Python | 31.23913 | 91 | 0.629005 |
KallPap/FRL-SHAC-Extension/utils/torch_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
print(s)
global log_output
log_output = log_output + s + "\n"
# short hands
# torch quat/vector utils
def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False):
return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad)
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
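    # Hamilton product for quaternions stored as (x, y, z, w); the intermediate
    # ww/yy/zz/qq terms implement a reduced-multiplication scheme equivalent to
    # the direct component-wise formula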
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([x, y, z, w], dim=-1).view(shape)
return quat
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, :3]
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape)
@torch.jit.script
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a - b + c
@torch.jit.script
def quat_axis(q, axis=0):
# type: (Tensor, int) -> Tensor
basis_vec = torch.zeros(q.shape[0], 3, device=q.device)
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([xyz, w], dim=-1))
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def tf_inverse(q, t):
q_inv = quat_conjugate(q)
return q_inv, -quat_apply(q_inv, t)
@torch.jit.script
def tf_apply(q, t, v):
return quat_apply(q, v) + t
@torch.jit.script
def tf_vector(q, v):
return quat_apply(q, v)
@torch.jit.script
def tf_combine(q1, t1, q2, t2):
return quat_mul(q1, q2), quat_apply(q1, t2) + t1
@torch.jit.script
def get_basis_vector(q, v):
return quat_rotate(q, v)
def mem_report():
    '''Report the memory usage of tensor storage in PyTorch.
    Both CPU- and GPU-resident tensors are reported.'''
def _mem_report(tensors, mem_type):
        '''Print the selected tensors of the given type.
        There are two major storage types of concern:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining in system memory (usually unimportant)
        Args:
            - tensors: the tensors of the specified type
            - mem_type: 'CPU' or 'GPU' in the current implementation'''
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
gc.collect()
LEN = 65
objects = gc.get_objects()
#print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN)
def grad_norm(params):
grad_norm = 0.
for p in params:
if p.grad is not None:
grad_norm += torch.sum(p.grad ** 2)
return torch.sqrt(grad_norm)
def print_leaf_nodes(grad_fn, id_set):
if grad_fn is None:
return
if hasattr(grad_fn, 'variable'):
mem_id = id(grad_fn.variable)
if not(mem_id in id_set):
print('is leaf:', grad_fn.variable.is_leaf)
print(grad_fn.variable)
id_set.add(mem_id)
# print(grad_fn)
for i in range(len(grad_fn.next_functions)):
print_leaf_nodes(grad_fn.next_functions[i][0], id_set)
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
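    # analytic KL( N(p0_mu, p0_sigma^2) || N(p1_mu, p1_sigma^2) ) for diagonal
    # Gaussians: log(s1/s0) + (s0^2 + (mu1 - mu0)^2) / (2 s1^2) - 1/2, summed
    # over action dimensions; the 1e-5 terms guard against division by zero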
c1 = torch.log(p1_sigma/p0_sigma + 1e-5)
c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = kl.sum(dim=-1) # returning mean between all steps of sum between all actions
return kl.mean() | 6,536 | Python | 27.176724 | 114 | 0.568696 |
KallPap/FRL-SHAC-Extension/utils/average_meter.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import numpy as np
class AverageMeter(nn.Module):
def __init__(self, in_shape, max_size):
super(AverageMeter, self).__init__()
self.max_size = max_size
self.current_size = 0
self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))
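    # update() approximates a sliding-window mean over the most recent
    # max_size values by down-weighting the stored mean as new batches arrive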
def update(self, values):
size = values.size()[0]
if size == 0:
return
new_mean = torch.mean(values.float(), dim=0)
size = np.clip(size, 0, self.max_size)
old_size = min(self.max_size - size, self.current_size)
size_sum = old_size + size
self.current_size = size_sum
self.mean = (self.mean * old_size + new_mean * size) / size_sum
def clear(self):
self.current_size = 0
self.mean.fill_(0)
def __len__(self):
return self.current_size
def get_mean(self):
return self.mean.squeeze(0).cpu().numpy() | 1,368 | Python | 34.102563 | 82 | 0.65424 |
KallPap/FRL-SHAC-Extension/utils/load_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import torch
import random
import xml.etree.ElementTree as ET
import dflex as df
def set_np_formatting():
np.set_printoptions(edgeitems=30, infstr='inf',
linewidth=4000, nanstr='nan', precision=2,
suppress=False, threshold=10000, formatter=None)
def set_seed(seed, torch_deterministic=False):
if seed == -1 and torch_deterministic:
seed = 42
elif seed == -1:
seed = np.random.randint(0, 10000)
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
# add geometry
for collision in collisions:
origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
geo = collision.geometry
if (geo.box):
builder.add_shape_box(
link,
pos,
rot,
geo.box.size[0]*0.5,
geo.box.size[1]*0.5,
geo.box.size[2]*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.sphere):
builder.add_shape_sphere(
link,
pos,
rot,
geo.sphere.radius,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.cylinder):
# cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
builder.add_shape_capsule(
link,
pos,
df.quat_multiply(rot, r),
geo.cylinder.radius,
geo.cylinder.length*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.mesh):
for m in geo.mesh.meshes:
faces = []
vertices = []
for v in m.vertices:
vertices.append(np.array(v))
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
mesh = df.Mesh(vertices, faces)
builder.add_shape_mesh(
link,
pos,
rot,
mesh,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
def urdf_load(
builder,
filename,
xform,
floating=False,
armature=0.0,
shape_ke=1.e+4,
shape_kd=1.e+4,
shape_kf=1.e+2,
shape_mu=0.25,
limit_ke=100.0,
limit_kd=1.0):
robot = urdfpy.URDF.load(filename)
# maps from link name -> link index
link_index = {}
builder.add_articulation()
# add base
if (floating):
root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
# set dofs to transform
start = builder.joint_q_start[root]
builder.joint_q[start + 0] = xform[0][0]
builder.joint_q[start + 1] = xform[0][1]
builder.joint_q[start + 2] = xform[0][2]
builder.joint_q[start + 3] = xform[1][0]
builder.joint_q[start + 4] = xform[1][1]
builder.joint_q[start + 5] = xform[1][2]
builder.joint_q[start + 6] = xform[1][3]
else:
root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
link_index[robot.links[0].name] = root
# add children
for joint in robot.joints:
type = None
axis = (0.0, 0.0, 0.0)
if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
type = df.JOINT_REVOLUTE
axis = joint.axis
if (joint.joint_type == "prismatic"):
type = df.JOINT_PRISMATIC
axis = joint.axis
if (joint.joint_type == "fixed"):
type = df.JOINT_FIXED
if (joint.joint_type == "floating"):
type = df.JOINT_FREE
parent = -1
if joint.parent in link_index:
parent = link_index[joint.parent]
origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
lower = -1.e+3
upper = 1.e+3
damping = 0.0
# limits
if (joint.limit):
if (joint.limit.lower != None):
lower = joint.limit.lower
if (joint.limit.upper != None):
upper = joint.limit.upper
# damping
if (joint.dynamics):
if (joint.dynamics.damping):
damping = joint.dynamics.damping
# add link
link = builder.add_link(
parent=parent,
X_pj=df.transform(pos, rot),
axis=axis,
type=type,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
damping=damping)
# add collisions
urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
# add ourselves to the index
link_index[joint.child] = link
# build an articulated tree
def build_tree(
builder,
angle,
max_depth,
width=0.05,
length=0.25,
density=1000.0,
joint_stiffness=0.0,
joint_damping=0.0,
shape_ke = 1.e+4,
shape_kd = 1.e+3,
shape_kf = 1.e+2,
shape_mu = 0.5,
floating=False):
def build_recursive(parent, depth):
if (depth >= max_depth):
return
X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
type = df.JOINT_REVOLUTE
axis = (0.0, 0.0, 1.0)
if (depth == 0 and floating == True):
X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
type = df.JOINT_FREE
link = builder.add_link(
parent,
X_pj,
axis,
type,
stiffness=joint_stiffness,
damping=joint_damping)
# capsule
shape = builder.add_shape_capsule(
link,
pos=(length, 0.0, 0.0),
radius=width,
half_width=length,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
# recurse
#build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating)
build_recursive(link, depth + 1)
#
build_recursive(-1, 0)
# Mujoco file format parser
def parse_mjcf(
filename,
builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=1e4,
contact_kd=1e4,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=100.0,
limit_kd=10.0,
armature=0.01,
radians=False,
load_stiffness=False,
load_armature=False):
file = ET.parse(filename)
root = file.getroot()
type_map = {
"ball": df.JOINT_BALL,
"hinge": df.JOINT_REVOLUTE,
"slide": df.JOINT_PRISMATIC,
"free": df.JOINT_FREE,
"fixed": df.JOINT_FIXED
}
def parse_float(node, key, default):
if key in node.attrib:
return float(node.attrib[key])
else:
return default
def parse_bool(node, key, default):
if key in node.attrib:
if node.attrib[key] == "true":
return True
else:
return False
else:
return default
def parse_vec(node, key, default):
if key in node.attrib:
return np.fromstring(node.attrib[key], sep=" ")
else:
return np.array(default)
def parse_body(body, parent, last_joint_pos):
body_name = body.attrib["name"]
body_pos = np.fromstring(body.attrib["pos"], sep=" ")
# last_joint_pos = np.zeros(3)
#-----------------
        # add one body per joint; we assume the joints attached to a single body share the same joint_pos
for i, joint in enumerate(body.findall("joint")):
joint_name = joint.attrib["name"]
joint_type = type_map[joint.attrib.get("type", 'hinge')]
joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
joint_limited = parse_bool(joint, "limited", True)
if joint_limited:
if radians:
joint_range = parse_vec(joint, "range", (np.deg2rad(-170.), np.deg2rad(170.)))
else:
joint_range = np.deg2rad(parse_vec(joint, "range", (-170.0, 170.0)))
else:
joint_range = np.array([-1.e+6, 1.e+6])
if load_stiffness:
joint_stiffness = parse_float(joint, 'stiffness', stiffness)
else:
joint_stiffness = stiffness
joint_damping = parse_float(joint, 'damping', damping)
if load_armature:
joint_armature = parse_float(joint, "armature", armature)
else:
joint_armature = armature
joint_axis = df.normalize(joint_axis)
if (parent == -1):
body_pos = np.array((0.0, 0.0, 0.0))
#-----------------
# add body
link = builder.add_link(
parent,
X_pj=df.transform(body_pos + joint_pos - last_joint_pos, df.quat_identity()),
axis=joint_axis,
type=joint_type,
limit_lower=joint_range[0],
limit_upper=joint_range[1],
limit_ke=limit_ke,
limit_kd=limit_kd,
stiffness=joint_stiffness,
damping=joint_damping,
armature=joint_armature)
# assume that each joint is one body in simulation
parent = link
body_pos = [0.0, 0.0, 0.0]
last_joint_pos = joint_pos
#-----------------
# add shapes to the last joint in the body
for geom in body.findall("geom"):
geom_name = geom.attrib["name"]
geom_type = geom.attrib["type"]
geom_size = parse_vec(geom, "size", [1.0])
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
if (geom_type == "sphere"):
builder.add_shape_sphere(
link,
pos=geom_pos - last_joint_pos, # position relative to the parent frame
rot=geom_rot,
radius=geom_size[0],
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
elif (geom_type == "capsule"):
if ("fromto" in geom.attrib):
geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = geom_fromto[0:3]
end = geom_fromto[3:6]
# compute rotation to align dflex capsule (along x-axis), with mjcf fromto direction
axis = df.normalize(end-start)
angle = math.acos(np.dot(axis, (1.0, 0.0, 0.0)))
axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
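                    # caveat: if the capsule is already aligned with the x-axis, the
                    # cross product above is zero and the axis/angle is degenerate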
geom_pos = (start + end)*0.5
geom_rot = df.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_width = np.linalg.norm(end-start)*0.5
else:
geom_radius = geom_size[0]
geom_width = geom_size[1]
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
if ("axisangle" in geom.attrib):
axis_angle = parse_vec(geom, "axisangle", (0.0, 1.0, 0.0, 0.0))
geom_rot = df.quat_from_axis_angle(axis_angle[0:3], axis_angle[3])
if ("quat" in geom.attrib):
q = parse_vec(geom, "quat", df.quat_identity())
geom_rot = q
geom_rot = df.quat_multiply(geom_rot, df.quat_from_axis_angle((0.0, 1.0, 0.0), -math.pi*0.5))
builder.add_shape_capsule(
link,
pos=geom_pos - last_joint_pos,
rot=geom_rot,
radius=geom_radius,
half_width=geom_width,
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
else:
print("Type: " + geom_type + " unsupported")
#-----------------
# recurse
for child in body.findall("body"):
parse_body(child, link, last_joint_pos)
#-----------------
# start articulation
builder.add_articulation()
world = root.find("worldbody")
for body in world.findall("body"):
parse_body(body, -1, np.zeros(3))
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
self.muscle_strength = 0.0
class Skeleton:
def __init__(self, skeleton_file, muscle_file, builder,
filter={},
visualize_shapes=True,
stiffness=5.0,
damping=2.0,
contact_ke=5000.0,
contact_kd=2000.0,
contact_kf=1000.0,
contact_mu=0.5,
limit_ke=1000.0,
limit_kd=10.0,
armature = 0.05):
self.armature = armature
self.stiffness = stiffness
self.damping = damping
self.contact_ke = contact_ke
self.contact_kd = contact_kd
self.contact_kf = contact_kf
self.limit_ke = limit_ke
self.limit_kd = limit_kd
self.contact_mu = contact_mu
self.visualize_shapes = visualize_shapes
self.parse_skeleton(skeleton_file, builder, filter)
if muscle_file != None:
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
self.mesh_map = {} # map mesh names to link indices objects
self.coord_start = len(builder.joint_q)
self.dof_start = len(builder.joint_qd)
type_map = {
"Ball": df.JOINT_BALL,
"Revolute": df.JOINT_REVOLUTE,
"Prismatic": df.JOINT_PRISMATIC,
"Free": df.JOINT_FREE,
"Fixed": df.JOINT_FIXED
}
builder.add_articulation()
for child in root:
if (child.tag == "Node"):
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = df.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
body_type = body.attrib["type"]
body_mass = float(body.attrib["mass"])
x=body_size[0]
y=body_size[1]
z=body_size[2]
density = body_mass / (x*y*z)
max_body_mass = 15.0
mass_scale = body_mass / max_body_mass
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = -1.e+3
joint_upper = 1.e+3
if (joint_type == type_map["Revolute"]):
if ("lower" in joint.attrib):
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")[0]
if ("upper" in joint.attrib):
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")[0]
# print(joint_type, joint_lower, joint_upper)
if ("axis" in joint.attrib):
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
mesh_file = mesh_base + ".usd"
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)
if (parent_link == -1):
joint_X_p = df.transform_identity()
# add link
link = builder.add_link(
parent=parent_link,
X_pj=joint_X_p,
axis=joint_axis,
type=joint_type,
limit_lower=joint_lower,
limit_upper=joint_upper,
limit_ke=self.limit_ke * mass_scale,
limit_kd=self.limit_kd * mass_scale,
damping=self.damping,
stiffness=self.stiffness * math.sqrt(mass_scale),
armature=self.armature)
# armature=self.armature * math.sqrt(mass_scale))
# add shape
shape = builder.add_shape_box(
body=link,
pos=body_X_c[0],
rot=body_X_c[1],
hx=x*0.5,
hy=y*0.5,
hz=z*0.5,
density=density,
ke=self.contact_ke,
kd=self.contact_kd,
kf=self.contact_kf,
mu=self.contact_mu)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if (child.tag == "Unit"):
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
m.muscle_strength = unit_f0
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if (way_link == -1):
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)
self.muscles = muscles
| 22,759 | Python | 30.523546 | 130 | 0.482622 |
KallPap/FRL-SHAC-Extension/utils/dataset.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
class CriticDataset:
def __init__(self, batch_size, obs, target_values, shuffle = False, drop_last = False):
self.obs = obs.view(-1, obs.shape[-1])
self.target_values = target_values.view(-1)
self.batch_size = batch_size
if shuffle:
self.shuffle()
if drop_last:
self.length = self.obs.shape[0] // self.batch_size
else:
self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1
def shuffle(self):
index = np.random.permutation(self.obs.shape[0])
self.obs = self.obs[index, :]
self.target_values = self.target_values[index]
def __len__(self):
return self.length
def __getitem__(self, index):
start_idx = index * self.batch_size
end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
return {'obs': self.obs[start_idx:end_idx, :], 'target_values': self.target_values[start_idx:end_idx]}
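# example (hypothetical):
#   dataset = CriticDataset(256, obs, target_values, shuffle=True)
#   for i in range(len(dataset)):
#       batch = dataset[i]   # dict with 'obs' and 'target_values' slices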
| 1,420 | Python | 37.405404 | 110 | 0.645775 |
KallPap/FRL-SHAC-Extension/utils/time_report.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import time
from utils.common import *
class Timer:
def __init__(self, name):
self.name = name
self.start_time = None
self.time_total = 0.
def on(self):
assert self.start_time is None, "Timer {} is already turned on!".format(self.name)
self.start_time = time.time()
def off(self):
assert self.start_time is not None, "Timer {} not started yet!".format(self.name)
self.time_total += time.time() - self.start_time
self.start_time = None
def report(self):
print_info('Time report [{}]: {:.2f} seconds'.format(self.name, self.time_total))
def clear(self):
self.start_time = None
self.time_total = 0.
class TimeReport:
def __init__(self):
self.timers = {}
def add_timer(self, name):
assert name not in self.timers, "Timer {} already exists!".format(name)
self.timers[name] = Timer(name = name)
def start_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].on()
def end_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].off()
def report(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
else:
print_info("------------Time Report------------")
for timer_name in self.timers.keys():
self.timers[timer_name].report()
print_info("-----------------------------------")
def clear_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].clear()
else:
for timer_name in self.timers.keys():
self.timers[timer_name].clear()
def pop_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
del self.timers[name]
else:
self.report()
self.timers = {}
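# example (hypothetical):
#   tr = TimeReport()
#   tr.add_timer('rollout')
#   tr.start_timer('rollout'); ...; tr.end_timer('rollout')
#   tr.report()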
| 2,688 | Python | 34.853333 | 90 | 0.58631 |
KallPap/FRL-SHAC-Extension/utils/running_mean_std.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Tuple
import torch
class RunningMeanStd(object):
def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = (), device = 'cuda:0'):
"""
        Calculates the running mean and std of a data stream
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param epsilon: helps with arithmetic issues
:param shape: the shape of the data stream's output
"""
self.mean = torch.zeros(shape, dtype = torch.float32, device = device)
self.var = torch.ones(shape, dtype = torch.float32, device = device)
self.count = epsilon
def to(self, device):
rms = RunningMeanStd(device = device)
rms.mean = self.mean.to(device).clone()
rms.var = self.var.to(device).clone()
rms.count = self.count
return rms
@torch.no_grad()
def update(self, arr: torch.tensor) -> None:
batch_mean = torch.mean(arr, dim = 0)
batch_var = torch.var(arr, dim = 0, unbiased = False)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
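    # the merge below follows Chan et al.'s parallel variance algorithm (see the
    # Wikipedia link above): it combines the stored moments with the batch moments
    # in a single numerically stable pass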
def update_from_moments(self, batch_mean: torch.tensor, batch_var: torch.tensor, batch_count: int) -> None:
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + torch.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
def normalize(self, arr:torch.tensor, un_norm = False) -> torch.tensor:
if not un_norm:
result = (arr - self.mean) / torch.sqrt(self.var + 1e-5)
else:
result = arr * torch.sqrt(self.var + 1e-5) + self.mean
return result | 2,462 | Python | 40.745762 | 111 | 0.638099 |
greydoubt/nvidia_omniverse_stuff/example_2.py | import omni.ext
import omni.ui as ui
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.spawn_prims] MyExtension startup")
self._window = ui.Window("Spawn Primitives", width=300, height=300)
with self._window.frame:
with ui.VStack():
def on_click():
print("clicked!")
ui.Button("Spawn Cube", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[omni.example.spawn_prims] MyExtension shutdown")
| 978 | Python | 39.791665 | 119 | 0.668712 |
greydoubt/nvidia_omniverse_stuff/example_1.py | import omni.ext
import omni.ui as ui
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.spawn_prims] MyExtension startup")
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
def on_click():
print("clicked!")
ui.Button("Click Me", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[omni.example.spawn_prims] MyExtension shutdown")
| 970 | Python | 37.839998 | 119 | 0.664948 |
greydoubt/nvidia_omniverse_stuff/example_3.py | import omni.ext
import omni.ui as ui
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.spawn_prims] MyExtension startup")
self._window = ui.Window("Spawn Primitives", width=300, height=300)
with self._window.frame:
with ui.VStack():
def on_click():
print("clicked!")
ui.Button("Spawn Cube", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[omni.example.spawn_prims] MyExtension shutdown")
import omni.kit.commands
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube')
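# note: placed at module level, this command executes once at import time;
# moving it into on_click() (a plausible next tutorial step) would spawn a
# cube on each button press instead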
| 1,087 | Python | 36.51724 | 119 | 0.679853 |
greydoubt/nvidia_omniverse_stuff/README.md | # nvidia_omniverse_stuff | 24 | Markdown | 23.999976 | 24 | 0.833333 |
greydoubt/nvidia_omniverse_stuff/example_0.py | # template Extension code:
import omni.ext
import omni.ui as ui
# Functions and vars are available to other extension as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[myname.example.spawn_prims] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MynameExampleSpawnPrimsExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[myname.example.spawn_prims] myname example spawn_prims startup")
self._count = 0
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
label = ui.Label("")
def on_click():
self._count += 1
label.text = f"count: {self._count}"
def on_reset():
self._count = 0
label.text = "empty"
on_reset()
with ui.HStack():
ui.Button("Add", clicked_fn=on_click)
ui.Button("Reset", clicked_fn=on_reset)
def on_shutdown(self):
print("[myname.example.spawn_prims] myname example spawn_prims shutdown")
| 1,652 | Python | 34.934782 | 119 | 0.616828 |
NVlabs/DiffRL/diffrl_conda.yml | name: shac
channels:
- pytorch
- defaults
dependencies:
- python=3.8.13=h12debd9_0
- pytorch=1.11.0=py3.8_cuda11.3_cudnn8.2.0_0
- torchvision=0.12.0=py38_cu113
- pip:
- pyyaml==6.0
- tensorboard==2.8.0
- tensorboardx==2.5
- urdfpy==0.0.22
- usd-core==22.3
| 288 | YAML | 18.266665 | 46 | 0.611111 |
NVlabs/DiffRL/README.md | # SHAC
This repository contains the implementation for the paper [Accelerated Policy Learning with Parallel Differentiable Simulation](https://short-horizon-actor-critic.github.io/) (ICLR 2022).
In this paper, we present a GPU-based differentiable simulator and propose a policy learning method named SHAC that leverages it. We also provide a comprehensive benchmark set for policy learning with differentiable simulation; it currently contains six robotic control problems, shown in the figure below.
<p align="center">
<img src="figures/envs.png" alt="envs" width="800" />
</p>
## Installation
- `git clone https://github.com/NVlabs/DiffRL.git --recursive`
- The code has been tested on
- Operating System: Ubuntu 16.04, 18.04, 20.04, 21.10, 22.04
- Python Version: 3.7, 3.8
- GPU: TITAN X, GTX 1080, RTX 2080, RTX 3080, RTX 3090, RTX 3090 Ti
#### Prerequisites
- In the project folder, create a virtual environment in Anaconda:
```
conda env create -f diffrl_conda.yml
conda activate shac
```
- dflex
```
cd dflex
pip install -e .
```
- rl_games, forked from [rl-games](https://github.com/Denys88/rl_games) (used for PPO and SAC training):
````
cd externals/rl_games
pip install -e .
````
- Install an older version of protobuf required for TensorboardX:
````
pip install protobuf==3.20.0
````
#### Test Examples
A test example can be found in the `examples` folder.
```
python test_env.py --env AntEnv
```
If the console prints `Finish Successfully` on the last line, the installation succeeded.
## Training
Running the following command in the `examples` folder trains Ant with SHAC.
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --logdir ./logs/Ant/shac
```
We also provide a one-line script, `examples/train_script.py`, to replicate the results reported in the paper for both our method and the baseline methods. The results might differ slightly from those in the paper due to CUDA nondeterminism and differences in operating system, GPU, and Python versions. The plot reported in the paper was produced with a TITAN X on Ubuntu 16.04.
#### SHAC (Our Method)
For example, running the following commands in the `examples` folder trains the Ant and SNU Humanoid (Humanoid MTU in the paper) environments with SHAC, each for 5 individual seeds.
```
python train_script.py --env Ant --algo shac --num-seeds 5
```
```
python train_script.py --env SNUHumanoid --algo shac --num-seeds 5
```
#### Baseline Algorithms
For example, running the following command in the `examples` folder trains the Ant environment with the PPO implementation from rl_games for 5 individual seeds:
```
python train_script.py --env Ant --algo ppo --num-seeds 5
```
## Testing
To test a trained policy, pass the policy checkpoint to the training script and add the `--play` flag. For example, the following command tests a trained policy (assuming the policy is located at `logs/Ant/shac/policy.pt`):
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --checkpoint ./logs/Ant/shac/policy.pt --play [--render]
```
The `--render` flag indicates whether to export a video of the task execution. If set, the exported video is encoded in the `.usd` format and stored in the `examples/output` folder. To visualize the exported `.usd` file, refer to [USD at NVIDIA](https://developer.nvidia.com/usd).
## Citation
If you find our paper or code is useful, please consider citing:
```bibtex
@inproceedings{xu2021accelerated,
title={Accelerated Policy Learning with Parallel Differentiable Simulation},
author={Xu, Jie and Makoviychuk, Viktor and Narang, Yashraj and Ramos, Fabio and Matusik, Wojciech and Garg, Animesh and Macklin, Miles},
booktitle={International Conference on Learning Representations},
year={2021}
}
``` | 3,885 | Markdown | 34.327272 | 370 | 0.73668 |
NVlabs/DiffRL/examples/train_script.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import argparse
configs = {'Ant': 'ant.yaml', 'CartPole': 'cartpole_swing_up.yaml', 'Hopper': 'hopper.yaml', 'Cheetah': 'cheetah.yaml', 'Humanoid': 'humanoid.yaml', 'SNUHumanoid': 'snu_humanoid.yaml'}
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Ant', choices=['Ant', 'CartPole', 'Hopper', 'Cheetah', 'Humanoid', 'SNUHumanoid'])
parser.add_argument('--algo', type=str, default='shac', choices=['shac', 'ppo', 'sac', 'bptt'])
parser.add_argument('--num-seeds', type=int, default=5)
parser.add_argument('--save-dir', type=str, default='./logs/')
args = parser.parse_args()
''' generate seeds '''
seeds = []
for i in range(args.num_seeds):
seeds.append(i * 10)
''' generate commands '''
commands = []
for i in range(len(seeds)):
seed = seeds[i]
save_dir = os.path.join(args.save_dir, args.env, args.algo, str(seed))
config_path = os.path.join('./cfg', args.algo, configs[args.env])
if args.algo == 'shac':
script_name = 'train_shac.py'
elif args.algo == 'ppo' or args.algo == 'sac':
script_name = 'train_rl.py'
elif args.algo == 'bptt':
script_name = 'train_bptt.py'
else:
raise NotImplementedError
cmd = 'python {} '\
'--cfg {} '\
'--seed {} '\
'--logdir {} '\
'--no-time-stamp'\
.format(script_name, config_path, seed, save_dir)
commands.append(cmd)
for command in commands:
os.system(command) | 1,898 | Python | 34.830188 | 184 | 0.653319 |
NVlabs/DiffRL/examples/train_bptt.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# gradient-based policy optimization by actor critic method
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import argparse
import envs
import algorithms.bptt as bptt
import os
import sys
import yaml
import torch
import numpy as np
import copy
from utils.common import *
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--cfg", "type": str, "default": "./cfg/ac/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights"},
{"name": "--logdir", "type": str, "default": "logs/tmp/ac/"},
{"name": "--save-interval", "type": int, "default": 0},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"},
{"name": "--device", "type": str, "default": "cuda:0"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."}]
# parse arguments
args = parse_arguments(
description="BPTT",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
args.device = torch.device(args.device)
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
traj_optimizer = bptt.BPTT(cfg_train)
if args.train:
traj_optimizer.train()
else:
traj_optimizer.play(cfg_train) | 4,024 | Python | 34.307017 | 124 | 0.601392 |
NVlabs/DiffRL/examples/combine_batch_logs.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
based on https://stackoverflow.com/questions/43068200/how-to-display-the-average-of-multiple-runs-on-tensorboard
'''
import os
from collections import defaultdict
import numpy as np
import shutil
import tensorflow as tf
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from tensorboardX import SummaryWriter
import argparse
tag_mapping = {#'rewards0/frame': 'policy_loss/step', 'rewards0/iter': 'policy_loss/iter', 'rewards0/time': 'policy_loss/time',
'rewards0/frame': 'rewards/step', 'rewards0/iter': 'rewards/iter', 'rewards0/time': 'rewards/time',
# 'rewards/frame': 'policy_loss/step', 'rewards/iter': 'policy_loss/iter', 'rewards/time': 'policy_loss/time',
'rewards/frame': 'rewards/step', 'rewards/step': 'rewards/step', 'rewards/iter': 'rewards/iter', 'rewards/time': 'rewards/time',
'policy_loss/step': 'policy_loss/step', 'policy_loss/iter': 'policy_loss/iter', 'policy_loss/time': 'policy_loss/time',
'actor_loss/iter': 'actor_loss/iter', 'actor_loss/step': 'actor_loss/step',
# 'policy_loss/step': 'rewards/step', 'policy_loss/iter': 'rewards/iter', 'policy_loss/time': 'rewards/time',
'training_loss/step': 'training_loss/step', 'training_loss/iter': 'training_loss/iter', 'training_loss/time': 'training_loss/time',
'best_policy_loss/step': 'best_policy_loss/step',
'episode_lengths/iter': 'episode_lengths/iter', 'episode_lengths/step': 'episode_lengths/step', 'episode_lengths/frame': 'episode_lengths/step',
'value_loss/step': 'value_loss/step', 'value_loss/iter': 'value_loss/iter'}
def tabulate_events(dpath):
summary_iterators = []
for dname in os.listdir(dpath):
for subfolder_name in args.subfolder_names:
if os.path.exists(os.path.join(dpath, dname, subfolder_name)):
summary_iterators.append(EventAccumulator(os.path.join(dpath, dname, subfolder_name)).Reload())
break
tags = summary_iterators[0].Tags()['scalars']
# for it in summary_iterators:
# assert it.Tags()['scalars'] == tags
out_values = dict()
out_steps = dict()
for tag in tags:
if tag not in tag_mapping.keys():
continue
# gathering steps
steps_set = set()
for summary in summary_iterators:
for event in summary.Scalars(tag):
steps_set.add(event.step)
is_reward = ('reward' in tag)
is_loss = ('loss' in tag)
steps = list(steps_set)
steps.sort()
# steps = steps[:500]
new_tag_name = tag_mapping[tag]
out_values[new_tag_name] = np.zeros((len(steps), len(summary_iterators)))
out_steps[new_tag_name] = np.array(steps)
for summary_id, summary in enumerate(summary_iterators):
events = summary.Scalars(tag)
i = 0
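            # zero-order hold: for each global step, take this run's most recent
            # event at or before that step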
for step_id, step in enumerate(steps):
while i + 1 < len(events) and events[i + 1].step <= step:
i += 1
# if events[i].value > 100000. or events[i].value < -100000.:
# import IPython
# IPython.embed()
out_values[new_tag_name][step_id, summary_id] = events[i].value
return out_steps, out_values
def write_combined_events(dpath, acc_steps, acc_values, dname='combined'):
fpath = os.path.join(dpath, dname)
if os.path.exists(fpath):
shutil.rmtree(fpath)
writer = SummaryWriter(fpath)
tags = acc_values.keys()
for tag in tags:
for i in range(len(acc_values[tag])):
mean = np.array(acc_values[tag][i]).mean()
writer.add_scalar(tag, mean, acc_steps[tag][i])
writer.flush()
parser = argparse.ArgumentParser()
parser.add_argument('--batch-folder', type = str, default='path/to/batch/folder')
parser.add_argument('--subfolder-names', type = str, nargs = '+', default=['log', 'runs']) # 'runs' for rl
args = parser.parse_args()
dpath = args.batch_folder
acc_steps, acc_values = tabulate_events(dpath)
write_combined_events(dpath, acc_steps, acc_values) | 4,683 | Python | 39.730434 | 160 | 0.62823 |
NVlabs/DiffRL/examples/test_env.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import torch
import random
import envs
from utils.common import *
import argparse
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument('--env', type = str, default = 'AntEnv')
parser.add_argument('--num-envs', type = int, default = 64)
parser.add_argument('--render', default = False, action = 'store_true')
args = parser.parse_args()
seeding()
env_fn = getattr(envs, args.env)
env = env_fn(num_envs = args.num_envs, \
device = 'cuda:0', \
render = args.render, \
seed = 0, \
stochastic_init = True, \
MM_caching_frequency = 16, \
no_grad = True)
obs = env.reset()
num_actions = env.num_actions
t_start = time.time()
reward_episode = 0.
for i in range(1000):
actions = torch.randn((args.num_envs, num_actions), device = 'cuda:0')
obs, reward, done, info = env.step(actions)
reward_episode += reward
t_end = time.time()
print('fps = ', 1000 * args.num_envs / (t_end - t_start))
print('mean reward = ', reward_episode.mean().detach().cpu().item())
print('Finish Successfully')
| 1,731 | Python | 25.646153 | 76 | 0.677643 |
NVlabs/DiffRL/examples/train_shac.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# gradient-based policy optimization by actor critic method
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import argparse
import envs
import algorithms.shac as shac
import os
import sys
import yaml
import torch
import numpy as np
import copy
from utils.common import *
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--cfg", "type": str, "default": "./cfg/shac/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights"},
{"name": "--logdir", "type": str, "default": "logs/tmp/shac/"},
{"name": "--save-interval", "type": int, "default": 0},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"},
{"name": "--device", "type": str, "default": "cuda:0"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."}]
# parse arguments
args = parse_arguments(
description="SHAC",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
args.device = torch.device(args.device)
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
traj_optimizer = shac.SHAC(cfg_train)
if args.train:
traj_optimizer.train()
else:
traj_optimizer.play(cfg_train) | 4,024 | Python | 34.307017 | 124 | 0.602386 |
NVlabs/DiffRL/examples/train_rl.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
from rl_games.common import env_configurations, experiment, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.torch_runner import Runner
from rl_games.algos_torch import torch_ext
import argparse
import envs
import yaml
import numpy as np
import copy
import torch
from utils.common import *
def create_dflex_env(**kwargs):
env_fn = getattr(envs, cfg_train["params"]["diff_env"]["name"])
env = env_fn(num_envs=cfg_train["params"]["config"]["num_actors"], \
render=args.render, seed=args.seed, \
episode_length=cfg_train["params"]["diff_env"].get("episode_length", 1000), \
no_grad=True, stochastic_init=cfg_train['params']['diff_env']['stochastic_env'], \
MM_caching_frequency=cfg_train['params']['diff_env'].get('MM_caching_frequency', 1))
print('num_envs = ', env.num_envs)
print('num_actions = ', env.num_actions)
print('num_obs = ', env.num_obs)
    frames = kwargs.pop('frames', 1)
    if frames > 1:
        # `wrappers` was referenced without an import in the original file; rl_games
        # ships a FrameStack wrapper (module path assumed: rl_games.common.wrappers).
        from rl_games.common import wrappers
        env = wrappers.FrameStack(env, frames, False)
    return env
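# Note: create_dflex_env reads the module-level `cfg_train` and `args` names that
# are only assigned in the __main__ block below, so it is not importable on its own.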
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
self.full_state = {}
self.rl_device = "cuda:0"
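        # NOTE: hard-coded device; the --rl_device argument parsed below is not
        # consulted at this point.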
self.full_state["obs"] = self.env.reset(force_reset=True).to(self.rl_device)
print(self.full_state["obs"].shape)
def step(self, actions):
self.full_state["obs"], reward, is_done, info = self.env.step(actions.to(self.env.device))
return self.full_state["obs"].to(self.rl_device), reward.to(self.rl_device), is_done.to(self.rl_device), info
def reset(self):
self.full_state["obs"] = self.env.reset(force_reset=True)
return self.full_state["obs"].to(self.rl_device)
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
print(info['action_space'], info['observation_space'])
return info
vecenv.register('DFLEX', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('dflex', {
'env_creator': lambda **kwargs: create_dflex_env(**kwargs),
'vecenv_type': 'DFLEX'})
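# Registration glue (control-flow sketch): a cfg file sets config.env_name: dflex;
# rl_games resolves that key via env_configurations to create_dflex_env() above,
# and 'vecenv_type': 'DFLEX' routes vectorized stepping through RLGPUEnv.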
def parse_arguments(description="Testing Args", custom_parameters=[]):
parser = argparse.ArgumentParser()
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--num_envs", "type": int, "default": 0, "help": "Number of envirnments"},
{"name": "--cfg", "type": str, "default": "./cfg/rl/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights, only for rl_games RL library"},
{"name": "--rl_device", "type": str, "default": "cuda:0",
"help": "Choose CPU or GPU device for inferencing policy network"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."},
{"name": "--logdir", "type": str, "default": "logs/tmp/rl/"},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"}]
# parse arguments
args = parse_arguments(
description="RL Policy",
custom_parameters=custom_parameters)
return args
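# Example invocations (the cfg path is the default defined above; the checkpoint
# path is a placeholder):
#   python examples/train_rl.py --cfg ./cfg/ppo/ant.yaml --logdir ./logs/tmp/rl/
#   python examples/train_rl.py --cfg ./cfg/ppo/ant.yaml --checkpoint <weights.pth> --play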
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
if args.num_envs > 0:
cfg_train["params"]["config"]["num_actors"] = args.num_envs
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
# save config
if cfg_train['params']['general']['train']:
log_dir = cfg_train["params"]["general"]["logdir"]
os.makedirs(log_dir, exist_ok = True)
        with open(os.path.join(log_dir, 'cfg.yaml'), 'w') as f:
            yaml.dump(cfg_train, f)
runner = Runner()
runner.load(cfg_train)
runner.reset()
runner.run(vargs)
| 6,658 | Python | 34.801075 | 124 | 0.611745 |
NVlabs/DiffRL/examples/cfg/sac/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/hopper.pth
config:
name: 'Hopper_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'ant' | 982 | YAML | 17.203703 | 31 | 0.602851 |
NVlabs/DiffRL/examples/cfg/sac/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 512, 512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/humanoid_mtu.pth
config:
name: 'Humanoid_SNU_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 10000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.0002
actor_lr: 0.0003
critic_lr: 0.0003
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 2
replay_buffer_size: 1000000
num_actors: 256
env_config:
env_name: 'snu_humanoid' | 1,011 | YAML | 17.74074 | 33 | 0.614243 |
NVlabs/DiffRL/examples/cfg/sac/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/humanoid.pth
config:
name: 'Humanoid_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.0002
actor_lr: 0.0003
critic_lr: 0.0003
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 2
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'humanoid' | 985 | YAML | 17.259259 | 31 | 0.611168 |
NVlabs/DiffRL/examples/cfg/sac/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: 'Ant_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 128
env_config:
env_name: 'ant' | 974 | YAML | 17.055555 | 31 | 0.599589 |
NVlabs/DiffRL/examples/cfg/sac/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [64, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/cartpole_swing_up.pth
config:
name: 'CartPoleSwingUp_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 1000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 1024
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 32
env_config:
env_name: 'ant' | 1,003 | YAML | 17.592592 | 37 | 0.611166 |
NVlabs/DiffRL/examples/cfg/sac/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/cheetah.pth
config:
name: 'Cheetah_SAC'
env_name: dflex
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 5000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 5
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'ant' | 985 | YAML | 17.259259 | 31 | 0.604061 |
NVlabs/DiffRL/examples/cfg/bptt/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam # ['gd', 'adam', 'sgd', 'lbfgs']
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_hopp_bptt
env_name: dflex
actor_learning_rate: 1e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 128
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 727 | YAML | 18.675675 | 48 | 0.580468 |
NVlabs/DiffRL/examples/cfg/bptt/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [512, 256]
activation: elu
actor_logstd_init: -1.0
config:
name: df_humanoid_ac
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 16
save_interval: 200
player:
determenistic: True
games_num: 4
num_actors: 1
print_stats: True
| 716 | YAML | 17.868421 | 48 | 0.599162 |
NVlabs/DiffRL/examples/cfg/bptt/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [256, 128]
activation: elu
actor_logstd_init: -1.0
config:
name: df_humanoid_bptt
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 32
save_interval: 200
player:
determenistic: True
games_num: 4
num_actors: 1
print_stats: True
| 723 | YAML | 18.052631 | 48 | 0.593361 |
NVlabs/DiffRL/examples/cfg/bptt/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_ant_bptt
env_name: dflex
actor_learning_rate: 4e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 1000
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 691 | YAML | 17.702702 | 48 | 0.586107 |
NVlabs/DiffRL/examples/cfg/bptt/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: adam
network:
actor: ActorStochasticMLP
actor_mlp:
units: [64, 64]
activation: elu
actor_logstd_init: -1.0
config:
name: df_cartpole_swing_up_bptt
env_name: dflex
actor_learning_rate: 1e-2 # adam with linear schedule
lr_schedule: linear # ['constant', 'linear']
betas: [0.7, 0.95] # adam
max_epochs: 500
steps_num: 240
grad_norm: 1.0
truncate_grads: True
num_actors: 64
player:
# render: True
determenistic: True
games_num: 12
num_actors: 4
print_stats: True
| 711 | YAML | 18.777777 | 57 | 0.609001 |
NVlabs/DiffRL/examples/cfg/bptt/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: adam # ['gd', 'adam', 'sgd', 'lbfgs']
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
actor_logstd_init: -1.0
config:
name: df_cheetah_bptt
env_name: dflex
actor_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
obs_rms: True
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 128
grad_norm: 1.0
truncate_grads: True
num_actors: 32
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 731 | YAML | 18.783783 | 48 | 0.582763 |
NVlabs/DiffRL/examples/cfg/ppo/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_hopper_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 1,517 | YAML | 17.975 | 33 | 0.588003 |
NVlabs/DiffRL/examples/cfg/ppo/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_hum_mtu_ppo.pth
config:
name: df_hum_mtu_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 20000
save_best_after: 100
save_frequency: 1000
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 6
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 1,530 | YAML | 18.1375 | 34 | 0.590196 |
NVlabs/DiffRL/examples/cfg/ppo/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_humanoid_ppo.pth
config:
name: df_humanoid_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 50
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 5
num_actors: 1
print_stats: True | 1,529 | YAML | 18.367088 | 35 | 0.589274 |
NVlabs/DiffRL/examples/cfg/ppo/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_ant_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 2048
steps_num: 32
minibatch_size: 16384
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 24
num_actors: 3
print_stats: True
| 1,513 | YAML | 17.925 | 33 | 0.586913 |
NVlabs/DiffRL/examples/cfg/ppo/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_cartpole_swing.pth
config:
name: df_cartpole_swing_up
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 500
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
steps_num: 240
num_actors: 32
minibatch_size: 1920
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
# render: True
determenistic: True
games_num: 12
num_actors: 4
print_stats: True
| 1,552 | YAML | 18.172839 | 37 | 0.590851 |
NVlabs/DiffRL/examples/cfg/ppo/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_cheetah_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 1,519 | YAML | 18 | 33 | 0.588545 |
NVlabs/DiffRL/examples/cfg/shac/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_hopper_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 256
save_interval: 400
player:
determenistic: False
games_num: 1
num_actors: 1
print_stats: True
| 902 | YAML | 19.066666 | 48 | 0.600887 |
NVlabs/DiffRL/examples/cfg/shac/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
network:
actor: ActorStochasticMLP
actor_mlp:
units: [512, 256]
activation: elu
critic: CriticMLP
critic_mlp:
units: [256, 256]
activation: elu
config:
name: df_snu_humanoid_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 5e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.995
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 910 | YAML | 19.244444 | 48 | 0.606593 |
NVlabs/DiffRL/examples/cfg/shac/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
network:
actor: ActorStochasticMLP
actor_mlp:
units: [256, 128]
activation: elu
critic: CriticMLP
critic_mlp:
units: [128, 128]
activation: elu
config:
name: df_humanoid_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 5e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.995
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 908 | YAML | 19.2 | 48 | 0.602423 |
NVlabs/DiffRL/examples/cfg/shac/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP # ActorDeterministicMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_ant_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
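# Usage sketch (the SHAC training script name is assumed from this repo's examples/):
#   python examples/train_shac.py --cfg ./cfg/shac/ant.yaml --logdir ./logs/tmp/shac/
# Note: 'determenistic' (sic) above is the key spelling these configs use consistently.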
| 946 | YAML | 20.044444 | 56 | 0.602537 |
NVlabs/DiffRL/examples/cfg/shac/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
network:
actor: ActorStochasticMLP #ActorDeterministicMLP
actor_mlp:
units: [64, 64]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_cartpole_swing_up_shac
actor_learning_rate: 1e-2 # adam
critic_learning_rate: 1e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 500
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 100
player:
determenistic: True
games_num: 4
num_actors: 4
print_stats: True
| 961 | YAML | 20.377777 | 56 | 0.611863 |
NVlabs/DiffRL/examples/cfg/shac/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP # ActorDeterministicMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_cheetah_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 958 | YAML | 20.311111 | 56 | 0.60334 |
NVlabs/DiffRL/envs/cartpole_swing_up.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class CartPoleSwingUpEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=1024, seed=0, episode_length=240, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
num_obs = 5
num_act = 1
super(CartPoleSwingUpEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# action parameters
self.action_strength = 1000.
# loss related
self.pole_angle_penalty = 1.0
self.pole_velocity_penalty = 0.1
self.cart_position_penalty = 0.05
self.cart_velocity_penalty = 0.1
self.cart_action_penalty = 0.0
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "CartPoleSwingUp_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1. / 60.
self.sim_substeps = 4
self.sim_dt = self.dt
if self.visualize:
self.env_dist = 1.0
else:
self.env_dist = 0.0
self.num_joint_q = 2
self.num_joint_qd = 2
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.urdf_load(self.builder,
os.path.join(asset_folder, 'cartpole.urdf'),
df.transform((0.0, 2.5, 0.0 + self.env_dist * i), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
floating=False,
shape_kd=1e4,
limit_kd=1.)
self.builder.joint_q[i * self.num_joint_q + 1] = -math.pi
self.model = self.builder.finalize(self.device)
self.model.ground = False
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype = torch.float, device = self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
self.start_joint_q = self.state.joint_q.clone()
self.start_joint_qd = self.state.joint_qd.clone()
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 40):
try:
self.stage.Save()
                except Exception:
                    print('USD save error')
self.num_frames -= 40
def step(self, actions):
with df.ScopedTimer("simulate", active=False, detailed=False):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions
self.state.joint_act.view(self.num_envs, -1)[:, 0:1] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
with df.ScopedTimer("reset", active=False, detailed=False):
if len(env_ids) > 0:
self.reset(env_ids)
with df.ScopedTimer("render", active=False, detailed=False):
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids=None, force_reset=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# fixed start state
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = self.start_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = self.start_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] \
+ np.pi * (torch.rand(size=(len(env_ids), self.num_joint_q), device=self.device) - 0.5)
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] \
+ 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self):
with torch.no_grad(): # TODO: check with Miles
current_joint_q = self.state.joint_q.clone()
current_joint_qd = self.state.joint_qd.clone()
current_joint_act = self.state.joint_act.clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.state.joint_act = current_joint_act
'''
    This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
    It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def calculateObservations(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0:1]
theta = self.state.joint_q.view(self.num_envs, -1)[:, 1:2]
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0:1]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1:2]
# observations: [x, xdot, sin(theta), cos(theta), theta_dot]
self.obs_buf = torch.cat([x, xdot, torch.sin(theta), torch.cos(theta), theta_dot], dim = -1)
def calculateReward(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0]
theta = tu.normalize_angle(self.state.joint_q.view(self.num_envs, -1)[:, 1])
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1]
self.rew_buf = -torch.pow(theta, 2.) * self.pole_angle_penalty \
- torch.pow(theta_dot, 2.) * self.pole_velocity_penalty \
- torch.pow(x, 2.) * self.cart_position_penalty \
- torch.pow(xdot, 2.) * self.cart_velocity_penalty \
- torch.sum(self.actions ** 2, dim = -1) * self.cart_action_penalty
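        # i.e. r = -(1.0*theta^2 + 0.1*theta_dot^2 + 0.05*x^2 + 0.1*xdot^2),
        # using the penalty weights set in __init__; the action term is currently
        # zeroed out because cart_action_penalty = 0.0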
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) | 9,011 | Python | 38.876106 | 187 | 0.582399 |
NVlabs/DiffRL/envs/__init__.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
from envs.ant import AntEnv
from envs.cheetah import CheetahEnv
from envs.hopper import HopperEnv
from envs.snu_humanoid import SNUHumanoidEnv
from envs.cartpole_swing_up import CartPoleSwingUpEnv
from envs.humanoid import HumanoidEnv | 694 | Python | 48.642854 | 76 | 0.832853 |
NVlabs/DiffRL/envs/snu_humanoid.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd, UsdGeom, Gf
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class SNUHumanoidEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}
self.skeletons = []
self.muscle_strengths = []
self.mtu_actuations = True
self.inv_control_freq = 1
# "humanoid_snu_lower"
self.num_joint_q = 29
self.num_joint_qd = 24
self.num_dof = self.num_joint_q - 7 # 22
self.num_muscles = 152
self.str_scale = 0.6
num_act = self.num_joint_qd - 6 # 18
num_obs = 71 # 13 + 22 + 18 + 18
        if self.mtu_actuations:
            num_obs = 53 # 71 - 18
            num_act = self.num_muscles
super(SNUHumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.init_sim()
# other parameters
self.termination_height = 0.46
self.termination_tolerance = 0.05
self.height_rew_scale = 4.0
self.action_strength = 100.0
self.action_penalty = -0.001
self.joint_vel_obs_scaling = 0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + "HumanoidSNU_Low_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 48
self.sim_dt = self.dt
self.ground = True
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([10000.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
if self.visualize:
self.env_dist = 2.0
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 1.0
self.asset_folder = os.path.join(os.path.dirname(__file__), 'assets/snu')
asset_path = os.path.join(self.asset_folder, "human.xml")
muscle_path = os.path.join(self.asset_folder, "muscle284.xml")
for i in range(self.num_environments):
if self.mtu_actuations:
skeleton = lu.Skeleton(asset_path, muscle_path, self.builder, self.filter,
stiffness=5.0,
damping=2.0,
contact_ke=5e3,
contact_kd=2e3,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=1e3,
limit_kd=1e1,
armature=0.05)
else:
skeleton = lu.Skeleton(asset_path, None, self.builder, self.filter,
stiffness=5.0,
damping=2.0,
contact_ke=5e3,
contact_kd=2e3,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=1e3,
limit_kd=1e1,
armature=0.05)
# set initial position 1m off the ground
self.builder.joint_q[skeleton.coord_start + 2] = i * self.env_dist
self.builder.joint_q[skeleton.coord_start + 1] = start_height
self.builder.joint_q[skeleton.coord_start + 3:skeleton.coord_start + 7] = self.start_rot
self.start_pos.append([self.builder.joint_q[skeleton.coord_start], start_height, self.builder.joint_q[skeleton.coord_start + 2]])
self.skeletons.append(skeleton)
num_muscles = len(self.skeletons[0].muscles)
num_q = int(len(self.builder.joint_q)/self.num_environments)
num_qd = int(len(self.builder.joint_qd)/self.num_environments)
print(num_q, num_qd)
print("Start joint_q: ", self.builder.joint_q[0:num_q])
print("Num muscles: ", num_muscles)
self.start_joint_q = self.builder.joint_q[7:num_q].copy()
self.start_joint_target = self.start_joint_q.copy()
for m in self.skeletons[0].muscles:
self.muscle_strengths.append(self.str_scale * m.muscle_strength)
for mi in range(len(self.muscle_strengths)):
self.muscle_strengths[mi] = self.str_scale * self.muscle_strengths[mi]
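        # NOTE: str_scale has now been applied twice (once in each loop above),
        # so the effective muscle-strength scaling is str_scale**2 = 0.36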
self.muscle_strengths = tu.to_torch(self.muscle_strengths, device=self.device).repeat(self.num_envs)
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
with torch.no_grad():
muscle_start = 0
skel_index = 0
for s in self.skeletons:
for mesh, link in s.mesh_map.items():
if link != -1:
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
mesh_path = os.path.join(self.asset_folder, "OBJ/" + mesh + ".usd")
self.renderer.add_mesh(mesh, mesh_path, X_sc, 1.0, self.render_time)
for m in range(len(s.muscles)):
start = self.model.muscle_start[muscle_start + m].item()
end = self.model.muscle_start[muscle_start + m + 1].item()
points = []
for w in range(start, end):
link = self.model.muscle_links[w].item()
point = self.model.muscle_points[w].cpu().numpy()
X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strengths[m], 0.2, 0.5), time=self.render_time)
muscle_start += len(s.muscles)
skel_index += 1
self.render_time += self.dt * self.inv_control_freq
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 1):
try:
self.stage.Save()
                    except Exception:
                        print("USD save error")
self.num_frames -= 1
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
actions = actions * 0.5 + 0.5
##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
def create_hook():
def hook(grad):
torch.nan_to_num(grad, 0.0, 0.0, 0.0, out = grad)
return hook
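        # torch.nan_to_num(grad, 0.0, 0.0, 0.0) rewrites NaN/+inf/-inf gradient
        # entries to zero in-place, so a few invalid simulation gradients cannot
        # poison the entire backward pass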
if self.state.joint_q.requires_grad:
self.state.joint_q.register_hook(create_hook())
if self.state.joint_qd.requires_grad:
self.state.joint_qd.register_hook(create_hook())
if actions.requires_grad:
actions.register_hook(create_hook())
#################################################
self.actions = actions.clone()
for ci in range(self.inv_control_freq):
if self.mtu_actuations:
self.model.muscle_activation = actions.view(-1) * self.muscle_strengths
else:
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
with df.ScopedTimer("render", False):
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {} # NOTE: any other things to restore?
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
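        # (for a spatial twist (w, v_o) about the origin: v_p = v_o + w x p = v_o - p x w,
        #  which is the subtraction form used on the next line)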
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:33
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 33:51
up_vec[:, 1:2], # 51
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1)], # 52
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 51]
heading_reward = self.obs_buf[:, 52]
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward) # JIE: not smooth
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
act_penalty = torch.sum(torch.abs(self.actions), dim = -1) * self.action_penalty #torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + act_penalty
# reset agents
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
# an ugly fix for simulation nan values
nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isnan(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isnan(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isinf(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isinf(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
invalid_value_masks = torch.logical_or((torch.abs(self.state.joint_q.view(self.num_environments, -1)) > 1e6).sum(-1) > 0,
(torch.abs(self.state.joint_qd.view(self.num_environments, -1)) > 1e6).sum(-1) > 0)
invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))
self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
self.rew_buf[invalid_masks] = 0.
| 19,037 | Python | 42.967667 | 248 | 0.563429 |
NVlabs/DiffRL/envs/cheetah.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class CheetahEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
num_obs = 17
num_act = 6
super(CheetahEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.action_strength = 200.0
self.action_penalty = -0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Cheetah_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 9
self.num_joint_qd = 9
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rotation = torch.tensor([0.], device = self.device, requires_grad = False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.potentials = tu.to_torch([0.], device=self.device, requires_grad=False).repeat(self.num_envs)
self.prev_potentials = self.potentials.clone()
self.start_pos = []
self.start_joint_q = [0., 0., 0., 0., 0., 0.]
self.start_joint_target = [0., 0., 0., 0., 0., 0.]
start_height = -0.2
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
link_start = len(self.builder.joint_type)
lu.parse_mjcf(os.path.join(asset_folder, "half_cheetah.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=2.e+4,
contact_kd=1.e+3,
contact_kf=1.e+3,
contact_mu=1.,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.1,
radians=True, load_stiffness=True)
self.builder.joint_X_pj[link_start] = df.transform((0.0, 1.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5))
# base transform
self.start_pos.append([0.0, start_height])
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 9] = [0., 0., 0., 0., 0., 0.]
self.builder.joint_target[i*self.num_joint_q + 3:i*self.num_joint_q + 9] = [0., 0., 0., 0., 0., 0.]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
            except Exception:
                print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 3:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] + 0.1 * (torch.rand(size=(len(env_ids), 2), device=self.device) - 0.5) * 2.
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = (torch.rand(len(env_ids), device = self.device) - 0.5) * 0.2
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] + 0.1 * (torch.rand(size=(len(env_ids), self.num_joint_q - 3), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
    cut off the gradient flow from the current state back to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
    It must be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
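        # observation = [joint_q[1:], joint_qd]; the absolute x position (joint_q[0]) is dropped
        # so the observation is invariant to translation along the direction of travel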
self.obs_buf = torch.cat([self.state.joint_q.view(self.num_envs, -1)[:, 1:], self.state.joint_qd.view(self.num_envs, -1)], dim = -1)
def calculateReward(self):
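        # obs index 8 is joint_qd[0], i.e. the forward (x) velocity of the root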
progress_reward = self.obs_buf[:, 8]
self.rew_buf = progress_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
NVlabs/DiffRL/envs/humanoid.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class HumanoidEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
num_obs = 76
num_act = 21
super(HumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.init_sim()
# other parameters
self.termination_height = 0.74
self.motor_strengths = [
200,
200,
200,
200,
200,
600,
400,
100,
100,
200,
200,
600,
400,
100,
100,
100,
100,
200,
100,
100,
200]
self.motor_scale = 0.35
self.motor_strengths = tu.to_torch(self.motor_strengths, dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.action_penalty = -0.002
self.joint_vel_obs_scaling = 0.1
self.termination_tolerance = 0.1
self.height_rew_scale = 10.0
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Humanoid_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 48
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 28
self.num_joint_qd = 27
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([200.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
if self.visualize:
self.env_dist = 2.5
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 1.35
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.parse_mjcf(os.path.join(asset_folder, "humanoid.xml"), self.builder,
stiffness=5.0,
damping=0.1,
contact_ke=2.e+4,
contact_kd=5.e+3,
contact_kf=1.e+3,
contact_mu=0.75,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.007,
load_stiffness=True,
load_armature=True)
# base transform
start_pos_z = i*self.env_dist
self.start_pos.append([0.0, start_height, start_pos_z])
self.builder.joint_q[i*self.num_joint_q:i*self.num_joint_q + 3] = self.start_pos[-1]
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 7] = self.start_rot
num_q = int(len(self.builder.joint_q)/self.num_environments)
num_qd = int(len(self.builder.joint_qd)/self.num_environments)
print(num_q, num_qd)
print("Start joint_q: ", self.builder.joint_q[0:num_q])
self.start_joint_q = self.builder.joint_q[7:num_q].copy()
self.start_joint_target = self.start_joint_q.copy()
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
num_act = int(len(self.state.joint_act) / self.num_environments) - 6
print('num_act = ', num_act)
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 1):
try:
self.stage.Save()
                except Exception:
print("USD save error")
self.num_frames -= 1
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
# todo - make clip range a parameter
actions = torch.clip(actions, -1., 1.)
##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
def create_hook():
def hook(grad):
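                # overwrite NaN/Inf entries of the incoming gradient with zeros, in place,
                # so one bad simulation step does not poison the entire backward pass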
torch.nan_to_num(grad, 0.0, 0.0, 0.0, out = grad)
return hook
if self.state.joint_q.requires_grad:
self.state.joint_q.register_hook(create_hook())
if self.state.joint_qd.requires_grad:
self.state.joint_qd.register_hook(create_hook())
if actions.requires_grad:
actions.register_hook(create_hook())
#################################################
self.actions = actions.clone()
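        # the first 6 dofs belong to the free-floating root (3 angular + 3 linear) and are unactuated;
        # actions drive the 21 actuated joints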
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.motor_scale * self.motor_strengths
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] + 0.2 * (torch.rand(size=(len(env_ids), self.num_joint_q - 7), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
    cut off the gradient flow from the current state back to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
    It must be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
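        # i.e. v_com = v_origin + omega x p  (torch.cross(p, omega) = -(omega x p), hence the subtraction)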
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:32
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 32:53
up_vec[:, 1:2], # 53:54
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 54:55
self.actions.clone()], # 55:76
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 53]
heading_reward = self.obs_buf[:, 54]
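        # piecewise height shaping: a steep quadratic penalty once the torso drops below
        # termination_height + tolerance, and a scaled linear bonus above it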
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
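        # obs index 5 is the world-frame forward (x) velocity of the torso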
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
# an ugly fix for simulation nan values
nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isnan(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isnan(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isinf(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isinf(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
invalid_value_masks = torch.logical_or((torch.abs(self.state.joint_q.view(self.num_environments, -1)) > 1e6).sum(-1) > 0,
(torch.abs(self.state.joint_qd.view(self.num_environments, -1)) > 1e6).sum(-1) > 0)
invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))
self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
        self.rew_buf[invalid_masks] = 0.
NVlabs/DiffRL/envs/ant.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class AntEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = True):
num_obs = 37
num_act = 8
super(AntEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.termination_height = 0.27
self.action_strength = 200.0
self.action_penalty = 0.0
self.joint_vel_obs_scaling = 0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Ant_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 15
self.num_joint_qd = 14
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
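        # a target placed far ahead along +x, so the heading term effectively rewards running forward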
self.targets = tu.to_torch([10000.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
self.start_joint_q = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.start_joint_target = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
if self.visualize:
self.env_dist = 2.5
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 0.75
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.parse_mjcf(os.path.join(asset_folder, "ant.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=4.e+4,
contact_kd=1.e+4,
contact_kf=3.e+3,
contact_mu=0.75,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.05)
# base transform
start_pos_z = i*self.env_dist
self.start_pos.append([0.0, start_height, start_pos_z])
self.builder.joint_q[i*self.num_joint_q:i*self.num_joint_q + 3] = self.start_pos[-1]
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 7] = self.start_rot
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 7:i*self.num_joint_q + 15] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.builder.joint_target[i*self.num_joint_q + 7:i*self.num_joint_q + 15] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
                except Exception:
print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] + 0.2 * (torch.rand(size=(len(env_ids), self.num_joint_q - 7), device = self.device) - 0.5) * 2.
                self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
    cut off the gradient flow from the current state back to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
    It must be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
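        # i.e. v_com = v_origin + omega x p, as in humanoid.py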
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:19
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 19:27
up_vec[:, 1:2], # 27
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 28
self.actions.clone()], # 29:37
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 27]
heading_reward = self.obs_buf[:, 28]
height_reward = self.obs_buf[:, 0] - self.termination_height
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
if self.early_termination:
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
NVlabs/DiffRL/envs/dflex_env.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import torch
import dflex as df
import xml.etree.ElementTree as ET
from gym import spaces
class DFlexEnv:
def __init__(self, num_envs, num_obs, num_act, episode_length, MM_caching_frequency = 1, seed=0, no_grad=True, render=False, device='cuda:0'):
self.seed = seed
self.no_grad = no_grad
df.config.no_grad = self.no_grad
self.episode_length = episode_length
self.device = device
self.visualize = render
self.sim_time = 0.0
self.num_frames = 0 # record the number of frames for rendering
self.num_environments = num_envs
self.num_agents = 1
self.MM_caching_frequency = MM_caching_frequency
# initialize observation and action space
self.num_observations = num_obs
self.num_actions = num_act
self.obs_space = spaces.Box(np.ones(self.num_observations) * -np.Inf, np.ones(self.num_observations) * np.Inf)
self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)
# allocate buffers
self.obs_buf = torch.zeros(
(self.num_envs, self.num_observations), device=self.device, dtype=torch.float, requires_grad=False)
self.rew_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.float, requires_grad=False)
self.reset_buf = torch.ones(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
# end of the episode
self.termination_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
self.progress_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device = self.device, dtype = torch.float, requires_grad = False)
self.extras = {}
def get_number_of_agents(self):
return self.num_agents
@property
def observation_space(self):
return self.obs_space
@property
def action_space(self):
return self.act_space
@property
def num_envs(self):
return self.num_environments
@property
def num_acts(self):
return self.num_actions
@property
def num_obs(self):
return self.num_observations
def get_state(self):
return self.state.joint_q.clone(), self.state.joint_qd.clone()
def reset_with_state(self, init_joint_q, init_joint_qd, env_ids=None, force_reset=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# fixed start state
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = init_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = init_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()
self.progress_buf[env_ids] = 0
self.calculateObservations()
        return self.obs_buf
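# Example usage of a DFlexEnv subclass (a sketch, not part of the training code;
# AntEnv is defined in envs/ant.py):
#   env = AntEnv(num_envs=64, device='cuda:0', render=False, no_grad=True)
#   obs = env.reset()
#   for _ in range(env.episode_length):
#       actions = torch.rand((env.num_envs, env.num_acts), device=env.device) * 2. - 1.
#       obs, rew, done, info = env.step(actions)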
NVlabs/DiffRL/envs/hopper.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#from numpy.lib.function_base import angle
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from copy import deepcopy
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class HopperEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = True):
num_obs = 11
num_act = 3
super(HopperEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.termination_height = -0.45
self.termination_angle = np.pi / 6.
self.termination_height_tolerance = 0.15
self.termination_angle_tolerance = 0.05
self.height_rew_scale = 1.0
self.action_strength = 200.0
self.action_penalty = -1e-1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Hopper_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 6
self.num_joint_qd = 6
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rotation = torch.tensor([0.], device = self.device, requires_grad = False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.start_pos = []
self.start_joint_q = [0., 0., 0.]
self.start_joint_target = [0., 0., 0.]
start_height = 0.0
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
link_start = len(self.builder.joint_type)
lu.parse_mjcf(os.path.join(asset_folder, "hopper.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=2.0,
contact_ke=2.e+4,
contact_kd=1.e+3,
contact_kf=1.e+3,
contact_mu=0.9,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=1.0,
radians=True, load_stiffness=True)
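            # re-orient the root joint frame (rotate -90 deg about x) so the planar MJCF root
            # lies in the simulator's y-up world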
self.builder.joint_X_pj[link_start] = df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5))
# base transform
self.start_pos.append([0.0, start_height])
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 6] = [0., 0., 0.]
            self.builder.joint_target[i*self.num_joint_q + 3:i*self.num_joint_q + 6] = [0., 0., 0.]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
                except Exception:
print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 3:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] + 0.05 * (torch.rand(size=(len(env_ids), 2), device=self.device) - 0.5) * 2.
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = (torch.rand(len(env_ids), device = self.device) - 0.5) * 0.1
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] + 0.05 * (torch.rand(size=(len(env_ids), self.num_joint_q - 3), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.05 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5) * 2.
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
    cut off the gradient flow from the current state back to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
    It must be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
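    # Typical short-horizon usage (a sketch; `policy` stands in for a trained controller):
    #   obs = env.initialize_trajectory()          # detach from earlier steps
    #   for t in range(horizon):
    #       obs, rew, done, info = env.step(policy(obs))
    #       loss = loss - rew.sum()
    #   loss.backward()                            # gradients flow only through this horizon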
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
self.obs_buf = torch.cat([self.state.joint_q.view(self.num_envs, -1)[:, 1:], self.state.joint_qd.view(self.num_envs, -1)], dim = -1)
def calculateReward(self):
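        # shaping terms: quadratic penalty below the height threshold, linear bonus above it,
        # plus an inverted-parabola angle term that peaks when the torso is upright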
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_height_tolerance)
height_reward = torch.clip(height_diff, -1.0, 0.3)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
angle_reward = 1. * (-self.obs_buf[:, 1] ** 2 / (self.termination_angle ** 2) + 1.)
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + height_reward + angle_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
if self.early_termination:
            self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
NVlabs/DiffRL/envs/assets/humanoid.xml
<mujoco model="humanoid">
<statistic extent="2" center="0 0 1"/>
<option timestep="0.00555"/>
<default>
<motor ctrlrange="-1 1" ctrllimited="true"/>
<default class="body">
<geom type="capsule" condim="1" friction="1.0 0.05 0.05" solimp=".9 .99 .003" solref=".015 1" material="self"/>
<joint limited="true" type="hinge" damping="0.1" stiffness="5" armature=".007" solimplimit="0 .99 .01"/>
<site size=".04" group="3"/>
<default class="force-torque">
<site type="box" size=".01 .01 .02" rgba="1 0 0 1" />
</default>
<default class="touch">
<site type="capsule" rgba="0 0 1 .3"/>
</default>
</default>
</default>
<worldbody>
<geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
<body name="torso" pos="0 0 1.5" childclass="body">
<light name="top" pos="0 0 2" mode="trackcom"/>
<camera name="back" pos="-3 0 1" xyaxes="0 -1 0 1 0 2" mode="trackcom"/>
<camera name="side" pos="0 -3 1" xyaxes="1 0 0 0 1 2" mode="trackcom"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<site name="root" class="force-torque"/>
<geom name="torso" type="capsule" fromto="0 -.07 0 0 .07 0" size=".07"/>
<geom name="upper_waist" type="capsule" fromto="-.01 -.06 -.12 -.01 .06 -.12" size=".06"/>
<site name="torso" class="touch" type="box" pos="0 0 -.05" size=".075 .14 .13"/>
<geom name="head" type="sphere" size=".09" pos="0 0 .19"/>
<body name="lower_waist" pos="-.01 0 -.260" quat="1.000 0 -.002 0">
<geom name="lower_waist" type="capsule" fromto="0 -.06 0 0 .06 0" size=".06"/>
<site name="lower_waist" class="touch" size=".061 .06" zaxis="0 1 0"/>
<joint limited="true" name="abdomen_z" pos="0 0 .065" axis="0 0 1" range="-45 45" damping="5" stiffness="20" armature=".02"/>
<joint limited="true" name="abdomen_y" pos="0 0 .065" axis="0 1 0" range="-75 30" damping="5" stiffness="20" armature=".01"/>
<body name="pelvis" pos="0 0 -.165" quat="1.000 0 -.002 0">
<joint limited="true" name="abdomen_x" pos="0 0 .1" axis="1 0 0" range="-35 35" damping="5" stiffness="10" armature=".01"/>
<geom name="butt" type="capsule" fromto="-.02 -.07 0 -.02 .07 0" size=".09"/>
<site name="butt" class="touch" size=".091 .07" pos="-.02 0 0" zaxis="0 1 0"/>
<body name="right_thigh" pos="0 -.1 -.04">
<site name="right_hip" class="force-torque"/>
<joint limited="true" name="right_hip_x" axis="1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_hip_z" axis="0 0 1" range="-60 35" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/>
<geom name="right_thigh" type="capsule" fromto="0 0 0 0 .01 -.34" size=".06"/>
<site name="right_thigh" class="touch" pos="0 .005 -.17" size=".061 .17" zaxis="0 -1 34"/>
<body name="right_shin" pos="0 .01 -.403">
<site name="right_knee" class="force-torque" pos="0 0 .02"/>
<joint limited="true" name="right_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="right_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="right_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="right_foot" pos="0 0 -.39">
<site name="right_ankle" class="force-torque"/>
<joint limited="true" name="right_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<joint limited="true" name="right_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="right_right_foot" type="capsule" fromto="-.07 -.02 0 .14 -.04 0" size=".027"/>
<geom name="left_right_foot" type="capsule" fromto="-.07 0 0 .14 .02 0" size=".027"/>
<site name="right_right_foot" class="touch" pos=".035 -.03 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_right_foot" class="touch" pos=".035 .01 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
<body name="left_thigh" pos="0 .1 -.04">
<site name="left_hip" class="force-torque"/>
<joint limited="true" name="left_hip_x" axis="-1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_hip_z" axis="0 0 -1" range="-60 35" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/>
<geom name="left_thigh" type="capsule" fromto="0 0 0 0 -.01 -.34" size=".06"/>
<site name="left_thigh" class="touch" pos="0 -.005 -.17" size=".061 .17" zaxis="0 1 34"/>
<body name="left_shin" pos="0 -.01 -.403">
<site name="left_knee" class="force-torque" pos="0 0 .02"/>
<joint limited="true" name="left_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="left_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="left_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="left_foot" pos="0 0 -.39">
<site name="left_ankle" class="force-torque"/>
<joint limited="true" name="left_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<joint limited="true" name="left_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="left_left_foot" type="capsule" fromto="-.07 .02 0 .14 .04 0" size=".027"/>
<geom name="right_left_foot" type="capsule" fromto="-.07 0 0 .14 -.02 0" size=".027"/>
<site name="right_left_foot" class="touch" pos=".035 -.01 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_left_foot" class="touch" pos=".035 .03 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
</body>
</body>
<body name="right_upper_arm" pos="0 -.17 .06">
<joint limited="true" name="right_shoulder1" axis="2 1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_shoulder2" axis="0 -1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<geom name="right_upper_arm" type="capsule" fromto="0 0 0 .16 -.16 -.16" size=".04 .16"/>
<site name="right_upper_arm" class="touch" pos=".08 -.08 -.08" size=".041 .14" zaxis="1 -1 -1"/>
<body name="right_lower_arm" pos=".18 -.18 -.18">
<joint limited="true" name="right_elbow" axis="0 -1 1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="right_lower_arm" type="capsule" fromto=".01 .01 .01 .17 .17 .17" size=".031"/>
<site name="right_lower_arm" class="touch" pos=".09 .09 .09" size=".032 .14" zaxis="1 1 1"/>
<geom name="right_hand" type="sphere" size=".04" pos=".18 .18 .18"/>
</body>
</body>
<body name="left_upper_arm" pos="0 .17 .06">
<joint limited="true" name="left_shoulder1" axis="-2 1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_shoulder2" axis="0 -1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<geom name="left_upper_arm" type="capsule" fromto="0 0 0 .16 .16 -.16" size=".04 .16"/>
<site name="left_upper_arm" class="touch" pos=".08 .08 -.08" size=".041 .14" zaxis="1 1 -1"/>
<body name="left_lower_arm" pos=".18 .18 -.18">
<joint limited="true" name="left_elbow" axis="0 -1 -1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="left_lower_arm" type="capsule" fromto=".01 -.01 .01 .17 -.17 .17" size=".031"/>
<site name="left_lower_arm" class="touch" pos=".09 -.09 .09" size=".032 .14" zaxis="1 -1 1"/>
<geom name="left_hand" type="sphere" size=".04" pos=".18 -.18 .18"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor name='abdomen_y' gear='67.5' joint='abdomen_y'/>
<motor name='abdomen_z' gear='67.5' joint='abdomen_z'/>
<motor name='abdomen_x' gear='67.5' joint='abdomen_x'/>
<motor name='right_hip_x' gear='45.0' joint='right_hip_x'/>
<motor name='right_hip_z' gear='45.0' joint='right_hip_z'/>
<motor name='right_hip_y' gear='135.0' joint='right_hip_y'/>
<motor name='right_knee' gear='90.0' joint='right_knee'/>
<motor name='right_ankle_x' gear='22.5' joint='right_ankle_x'/>
<motor name='right_ankle_y' gear='22.5' joint='right_ankle_y'/>
<motor name='left_hip_x' gear='45.0' joint='left_hip_x'/>
<motor name='left_hip_z' gear='45.0' joint='left_hip_z'/>
<motor name='left_hip_y' gear='135.0' joint='left_hip_y'/>
<motor name='left_knee' gear='90.0' joint='left_knee'/>
<motor name='left_ankle_x' gear='22.5' joint='left_ankle_x'/>
<motor name='left_ankle_y' gear='22.5' joint='left_ankle_y'/>
<motor name='right_shoulder1' gear='67.5' joint='right_shoulder1'/>
<motor name='right_shoulder2' gear='67.5' joint='right_shoulder2'/>
<motor name='right_elbow' gear='45.0' joint='right_elbow'/>
<motor name='left_shoulder1' gear='67.5' joint='left_shoulder1'/>
<motor name='left_shoulder2' gear='67.5' joint='left_shoulder2'/>
<motor name='left_elbow' gear='45.0' joint='left_elbow'/>
</actuator>
<sensor>
<subtreelinvel name="torso_subtreelinvel" body="torso"/>
<accelerometer name="torso_accel" site="root"/>
<velocimeter name="torso_vel" site="root"/>
<gyro name="torso_gyro" site="root"/>
<force name="left_ankle_force" site="left_ankle"/>
<force name="right_ankle_force" site="right_ankle"/>
<force name="left_knee_force" site="left_knee"/>
<force name="right_knee_force" site="right_knee"/>
<force name="left_hip_force" site="left_hip"/>
<force name="right_hip_force" site="right_hip"/>
<torque name="left_ankle_torque" site="left_ankle"/>
<torque name="right_ankle_torque" site="right_ankle"/>
<torque name="left_knee_torque" site="left_knee"/>
<torque name="right_knee_torque" site="right_knee"/>
<torque name="left_hip_torque" site="left_hip"/>
<torque name="right_hip_torque" site="right_hip"/>
<touch name="torso_touch" site="torso"/>
<touch name="head_touch" site="head"/>
<touch name="lower_waist_touch" site="lower_waist"/>
<touch name="butt_touch" site="butt"/>
<touch name="right_thigh_touch" site="right_thigh"/>
<touch name="right_shin_touch" site="right_shin"/>
<touch name="right_right_foot_touch" site="right_right_foot"/>
<touch name="left_right_foot_touch" site="left_right_foot"/>
<touch name="left_thigh_touch" site="left_thigh"/>
<touch name="left_shin_touch" site="left_shin"/>
<touch name="right_left_foot_touch" site="right_left_foot"/>
<touch name="left_left_foot_touch" site="left_left_foot"/>
<touch name="right_upper_arm_touch" site="right_upper_arm"/>
<touch name="right_lower_arm_touch" site="right_lower_arm"/>
<touch name="right_hand_touch" site="right_hand"/>
<touch name="left_upper_arm_touch" site="left_upper_arm"/>
<touch name="left_lower_arm_touch" site="left_lower_arm"/>
<touch name="left_hand_touch" site="left_hand"/>
</sensor>
</mujoco>
NVlabs/DiffRL/envs/assets/hopper.xml
<mujoco model="hopper">
<compiler angle="radian" />
<option integrator="RK4" />
<size njmax="500" nconmax="100" />
<visual>
<map znear="0.02" />
</visual>
<default class="main">
<joint limited="true" armature="1" damping="1" />
<geom condim="1" solimp="0.8 0.8 0.01 0.5 2" margin="0.001" material="geom" rgba="0.8 0.6 0.4 1" />
<general ctrllimited="true" ctrlrange="-0.4 0.4" />
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="0.4 0.5 0.6" rgb2="0 0 0" width="100" height="600" />
<texture type="cube" name="texgeom" builtin="flat" mark="cross" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" width="127" height="762" />
<texture type="2d" name="texplane" builtin="checker" rgb1="0 0 0" rgb2="0.8 0.8 0.8" width="100" height="100" />
<material name="MatPlane" texture="texplane" texrepeat="60 60" specular="1" shininess="1" reflectance="0.5" />
<material name="geom" texture="texgeom" texuniform="true" />
</asset>
<worldbody>
<geom name="floor" size="20 20 0.125" type="plane" condim="3" material="MatPlane" rgba="0.8 0.9 0.8 1" />
<light pos="0 0 1.3" dir="0 0 -1" directional="true" cutoff="100" exponent="1" diffuse="1 1 1" specular="0.1 0.1 0.1" />
<body name="torso" pos="0 0 1.25">
<joint name="rootx" pos="0 0 -1.25" axis="1 0 0" type="slide" limited="false" armature="0" damping="0" />
<joint name="rootz" pos="0 0 0" axis="0 0 1" type="slide" ref="1.25" limited="false" armature="0" damping="0" />
<joint name="rooty" pos="0 0 0" axis="0 1 0" limited="false" type="hinge" armature="0" damping="0" />
<geom name="torso_geom" size="0.05 0.2" type="capsule" friction="0.9 0.005 0.0001" />
<body name="thigh" pos="0 0 -0.2">
<joint name="thigh_joint" pos="0 0 0" type="hinge" axis="0 -1 0" range="-2.61799 0" />
<geom name="thigh_geom" size="0.05 0.225" pos="0 0 -0.225" type="capsule" friction="0.9 0.005 0.0001" />
<body name="leg" pos="0 0 -0.7">
<joint name="leg_joint" pos="0 0 0.25" type="hinge" axis="0 -1 0" range="-2.61799 0" />
<geom name="leg_geom" size="0.04 0.25" type="capsule" friction="0.9 0.005 0.0001" />
<body name="foot" pos="0.0 0 -0.25">
<joint name="foot_joint" pos="0 0 0.0" type="hinge" axis="0 -1 0" range="-0.785398 0.785398" />
<geom name="foot_geom" size="0.06 0.195" pos="0.06 0 0.0" quat="0.707107 0 -0.707107 0" type="capsule" friction="2 0.005 0.0001" />
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<general joint="thigh_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
<general joint="leg_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
<general joint="foot_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
</actuator>
</mujoco>
NVlabs/DiffRL/envs/assets/half_cheetah.xml
<!-- Cheetah Model
The state space is populated with joints in the order that they are
defined in this file. The actuators also operate on joints.
State-Space (name/joint/parameter):
- rootx slider position (m)
- rootz slider position (m)
- rooty hinge angle (rad)
- bthigh hinge angle (rad)
- bshin hinge angle (rad)
- bfoot hinge angle (rad)
- fthigh hinge angle (rad)
- fshin hinge angle (rad)
- ffoot hinge angle (rad)
- rootx slider velocity (m/s)
- rootz slider velocity (m/s)
- rooty hinge angular velocity (rad/s)
- bthigh hinge angular velocity (rad/s)
- bshin hinge angular velocity (rad/s)
- bfoot hinge angular velocity (rad/s)
- fthigh hinge angular velocity (rad/s)
- fshin hinge angular velocity (rad/s)
- ffoot hinge angular velocity (rad/s)
Actuators (name/actuator/parameter):
- bthigh hinge torque (N m)
- bshin hinge torque (N m)
- bfoot hinge torque (N m)
- fthigh hinge torque (N m)
- fshin hinge torque (N m)
- ffoot hinge torque (N m)
-->
<mujoco model="cheetah">
<compiler angle="radian" coordinate="local" inertiafromgeom="true" settotalmass="14"/>
<default>
<joint armature=".1" damping=".01" limited="true" solimplimit="0 .8 .03" solreflimit=".02 1" stiffness="8"/>
<geom conaffinity="0" condim="3" contype="1" friction="0.8 .1 .1" rgba="0.8 0.6 .4 1" solimp="0.0 0.8 0.01" solref="0.02 1"/>
<motor ctrllimited="true" ctrlrange="-1 1"/>
</default>
<size nstack="300000" nuser_geom="1"/>
<option gravity="0 0 -9.81" timestep="0.01"/>
<worldbody>
<body name="torso" pos="0 0 0">
<joint armature="0" axis="1 0 0" damping="0" limited="false" name="ignorex" pos="0 0 0" stiffness="0" type="slide"/>
<joint armature="0" axis="0 0 1" damping="0" limited="false" name="ignorez" pos="0 0 0" stiffness="0" type="slide"/>
<joint armature="0" axis="0 1 0" damping="0" limited="false" name="ignorey" pos="0 0 0" stiffness="0" type="hinge"/>
<geom fromto="-.5 0 0 .5 0 0" name="torso" size="0.046" type="capsule"/>
<geom axisangle="0 1 0 .87" name="head" pos=".6 0 .1" size="0.046 .15" type="capsule"/>
<!-- <site name='tip' pos='.15 0 .11'/>-->
<body name="bthigh" pos="-.5 0 0">
<joint axis="0 1 0" damping="6" name="bthigh" pos="0 0 0" range="-.52 1.05" stiffness="240" type="hinge"/>
<geom axisangle="0 1 0 -3.8" name="bthigh" pos=".1 0 -.13" size="0.046 .145" type="capsule"/>
<body name="bshin" pos=".16 0 -.25">
<joint axis="0 1 0" damping="4.5" name="bshin" pos="0 0 0" range="-.785 .785" stiffness="180" type="hinge"/>
<geom axisangle="0 1 0 -2.03" name="bshin" pos="-.14 0 -.07" rgba="0.9 0.6 0.6 1" size="0.046 .15" type="capsule"/>
<body name="bfoot" pos="-.28 0 -.14">
<joint axis="0 1 0" damping="3" name="bfoot" pos="0 0 0" range="-.4 .785" stiffness="120" type="hinge"/>
<geom axisangle="0 1 0 -.27" name="bfoot" pos=".03 0 -.097" rgba="0.9 0.6 0.6 1" size="0.046 .094" type="capsule"/>
<inertial mass="10"/>
</body>
</body>
</body>
<body name="fthigh" pos=".5 0 0">
<joint axis="0 1 0" damping="4.5" name="fthigh" pos="0 0 0" range="-1.5 0.8" stiffness="180" type="hinge"/>
<geom axisangle="0 1 0 .52" name="fthigh" pos="-.07 0 -.12" size="0.046 .133" type="capsule"/>
<body name="fshin" pos="-.14 0 -.24">
<joint axis="0 1 0" damping="3" name="fshin" pos="0 0 0" range="-1.2 1.1" stiffness="120" type="hinge"/>
<geom axisangle="0 1 0 -.6" name="fshin" pos=".065 0 -.09" rgba="0.9 0.6 0.6 1" size="0.046 .106" type="capsule"/>
<body name="ffoot" pos=".13 0 -.18">
<joint axis="0 1 0" damping="1.5" name="ffoot" pos="0 0 0" range="-3.1 -0.3" stiffness="60" type="hinge"/>
<geom axisangle="0 1 0 -.6" name="ffoot" pos=".045 0 -.07" rgba="0.9 0.6 0.6 1" size="0.046 .07" type="capsule"/>
<inertial mass="10"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor gear="120" joint="bthigh" name="bthigh"/>
<motor gear="90" joint="bshin" name="bshin"/>
<motor gear="60" joint="bfoot" name="bfoot"/>
<motor gear="120" joint="fthigh" name="fthigh"/>
<motor gear="60" joint="fshin" name="fshin"/>
<motor gear="30" joint="ffoot" name="ffoot"/>
</actuator>
</mujoco>
NVlabs/DiffRL/envs/assets/ant.xml
<mujoco model="ant">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.001" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<worldbody>
<body name="torso" pos="0 0 0.75">
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="150"/>
</actuator>
</mujoco> | 4,043 | XML | 61.215384 | 125 | 0.550829 |
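ant.xml is standard MJCF with no extensions beyond the <custom><numeric> block, so it should load directly in the official mujoco Python bindings. A minimal sketch (not DiffRL's own loading path; the on-disk location and MuJoCo-version compatibility are assumed):

import mujoco
import numpy as np

model = mujoco.MjModel.from_xml_path("envs/assets/ant.xml")  # adjust to local path
data = mujoco.MjData(model)

# The <numeric name="init_qpos"> entry suggests an initial pose:
# 7 free-joint coordinates (position + quaternion) followed by 8 hinge angles.
init_qpos = np.array(model.numeric("init_qpos").data)
data.qpos[: init_qpos.shape[0]] = init_qpos
mujoco.mj_forward(model, data)

# Zero torque on all eight motors; 100 RK4 steps at timestep=0.01 is one second.
data.ctrl[:] = 0.0
for _ in range(100):
    mujoco.mj_step(model, data)
print("torso height after 1 s:", data.body("torso").xpos[2])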
NVlabs/DiffRL/envs/assets/snu/human.xml | <Skeleton name="Human">
<Node name="Pelvis" parent="None" >
<Body type="Box" mass="15.0" size="0.2083 0.1454 0.1294" contact="Off" color="0.6 0.6 1.5 1.0" obj="Pelvis.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Body>
<Joint type="Free" bvh="Character1_Hips">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Joint>
</Node>
<Node name="FemurR" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 -0.0175 -0.9997 -0.0172 -0.21 0.0172 -0.9998 " translation="-0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaR" parent="FemurR" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 0.0349 -0.9956 0.0871 0.0 -0.0872 -0.9962 " translation="-0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_RightLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusR" parent="TibiaR" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.3 0.3 1.5 1.0" obj="R_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 0.0199 -0.9994 0.0295 0.2079 -0.0247 -0.9778 " translation="-0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.08 0.0776 -0.0419"/>
</Joint>
</Node>
<Node name="FootThumbR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootThumb.obj">
<Transformation linear="0.9847 -0.0097 0.1739 -0.0129 -0.9998 0.0177 0.1737 -0.0196 -0.9846 " translation="-0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0781 0.0201 0.0692"/>
</Joint>
</Node>
<Node name="FootPinkyR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootPinky.obj">
<Transformation linear="0.9402 0.0126 0.3405 0.0083 -0.9999 0.0142 0.3407 -0.0105 -0.9401 " translation="-0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1227 0.0142 0.0494"/>
</Joint>
</Node>
<Node name="FemurL" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 0.0175 0.9997 0.0172 0.21 -0.0172 0.9998 " translation="0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaL" parent="FemurL" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 -0.0349 0.9956 -0.0871 -0.0 0.0872 0.9962 " translation="0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_LeftLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusL" parent="TibiaL" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.6 0.6 1.5 1.0" obj="L_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 -0.0199 0.9994 -0.0295 -0.2079 0.0247 0.9778 " translation="0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.08 0.0776 -0.0419 "/>
</Joint>
</Node>
<Node name="FootThumbL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootThumb.obj">
<Transformation linear="0.9402 0.0126 0.3405 -0.0083 0.9999 -0.0142 -0.3407 0.0105 0.9401 " translation="0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1215 0.0116 0.0494 "/>
</Joint>
</Node>
<Node name="FootPinkyL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootPinky.obj">
<Transformation linear="0.9847 -0.0097 0.1739 0.0129 0.9998 -0.0177 -0.1737 0.0196 0.9846 " translation="0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0756 0.0118 0.0676 "/>
</Joint>
</Node>
<Node name="Spine" parent="Pelvis" >
<Body type="Box" mass="5.0" size="0.1170 0.0976 0.0984" contact="Off" color="0.6 0.6 1.5 1.0" obj="Spine.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 " translation="0.0 1.1204 -0.0401 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.0675 -0.0434 "/>
</Joint>
</Node>
<Node name="Torso" parent="Spine" >
<Body type="Box" mass="10.0" size="0.1798 0.2181 0.1337" contact="Off" color="0.6 0.6 1.5 1.0" obj="Torso.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 -0.0092 0.0 0.0092 1.0 " translation="0.0 1.3032 -0.0398 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine1" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.1761 -0.0498 "/>
</Joint>
</Node>
<Node name="Neck" parent="Torso" >
<Body type="Box" mass="2.0" size="0.0793 0.0728 0.0652" contact="Off" color="0.6 0.6 1.5 1.0" obj="Neck.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9732 -0.2301 0.0 0.2301 0.9732 " translation="0.0 1.5297 -0.0250 "/>
</Body>
<Joint type="Ball" bvh="Character1_Neck" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.4844 -0.0436 "/>
</Joint>
</Node>
<Node name="Head" parent="Neck" endeffector="True">
<Body type="Box" mass="2.0" size="0.1129 0.1144 0.1166" contact="Off" color="0.6 0.6 1.5 1.0" obj="Skull.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9895 -0.1447 0.0 0.1447 0.9895 " translation="0.0 1.6527 -0.0123 "/>
</Body>
<Joint type="Ball" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.5652 -0.0086 "/>
</Joint>
</Node>
<Node name="ShoulderR" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 -0.0047 -1.0 -0.0011 0.0549 0.0008 -0.9985 " translation="-0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0147 1.4535 -0.0381 "/>
</Joint>
</Node>
<Node name="ArmR" parent="ShoulderR" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 -0.0669 -0.2971 -0.952500 -0.0585 0.9542 -0.2936 " translation="-0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1995 1.4350 -0.0353 "/>
</Joint>
</Node>
<Node name="ForeArmR" parent="ArmR" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 -0.0517 -0.3492 -0.9356 -0.1069 0.9334 -0.3424 " translation="-0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_RightForeArm" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.5234 1.4607 -0.0105 "/>
</Joint>
</Node>
<Node name="HandR" parent="ForeArmR" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 0.2243 -0.9540 -0.1990 -0.0806 0.1853 -0.9794 " translation="-0.8810 1.4647 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.8102 1.469 0.0194 "/>
</Joint>
</Node>
<Node name="ShoulderL" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 0.0047 1.0000 0.0011 -0.0549 -0.0008 0.9985 " translation="0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0147 1.4535 -0.0381"/>
</Joint>
</Node>
<Node name="ArmL" parent="ShoulderL" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 0.0669 0.2971 0.9525 0.0585 -0.9542 0.2936 " translation="0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1995 1.4350 -0.0353"/>
</Joint>
</Node>
<Node name="ForeArmL" parent="ArmL" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 0.0517 0.3492 0.9356 0.1069 -0.9334 0.3424 " translation="0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_LeftForeArm" lower="-2.3" upper="0.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.5234 1.4607 -0.0105"/>
</Joint>
</Node>
<Node name="HandL" parent="ForeArmL" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 -0.2243 0.9540 0.1990 0.0806 -0.1853 0.9794 " translation="0.8813 1.4640 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.8102 1.4694 0.0194"/>
</Joint>
</Node>
</Skeleton> | 12,775 | XML | 65.541666 | 148 | 0.570176 |
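The SNU skeleton above is not MJCF; it is a custom <Skeleton>/<Node> schema in which each node names its parent, a box <Body> with mass and size, and a typed <Joint> (Free, Ball, Revolute) whose <Transformation> gives a row-major 3x3 rotation plus a reference-pose translation. A small ElementTree sketch for walking the tree and summing segment masses (path hypothetical; masses presumably in kilograms):

import xml.etree.ElementTree as ET
from collections import defaultdict

root = ET.parse("envs/assets/snu/human.xml").getroot()  # hypothetical path

children = defaultdict(list)
total_mass = 0.0
for node in root.findall("Node"):
    children[node.get("parent")].append(node.get("name"))
    total_mass += float(node.find("Body").get("mass"))

def show(name, depth=0):
    # Recursively print the kinematic tree using the parent -> children map.
    print("  " * depth + name)
    for child in children[name]:
        show(child, depth + 1)

show(children["None"][0])  # the root node declares parent="None"
print(f"total body mass: {total_mass:.1f}")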
NVlabs/DiffRL/envs/assets/snu/arm.xml | <Skeleton name="Human">
<Node name="Pelvis" parent="None" >
<Body type="Box" mass="15.0" size="0.2083 0.1454 0.1294" contact="Off" color="0.6 0.6 1.5 1.0" obj="Pelvis.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Body>
<Joint type="Free" bvh="Character1_Hips">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Joint>
</Node>
<Node name="FemurR" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 -0.0175 -0.9997 -0.0172 -0.21 0.0172 -0.9998 " translation="-0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaR" parent="FemurR" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 0.0349 -0.9956 0.0871 0.0 -0.0872 -0.9962 " translation="-0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_RightLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusR" parent="TibiaR" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.3 0.3 1.5 1.0" obj="R_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 0.0199 -0.9994 0.0295 0.2079 -0.0247 -0.9778 " translation="-0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.08 0.0776 -0.0419"/>
</Joint>
</Node>
<Node name="FootThumbR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootThumb.obj">
<Transformation linear="0.9847 -0.0097 0.1739 -0.0129 -0.9998 0.0177 0.1737 -0.0196 -0.9846 " translation="-0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0781 0.0201 0.0692"/>
</Joint>
</Node>
<Node name="FootPinkyR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootPinky.obj">
<Transformation linear="0.9402 0.0126 0.3405 0.0083 -0.9999 0.0142 0.3407 -0.0105 -0.9401 " translation="-0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1227 0.0142 0.0494"/>
</Joint>
</Node>
<Node name="FemurL" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 0.0175 0.9997 0.0172 0.21 -0.0172 0.9998 " translation="0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaL" parent="FemurL" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 -0.0349 0.9956 -0.0871 -0.0 0.0872 0.9962 " translation="0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_LeftLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusL" parent="TibiaL" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.6 0.6 1.5 1.0" obj="L_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 -0.0199 0.9994 -0.0295 -0.2079 0.0247 0.9778 " translation="0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.08 0.0776 -0.0419 "/>
</Joint>
</Node>
<Node name="FootThumbL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootThumb.obj">
<Transformation linear="0.9402 0.0126 0.3405 -0.0083 0.9999 -0.0142 -0.3407 0.0105 0.9401 " translation="0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1215 0.0116 0.0494 "/>
</Joint>
</Node>
<Node name="FootPinkyL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootPinky.obj">
<Transformation linear="0.9847 -0.0097 0.1739 0.0129 0.9998 -0.0177 -0.1737 0.0196 0.9846 " translation="0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0756 0.0118 0.0676 "/>
</Joint>
</Node>
<Node name="Spine" parent="Pelvis" >
<Body type="Box" mass="5.0" size="0.1170 0.0976 0.0984" contact="Off" color="0.6 0.6 1.5 1.0" obj="Spine.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 " translation="0.0 1.1204 -0.0401 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.0675 -0.0434 "/>
</Joint>
</Node>
<Node name="Torso" parent="Spine" >
<Body type="Box" mass="10.0" size="0.1798 0.2181 0.1337" contact="Off" color="0.6 0.6 1.5 1.0" obj="Torso.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 -0.0092 0.0 0.0092 1.0 " translation="0.0 1.3032 -0.0398 "/>
</Body>
<Joint type="Fixed" bvh="Character1_Spine1" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.1761 -0.0498 "/>
</Joint>
</Node>
<Node name="Neck" parent="Torso" >
<Body type="Box" mass="2.0" size="0.0793 0.0728 0.0652" contact="Off" color="0.6 0.6 1.5 1.0" obj="Neck.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9732 -0.2301 0.0 0.2301 0.9732 " translation="0.0 1.5297 -0.0250 "/>
</Body>
<Joint type="Ball" bvh="Character1_Neck" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.4844 -0.0436 "/>
</Joint>
</Node>
<Node name="Head" parent="Neck" endeffector="True">
<Body type="Box" mass="2.0" size="0.1129 0.1144 0.1166" contact="Off" color="0.6 0.6 1.5 1.0" obj="Skull.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9895 -0.1447 0.0 0.1447 0.9895 " translation="0.0 1.6527 -0.0123 "/>
</Body>
<Joint type="Ball" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.5652 -0.0086 "/>
</Joint>
</Node>
<Node name="ShoulderR" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 -0.0047 -1.0 -0.0011 0.0549 0.0008 -0.9985 " translation="-0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0147 1.4535 -0.0381 "/>
</Joint>
</Node>
<Node name="ArmR" parent="ShoulderR" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 -0.0669 -0.2971 -0.952500 -0.0585 0.9542 -0.2936 " translation="-0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1995 1.4350 -0.0353 "/>
</Joint>
</Node>
<Node name="ForeArmR" parent="ArmR" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 -0.0517 -0.3492 -0.9356 -0.1069 0.9334 -0.3424 " translation="-0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_RightForeArm" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.5234 1.4607 -0.0105 "/>
</Joint>
</Node>
<Node name="HandR" parent="ForeArmR" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 0.2243 -0.9540 -0.1990 -0.0806 0.1853 -0.9794 " translation="-0.8810 1.4647 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.8102 1.469 0.0194 "/>
</Joint>
</Node>
<Node name="ShoulderL" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 0.0047 1.0000 0.0011 -0.0549 -0.0008 0.9985 " translation="0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0147 1.4535 -0.0381"/>
</Joint>
</Node>
<Node name="ArmL" parent="ShoulderL" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 0.0669 0.2971 0.9525 0.0585 -0.9542 0.2936 " translation="0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1995 1.4350 -0.0353"/>
</Joint>
</Node>
<Node name="ForeArmL" parent="ArmL" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 0.0517 0.3492 0.9356 0.1069 -0.9334 0.3424 " translation="0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_LeftForeArm" lower="-2.3" upper="0.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.5234 1.4607 -0.0105"/>
</Joint>
</Node>
<Node name="HandL" parent="ForeArmL" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 -0.2243 0.9540 0.1990 0.0806 -0.1853 0.9794 " translation="0.8813 1.4640 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.8102 1.4694 0.0194"/>
</Joint>
</Node>
</Skeleton> | 12,782 | XML | 63.560606 | 148 | 0.569942 |
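arm.xml is a near-copy of human.xml; the one difference visible in the two files above is the Torso joint, declared type="Ball" in human.xml but type="Fixed" here, which immobilizes the trunk for arm-centric experiments. A quick programmatic diff of the two skeletons' joint types (paths hypothetical):

import xml.etree.ElementTree as ET

def joint_types(path):
    root = ET.parse(path).getroot()
    return {n.get("name"): n.find("Joint").get("type") for n in root.findall("Node")}

human = joint_types("envs/assets/snu/human.xml")  # hypothetical paths
arm = joint_types("envs/assets/snu/arm.xml")
for name, jtype in human.items():
    if jtype != arm.get(name):
        print(f"{name}: human={jtype} arm={arm.get(name)}")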
NVlabs/DiffRL/envs/assets/snu/muscle284.xml | <Muscle>
<Unit name="L_Abductor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.629400 1.471000 -0.014000 " />
<Waypoint body="ForeArmL" p="0.732300 1.488400 0.018000 " />
<Waypoint body="ForeArmL" p="0.786300 1.491600 0.024800 " />
<Waypoint body="HandL" p="0.822700 1.472900 0.061900 " />
</Unit>
<Unit name="R_Abductor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.629400 1.471000 -0.014000 " />
<Waypoint body="ForeArmR" p="-0.732300 1.488400 0.018000 " />
<Waypoint body="ForeArmR" p="-0.786300 1.491600 0.024800 " />
<Waypoint body="HandR" p="-0.822700 1.472900 0.061900 " />
</Unit>
<Unit name="L_Adductor_Brevis" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.031900 0.919600 0.041600 " />
<Waypoint body="FemurL" p="0.083100 0.833800 0.004900 " />
<Waypoint body="FemurL" p="0.110400 0.826200 -0.008400 " />
</Unit>
<Unit name="R_Adductor_Brevis" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.031900 0.919600 0.041600 " />
<Waypoint body="FemurR" p="-0.083100 0.833800 0.004900 " />
<Waypoint body="FemurR" p="-0.110400 0.826200 -0.008400 " />
</Unit>
<Unit name="L_Adductor_Brevis1" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.014100 0.911600 0.042700 " />
<Waypoint body="FemurL" p="0.076700 0.756500 -0.000700 " />
<Waypoint body="FemurL" p="0.104000 0.730500 0.002500 " />
</Unit>
<Unit name="R_Adductor_Brevis1" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.014100 0.911600 0.042700 " />
<Waypoint body="FemurR" p="-0.076700 0.756500 -0.000700 " />
<Waypoint body="FemurR" p="-0.104000 0.730500 0.002500 " />
</Unit>
<Unit name="L_Adductor_Longus" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.030200 0.921600 0.042700 " />
<Waypoint body="FemurL" p="0.100300 0.738600 0.002700 " />
<Waypoint body="FemurL" p="0.109600 0.701000 0.001400 " />
</Unit>
<Unit name="R_Adductor_Longus" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.030200 0.921600 0.042700 " />
<Waypoint body="FemurR" p="-0.100300 0.738600 0.002700 " />
<Waypoint body="FemurR" p="-0.109600 0.701000 0.001400 " />
</Unit>
<Unit name="L_Adductor_Longus1" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.014000 0.914800 0.048900 " />
<Waypoint body="FemurL" p="0.050500 0.729800 0.005100 " />
<Waypoint body="FemurL" p="0.099100 0.634300 0.001400 " />
</Unit>
<Unit name="R_Adductor_Longus1" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.014000 0.914800 0.048900 " />
<Waypoint body="FemurR" p="-0.050500 0.729800 0.005100 " />
<Waypoint body="FemurR" p="-0.099100 0.634300 0.001400 " />
</Unit>
<Unit name="L_Adductor_Magnus" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.022300 0.891300 0.013400 " />
<Waypoint body="FemurL" p="0.106400 0.837500 -0.017200 " />
<Waypoint body="FemurL" p="0.133800 0.833900 -0.017600 " />
</Unit>
<Unit name="R_Adductor_Magnus" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.022300 0.891300 0.013400 " />
<Waypoint body="FemurR" p="-0.106400 0.837500 -0.017200 " />
<Waypoint body="FemurR" p="-0.133800 0.833900 -0.017600 " />
</Unit>
<Unit name="L_Adductor_Magnus1" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.023500 0.881300 0.013000 " />
<Waypoint body="FemurL" p="0.097700 0.800600 -0.023300 " />
<Waypoint body="FemurL" p="0.124400 0.759600 -0.002000 " />
</Unit>
<Unit name="R_Adductor_Magnus1" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.023500 0.881300 0.013000 " />
<Waypoint body="FemurR" p="-0.097700 0.800600 -0.023300 " />
<Waypoint body="FemurR" p="-0.124400 0.759600 -0.002000 " />
</Unit>
<Unit name="L_Adductor_Magnus2" f0="259.380000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.035600 0.870400 -0.025800 " />
<Waypoint body="FemurL" p="0.069900 0.809100 -0.024200 " />
<Waypoint body="FemurL" p="0.102600 0.745100 -0.024800 " />
<Waypoint body="FemurL" p="0.116600 0.719600 0.001200 " />
</Unit>
<Unit name="R_Adductor_Magnus2" f0="259.380000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.035600 0.870400 -0.025800 " />
<Waypoint body="FemurR" p="-0.069900 0.809100 -0.024200 " />
<Waypoint body="FemurR" p="-0.102600 0.745100 -0.024800 " />
<Waypoint body="FemurR" p="-0.116600 0.719600 0.001200 " />
</Unit>
<Unit name="L_Adductor_Magnus3" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.047500 0.869700 -0.043600 " />
<Waypoint body="FemurL" p="0.074400 0.781900 -0.034000 " />
<Waypoint body="FemurL" p="0.102400 0.704000 -0.022500 " />
<Waypoint body="FemurL" p="0.105400 0.641800 -0.002200 " />
</Unit>
<Unit name="R_Adductor_Magnus3" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.047500 0.869700 -0.043600 " />
<Waypoint body="FemurR" p="-0.074400 0.781900 -0.034000 " />
<Waypoint body="FemurR" p="-0.102400 0.704000 -0.022500 " />
<Waypoint body="FemurR" p="-0.105400 0.641800 -0.002200 " />
</Unit>
<Unit name="L_Adductor_Magnus4" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068700 0.877200 -0.056000 " />
<Waypoint body="Pelvis" p="0.063000 0.844300 -0.048200 " />
<Waypoint body="FemurL" p="0.063700 0.641200 -0.031400 " />
<Waypoint body="FemurL" p="0.065300 0.555500 -0.028900 " />
</Unit>
<Unit name="R_Adductor_Magnus4" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068700 0.877200 -0.056000 " />
<Waypoint body="Pelvis" p="-0.063000 0.844300 -0.048200 " />
<Waypoint body="FemurR" p="-0.063700 0.641200 -0.031400 " />
<Waypoint body="FemurR" p="-0.065300 0.555500 -0.028900 " />
</Unit>
<Unit name="L_Anconeous" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.506400 1.482400 -0.009500 " />
<Waypoint body="ForeArmL" p="0.537100 1.479700 -0.026300 " />
<Waypoint body="ForeArmL" p="0.571200 1.468800 -0.029500 " />
</Unit>
<Unit name="R_Anconeous" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.506400 1.482400 -0.009500 " />
<Waypoint body="ForeArmR" p="-0.537100 1.479700 -0.026300 " />
<Waypoint body="ForeArmR" p="-0.571200 1.468800 -0.029500 " />
</Unit>
<Unit name="L_Bicep_Brachii_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.169300 1.443700 -0.036900 " />
<Waypoint body="ArmL" p="0.177900 1.421700 -0.033000 " />
<Waypoint body="ArmL" p="0.181000 1.432000 -0.018300 " />
<Waypoint body="ArmL" p="0.191100 1.434300 -0.008400 " />
<Waypoint body="ArmL" p="0.214500 1.434800 -0.007100 " />
<Waypoint body="ArmL" p="0.259100 1.434100 -0.002400 " />
<Waypoint body="ForeArmL" p="0.529000 1.448300 0.025000 " />
<Waypoint body="ForeArmL" p="0.583200 1.462500 0.001900 " />
</Unit>
<Unit name="R_Bicep_Brachii_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.169300 1.443700 -0.036900 " />
<Waypoint body="ArmR" p="-0.177900 1.421700 -0.033000 " />
<Waypoint body="ArmR" p="-0.181000 1.432000 -0.018300 " />
<Waypoint body="ArmR" p="-0.191100 1.434300 -0.008400 " />
<Waypoint body="ArmR" p="-0.214500 1.434800 -0.007100 " />
<Waypoint body="ArmR" p="-0.259100 1.434100 -0.002400 " />
<Waypoint body="ForeArmR" p="-0.529000 1.448300 0.025000 " />
<Waypoint body="ForeArmR" p="-0.583200 1.462500 0.001900 " />
</Unit>
<Unit name="L_Bicep_Brachii_Short_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.168400 1.434700 -0.007400 " />
<Waypoint body="ArmL" p="0.252000 1.411300 -0.007700 " />
<Waypoint body="ArmL" p="0.489000 1.425300 0.023400 " />
<Waypoint body="ForeArmL" p="0.585400 1.461400 -0.001300 " />
</Unit>
<Unit name="R_Bicep_Brachii_Short_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.168400 1.434700 -0.007400 " />
<Waypoint body="ArmR" p="-0.252000 1.411300 -0.007700 " />
<Waypoint body="ArmR" p="-0.489000 1.425300 0.023400 " />
<Waypoint body="ForeArmR" p="-0.585400 1.461400 -0.001300 " />
</Unit>
<Unit name="L_Bicep_Femoris_Longus" f0="705.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.070900 0.900200 -0.063600 " />
<Waypoint body="FemurL" p="0.096500 0.854800 -0.046300 " />
<Waypoint body="FemurL" p="0.139900 0.574300 -0.029200 " />
<Waypoint body="FemurL" p="0.144100 0.541600 -0.032800 " />
<Waypoint body="TibiaL" p="0.138200 0.488800 -0.038800 " />
</Unit>
<Unit name="R_Bicep_Femoris_Longus" f0="705.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.070900 0.900200 -0.063600 " />
<Waypoint body="FemurR" p="-0.096500 0.854800 -0.046300 " />
<Waypoint body="FemurR" p="-0.139900 0.574300 -0.029200 " />
<Waypoint body="FemurR" p="-0.144100 0.541600 -0.032800 " />
<Waypoint body="TibiaR" p="-0.138200 0.488800 -0.038800 " />
</Unit>
<Unit name="L_Bicep_Femoris_Short" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118200 0.729800 0.000200 " />
<Waypoint body="FemurL" p="0.143500 0.545000 -0.029700 " />
<Waypoint body="TibiaL" p="0.139800 0.489100 -0.034100 " />
</Unit>
<Unit name="R_Bicep_Femoris_Short" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118200 0.729800 0.000200 " />
<Waypoint body="FemurR" p="-0.143500 0.545000 -0.029700 " />
<Waypoint body="TibiaR" p="-0.139800 0.489100 -0.034100 " />
</Unit>
<Unit name="L_Bicep_Femoris_Short1" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.111800 0.618400 0.001900 " />
<Waypoint body="FemurL" p="0.141600 0.532000 -0.019900 " />
<Waypoint body="TibiaL" p="0.137900 0.488500 -0.030700 " />
</Unit>
<Unit name="R_Bicep_Femoris_Short1" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.111800 0.618400 0.001900 " />
<Waypoint body="FemurR" p="-0.141600 0.532000 -0.019900 " />
<Waypoint body="TibiaR" p="-0.137900 0.488500 -0.030700 " />
</Unit>
<Unit name="L_Brachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.332100 1.460400 -0.019000 " />
<Waypoint body="ArmL" p="0.350000 1.471800 -0.008100 " />
<Waypoint body="ArmL" p="0.496300 1.460600 0.017500 " />
<Waypoint body="ForeArmL" p="0.557200 1.461900 -0.011000 " />
</Unit>
<Unit name="R_Brachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.332100 1.460400 -0.019000 " />
<Waypoint body="ArmR" p="-0.350000 1.471800 -0.008100 " />
<Waypoint body="ArmR" p="-0.496300 1.460600 0.017500 " />
<Waypoint body="ForeArmR" p="-0.557200 1.461900 -0.011000 " />
</Unit>
<Unit name="L_Brachioradialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.442800 1.465200 -0.020900 " />
<Waypoint body="ArmL" p="0.465100 1.490300 -0.008200 " />
<Waypoint body="ArmL" p="0.499700 1.478900 0.025100 " />
<Waypoint body="ForeArmL" p="0.561800 1.460900 0.037700 " />
<Waypoint body="ForeArmL" p="0.708600 1.474300 0.036200 " />
<Waypoint body="ForeArmL" p="0.786700 1.488000 0.030200 " />
</Unit>
<Unit name="R_Brachioradialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.442800 1.465200 -0.020900 " />
<Waypoint body="ArmR" p="-0.465100 1.490300 -0.008200 " />
<Waypoint body="ArmR" p="-0.499700 1.478900 0.025100 " />
<Waypoint body="ForeArmR" p="-0.561800 1.460900 0.037700 " />
<Waypoint body="ForeArmR" p="-0.708600 1.474300 0.036200 " />
<Waypoint body="ForeArmR" p="-0.786700 1.488000 0.030200 " />
</Unit>
<Unit name="L_Coracobrachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.168100 1.432600 -0.008300 " />
<Waypoint body="ArmL" p="0.228900 1.407100 -0.019200 " />
<Waypoint body="ArmL" p="0.312100 1.429100 -0.019400 " />
<Waypoint body="ArmL" p="0.338600 1.441800 -0.016700 " />
</Unit>
<Unit name="R_Coracobrachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.168100 1.432600 -0.008300 " />
<Waypoint body="ArmR" p="-0.228900 1.407100 -0.019200 " />
<Waypoint body="ArmR" p="-0.312100 1.429100 -0.019400 " />
<Waypoint body="ArmR" p="-0.338600 1.441800 -0.016700 " />
</Unit>
<Unit name="L_Deltoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.143200 1.466200 -0.019000 " />
<Waypoint body="ShoulderL" p="0.160700 1.447600 0.001500 " />
<Waypoint body="ArmL" p="0.221300 1.411900 0.013700 " />
<Waypoint body="ArmL" p="0.268700 1.443100 0.014100 " />
<Waypoint body="ArmL" p="0.299600 1.446200 -0.010700 " />
</Unit>
<Unit name="R_Deltoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.143200 1.466200 -0.019000 " />
<Waypoint body="ShoulderR" p="-0.160700 1.447600 0.001500 " />
<Waypoint body="ArmR" p="-0.221300 1.411900 0.013700 " />
<Waypoint body="ArmR" p="-0.268700 1.443100 0.014100 " />
<Waypoint body="ArmR" p="-0.299600 1.446200 -0.010700 " />
</Unit>
<Unit name="L_Deltoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.197700 1.465900 -0.025700 " />
<Waypoint body="ArmL" p="0.186600 1.450500 -0.008600 " />
<Waypoint body="ArmL" p="0.227700 1.467700 0.006400 " />
<Waypoint body="ArmL" p="0.278600 1.469800 0.007400 " />
<Waypoint body="ArmL" p="0.318300 1.452900 -0.008100 " />
</Unit>
<Unit name="R_Deltoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.197700 1.465900 -0.025700 " />
<Waypoint body="ArmR" p="-0.186600 1.450500 -0.008600 " />
<Waypoint body="ArmR" p="-0.227700 1.467700 0.006400 " />
<Waypoint body="ArmR" p="-0.278600 1.469800 0.007400 " />
<Waypoint body="ArmR" p="-0.318300 1.452900 -0.008100 " />
</Unit>
<Unit name="L_Deltoid2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.203700 1.459300 -0.052000 " />
<Waypoint body="ShoulderL" p="0.193300 1.466900 -0.038600 " />
<Waypoint body="ArmL" p="0.236700 1.485800 -0.026200 " />
<Waypoint body="ArmL" p="0.295100 1.477600 -0.016200 " />
<Waypoint body="ArmL" p="0.324100 1.456900 -0.011200 " />
</Unit>
<Unit name="R_Deltoid2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.203700 1.459300 -0.052000 " />
<Waypoint body="ShoulderR" p="-0.193300 1.466900 -0.038600 " />
<Waypoint body="ArmR" p="-0.236700 1.485800 -0.026200 " />
<Waypoint body="ArmR" p="-0.295100 1.477600 -0.016200 " />
<Waypoint body="ArmR" p="-0.324100 1.456900 -0.011200 " />
</Unit>
<Unit name="L_Extensor_Carpi_Radialis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.478900 1.470500 -0.017300 " />
<Waypoint body="ArmL" p="0.501100 1.489700 -0.001000 " />
<Waypoint body="ForeArmL" p="0.552500 1.490000 0.029900 " />
<Waypoint body="ForeArmL" p="0.720600 1.483000 0.027900 " />
<Waypoint body="ForeArmL" p="0.782100 1.488200 0.013300 " />
<Waypoint body="HandL" p="0.829300 1.485400 0.038500 " />
</Unit>
<Unit name="R_Extensor_Carpi_Radialis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.478900 1.470500 -0.017300 " />
<Waypoint body="ArmR" p="-0.501100 1.489700 -0.001000 " />
<Waypoint body="ForeArmR" p="-0.552500 1.490000 0.029900 " />
<Waypoint body="ForeArmR" p="-0.720600 1.483000 0.027900 " />
<Waypoint body="ForeArmR" p="-0.782100 1.488200 0.013300 " />
<Waypoint body="HandR" p="-0.829300 1.485400 0.038500 " />
</Unit>
<Unit name="L_Extensor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.518600 1.483100 -0.006700 " />
<Waypoint body="ForeArmL" p="0.559300 1.490700 -0.017100 " />
<Waypoint body="ForeArmL" p="0.652300 1.470700 -0.029700 " />
<Waypoint body="ForeArmL" p="0.785500 1.449400 0.000900 " />
<Waypoint body="HandL" p="0.825500 1.477700 0.001000 " />
</Unit>
<Unit name="R_Extensor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.518600 1.483100 -0.006700 " />
<Waypoint body="ForeArmR" p="-0.559300 1.490700 -0.017100 " />
<Waypoint body="ForeArmR" p="-0.652300 1.470700 -0.029700 " />
<Waypoint body="ForeArmR" p="-0.785500 1.449400 0.000900 " />
<Waypoint body="HandR" p="-0.825500 1.477700 0.001000 " />
</Unit>
<Unit name="L_Extensor_Digiti_Minimi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.520400 1.483700 -0.005400 " />
<Waypoint body="ForeArmL" p="0.548300 1.490000 -0.007600 " />
<Waypoint body="ForeArmL" p="0.783200 1.463200 -0.003600 " />
<Waypoint body="HandL" p="0.821600 1.482100 0.001400 " />
<Waypoint body="HandL" p="0.884700 1.462100 -0.005200 " />
<Waypoint body="HandL" p="0.927800 1.443100 -0.002500 " />
</Unit>
<Unit name="R_Extensor_Digiti_Minimi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.520400 1.483700 -0.005400 " />
<Waypoint body="ForeArmR" p="-0.548300 1.490000 -0.007600 " />
<Waypoint body="ForeArmR" p="-0.783200 1.463200 -0.003600 " />
<Waypoint body="HandR" p="-0.821600 1.482100 0.001400 " />
<Waypoint body="HandR" p="-0.884700 1.462100 -0.005200 " />
<Waypoint body="HandR" p="-0.927800 1.443100 -0.002500 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.123300 0.482800 -0.012800 " />
<Waypoint body="TibiaL" p="0.124900 0.447400 -0.025500 " />
<Waypoint body="TibiaL" p="0.094400 0.112800 -0.025500 " />
<Waypoint body="TalusL" p="0.091900 0.084400 -0.015300 " />
<Waypoint body="TalusL" p="0.090000 0.027700 0.067600 " />
<Waypoint body="FootThumbL" p="0.092000 0.021200 0.096100 " />
<Waypoint body="FootThumbL" p="0.093800 0.013000 0.112100 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.123300 0.482800 -0.012800 " />
<Waypoint body="TibiaR" p="-0.124900 0.447400 -0.025500 " />
<Waypoint body="TibiaR" p="-0.094400 0.112800 -0.025500 " />
<Waypoint body="TalusR" p="-0.091900 0.084400 -0.015300 " />
<Waypoint body="TalusR" p="-0.090000 0.027700 0.067600 " />
<Waypoint body="FootThumbR" p="-0.092000 0.021200 0.096100 " />
<Waypoint body="FootThumbR" p="-0.093800 0.013000 0.112100 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus1" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.128600 0.491900 -0.010000 " />
<Waypoint body="TibiaL" p="0.133600 0.407000 -0.020000 " />
<Waypoint body="TibiaL" p="0.097300 0.113900 -0.023900 " />
<Waypoint body="TalusL" p="0.098400 0.080700 -0.011500 " />
<Waypoint body="TalusL" p="0.104700 0.024500 0.061600 " />
<Waypoint body="FootPinkyL" p="0.107400 0.019500 0.079600 " />
<Waypoint body="FootPinkyL" p="0.112000 0.010600 0.103200 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus1" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.128600 0.491900 -0.010000 " />
<Waypoint body="TibiaR" p="-0.133600 0.407000 -0.020000 " />
<Waypoint body="TibiaR" p="-0.097300 0.113900 -0.023900 " />
<Waypoint body="TalusR" p="-0.098400 0.080700 -0.011500 " />
<Waypoint body="TalusR" p="-0.104700 0.024500 0.061600 " />
<Waypoint body="FootPinkyR" p="-0.107400 0.019500 0.079600 " />
<Waypoint body="FootPinkyR" p="-0.112000 0.010600 0.103200 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus2" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.127100 0.488400 -0.009500 " />
<Waypoint body="TibiaL" p="0.140800 0.406700 -0.014400 " />
<Waypoint body="TibiaL" p="0.098500 0.113700 -0.024500 " />
<Waypoint body="TalusL" p="0.101300 0.077500 -0.010600 " />
<Waypoint body="FootPinkyL" p="0.118000 0.026000 0.054300 " />
<Waypoint body="FootPinkyL" p="0.121400 0.022400 0.068700 " />
<Waypoint body="FootPinkyL" p="0.125200 0.012900 0.084600 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus2" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.127100 0.488400 -0.009500 " />
<Waypoint body="TibiaR" p="-0.140800 0.406700 -0.014400 " />
<Waypoint body="TibiaR" p="-0.098500 0.113700 -0.024500 " />
<Waypoint body="TalusR" p="-0.101300 0.077500 -0.010600 " />
<Waypoint body="FootPinkyR" p="-0.118000 0.026000 0.054300 " />
<Waypoint body="FootPinkyR" p="-0.121400 0.022400 0.068700 " />
<Waypoint body="FootPinkyR" p="-0.125200 0.012900 0.084600 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus3" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.130000 0.493100 -0.011700 " />
<Waypoint body="TibiaL" p="0.131500 0.407000 -0.033100 " />
<Waypoint body="TibiaL" p="0.103700 0.082400 -0.017500 " />
<Waypoint body="TalusL" p="0.114200 0.059400 0.000900 " />
<Waypoint body="TalusL" p="0.130700 0.028300 0.039500 " />
<Waypoint body="FootPinkyL" p="0.137100 0.009300 0.074500 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus3" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.130000 0.493100 -0.011700 " />
<Waypoint body="TibiaR" p="-0.131500 0.407000 -0.033100 " />
<Waypoint body="TibiaR" p="-0.103700 0.082400 -0.017500 " />
<Waypoint body="TalusR" p="-0.114200 0.059400 0.000900 " />
<Waypoint body="TalusR" p="-0.130700 0.028300 0.039500 " />
<Waypoint body="FootPinkyR" p="-0.137100 0.009300 0.074500 " />
</Unit>
<Unit name="L_Extensor_Digitorum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.519300 1.487900 -0.001600 " />
<Waypoint body="ForeArmL" p="0.745800 1.482600 0.005500 " />
<Waypoint body="ForeArmL" p="0.782100 1.478400 0.002300 " />
<Waypoint body="HandL" p="0.824700 1.491700 0.026300 " />
<Waypoint body="HandL" p="0.895700 1.481400 0.034000 " />
<Waypoint body="HandL" p="0.960600 1.441800 0.044200 " />
</Unit>
<Unit name="R_Extensor_Digitorum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.519300 1.487900 -0.001600 " />
<Waypoint body="ForeArmR" p="-0.745800 1.482600 0.005500 " />
<Waypoint body="ForeArmR" p="-0.782100 1.478400 0.002300 " />
<Waypoint body="HandR" p="-0.824700 1.491700 0.026300 " />
<Waypoint body="HandR" p="-0.895700 1.481400 0.034000 " />
<Waypoint body="HandR" p="-0.960600 1.441800 0.044200 " />
</Unit>
<Unit name="L_Extensor_Hallucis_Longus" f0="165.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.115100 0.380600 -0.028300 " />
<Waypoint body="TibiaL" p="0.097000 0.119900 -0.023000 " />
<Waypoint body="TalusL" p="0.083400 0.082500 -0.015100 " />
<Waypoint body="TalusL" p="0.072400 0.063500 0.027400 " />
<Waypoint body="TalusL" p="0.065600 0.031800 0.071700 " />
<Waypoint body="FootThumbL" p="0.060600 0.012900 0.112800 " />
</Unit>
<Unit name="R_Extensor_Hallucis_Longus" f0="165.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.115100 0.380600 -0.028300 " />
<Waypoint body="TibiaR" p="-0.097000 0.119900 -0.023000 " />
<Waypoint body="TalusR" p="-0.083400 0.082500 -0.015100 " />
<Waypoint body="TalusR" p="-0.072400 0.063500 0.027400 " />
<Waypoint body="TalusR" p="-0.065600 0.031800 0.071700 " />
<Waypoint body="FootThumbR" p="-0.060600 0.012900 0.112800 " />
</Unit>
<Unit name="L_Extensor_Pollicis_Brevis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.700700 1.470500 0.008700 " />
<Waypoint body="ForeArmL" p="0.791900 1.490900 0.019900 " />
<Waypoint body="HandL" p="0.816700 1.482000 0.054200 " />
<Waypoint body="HandL" p="0.855900 1.457500 0.079600 " />
</Unit>
<Unit name="R_Extensor_Pollicis_Brevis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.700700 1.470500 0.008700 " />
<Waypoint body="ForeArmR" p="-0.791900 1.490900 0.019900 " />
<Waypoint body="HandR" p="-0.816700 1.482000 0.054200 " />
<Waypoint body="HandR" p="-0.855900 1.457500 0.079600 " />
</Unit>
<Unit name="L_Extensor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.671800 1.469500 -0.007300 " />
<Waypoint body="ForeArmL" p="0.770900 1.479600 0.005500 " />
<Waypoint body="HandL" p="0.815100 1.490300 0.039500 " />
<Waypoint body="HandL" p="0.847400 1.466000 0.075500 " />
<Waypoint body="HandL" p="0.877000 1.446000 0.087800 " />
</Unit>
<Unit name="R_Extensor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.671800 1.469500 -0.007300 " />
<Waypoint body="ForeArmR" p="-0.770900 1.479600 0.005500 " />
<Waypoint body="HandR" p="-0.815100 1.490300 0.039500 " />
<Waypoint body="HandR" p="-0.847400 1.466000 0.075500 " />
<Waypoint body="HandR" p="-0.877000 1.446000 0.087800 " />
</Unit>
<Unit name="L_Flexor_Carpi_Radialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.518400 1.426200 -0.016200 " />
<Waypoint body="ForeArmL" p="0.741200 1.458600 0.027000 " />
<Waypoint body="ForeArmL" p="0.784600 1.465300 0.028700 " />
<Waypoint body="HandL" p="0.832400 1.474100 0.039100 " />
</Unit>
<Unit name="R_Flexor_Carpi_Radialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.518400 1.426200 -0.016200 " />
<Waypoint body="ForeArmR" p="-0.741200 1.458600 0.027000 " />
<Waypoint body="ForeArmR" p="-0.784600 1.465300 0.028700 " />
<Waypoint body="HandR" p="-0.832400 1.474100 0.039100 " />
</Unit>
<Unit name="L_Flexor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.525500 1.425600 -0.022000 " />
<Waypoint body="ForeArmL" p="0.581900 1.436100 -0.034700 " />
<Waypoint body="ForeArmL" p="0.759400 1.450100 0.006800 " />
<Waypoint body="HandL" p="0.805300 1.467100 0.009900 " />
</Unit>
<Unit name="R_Flexor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.525500 1.425600 -0.022000 " />
<Waypoint body="ForeArmR" p="-0.581900 1.436100 -0.034700 " />
<Waypoint body="ForeArmR" p="-0.759400 1.450100 0.006800 " />
<Waypoint body="HandR" p="-0.805300 1.467100 0.009900 " />
</Unit>
<Unit name="L_Flexor_Digiti_Minimi_Brevis_Foot" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FootPinkyL" p="0.136400 0.011200 0.049600 " />
<Waypoint body="TalusL" p="0.120100 0.023600 -0.009200 " />
</Unit>
<Unit name="R_Flexor_Digiti_Minimi_Brevis_Foot" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FootPinkyR" p="-0.136400 0.011200 0.049600 " />
<Waypoint body="TalusR" p="-0.120100 0.023600 -0.009200 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaL" p="0.062700 0.111200 -0.055500 " />
<Waypoint body="TalusL" p="0.063700 0.040400 -0.022200 " />
<Waypoint body="TalusL" p="0.083100 0.032200 -0.001400 " />
<Waypoint body="TalusL" p="0.086700 0.009400 0.059100 " />
<Waypoint body="FootThumbL" p="0.092700 0.008800 0.108400 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaR" p="-0.062700 0.111200 -0.055500 " />
<Waypoint body="TalusR" p="-0.063700 0.040400 -0.022200 " />
<Waypoint body="TalusR" p="-0.083100 0.032200 -0.001400 " />
<Waypoint body="TalusR" p="-0.086700 0.009400 0.059100 " />
<Waypoint body="FootThumbR" p="-0.092700 0.008800 0.108400 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus1" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaL" p="0.065700 0.111000 -0.056200 " />
<Waypoint body="TalusL" p="0.064900 0.040300 -0.023900 " />
<Waypoint body="TalusL" p="0.085000 0.031700 -0.008900 " />
<Waypoint body="TalusL" p="0.101600 0.007000 0.053000 " />
<Waypoint body="FootPinkyL" p="0.110200 0.009200 0.099700 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus1" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaR" p="-0.065700 0.111000 -0.056200 " />
<Waypoint body="TalusR" p="-0.064900 0.040300 -0.023900 " />
<Waypoint body="TalusR" p="-0.085000 0.031700 -0.008900 " />
<Waypoint body="TalusR" p="-0.101600 0.007000 0.053000 " />
<Waypoint body="FootPinkyR" p="-0.110200 0.009200 0.099700 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus2" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089600 0.389300 -0.023100 " />
<Waypoint body="TibiaL" p="0.066600 0.115900 -0.056200 " />
<Waypoint body="TalusL" p="0.063400 0.043000 -0.025700 " />
<Waypoint body="TalusL" p="0.091200 0.030200 -0.006400 " />
<Waypoint body="TalusL" p="0.115100 0.008900 0.042100 " />
<Waypoint body="FootPinkyL" p="0.124100 0.009500 0.083100 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus2" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089600 0.389300 -0.023100 " />
<Waypoint body="TibiaR" p="-0.066600 0.115900 -0.056200 " />
<Waypoint body="TalusR" p="-0.063400 0.043000 -0.025700 " />
<Waypoint body="TalusR" p="-0.091200 0.030200 -0.006400 " />
<Waypoint body="TalusR" p="-0.115100 0.008900 0.042100 " />
<Waypoint body="FootPinkyR" p="-0.124100 0.009500 0.083100 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus3" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.083900 0.388100 -0.018800 " />
<Waypoint body="TibiaL" p="0.068200 0.120700 -0.056400 " />
<Waypoint body="TalusL" p="0.059800 0.051000 -0.027300 " />
<Waypoint body="TalusL" p="0.106800 0.026000 -0.001100 " />
<Waypoint body="TalusL" p="0.130900 0.008800 0.039000 " />
<Waypoint body="FootPinkyL" p="0.136400 0.007100 0.070500 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus3" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.083900 0.388100 -0.018800 " />
<Waypoint body="TibiaR" p="-0.068200 0.120700 -0.056400 " />
<Waypoint body="TalusR" p="-0.059800 0.051000 -0.027300 " />
<Waypoint body="TalusR" p="-0.106800 0.026000 -0.001100 " />
<Waypoint body="TalusR" p="-0.130900 0.008800 0.039000 " />
<Waypoint body="FootPinkyR" p="-0.136400 0.007100 0.070500 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Profundus2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.594200 1.465300 -0.009100 " />
<Waypoint body="ForeArmL" p="0.651800 1.456600 0.000400 " />
<Waypoint body="ForeArmL" p="0.783100 1.459500 0.023800 " />
<Waypoint body="HandL" p="0.828300 1.470900 0.028400 " />
<Waypoint body="HandL" p="0.955500 1.442100 0.043300 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Profundus2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.594200 1.465300 -0.009100 " />
<Waypoint body="ForeArmR" p="-0.651800 1.456600 0.000400 " />
<Waypoint body="ForeArmR" p="-0.783100 1.459500 0.023800 " />
<Waypoint body="HandR" p="-0.828300 1.470900 0.028400 " />
<Waypoint body="HandR" p="-0.955500 1.442100 0.043300 " />
</Unit>
<Unit name="L_Flexor_Hallucis" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaL" p="0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusL" p="0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusL" p="0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusL" p="0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbL" p="0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="R_Flexor_Hallucis" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaR" p="-0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusR" p="-0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusR" p="-0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusR" p="-0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbR" p="-0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="L_Flexor_Hallucis1" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaL" p="0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusL" p="0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusL" p="0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusL" p="0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbL" p="0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="R_Flexor_Hallucis1" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaR" p="-0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusR" p="-0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusR" p="-0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusR" p="-0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbR" p="-0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="L_Flexor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.677200 1.471300 0.022400 " />
<Waypoint body="ForeArmL" p="0.784600 1.465900 0.028100 " />
<Waypoint body="HandL" p="0.813900 1.469600 0.030800 " />
<Waypoint body="HandL" p="0.830500 1.466600 0.057100 " />
<Waypoint body="HandL" p="0.878900 1.445600 0.083700 " />
</Unit>
<Unit name="R_Flexor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.677200 1.471300 0.022400 " />
<Waypoint body="ForeArmR" p="-0.784600 1.465900 0.028100 " />
<Waypoint body="HandR" p="-0.813900 1.469600 0.030800 " />
<Waypoint body="HandR" p="-0.830500 1.466600 0.057100 " />
<Waypoint body="HandR" p="-0.878900 1.445600 0.083700 " />
</Unit>
<Unit name="L_Gastrocnemius_Lateral_Head" f0="606.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.126400 0.562000 -0.005900 " />
<Waypoint body="FemurL" p="0.121900 0.554700 -0.038300 " />
<Waypoint body="TibiaL" p="0.126200 0.505900 -0.066200 " />
<Waypoint body="TibiaL" p="0.112000 0.302400 -0.091700 " />
</Unit>
<Unit name="R_Gastrocnemius_Lateral_Head" f0="606.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.126400 0.562000 -0.005900 " />
<Waypoint body="FemurR" p="-0.121900 0.554700 -0.038300 " />
<Waypoint body="TibiaR" p="-0.126200 0.505900 -0.066200 " />
<Waypoint body="TibiaR" p="-0.112000 0.302400 -0.091700 " />
</Unit>
<Unit name="L_Gastrocnemius_Medial_Head" f0="1308.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.075000 0.567300 -0.014400 " />
<Waypoint body="FemurL" p="0.095200 0.550700 -0.046600 " />
<Waypoint body="TibiaL" p="0.092400 0.505800 -0.069100 " />
<Waypoint body="TibiaL" p="0.060300 0.273200 -0.059200 " />
</Unit>
<Unit name="R_Gastrocnemius_Medial_Head" f0="1308.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.075000 0.567300 -0.014400 " />
<Waypoint body="FemurR" p="-0.095200 0.550700 -0.046600 " />
<Waypoint body="TibiaR" p="-0.092400 0.505800 -0.069100 " />
<Waypoint body="TibiaR" p="-0.060300 0.273200 -0.059200 " />
</Unit>
<Unit name="L_Gluteus_Maximus" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.053900 1.035800 -0.096200 " />
<Waypoint body="Pelvis" p="0.111500 1.013300 -0.089300 " />
<Waypoint body="FemurL" p="0.153100 0.939700 -0.046600 " />
<Waypoint body="FemurL" p="0.148200 0.872600 -0.016900 " />
</Unit>
<Unit name="R_Gluteus_Maximus" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.053900 1.035800 -0.096200 " />
<Waypoint body="Pelvis" p="-0.111500 1.013300 -0.089300 " />
<Waypoint body="FemurR" p="-0.153100 0.939700 -0.046600 " />
<Waypoint body="FemurR" p="-0.148200 0.872600 -0.016900 " />
</Unit>
<Unit name="L_Gluteus_Maximus1" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.038200 0.988600 -0.099300 " />
<Waypoint body="Pelvis" p="0.103800 0.968800 -0.110800 " />
<Waypoint body="FemurL" p="0.155300 0.900100 -0.049300 " />
<Waypoint body="FemurL" p="0.141600 0.845900 -0.011300 " />
</Unit>
<Unit name="R_Gluteus_Maximus1" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.038200 0.988600 -0.099300 " />
<Waypoint body="Pelvis" p="-0.103800 0.968800 -0.110800 " />
<Waypoint body="FemurR" p="-0.155300 0.900100 -0.049300 " />
<Waypoint body="FemurR" p="-0.141600 0.845900 -0.011300 " />
</Unit>
<Unit name="L_Gluteus_Maximus2" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.029700 0.949800 -0.094300 " />
<Waypoint body="Pelvis" p="0.051700 0.942200 -0.120100 " />
<Waypoint body="Pelvis" p="0.122100 0.906900 -0.097000 " />
<Waypoint body="FemurL" p="0.149300 0.840100 -0.036100 " />
<Waypoint body="FemurL" p="0.134200 0.818200 -0.008900 " />
</Unit>
<Unit name="R_Gluteus_Maximus2" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.029700 0.949800 -0.094300 " />
<Waypoint body="Pelvis" p="-0.051700 0.942200 -0.120100 " />
<Waypoint body="Pelvis" p="-0.122100 0.906900 -0.097000 " />
<Waypoint body="FemurR" p="-0.149300 0.840100 -0.036100 " />
<Waypoint body="FemurR" p="-0.134200 0.818200 -0.008900 " />
</Unit>
<Unit name="L_Gluteus_Maximus3" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.035200 0.919200 -0.080700 " />
<Waypoint body="Pelvis" p="0.066500 0.880800 -0.111700 " />
<Waypoint body="FemurL" p="0.124400 0.851200 -0.076200 " />
<Waypoint body="FemurL" p="0.130200 0.789300 -0.001200 " />
</Unit>
<Unit name="R_Gluteus_Maximus3" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.035200 0.919200 -0.080700 " />
<Waypoint body="Pelvis" p="-0.066500 0.880800 -0.111700 " />
<Waypoint body="FemurR" p="-0.124400 0.851200 -0.076200 " />
<Waypoint body="FemurR" p="-0.130200 0.789300 -0.001200 " />
</Unit>
<Unit name="L_Gluteus_Maximus4" f0="370.520000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.045000 0.896000 -0.064800 " />
<Waypoint body="Pelvis" p="0.064500 0.848700 -0.073000 " />
<Waypoint body="FemurL" p="0.115600 0.809100 -0.040200 " />
<Waypoint body="FemurL" p="0.129100 0.772300 0.002800 " />
</Unit>
<Unit name="R_Gluteus_Maximus4" f0="370.520000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.045000 0.896000 -0.064800 " />
<Waypoint body="Pelvis" p="-0.064500 0.848700 -0.073000 " />
<Waypoint body="FemurR" p="-0.115600 0.809100 -0.040200 " />
<Waypoint body="FemurR" p="-0.129100 0.772300 0.002800 " />
</Unit>
<Unit name="L_Gluteus_Medius" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.129500 1.013800 0.028700 " />
<Waypoint body="FemurL" p="0.157200 0.945600 -0.005300 " />
<Waypoint body="FemurL" p="0.157400 0.923400 -0.006700 " />
</Unit>
<Unit name="R_Gluteus_Medius" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.129500 1.013800 0.028700 " />
<Waypoint body="FemurR" p="-0.157200 0.945600 -0.005300 " />
<Waypoint body="FemurR" p="-0.157400 0.923400 -0.006700 " />
</Unit>
<Unit name="L_Gluteus_Medius1" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.128200 1.067300 -0.029900 " />
<Waypoint body="FemurL" p="0.155500 0.950900 -0.026500 " />
<Waypoint body="FemurL" p="0.165600 0.891400 -0.008800 " />
</Unit>
<Unit name="R_Gluteus_Medius1" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.128200 1.067300 -0.029900 " />
<Waypoint body="FemurR" p="-0.155500 0.950900 -0.026500 " />
<Waypoint body="FemurR" p="-0.165600 0.891400 -0.008800 " />
</Unit>
<Unit name="L_Gluteus_Medius2" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.079200 1.064500 -0.069600 " />
<Waypoint body="Pelvis" p="0.122200 1.028400 -0.073600 " />
<Waypoint body="FemurL" p="0.159000 0.918600 -0.029900 " />
<Waypoint body="FemurL" p="0.159700 0.891200 -0.021000 " />
</Unit>
<Unit name="R_Gluteus_Medius2" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.079200 1.064500 -0.069600 " />
<Waypoint body="Pelvis" p="-0.122200 1.028400 -0.073600 " />
<Waypoint body="FemurR" p="-0.159000 0.918600 -0.029900 " />
<Waypoint body="FemurR" p="-0.159700 0.891200 -0.021000 " />
</Unit>
<Unit name="L_Gluteus_Medius3" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.061100 1.008700 -0.087500 " />
<Waypoint body="Pelvis" p="0.088300 0.988400 -0.082900 " />
<Waypoint body="FemurL" p="0.139700 0.936300 -0.048200 " />
<Waypoint body="FemurL" p="0.147400 0.899400 -0.033100 " />
</Unit>
<Unit name="R_Gluteus_Medius3" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.061100 1.008700 -0.087500 " />
<Waypoint body="Pelvis" p="-0.088300 0.988400 -0.082900 " />
<Waypoint body="FemurR" p="-0.139700 0.936300 -0.048200 " />
<Waypoint body="FemurR" p="-0.147400 0.899400 -0.033100 " />
</Unit>
<Unit name="L_Gluteus_Minimus" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068600 0.992600 -0.066800 " />
<Waypoint body="Pelvis" p="0.097800 0.971500 -0.059200 " />
<Waypoint body="FemurL" p="0.152300 0.932100 -0.011500 " />
<Waypoint body="FemurL" p="0.160700 0.905400 -0.004900 " />
</Unit>
<Unit name="R_Gluteus_Minimus" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068600 0.992600 -0.066800 " />
<Waypoint body="Pelvis" p="-0.097800 0.971500 -0.059200 " />
<Waypoint body="FemurR" p="-0.152300 0.932100 -0.011500 " />
<Waypoint body="FemurR" p="-0.160700 0.905400 -0.004900 " />
</Unit>
<Unit name="L_Gluteus_Minimus1" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.098200 1.046000 -0.041700 " />
<Waypoint body="Pelvis" p="0.125700 1.015900 -0.040000 " />
<Waypoint body="FemurL" p="0.156400 0.933100 -0.001700 " />
<Waypoint body="FemurL" p="0.158300 0.893000 0.002200 " />
</Unit>
<Unit name="R_Gluteus_Minimus1" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.098200 1.046000 -0.041700 " />
<Waypoint body="Pelvis" p="-0.125700 1.015900 -0.040000 " />
<Waypoint body="FemurR" p="-0.156400 0.933100 -0.001700 " />
<Waypoint body="FemurR" p="-0.158300 0.893000 0.002200 " />
</Unit>
<Unit name="L_Gluteus_Minimus2" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.133400 1.037300 0.009000 " />
<Waypoint body="FemurL" p="0.154800 0.933000 0.005900 " />
<Waypoint body="FemurL" p="0.151600 0.897400 0.004600 " />
</Unit>
<Unit name="R_Gluteus_Minimus2" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.133400 1.037300 0.009000 " />
<Waypoint body="FemurR" p="-0.154800 0.933000 0.005900 " />
<Waypoint body="FemurR" p="-0.151600 0.897400 0.004600 " />
</Unit>
<Unit name="L_Gracilis" f0="137.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.011300 0.903100 0.030700 " />
<Waypoint body="FemurL" p="0.048900 0.529700 -0.042600 " />
<Waypoint body="TibiaL" p="0.061600 0.479800 -0.021700 " />
<Waypoint body="TibiaL" p="0.077600 0.465700 -0.003300 " />
</Unit>
<Unit name="R_Gracilis" f0="137.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.011300 0.903100 0.030700 " />
<Waypoint body="FemurR" p="-0.048900 0.529700 -0.042600 " />
<Waypoint body="TibiaR" p="-0.061600 0.479800 -0.021700 " />
<Waypoint body="TibiaR" p="-0.077600 0.465700 -0.003300 " />
</Unit>
<Unit name="L_Inferior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.066200 0.885700 -0.062100 " />
<Waypoint body="FemurL" p="0.124300 0.908300 -0.046900 " />
<Waypoint body="FemurL" p="0.135700 0.908900 -0.033200 " />
</Unit>
<Unit name="R_Inferior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.066200 0.885700 -0.062100 " />
<Waypoint body="FemurR" p="-0.124300 0.908300 -0.046900 " />
<Waypoint body="FemurR" p="-0.135700 0.908900 -0.033200 " />
</Unit>
<Unit name="L_Infraspinatus1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.091600 1.368800 -0.127100 " />
<Waypoint body="ShoulderL" p="0.187000 1.423800 -0.075700 " />
<Waypoint body="ShoulderL" p="0.203800 1.458100 -0.046900 " />
<Waypoint body="ShoulderL" p="0.198000 1.461500 -0.027000 " />
</Unit>
<Unit name="R_Infraspinatus1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.091600 1.368800 -0.127100 " />
<Waypoint body="ShoulderR" p="-0.187000 1.423800 -0.075700 " />
<Waypoint body="ShoulderR" p="-0.203800 1.458100 -0.046900 " />
<Waypoint body="ShoulderR" p="-0.198000 1.461500 -0.027000 " />
</Unit>
<Unit name="L_Latissimus_Dorsi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.311400 -0.126600 " />
<Waypoint body="ShoulderL" p="0.115800 1.327800 -0.129300 " />
<Waypoint body="ShoulderL" p="0.152500 1.353400 -0.094600 " />
<Waypoint body="ArmL" p="0.244800 1.415400 -0.039800 " />
<Waypoint body="ArmL" p="0.224400 1.432000 -0.016800 " />
</Unit>
<Unit name="R_Latissimus_Dorsi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.311400 -0.126600 " />
<Waypoint body="ShoulderR" p="-0.115800 1.327800 -0.129300 " />
<Waypoint body="ShoulderR" p="-0.152500 1.353400 -0.094600 " />
<Waypoint body="ArmR" p="-0.244800 1.415400 -0.039800 " />
<Waypoint body="ArmR" p="-0.224400 1.432000 -0.016800 " />
</Unit>
<Unit name="L_Latissimus_Dorsi3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.000600 1.103500 -0.092000 " />
<Waypoint body="Torso" p="0.101200 1.233200 -0.119000 " />
<Waypoint body="Torso" p="0.153700 1.300700 -0.098800 " />
<Waypoint body="ArmL" p="0.279500 1.420700 -0.045900 " />
<Waypoint body="ArmL" p="0.264300 1.422600 -0.024800 " />
<Waypoint body="ArmL" p="0.250400 1.435600 -0.016200 " />
</Unit>
<Unit name="R_Latissimus_Dorsi3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.000600 1.103500 -0.092000 " />
<Waypoint body="Torso" p="-0.101200 1.233200 -0.119000 " />
<Waypoint body="Torso" p="-0.153700 1.300700 -0.098800 " />
<Waypoint body="ArmR" p="-0.279500 1.420700 -0.045900 " />
<Waypoint body="ArmR" p="-0.264300 1.422600 -0.024800 " />
<Waypoint body="ArmR" p="-0.250400 1.435600 -0.016200 " />
</Unit>
<Unit name="L_Latissimus_Dorsi5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.077400 1.063600 -0.076000 " />
<Waypoint body="Torso" p="0.117900 1.178400 -0.077500 " />
<Waypoint body="Torso" p="0.169200 1.298600 -0.060000 " />
<Waypoint body="ArmL" p="0.282700 1.416800 -0.032700 " />
<Waypoint body="ArmL" p="0.259200 1.435500 -0.017400 " />
</Unit>
<Unit name="R_Latissimus_Dorsi5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.077400 1.063600 -0.076000 " />
<Waypoint body="Torso" p="-0.117900 1.178400 -0.077500 " />
<Waypoint body="Torso" p="-0.169200 1.298600 -0.060000 " />
<Waypoint body="ArmR" p="-0.282700 1.416800 -0.032700 " />
<Waypoint body="ArmR" p="-0.259200 1.435500 -0.017400 " />
</Unit>
<Unit name="L_Longissimus_Capitis3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.026200 1.428700 -0.102300 " />
<Waypoint body="Torso" p="0.030500 1.500200 -0.074800 " />
<Waypoint body="Neck" p="0.031100 1.565300 -0.039000 " />
<Waypoint body="Head" p="0.057000 1.608800 -0.017300 " />
</Unit>
<Unit name="R_Longissimus_Capitis3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.026200 1.428700 -0.102300 " />
<Waypoint body="Torso" p="-0.030500 1.500200 -0.074800 " />
<Waypoint body="Neck" p="-0.031100 1.565300 -0.039000 " />
<Waypoint body="Head" p="-0.057000 1.608800 -0.017300 " />
</Unit>
<Unit name="L_Longissimus_Thoracis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.003600 0.898800 -0.072600 " />
<Waypoint body="Pelvis" p="0.020100 1.003800 -0.104000 " />
<Waypoint body="Spine" p="0.020800 1.092300 -0.080300 " />
<Waypoint body="Torso" p="0.029200 1.198600 -0.095400 " />
<Waypoint body="Torso" p="0.034500 1.274000 -0.119600 " />
<Waypoint body="Torso" p="0.036400 1.393700 -0.115200 " />
<Waypoint body="Torso" p="0.034300 1.454000 -0.093800 " />
<Waypoint body="Neck" p="0.032000 1.501100 -0.040400 " />
</Unit>
<Unit name="R_Longissimus_Thoracis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.003600 0.898800 -0.072600 " />
<Waypoint body="Pelvis" p="-0.020100 1.003800 -0.104000 " />
<Waypoint body="Spine" p="-0.020800 1.092300 -0.080300 " />
<Waypoint body="Torso" p="-0.029200 1.198600 -0.095400 " />
<Waypoint body="Torso" p="-0.034500 1.274000 -0.119600 " />
<Waypoint body="Torso" p="-0.036400 1.393700 -0.115200 " />
<Waypoint body="Torso" p="-0.034300 1.454000 -0.093800 " />
<Waypoint body="Neck" p="-0.032000 1.501100 -0.040400 " />
</Unit>
<Unit name="L_Longus_Capitis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.019100 1.526900 -0.012500 " />
<Waypoint body="Neck" p="0.010400 1.588100 0.011300 " />
<Waypoint body="Head" p="0.002100 1.622700 0.010300 " />
</Unit>
<Unit name="R_Longus_Capitis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.019100 1.526900 -0.012500 " />
<Waypoint body="Neck" p="-0.010400 1.588100 0.011300 " />
<Waypoint body="Head" p="-0.002100 1.622700 0.010300 " />
</Unit>
<Unit name="L_Multifidus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.009100 0.923600 -0.091700 " />
<Waypoint body="Pelvis" p="0.011100 0.974800 -0.110600 " />
<Waypoint body="Pelvis" p="0.011700 1.013300 -0.100100 " />
<Waypoint body="Spine" p="0.009300 1.107200 -0.077700 " />
<Waypoint body="Torso" p="0.005600 1.179500 -0.085200 " />
<Waypoint body="Torso" p="0.000500 1.284600 -0.120700 " />
</Unit>
<Unit name="R_Multifidus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.009100 0.923600 -0.091700 " />
<Waypoint body="Pelvis" p="-0.011100 0.974800 -0.110600 " />
<Waypoint body="Pelvis" p="-0.011700 1.013300 -0.100100 " />
<Waypoint body="Spine" p="-0.009300 1.107200 -0.077700 " />
<Waypoint body="Torso" p="-0.005600 1.179500 -0.085200 " />
<Waypoint body="Torso" p="-0.000500 1.284600 -0.120700 " />
</Unit>
<Unit name="L_Obturator_Externus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.021200 0.911400 0.024700 " />
<Waypoint body="Pelvis" p="0.068400 0.894500 -0.028500 " />
<Waypoint body="FemurL" p="0.138000 0.909800 -0.026500 " />
</Unit>
<Unit name="R_Obturator_Externus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.021200 0.911400 0.024700 " />
<Waypoint body="Pelvis" p="-0.068400 0.894500 -0.028500 " />
<Waypoint body="FemurR" p="-0.138000 0.909800 -0.026500 " />
</Unit>
<Unit name="L_Obturator_Internus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.018500 0.905800 0.013900 " />
<Waypoint body="Pelvis" p="0.051600 0.905300 -0.058800 " />
<Waypoint body="Pelvis" p="0.074000 0.904600 -0.070500 " />
<Waypoint body="FemurL" p="0.138600 0.914000 -0.030600 " />
</Unit>
<Unit name="R_Obturator_Internus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.018500 0.905800 0.013900 " />
<Waypoint body="Pelvis" p="-0.051600 0.905300 -0.058800 " />
<Waypoint body="Pelvis" p="-0.074000 0.904600 -0.070500 " />
<Waypoint body="FemurR" p="-0.138600 0.914000 -0.030600 " />
</Unit>
<Unit name="L_Omohyoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.125400 1.456800 -0.062000 " />
<Waypoint body="ShoulderL" p="0.111000 1.479500 -0.032300 " />
<Waypoint body="Torso" p="0.046600 1.491300 0.000000 " />
<Waypoint body="ShoulderL" p="0.018300 1.506200 0.025200 " />
<Waypoint body="Head" p="0.013200 1.560100 0.043100 " />
</Unit>
<Unit name="R_Omohyoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.125400 1.456800 -0.062000 " />
<Waypoint body="ShoulderR" p="-0.111000 1.479500 -0.032300 " />
<Waypoint body="Torso" p="-0.046600 1.491300 0.000000 " />
<Waypoint body="ShoulderR" p="-0.018300 1.506200 0.025200 " />
<Waypoint body="Head" p="-0.013200 1.560100 0.043100 " />
</Unit>
<Unit name="L_Palmaris_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.522100 1.424400 -0.018800 " />
<Waypoint body="ForeArmL" p="0.643800 1.433500 0.000000 " />
<Waypoint body="ForeArmL" p="0.784200 1.459100 0.025400 " />
<Waypoint body="HandL" p="0.886300 1.461800 0.033000 " />
</Unit>
<Unit name="R_Palmaris_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.522100 1.424400 -0.018800 " />
<Waypoint body="ForeArmR" p="-0.643800 1.433500 0.000000 " />
<Waypoint body="ForeArmR" p="-0.784200 1.459100 0.025400 " />
<Waypoint body="HandR" p="-0.886300 1.461800 0.033000 " />
</Unit>
<Unit name="L_Pectineus" f0="177.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.040400 0.927800 0.032700 " />
<Waypoint body="Pelvis" p="0.057200 0.917900 0.046900 " />
<Waypoint body="FemurL" p="0.101100 0.836800 -0.007700 " />
<Waypoint body="FemurL" p="0.112200 0.830300 -0.004200 " />
</Unit>
<Unit name="R_Pectineus" f0="177.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.040400 0.927800 0.032700 " />
<Waypoint body="Pelvis" p="-0.057200 0.917900 0.046900 " />
<Waypoint body="FemurR" p="-0.101100 0.836800 -0.007700 " />
<Waypoint body="FemurR" p="-0.112200 0.830300 -0.004200 " />
</Unit>
<Unit name="L_Pectoralis_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.054800 1.462800 0.020200 " />
<Waypoint body="Torso" p="0.102100 1.436100 0.043400 " />
<Waypoint body="Torso" p="0.151800 1.405700 0.027600 " />
<Waypoint body="ArmL" p="0.244900 1.401200 0.003200 " />
<Waypoint body="ArmL" p="0.274200 1.446800 -0.009800 " />
</Unit>
<Unit name="R_Pectoralis_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.054800 1.462800 0.020200 " />
<Waypoint body="Torso" p="-0.102100 1.436100 0.043400 " />
<Waypoint body="Torso" p="-0.151800 1.405700 0.027600 " />
<Waypoint body="ArmR" p="-0.244900 1.401200 0.003200 " />
<Waypoint body="ArmR" p="-0.274200 1.446800 -0.009800 " />
</Unit>
<Unit name="L_Pectoralis_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.004300 1.367700 0.077300 " />
<Waypoint body="Torso" p="0.076600 1.371900 0.084300 " />
<Waypoint body="Torso" p="0.146000 1.374200 0.050500 " />
<Waypoint body="ArmL" p="0.248300 1.409600 -0.002500 " />
<Waypoint body="ArmL" p="0.247700 1.443900 -0.011600 " />
</Unit>
<Unit name="R_Pectoralis_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.004300 1.367700 0.077300 " />
<Waypoint body="Torso" p="-0.076600 1.371900 0.084300 " />
<Waypoint body="Torso" p="-0.146000 1.374200 0.050500 " />
<Waypoint body="ArmR" p="-0.248300 1.409600 -0.002500 " />
<Waypoint body="ArmR" p="-0.247700 1.443900 -0.011600 " />
</Unit>
<Unit name="L_Pectoralis_Minor1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.085500 1.347400 0.079900 " />
<Waypoint body="Torso" p="0.114400 1.373600 0.059700 " />
<Waypoint body="ShoulderL" p="0.159200 1.448100 -0.017700 " />
</Unit>
<Unit name="R_Pectoralis_Minor1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.085500 1.347400 0.079900 " />
<Waypoint body="Torso" p="-0.114400 1.373600 0.059700 " />
<Waypoint body="ShoulderR" p="-0.159200 1.448100 -0.017700 " />
</Unit>
<Unit name="L_Peroneus_Brevis" f0="305.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.117900 0.283900 -0.044900 " />
<Waypoint body="TibiaL" p="0.112200 0.109700 -0.067800 " />
<Waypoint body="TalusL" p="0.101900 0.067700 -0.069000 " />
<Waypoint body="TalusL" p="0.116900 0.024300 -0.015100 " />
</Unit>
<Unit name="R_Peroneus_Brevis" f0="305.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.117900 0.283900 -0.044900 " />
<Waypoint body="TibiaR" p="-0.112200 0.109700 -0.067800 " />
<Waypoint body="TalusR" p="-0.101900 0.067700 -0.069000 " />
<Waypoint body="TalusR" p="-0.116900 0.024300 -0.015100 " />
</Unit>
<Unit name="L_Peroneus_Longus" f0="653.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.140500 0.479500 -0.026300 " />
<Waypoint body="TibiaL" p="0.152700 0.366000 -0.037900 " />
<Waypoint body="TibiaL" p="0.115600 0.103700 -0.063600 " />
<Waypoint body="TalusL" p="0.104900 0.059000 -0.068700 " />
<Waypoint body="TalusL" p="0.111800 0.039900 -0.041200 " />
<Waypoint body="TalusL" p="0.085000 0.037700 -0.011400 " />
<Waypoint body="TalusL" p="0.072100 0.036600 0.025000 " />
</Unit>
<Unit name="R_Peroneus_Longus" f0="653.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.140500 0.479500 -0.026300 " />
<Waypoint body="TibiaR" p="-0.152700 0.366000 -0.037900 " />
<Waypoint body="TibiaR" p="-0.115600 0.103700 -0.063600 " />
<Waypoint body="TalusR" p="-0.104900 0.059000 -0.068700 " />
<Waypoint body="TalusR" p="-0.111800 0.039900 -0.041200 " />
<Waypoint body="TalusR" p="-0.085000 0.037700 -0.011400 " />
<Waypoint body="TalusR" p="-0.072100 0.036600 0.025000 " />
</Unit>
<Unit name="L_Peroneus_Tertius" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaL" p="0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusL" p="0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="R_Peroneus_Tertius" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaR" p="-0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusR" p="-0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="L_Peroneus_Tertius1" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaL" p="0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusL" p="0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="R_Peroneus_Tertius1" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaR" p="-0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusR" p="-0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="L_Piriformis" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.031600 0.981400 -0.089800 " />
<Waypoint body="FemurL" p="0.137200 0.930900 -0.025700 " />
</Unit>
<Unit name="R_Piriformis" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.031600 0.981400 -0.089800 " />
<Waypoint body="FemurR" p="-0.137200 0.930900 -0.025700 " />
</Unit>
<Unit name="L_Piriformis1" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.016000 0.936300 -0.088100 " />
<Waypoint body="FemurL" p="0.139700 0.920500 -0.022800 " />
</Unit>
<Unit name="R_Piriformis1" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.016000 0.936300 -0.088100 " />
<Waypoint body="FemurR" p="-0.139700 0.920500 -0.022800 " />
</Unit>
<Unit name="L_Plantaris" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.119300 0.565800 -0.013200 " />
<Waypoint body="FemurL" p="0.111500 0.549400 -0.037000 " />
<Waypoint body="TibiaL" p="0.106800 0.498000 -0.049400 " />
<Waypoint body="TibiaL" p="0.073700 0.102800 -0.079600 " />
<Waypoint body="TalusL" p="0.075100 0.037300 -0.098000 " />
</Unit>
<Unit name="R_Plantaris" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.119300 0.565800 -0.013200 " />
<Waypoint body="FemurR" p="-0.111500 0.549400 -0.037000 " />
<Waypoint body="TibiaR" p="-0.106800 0.498000 -0.049400 " />
<Waypoint body="TibiaR" p="-0.073700 0.102800 -0.079600 " />
<Waypoint body="TalusR" p="-0.075100 0.037300 -0.098000 " />
</Unit>
<Unit name="L_Platysma1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.089300 1.451000 0.033000 " />
<Waypoint body="ShoulderL" p="0.047400 1.475800 0.018700 " />
<Waypoint body="Neck" p="0.030800 1.542500 0.022700 " />
<Waypoint body="Head" p="0.028400 1.555400 0.037200 " />
<Waypoint body="Head" p="0.033500 1.562100 0.068400 " />
</Unit>
<Unit name="R_Platysma1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.089300 1.451000 0.033000 " />
<Waypoint body="ShoulderR" p="-0.047400 1.475800 0.018700 " />
<Waypoint body="Neck" p="-0.030800 1.542500 0.022700 " />
<Waypoint body="Head" p="-0.028400 1.555400 0.037200 " />
<Waypoint body="Head" p="-0.033500 1.562100 0.068400 " />
</Unit>
<Unit name="L_Popliteus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.137300 0.540300 -0.012900 " />
<Waypoint body="FemurL" p="0.136300 0.526900 -0.033300 " />
<Waypoint body="TibiaL" p="0.116500 0.500900 -0.042900 " />
<Waypoint body="TibiaL" p="0.080500 0.455000 -0.018800 " />
</Unit>
<Unit name="R_Popliteus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.137300 0.540300 -0.012900 " />
<Waypoint body="FemurR" p="-0.136300 0.526900 -0.033300 " />
<Waypoint body="TibiaR" p="-0.116500 0.500900 -0.042900 " />
<Waypoint body="TibiaR" p="-0.080500 0.455000 -0.018800 " />
</Unit>
<Unit name="L_Psoas_Major" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.014600 1.222700 -0.048100 " />
<Waypoint body="Pelvis" p="0.092000 1.073400 -0.031100 " />
<Waypoint body="Pelvis" p="0.087100 0.931100 0.044900 " />
<Waypoint body="FemurL" p="0.094500 0.881500 0.001300 " />
<Waypoint body="FemurL" p="0.109600 0.850500 -0.015600 " />
</Unit>
<Unit name="R_Psoas_Major" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.014600 1.222700 -0.048100 " />
<Waypoint body="Pelvis" p="-0.092000 1.073400 -0.031100 " />
<Waypoint body="Pelvis" p="-0.087100 0.931100 0.044900 " />
<Waypoint body="FemurR" p="-0.094500 0.881500 0.001300 " />
<Waypoint body="FemurR" p="-0.109600 0.850500 -0.015600 " />
</Unit>
<Unit name="L_Psoas_Major1" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.021400 1.132400 -0.037200 " />
<Waypoint body="Pelvis" p="0.068300 1.033300 -0.020900 " />
<Waypoint body="Pelvis" p="0.074400 0.930400 0.043900 " />
<Waypoint body="FemurL" p="0.092400 0.877400 -0.007300 " />
<Waypoint body="FemurL" p="0.109800 0.856700 -0.009200 " />
</Unit>
<Unit name="R_Psoas_Major1" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.021400 1.132400 -0.037200 " />
<Waypoint body="Pelvis" p="-0.068300 1.033300 -0.020900 " />
<Waypoint body="Pelvis" p="-0.074400 0.930400 0.043900 " />
<Waypoint body="FemurR" p="-0.092400 0.877400 -0.007300 " />
<Waypoint body="FemurR" p="-0.109800 0.856700 -0.009200 " />
</Unit>
<Unit name="L_Psoas_Major2" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.018400 1.048500 -0.037400 " />
<Waypoint body="Pelvis" p="0.053600 1.010400 -0.032900 " />
<Waypoint body="Pelvis" p="0.068500 0.929500 0.036600 " />
<Waypoint body="FemurL" p="0.092400 0.879400 0.001500 " />
<Waypoint body="FemurL" p="0.108500 0.856300 -0.014800 " />
</Unit>
<Unit name="R_Psoas_Major2" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.018400 1.048500 -0.037400 " />
<Waypoint body="Pelvis" p="-0.053600 1.010400 -0.032900 " />
<Waypoint body="Pelvis" p="-0.068500 0.929500 0.036600 " />
<Waypoint body="FemurR" p="-0.092400 0.879400 0.001500 " />
<Waypoint body="FemurR" p="-0.108500 0.856300 -0.014800 " />
</Unit>
<Unit name="L_Psoas_Minor" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.011300 1.221400 -0.045600 " />
<Waypoint body="Spine" p="0.055300 1.120100 -0.011600 " />
<Waypoint body="Pelvis" p="0.063300 0.999200 -0.005400 " />
<Waypoint body="Pelvis" p="0.057800 0.938700 0.019800 " />
</Unit>
<Unit name="R_Psoas_Minor" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.011300 1.221400 -0.045600 " />
<Waypoint body="Spine" p="-0.055300 1.120100 -0.011600 " />
<Waypoint body="Pelvis" p="-0.063300 0.999200 -0.005400 " />
<Waypoint body="Pelvis" p="-0.057800 0.938700 0.019800 " />
</Unit>
<Unit name="L_Quadratus_Femoris" f0="254.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.085900 0.917600 -0.043300 " />
<Waypoint body="Pelvis" p="0.108700 0.897900 -0.049000 " />
<Waypoint body="FemurL" p="0.136100 0.879700 -0.028600 " />
</Unit>
<Unit name="R_Quadratus_Femoris" f0="254.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.085900 0.917600 -0.043300 " />
<Waypoint body="Pelvis" p="-0.108700 0.897900 -0.049000 " />
<Waypoint body="FemurR" p="-0.136100 0.879700 -0.028600 " />
</Unit>
<Unit name="L_Quadratus_Lumborum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.077300 1.068600 -0.069300 " />
<Waypoint body="Torso" p="0.047900 1.184700 -0.083700 " />
</Unit>
<Unit name="R_Quadratus_Lumborum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.077300 1.068600 -0.069300 " />
<Waypoint body="Torso" p="-0.047900 1.184700 -0.083700 " />
</Unit>
<Unit name="L_Rectus_Femoris" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.107500 0.980300 0.014400 " />
<Waypoint body="FemurL" p="0.116500 0.941600 0.031000 " />
<Waypoint body="FemurL" p="0.104500 0.602800 0.043200 " />
<Waypoint body="TibiaL" p="0.110800 0.542200 0.034900 " />
</Unit>
<Unit name="R_Rectus_Femoris" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.107500 0.980300 0.014400 " />
<Waypoint body="FemurR" p="-0.116500 0.941600 0.031000 " />
<Waypoint body="FemurR" p="-0.104500 0.602800 0.043200 " />
<Waypoint body="TibiaR" p="-0.110800 0.542200 0.034900 " />
</Unit>
<Unit name="L_Rectus_Femoris1" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.105900 0.973500 0.016500 " />
<Waypoint body="FemurL" p="0.106500 0.926300 0.031800 " />
<Waypoint body="FemurL" p="0.081600 0.606600 0.043100 " />
<Waypoint body="TibiaL" p="0.075700 0.539900 0.032000 " />
</Unit>
<Unit name="R_Rectus_Femoris1" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.105900 0.973500 0.016500 " />
<Waypoint body="FemurR" p="-0.106500 0.926300 0.031800 " />
<Waypoint body="FemurR" p="-0.081600 0.606600 0.043100 " />
<Waypoint body="TibiaR" p="-0.075700 0.539900 0.032000 " />
</Unit>
<Unit name="L_Rhomboid_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.426100 -0.119400 " />
<Waypoint body="Torso" p="0.040400 1.408500 -0.128600 " />
<Waypoint body="ShoulderL" p="0.086800 1.391200 -0.123400 " />
</Unit>
<Unit name="R_Rhomboid_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.426100 -0.119400 " />
<Waypoint body="Torso" p="-0.040400 1.408500 -0.128600 " />
<Waypoint body="ShoulderR" p="-0.086800 1.391200 -0.123400 " />
</Unit>
<Unit name="L_Rhomboid_Minor" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.507800 -0.087400 " />
<Waypoint body="Torso" p="0.022900 1.494400 -0.088100 " />
<Waypoint body="ShoulderL" p="0.090700 1.461100 -0.089900 " />
</Unit>
<Unit name="R_Rhomboid_Minor" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.507800 -0.087400 " />
<Waypoint body="Torso" p="-0.022900 1.494400 -0.088100 " />
<Waypoint body="ShoulderR" p="-0.090700 1.461100 -0.089900 " />
</Unit>
<Unit name="L_Sartorius" f0="113.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.124100 1.009800 0.031400 " />
<Waypoint body="FemurL" p="0.035200 0.707300 0.026000 " />
<Waypoint body="TibiaL" p="0.054400 0.496500 -0.022400 " />
<Waypoint body="TibiaL" p="0.090700 0.453900 0.009200 " />
</Unit>
<Unit name="R_Sartorius" f0="113.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.124100 1.009800 0.031400 " />
<Waypoint body="FemurR" p="-0.035200 0.707300 0.026000 " />
<Waypoint body="TibiaR" p="-0.054400 0.496500 -0.022400 " />
<Waypoint body="TibiaR" p="-0.090700 0.453900 0.009200 " />
</Unit>
<Unit name="L_Scalene_Anterior1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.058400 1.467000 -0.005800 " />
<Waypoint body="Neck" p="0.035000 1.504000 -0.009500 " />
<Waypoint body="Neck" p="0.018300 1.523600 -0.017300 " />
</Unit>
<Unit name="R_Scalene_Anterior1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.058400 1.467000 -0.005800 " />
<Waypoint body="Neck" p="-0.035000 1.504000 -0.009500 " />
<Waypoint body="Neck" p="-0.018300 1.523600 -0.017300 " />
</Unit>
<Unit name="L_Scalene_Middle4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.055600 1.481100 -0.034400 " />
<Waypoint body="Neck" p="0.039900 1.548400 -0.010800 " />
<Waypoint body="Neck" p="0.026700 1.571200 -0.006000 " />
</Unit>
<Unit name="R_Scalene_Middle4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.055600 1.481100 -0.034400 " />
<Waypoint body="Neck" p="-0.039900 1.548400 -0.010800 " />
<Waypoint body="Neck" p="-0.026700 1.571200 -0.006000 " />
</Unit>
<Unit name="L_Semimembranosus" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.075100 0.901700 -0.057400 " />
<Waypoint body="Pelvis" p="0.070100 0.846200 -0.039100 " />
<Waypoint body="FemurL" p="0.053400 0.544300 -0.049600 " />
<Waypoint body="TibiaL" p="0.056700 0.511900 -0.042000 " />
<Waypoint body="TibiaL" p="0.062100 0.490300 -0.029700 " />
</Unit>
<Unit name="R_Semimembranosus" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.075100 0.901700 -0.057400 " />
<Waypoint body="Pelvis" p="-0.070100 0.846200 -0.039100 " />
<Waypoint body="FemurR" p="-0.053400 0.544300 -0.049600 " />
<Waypoint body="TibiaR" p="-0.056700 0.511900 -0.042000 " />
<Waypoint body="TibiaR" p="-0.062100 0.490300 -0.029700 " />
</Unit>
<Unit name="L_Semimembranosus1" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.078400 0.905300 -0.053300 " />
<Waypoint body="FemurL" p="0.093700 0.862300 -0.034300 " />
<Waypoint body="FemurL" p="0.104400 0.560200 -0.047900 " />
<Waypoint body="FemurL" p="0.081200 0.527200 -0.056200 " />
<Waypoint body="TibiaL" p="0.082000 0.495000 -0.042200 " />
</Unit>
<Unit name="R_Semimembranosus1" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.078400 0.905300 -0.053300 " />
<Waypoint body="FemurR" p="-0.093700 0.862300 -0.034300 " />
<Waypoint body="FemurR" p="-0.104400 0.560200 -0.047900 " />
<Waypoint body="FemurR" p="-0.081200 0.527200 -0.056200 " />
<Waypoint body="TibiaR" p="-0.082000 0.495000 -0.042200 " />
</Unit>
<Unit name="L_Semispinalis_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.026000 1.431100 -0.100400 " />
<Waypoint body="Neck" p="0.014600 1.512500 -0.066300 " />
<Waypoint body="Neck" p="0.010900 1.566200 -0.054700 " />
<Waypoint body="Head" p="0.008700 1.614700 -0.069800 " />
</Unit>
<Unit name="R_Semispinalis_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.026000 1.431100 -0.100400 " />
<Waypoint body="Neck" p="-0.014600 1.512500 -0.066300 " />
<Waypoint body="Neck" p="-0.010900 1.566200 -0.054700 " />
<Waypoint body="Head" p="-0.008700 1.614700 -0.069800 " />
</Unit>
<Unit name="L_Semitendinosus" f0="301.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068000 0.894100 -0.065200 " />
<Waypoint body="Pelvis" p="0.088100 0.853300 -0.046300 " />
<Waypoint body="FemurL" p="0.085600 0.565300 -0.061100 " />
<Waypoint body="TibiaL" p="0.070400 0.494600 -0.047500 " />
<Waypoint body="TibiaL" p="0.065500 0.471600 -0.026400 " />
<Waypoint body="TibiaL" p="0.079800 0.448400 -0.003800 " />
</Unit>
<Unit name="R_Semitendinosus" f0="301.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068000 0.894100 -0.065200 " />
<Waypoint body="Pelvis" p="-0.088100 0.853300 -0.046300 " />
<Waypoint body="FemurR" p="-0.085600 0.565300 -0.061100 " />
<Waypoint body="TibiaR" p="-0.070400 0.494600 -0.047500 " />
<Waypoint body="TibiaR" p="-0.065500 0.471600 -0.026400 " />
<Waypoint body="TibiaR" p="-0.079800 0.448400 -0.003800 " />
</Unit>
<Unit name="L_Serratus_Anterior2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.090100 1.410200 -0.117700 " />
<Waypoint body="ShoulderL" p="0.104600 1.410000 -0.100000 " />
<Waypoint body="Torso" p="0.131200 1.404600 -0.043300 " />
<Waypoint body="Torso" p="0.120600 1.412000 -0.023900 " />
</Unit>
<Unit name="R_Serratus_Anterior2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.090100 1.410200 -0.117700 " />
<Waypoint body="ShoulderR" p="-0.104600 1.410000 -0.100000 " />
<Waypoint body="Torso" p="-0.131200 1.404600 -0.043300 " />
<Waypoint body="Torso" p="-0.120600 1.412000 -0.023900 " />
</Unit>
<Unit name="L_Serratus_Anterior4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.093900 1.348200 -0.128300 " />
<Waypoint body="Torso" p="0.115300 1.354700 -0.095600 " />
<Waypoint body="Torso" p="0.142600 1.328400 -0.011900 " />
<Waypoint body="Torso" p="0.126400 1.312800 0.047600 " />
</Unit>
<Unit name="R_Serratus_Anterior4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.093900 1.348200 -0.128300 " />
<Waypoint body="Torso" p="-0.115300 1.354700 -0.095600 " />
<Waypoint body="Torso" p="-0.142600 1.328400 -0.011900 " />
<Waypoint body="Torso" p="-0.126400 1.312800 0.047600 " />
</Unit>
<Unit name="L_Soleus" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.087500 0.468500 -0.023700 " />
<Waypoint body="TibiaL" p="0.087000 0.419000 -0.059200 " />
<Waypoint body="TibiaL" p="0.071100 0.150700 -0.060800 " />
<Waypoint body="TibiaL" p="0.073400 0.098300 -0.077500 " />
<Waypoint body="TalusL" p="0.072900 0.029900 -0.095200 " />
</Unit>
<Unit name="R_Soleus" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.087500 0.468500 -0.023700 " />
<Waypoint body="TibiaR" p="-0.087000 0.419000 -0.059200 " />
<Waypoint body="TibiaR" p="-0.071100 0.150700 -0.060800 " />
<Waypoint body="TibiaR" p="-0.073400 0.098300 -0.077500 " />
<Waypoint body="TalusR" p="-0.072900 0.029900 -0.095200 " />
</Unit>
<Unit name="L_Soleus1" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.136600 0.490900 -0.045500 " />
<Waypoint body="TibiaL" p="0.130800 0.393000 -0.075700 " />
<Waypoint body="TalusL" p="0.085300 0.086300 -0.085500 " />
<Waypoint body="TalusL" p="0.087600 0.029800 -0.098200 " />
</Unit>
<Unit name="R_Soleus1" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.136600 0.490900 -0.045500 " />
<Waypoint body="TibiaR" p="-0.130800 0.393000 -0.075700 " />
<Waypoint body="TalusR" p="-0.085300 0.086300 -0.085500 " />
<Waypoint body="TalusR" p="-0.087600 0.029800 -0.098200 " />
</Unit>
<Unit name="L_Splenius_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.502100 -0.086200 " />
<Waypoint body="Neck" p="0.022400 1.555700 -0.056700 " />
<Waypoint body="Head" p="0.039100 1.595500 -0.048600 " />
<Waypoint body="Head" p="0.060600 1.639000 -0.045600 " />
</Unit>
<Unit name="R_Splenius_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.502100 -0.086200 " />
<Waypoint body="Neck" p="-0.022400 1.555700 -0.056700 " />
<Waypoint body="Head" p="-0.039100 1.595500 -0.048600 " />
<Waypoint body="Head" p="-0.060600 1.639000 -0.045600 " />
</Unit>
<Unit name="L_Splenius_Cervicis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.406700 -0.120100 " />
<Waypoint body="Torso" p="0.035700 1.496300 -0.079500 " />
<Waypoint body="Neck" p="0.039800 1.546300 -0.039200 " />
<Waypoint body="Neck" p="0.037500 1.591800 -0.005600 " />
</Unit>
<Unit name="R_Splenius_Cervicis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.406700 -0.120100 " />
<Waypoint body="Torso" p="-0.035700 1.496300 -0.079500 " />
<Waypoint body="Neck" p="-0.039800 1.546300 -0.039200 " />
<Waypoint body="Neck" p="-0.037500 1.591800 -0.005600 " />
</Unit>
<Unit name="L_Splenius_Cervicis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.344600 -0.125800 " />
<Waypoint body="Torso" p="0.033000 1.415500 -0.112900 " />
<Waypoint body="Torso" p="0.048700 1.492700 -0.070600 " />
<Waypoint body="Neck" p="0.042000 1.540600 -0.028700 " />
<Waypoint body="Neck" p="0.027000 1.571000 -0.006300 " />
</Unit>
<Unit name="R_Splenius_Cervicis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.344600 -0.125800 " />
<Waypoint body="Torso" p="-0.033000 1.415500 -0.112900 " />
<Waypoint body="Torso" p="-0.048700 1.492700 -0.070600 " />
<Waypoint body="Neck" p="-0.042000 1.540600 -0.028700 " />
<Waypoint body="Neck" p="-0.027000 1.571000 -0.006300 " />
</Unit>
<Unit name="L_Sternocleidomastoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.056800 1.465000 0.017500 " />
<Waypoint body="Neck" p="0.054600 1.572800 -0.030900 " />
<Waypoint body="Head" p="0.049000 1.638500 -0.060800 " />
</Unit>
<Unit name="R_Sternocleidomastoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.056800 1.465000 0.017500 " />
<Waypoint body="Neck" p="-0.054600 1.572800 -0.030900 " />
<Waypoint body="Head" p="-0.049000 1.638500 -0.060800 " />
</Unit>
<Unit name="L_Subclavian" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.053000 1.448900 0.021900 " />
<Waypoint body="ShoulderL" p="0.136800 1.460600 -0.024200 " />
</Unit>
<Unit name="R_Subclavian" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.053000 1.448900 0.021900 " />
<Waypoint body="ShoulderR" p="-0.136800 1.460600 -0.024200 " />
</Unit>
<Unit name="L_Subscapularis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.094400 1.384800 -0.119400 " />
<Waypoint body="ShoulderL" p="0.153300 1.419200 -0.040900 " />
<Waypoint body="ArmL" p="0.203200 1.406600 -0.016300 " />
<Waypoint body="ArmL" p="0.201300 1.413300 -0.017700 " />
</Unit>
<Unit name="R_Subscapularis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.094400 1.384800 -0.119400 " />
<Waypoint body="ShoulderR" p="-0.153300 1.419200 -0.040900 " />
<Waypoint body="ArmR" p="-0.203200 1.406600 -0.016300 " />
<Waypoint body="ArmR" p="-0.201300 1.413300 -0.017700 " />
</Unit>
<Unit name="L_Superior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.061300 0.918700 -0.059200 " />
<Waypoint body="Pelvis" p="0.090400 0.922400 -0.061300 " />
<Waypoint body="FemurL" p="0.140200 0.921300 -0.024800 " />
</Unit>
<Unit name="R_Superior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.061300 0.918700 -0.059200 " />
<Waypoint body="Pelvis" p="-0.090400 0.922400 -0.061300 " />
<Waypoint body="FemurR" p="-0.140200 0.921300 -0.024800 " />
</Unit>
<Unit name="L_Supraspinatus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.093300 1.467600 -0.081400 " />
<Waypoint body="ShoulderL" p="0.169200 1.460100 -0.044700 " />
<Waypoint body="ArmL" p="0.177300 1.434600 -0.027700 " />
<Waypoint body="ArmL" p="0.182700 1.440100 -0.022100 " />
</Unit>
<Unit name="R_Supraspinatus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.093300 1.467600 -0.081400 " />
<Waypoint body="ShoulderR" p="-0.169200 1.460100 -0.044700 " />
<Waypoint body="ArmR" p="-0.177300 1.434600 -0.027700 " />
<Waypoint body="ArmR" p="-0.182700 1.440100 -0.022100 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.137300 1.062800 -0.023900 " />
<Waypoint body="FemurL" p="0.162800 0.923700 -0.024600 " />
<Waypoint body="FemurL" p="0.159900 0.811900 -0.004500 " />
<Waypoint body="FemurL" p="0.141700 0.555800 0.005600 " />
<Waypoint body="TibiaL" p="0.132200 0.482000 -0.007900 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.137300 1.062800 -0.023900 " />
<Waypoint body="FemurR" p="-0.162800 0.923700 -0.024600 " />
<Waypoint body="FemurR" p="-0.159900 0.811900 -0.004500 " />
<Waypoint body="FemurR" p="-0.141700 0.555800 0.005600 " />
<Waypoint body="TibiaR" p="-0.132200 0.482000 -0.007900 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata1" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.135400 1.030700 0.019200 " />
<Waypoint body="FemurL" p="0.115400 0.920800 0.055100 " />
<Waypoint body="FemurL" p="0.144300 0.607000 0.025000 " />
<Waypoint body="TibiaL" p="0.110600 0.542300 0.034200 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata1" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.135400 1.030700 0.019200 " />
<Waypoint body="FemurR" p="-0.115400 0.920800 0.055100 " />
<Waypoint body="FemurR" p="-0.144300 0.607000 0.025000 " />
<Waypoint body="TibiaR" p="-0.110600 0.542300 0.034200 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata2" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.142900 1.049200 0.003200 " />
<Waypoint body="FemurL" p="0.159000 0.917700 0.021900 " />
<Waypoint body="FemurL" p="0.134600 0.557100 0.015600 " />
<Waypoint body="TibiaL" p="0.121600 0.477400 0.004900 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata2" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.142900 1.049200 0.003200 " />
<Waypoint body="FemurR" p="-0.159000 0.917700 0.021900 " />
<Waypoint body="FemurR" p="-0.134600 0.557100 0.015600 " />
<Waypoint body="TibiaR" p="-0.121600 0.477400 0.004900 " />
</Unit>
<Unit name="L_Teres_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.100000 1.336900 -0.131100 " />
<Waypoint body="ShoulderL" p="0.159000 1.374300 -0.101300 " />
<Waypoint body="ArmL" p="0.250600 1.431000 -0.052200 " />
<Waypoint body="ArmL" p="0.243500 1.430100 -0.021200 " />
</Unit>
<Unit name="R_Teres_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.100000 1.336900 -0.131100 " />
<Waypoint body="ShoulderR" p="-0.159000 1.374300 -0.101300 " />
<Waypoint body="ArmR" p="-0.250600 1.431000 -0.052200 " />
<Waypoint body="ArmR" p="-0.243500 1.430100 -0.021200 " />
</Unit>
<Unit name="L_Tibialis_Anterior" f0="673.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.130300 0.488500 -0.010100 " />
<Waypoint body="TibiaL" p="0.072200 0.103100 -0.014000 " />
<Waypoint body="TalusL" p="0.055900 0.061300 -0.009100 " />
<Waypoint body="TalusL" p="0.066100 0.037300 0.024200 " />
</Unit>
<Unit name="R_Tibialis_Anterior" f0="673.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.130300 0.488500 -0.010100 " />
<Waypoint body="TibiaR" p="-0.072200 0.103100 -0.014000 " />
<Waypoint body="TalusR" p="-0.055900 0.061300 -0.009100 " />
<Waypoint body="TalusR" p="-0.066100 0.037300 0.024200 " />
</Unit>
<Unit name="L_Tibialis_Posterior" f0="905.600000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.104800 0.472500 -0.023600 " />
<Waypoint body="TibiaL" p="0.084700 0.137800 -0.050400 " />
<Waypoint body="TibiaL" p="0.053700 0.091000 -0.052300 " />
<Waypoint body="TalusL" p="0.059000 0.048800 -0.021300 " />
<Waypoint body="TalusL" p="0.089900 0.039200 0.010000 " />
</Unit>
<Unit name="R_Tibialis_Posterior" f0="905.600000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.104800 0.472500 -0.023600 " />
<Waypoint body="TibiaR" p="-0.084700 0.137800 -0.050400 " />
<Waypoint body="TibiaR" p="-0.053700 0.091000 -0.052300 " />
<Waypoint body="TalusR" p="-0.059000 0.048800 -0.021300 " />
<Waypoint body="TalusR" p="-0.089900 0.039200 0.010000 " />
</Unit>
<Unit name="L_Triceps_Lateral_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.243900 1.452400 -0.032900 " />
<Waypoint body="ArmL" p="0.319200 1.484700 -0.046100 " />
<Waypoint body="ArmL" p="0.488700 1.477900 -0.024200 " />
<Waypoint body="ForeArmL" p="0.523500 1.467000 -0.027000 " />
</Unit>
<Unit name="R_Triceps_Lateral_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.243900 1.452400 -0.032900 " />
<Waypoint body="ArmR" p="-0.319200 1.484700 -0.046100 " />
<Waypoint body="ArmR" p="-0.488700 1.477900 -0.024200 " />
<Waypoint body="ForeArmR" p="-0.523500 1.467000 -0.027000 " />
</Unit>
<Unit name="L_Triceps_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.174200 1.411500 -0.063300 " />
<Waypoint body="ArmL" p="0.256000 1.443300 -0.060300 " />
<Waypoint body="ArmL" p="0.341900 1.464700 -0.075600 " />
<Waypoint body="ArmL" p="0.475900 1.462800 -0.048200 " />
<Waypoint body="ForeArmL" p="0.517200 1.462700 -0.033400 " />
</Unit>
<Unit name="R_Triceps_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.174200 1.411500 -0.063300 " />
<Waypoint body="ArmR" p="-0.256000 1.443300 -0.060300 " />
<Waypoint body="ArmR" p="-0.341900 1.464700 -0.075600 " />
<Waypoint body="ArmR" p="-0.475900 1.462800 -0.048200 " />
<Waypoint body="ForeArmR" p="-0.517200 1.462700 -0.033400 " />
</Unit>
<Unit name="L_Triceps_Medial_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.292100 1.442600 -0.033800 " />
<Waypoint body="ArmL" p="0.435900 1.428200 -0.036500 " />
<Waypoint body="ForeArmL" p="0.518300 1.454400 -0.028300 " />
</Unit>
<Unit name="R_Triceps_Medial_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.292100 1.442600 -0.033800 " />
<Waypoint body="ArmR" p="-0.435900 1.428200 -0.036500 " />
<Waypoint body="ForeArmR" p="-0.518300 1.454400 -0.028300 " />
</Unit>
<Unit name="L_Vastus_Intermedius" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.114500 0.871100 0.000900 " />
<Waypoint body="FemurL" p="0.096900 0.811600 0.033600 " />
<Waypoint body="FemurL" p="0.082200 0.604300 0.036300 " />
<Waypoint body="TibiaL" p="0.079500 0.545000 0.026800 " />
</Unit>
<Unit name="R_Vastus_Intermedius" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.114500 0.871100 0.000900 " />
<Waypoint body="FemurR" p="-0.096900 0.811600 0.033600 " />
<Waypoint body="FemurR" p="-0.082200 0.604300 0.036300 " />
<Waypoint body="TibiaR" p="-0.079500 0.545000 0.026800 " />
</Unit>
<Unit name="L_Vastus_Intermedius1" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.124000 0.871400 0.002600 " />
<Waypoint body="FemurL" p="0.130600 0.790100 0.032900 " />
<Waypoint body="FemurL" p="0.118200 0.650700 0.036700 " />
<Waypoint body="TibiaL" p="0.095000 0.557500 0.032300 " />
</Unit>
<Unit name="R_Vastus_Intermedius1" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.124000 0.871400 0.002600 " />
<Waypoint body="FemurR" p="-0.130600 0.790100 0.032900 " />
<Waypoint body="FemurR" p="-0.118200 0.650700 0.036700 " />
<Waypoint body="TibiaR" p="-0.095000 0.557500 0.032300 " />
</Unit>
<Unit name="L_Vastus_Lateralis" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.148900 0.915000 0.001000 " />
<Waypoint body="FemurL" p="0.133900 0.882600 0.016900 " />
<Waypoint body="FemurL" p="0.105300 0.588000 0.039300 " />
<Waypoint body="TibiaL" p="0.088600 0.549500 0.035000 " />
</Unit>
<Unit name="R_Vastus_Lateralis" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.148900 0.915000 0.001000 " />
<Waypoint body="FemurR" p="-0.133900 0.882600 0.016900 " />
<Waypoint body="FemurR" p="-0.105300 0.588000 0.039300 " />
<Waypoint body="TibiaR" p="-0.088600 0.549500 0.035000 " />
</Unit>
<Unit name="L_Vastus_Lateralis1" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.159500 0.905200 -0.002200 " />
<Waypoint body="FemurL" p="0.153200 0.859200 -0.000000 " />
<Waypoint body="FemurL" p="0.136300 0.597400 0.008600 " />
<Waypoint body="TibiaL" p="0.102300 0.540500 0.035900 " />
</Unit>
<Unit name="R_Vastus_Lateralis1" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.159500 0.905200 -0.002200 " />
<Waypoint body="FemurR" p="-0.153200 0.859200 -0.000000 " />
<Waypoint body="FemurR" p="-0.136300 0.597400 0.008600 " />
<Waypoint body="TibiaR" p="-0.102300 0.540500 0.035900 " />
</Unit>
<Unit name="L_Vastus_Medialis" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118600 0.871200 0.002900 " />
<Waypoint body="FemurL" p="0.039000 0.680200 0.013200 " />
<Waypoint body="FemurL" p="0.043200 0.604100 0.004100 " />
<Waypoint body="TibiaL" p="0.074700 0.541400 0.024100 " />
</Unit>
<Unit name="R_Vastus_Medialis" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118600 0.871200 0.002900 " />
<Waypoint body="FemurR" p="-0.039000 0.680200 0.013200 " />
<Waypoint body="FemurR" p="-0.043200 0.604100 0.004100 " />
<Waypoint body="TibiaR" p="-0.074700 0.541400 0.024100 " />
</Unit>
<Unit name="L_Vastus_Medialis1" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.119100 0.867000 0.003500 " />
<Waypoint body="FemurL" p="0.080700 0.669600 0.048800 " />
<Waypoint body="TibiaL" p="0.087600 0.551300 0.035800 " />
</Unit>
<Unit name="R_Vastus_Medialis1" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.119100 0.867000 0.003500 " />
<Waypoint body="FemurR" p="-0.080700 0.669600 0.048800 " />
<Waypoint body="TibiaR" p="-0.087600 0.551300 0.035800 " />
</Unit>
<Unit name="L_Vastus_Medialis2" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118600 0.871200 0.002900 " />
<Waypoint body="FemurL" p="0.057800 0.647400 0.037200 " />
<Waypoint body="TibiaL" p="0.076800 0.546900 0.031200 " />
</Unit>
<Unit name="R_Vastus_Medialis2" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118600 0.871200 0.002900 " />
<Waypoint body="FemurR" p="-0.057800 0.647400 0.037200 " />
<Waypoint body="TibiaR" p="-0.076800 0.546900 0.031200 " />
</Unit>
<Unit name="L_iliacus" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068000 1.047100 -0.061100 " />
<Waypoint body="Pelvis" p="0.077600 0.942100 0.018000 " />
<Waypoint body="FemurL" p="0.094000 0.880500 -0.015000 " />
<Waypoint body="FemurL" p="0.111200 0.853400 -0.020900 " />
</Unit>
<Unit name="R_iliacus" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068000 1.047100 -0.061100 " />
<Waypoint body="Pelvis" p="-0.077600 0.942100 0.018000 " />
<Waypoint body="FemurR" p="-0.094000 0.880500 -0.015000 " />
<Waypoint body="FemurR" p="-0.111200 0.853400 -0.020900 " />
</Unit>
<Unit name="L_iliacus1" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.116900 1.069700 -0.032400 " />
<Waypoint body="Pelvis" p="0.084100 0.973000 0.013000 " />
<Waypoint body="Pelvis" p="0.086800 0.917100 0.029700 " />
<Waypoint body="FemurL" p="0.099600 0.877100 -0.009200 " />
<Waypoint body="FemurL" p="0.118700 0.867700 -0.022800 " />
</Unit>
<Unit name="R_iliacus1" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.116900 1.069700 -0.032400 " />
<Waypoint body="Pelvis" p="-0.084100 0.973000 0.013000 " />
<Waypoint body="Pelvis" p="-0.086800 0.917100 0.029700 " />
<Waypoint body="FemurR" p="-0.099600 0.877100 -0.009200 " />
<Waypoint body="FemurR" p="-0.118700 0.867700 -0.022800 " />
</Unit>
<Unit name="L_iliacus2" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.128500 1.033500 0.022400 " />
<Waypoint body="Pelvis" p="0.099900 0.973000 0.031300 " />
<Waypoint body="FemurL" p="0.102000 0.908800 0.014700 " />
<Waypoint body="FemurL" p="0.109200 0.863700 -0.013300 " />
</Unit>
<Unit name="R_iliacus2" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.128500 1.033500 0.022400 " />
<Waypoint body="Pelvis" p="-0.099900 0.973000 0.031300 " />
<Waypoint body="FemurR" p="-0.102000 0.908800 0.014700 " />
<Waypoint body="FemurR" p="-0.109200 0.863700 -0.013300 " />
</Unit>
<Unit name="L_iliocostalis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.003600 0.898900 -0.073600 " />
<Waypoint body="Pelvis" p="0.025200 1.026700 -0.101000 " />
<Waypoint body="Spine" p="0.052800 1.110900 -0.079000 " />
<Waypoint body="Torso" p="0.058200 1.174400 -0.093400 " />
<Waypoint body="Torso" p="0.063900 1.239200 -0.126200 " />
<Waypoint body="Torso" p="0.050100 1.433400 -0.104800 " />
<Waypoint body="Torso" p="0.041100 1.491600 -0.062400 " />
<Waypoint body="Neck" p="0.022400 1.538000 -0.010700 " />
</Unit>
<Unit name="R_iliocostalis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.003600 0.898900 -0.073600 " />
<Waypoint body="Pelvis" p="-0.025200 1.026700 -0.101000 " />
<Waypoint body="Spine" p="-0.052800 1.110900 -0.079000 " />
<Waypoint body="Torso" p="-0.058200 1.174400 -0.093400 " />
<Waypoint body="Torso" p="-0.063900 1.239200 -0.126200 " />
<Waypoint body="Torso" p="-0.050100 1.433400 -0.104800 " />
<Waypoint body="Torso" p="-0.041100 1.491600 -0.062400 " />
<Waypoint body="Neck" p="-0.022400 1.538000 -0.010700 " />
</Unit>
<Unit name="L_Rectus_Abdominis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.022100 0.922600 0.050800 " />
<Waypoint body="Pelvis" p="0.040200 1.029200 0.086100 " />
<Waypoint body="Torso" p="0.060100 1.110900 0.089400 " />
<Waypoint body="Torso" p="0.063500 1.170800 0.092300 " />
<Waypoint body="Torso" p="0.076200 1.304200 0.092900 " />
</Unit>
<Unit name="R_Rectus_Abdominis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.022100 0.922600 0.050800 " />
<Waypoint body="Pelvis" p="-0.040200 1.029200 0.086100 " />
<Waypoint body="Torso" p="-0.060100 1.110900 0.089400 " />
<Waypoint body="Torso" p="-0.063500 1.170800 0.092300 " />
<Waypoint body="Torso" p="-0.076200 1.304200 0.092900 " />
</Unit>
<Unit name="L_Serratus_Posterior_Inferior" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.000000 1.139400 -0.092000 " />
<Waypoint body="Torso" p="0.072300 1.156800 -0.084000 " />
<Waypoint body="Torso" p="0.080500 1.162800 -0.075700 " />
</Unit>
<Unit name="R_Serratus_Posterior_Inferior" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.000000 1.139400 -0.092000 " />
<Waypoint body="Torso" p="-0.072300 1.156800 -0.084000 " />
<Waypoint body="Torso" p="-0.080500 1.162800 -0.075700 " />
</Unit>
<Unit name="L_Transversus_Abdominis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.054100 1.043200 -0.092800 " />
<Waypoint body="Torso" p="0.063200 1.172600 -0.079300 " />
</Unit>
<Unit name="R_Transversus_Abdominis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.054100 1.043200 -0.092800 " />
<Waypoint body="Torso" p="-0.063200 1.172600 -0.079300 " />
</Unit>
<Unit name="L_Transversus_Abdominis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.135600 1.040700 0.017800 " />
<Waypoint body="Torso" p="0.111200 1.137900 -0.011800 " />
</Unit>
<Unit name="R_Transversus_Abdominis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.135600 1.040700 0.017800 " />
<Waypoint body="Torso" p="-0.111200 1.137900 -0.011800 " />
</Unit>
<Unit name="L_Transversus_Abdominis4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.016000 0.927700 0.053500 " />
<Waypoint body="Torso" p="0.038900 1.181000 0.093000 " />
<Waypoint body="Torso" p="0.021000 1.297800 0.093300 " />
</Unit>
<Unit name="R_Transversus_Abdominis4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.016000 0.927700 0.053500 " />
<Waypoint body="Torso" p="-0.038900 1.181000 0.093000 " />
<Waypoint body="Torso" p="-0.021000 1.297800 0.093300 " />
</Unit>
<Unit name="L_Trapezius" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.179900 -0.096800 " />
<Waypoint body="Torso" p="0.034800 1.279400 -0.128000 " />
<Waypoint body="Torso" p="0.080500 1.345200 -0.135600 " />
<Waypoint body="ShoulderL" p="0.131400 1.447600 -0.102400 " />
</Unit>
<Unit name="R_Trapezius" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.179900 -0.096800 " />
<Waypoint body="Torso" p="-0.034800 1.279400 -0.128000 " />
<Waypoint body="Torso" p="-0.080500 1.345200 -0.135600 " />
<Waypoint body="ShoulderR" p="-0.131400 1.447600 -0.102400 " />
</Unit>
<Unit name="L_Trapezius3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.437300 -0.119300 " />
<Waypoint body="ShoulderL" p="0.085900 1.476100 -0.103200 " />
<Waypoint body="ShoulderL" p="0.122700 1.472800 -0.092500 " />
<Waypoint body="ShoulderL" p="0.145500 1.455600 -0.091900 " />
</Unit>
<Unit name="R_Trapezius3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.437300 -0.119300 " />
<Waypoint body="ShoulderR" p="-0.085900 1.476100 -0.103200 " />
<Waypoint body="ShoulderR" p="-0.122700 1.472800 -0.092500 " />
<Waypoint body="ShoulderR" p="-0.145500 1.455600 -0.091900 " />
</Unit>
<Unit name="L_Trapezius5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.563000 -0.063600 " />
<Waypoint body="Neck" p="0.039300 1.549500 -0.062400 " />
<Waypoint body="ShoulderL" p="0.113300 1.496700 -0.064900 " />
<Waypoint body="ShoulderL" p="0.198900 1.460800 -0.056900 " />
</Unit>
<Unit name="R_Trapezius5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.563000 -0.063600 " />
<Waypoint body="Neck" p="-0.039300 1.549500 -0.062400 " />
<Waypoint body="ShoulderR" p="-0.113300 1.496700 -0.064900 " />
<Waypoint body="ShoulderR" p="-0.198900 1.460800 -0.056900 " />
</Unit>
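<!-- Annotation (not part of the original data): each Unit above defines one muscle-tendon
     actuator routed through its Waypoint attachment points on the named bodies. Attribute
     meanings are inferred from MASS-style muscle files and are assumptions, not confirmed
     by this repo: f0 is presumably the maximum isometric force, lm/lt muscle-fiber and
     tendon length parameters, and pen_angle the pennation angle. -->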
</Muscle>
| 120,105 | XML | 66.36175 | 133 | 0.602073 |
NVlabs/DiffRL/envs/assets/snu/ground.xml | <Skeleton name="Ground">
<Node name="ground" parent="None" >
<Body type="Box" mass="15.0" size="1000.0 1.0 1000.0" contact="On" color="1.2 1.2 1.2 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 -0.49958 0.0"/>
</Body>
<Joint type="Weld">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.0 0.0"/>
</Joint>
</Node>
</Skeleton>
| 457 | XML | 40.63636 | 105 | 0.538293 |
NVlabs/DiffRL/optim/gd.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torch.optim.optimizer import Optimizer
class GD(Optimizer):
r"""Implements Pure Gradient Descent algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
"""
def __init__(self, params, lr=1e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(lr=lr)
super(GD, self).__init__(params, defaults)
def __setstate__(self, state):
super(GD, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
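                # vanilla gradient descent update: p <- p - lr * grad(p)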
p.add_(p.grad, alpha = -group['lr'])
return loss | 1,572 | Python | 33.955555 | 79 | 0.632952 |
NVlabs/DiffRL/algorithms/shac.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
import models.critic
from utils.common import *
import utils.torch_utils as tu
from utils.running_mean_std import RunningMeanStd
from utils.dataset import CriticDataset
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
class SHAC:
def __init__(self, cfg):
env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])
seeding(cfg["params"]["general"]["seed"])
self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
device = cfg["params"]["general"]["device"], \
render = cfg["params"]["general"]["render"], \
seed = cfg["params"]["general"]["seed"], \
episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", True), \
MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
no_grad = False)
print('num_envs = ', self.env.num_envs)
print('num_actions = ', self.env.num_actions)
print('num_obs = ', self.env.num_obs)
self.num_envs = self.env.num_envs
self.num_obs = self.env.num_obs
self.num_actions = self.env.num_actions
self.max_episode_length = self.env.episode_length
self.device = cfg["params"]["general"]["device"]
self.gamma = cfg['params']['config'].get('gamma', 0.99)
self.critic_method = cfg['params']['config'].get('critic_method', 'one-step') # ['one-step', 'td-lambda']
if self.critic_method == 'td-lambda':
self.lam = cfg['params']['config'].get('lambda', 0.95)
self.steps_num = cfg["params"]["config"]["steps_num"]
self.max_epochs = cfg["params"]["config"]["max_epochs"]
self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
self.critic_lr = float(cfg['params']['config']['critic_learning_rate'])
self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')
self.target_critic_alpha = cfg['params']['config'].get('target_critic_alpha', 0.4)
self.obs_rms = None
if cfg['params']['config'].get('obs_rms', False):
            self.obs_rms = RunningMeanStd(shape = (self.num_obs, ), device = self.device)
self.ret_rms = None
if cfg['params']['config'].get('ret_rms', False):
self.ret_rms = RunningMeanStd(shape = (), device = self.device)
self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)
self.critic_iterations = cfg['params']['config'].get('critic_iterations', 16)
self.num_batch = cfg['params']['config'].get('num_batch', 4)
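        # each critic minibatch covers (num_envs * steps_num) / num_batch rollout transitions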
self.batch_size = self.num_envs * self.steps_num // self.num_batch
self.name = cfg['params']['config'].get('name', "Ant")
self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
self.grad_norm = cfg["params"]["config"]["grad_norm"]
if cfg['params']['general']['train']:
self.log_dir = cfg["params"]["general"]["logdir"]
os.makedirs(self.log_dir, exist_ok = True)
# save config
save_cfg = copy.deepcopy(cfg)
if 'general' in save_cfg['params']:
deleted_keys = []
for key in save_cfg['params']['general'].keys():
if key in save_cfg['params']['config']:
deleted_keys.append(key)
for key in deleted_keys:
del save_cfg['params']['general'][key]
yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
# save interval
self.save_interval = cfg["params"]["config"].get("save_interval", 500)
# stochastic inference
self.stochastic_evaluation = True
else:
self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
self.steps_num = self.env.episode_length
# create actor critic network
self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
self.critic_name = cfg["params"]["network"].get("critic", 'CriticMLP')
actor_fn = getattr(models.actor, self.actor_name)
self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)
critic_fn = getattr(models.critic, self.critic_name)
self.critic = critic_fn(self.num_obs, cfg['params']['network'], device = self.device)
self.all_params = list(self.actor.parameters()) + list(self.critic.parameters())
self.target_critic = copy.deepcopy(self.critic)
if cfg['params']['general']['train']:
self.save('init_policy')
# initialize optimizer
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), betas = cfg['params']['config']['betas'], lr = self.critic_lr)
# replay buffer
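        # (on-policy short-horizon rollout storage, steps_num x num_envs, refilled every epoch)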
self.obs_buf = torch.zeros((self.steps_num, self.num_envs, self.num_obs), dtype = torch.float32, device = self.device)
self.rew_buf = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.done_mask = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.next_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.target_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.ret = torch.zeros((self.num_envs), dtype = torch.float32, device = self.device)
# for kl divergence computing
self.old_mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.old_sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
# counting variables
self.iter_count = 0
self.step_count = 0
# loss variables
self.episode_length_his = []
self.episode_loss_his = []
self.episode_discounted_loss_his = []
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int)
self.best_policy_loss = np.inf
self.actor_loss = np.inf
self.value_loss = np.inf
# average meter
self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_length_meter = AverageMeter(1, 100).to(self.device)
# timer
self.time_report = TimeReport()
def compute_actor_loss(self, deterministic = False):
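        # Short-horizon actor loss: differentiable rollout of length steps_num;
        # each (sub-)trajectory contributes its discounted reward sum plus a
        # discounted bootstrap value from the target critic (the bootstrap is
        # zeroed on early termination and on nan/inf states below).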
rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
next_values = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)
with torch.no_grad():
if self.obs_rms is not None:
obs_rms = copy.deepcopy(self.obs_rms)
if self.ret_rms is not None:
ret_var = self.ret_rms.var.clone()
# initialize trajectory to cut off gradients between episodes.
obs = self.env.initialize_trajectory()
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
for i in range(self.steps_num):
# collect data for critic training
with torch.no_grad():
self.obs_buf[i] = obs.clone()
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, extra_info = self.env.step(torch.tanh(actions))
with torch.no_grad():
raw_rew = rew.clone()
# scale the reward
rew = rew * self.rew_scale
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
if self.ret_rms is not None:
# update ret rms
with torch.no_grad():
self.ret = self.ret * self.gamma + rew
self.ret_rms.update(self.ret)
rew = rew / torch.sqrt(ret_var + 1e-6)
self.episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
next_values[i + 1] = self.target_critic(obs).squeeze(-1)
for id in done_env_ids:
if torch.isnan(extra_info['obs_before_reset'][id]).sum() > 0 \
or torch.isinf(extra_info['obs_before_reset'][id]).sum() > 0 \
or (torch.abs(extra_info['obs_before_reset'][id]) > 1e6).sum() > 0: # ugly fix for nan values
next_values[i + 1, id] = 0.
elif self.episode_length[id] < self.max_episode_length: # early termination
next_values[i + 1, id] = 0.
else: # otherwise, use terminal value critic to estimate the long-term performance
if self.obs_rms is not None:
real_obs = obs_rms.normalize(extra_info['obs_before_reset'][id])
else:
real_obs = extra_info['obs_before_reset'][id]
next_values[i + 1, id] = self.target_critic(real_obs).squeeze(-1)
if (next_values[i + 1] > 1e6).sum() > 0 or (next_values[i + 1] < -1e6).sum() > 0:
print('next value error')
raise ValueError
rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew
if i < self.steps_num - 1:
actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids] - self.gamma * gamma[done_env_ids] * next_values[i + 1, done_env_ids]).sum()
else:
# terminate all envs at the end of optimization iteration
actor_loss = actor_loss + (- rew_acc[i + 1, :] - self.gamma * gamma * next_values[i + 1, :]).sum()
# compute gamma for next step
gamma = gamma * self.gamma
# clear up gamma and rew_acc for done envs
gamma[done_env_ids] = 1.
rew_acc[i + 1, done_env_ids] = 0.
# collect data for critic training
with torch.no_grad():
self.rew_buf[i] = rew.clone()
if i < self.steps_num - 1:
self.done_mask[i] = done.clone().to(torch.float32)
else:
self.done_mask[i, :] = 1.
self.next_values[i] = next_values[i + 1].clone()
# collect episode loss
with torch.no_grad():
self.episode_loss -= raw_rew
self.episode_discounted_loss -= self.episode_gamma * raw_rew
self.episode_gamma *= self.gamma
if len(done_env_ids) > 0:
self.episode_loss_meter.update(self.episode_loss[done_env_ids])
self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
self.episode_length_meter.update(self.episode_length[done_env_ids])
for done_env_id in done_env_ids:
if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
print('ep loss error')
raise ValueError
self.episode_loss_his.append(self.episode_loss[done_env_id].item())
self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
self.episode_length_his.append(self.episode_length[done_env_id].item())
self.episode_loss[done_env_id] = 0.
self.episode_discounted_loss[done_env_id] = 0.
self.episode_length[done_env_id] = 0
self.episode_gamma[done_env_id] = 1.
actor_loss /= self.steps_num * self.num_envs
if self.ret_rms is not None:
actor_loss = actor_loss * torch.sqrt(ret_var + 1e-6)
self.actor_loss = actor_loss.detach().cpu().item()
self.step_count += self.steps_num * self.num_envs
return actor_loss
@torch.no_grad()
def evaluate_policy(self, num_games, deterministic = False):
episode_length_his = []
episode_loss_his = []
episode_discounted_loss_his = []
episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
episode_length = torch.zeros(self.num_envs, dtype = int)
episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
obs = self.env.reset()
games_cnt = 0
while games_cnt < num_games:
if self.obs_rms is not None:
obs = self.obs_rms.normalize(obs)
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, _ = self.env.step(torch.tanh(actions))
episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
episode_loss -= rew
episode_discounted_loss -= episode_gamma * rew
episode_gamma *= self.gamma
if len(done_env_ids) > 0:
for done_env_id in done_env_ids:
print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
episode_loss_his.append(episode_loss[done_env_id].item())
episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
episode_length_his.append(episode_length[done_env_id].item())
episode_loss[done_env_id] = 0.
episode_discounted_loss[done_env_id] = 0.
episode_length[done_env_id] = 0
episode_gamma[done_env_id] = 1.
games_cnt += 1
mean_episode_length = np.mean(np.array(episode_length_his))
mean_policy_loss = np.mean(np.array(episode_loss_his))
mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))
return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
@torch.no_grad()
def compute_target_values(self):
if self.critic_method == 'one-step':
self.target_values = self.rew_buf + self.gamma * self.next_values
elif self.critic_method == 'td-lambda':
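            # backward recursion computing TD(lambda) value targets: Ai accumulates
            # the lambda-weighted n-step returns, Bi the fully bootstrapped return;
            # episode boundaries are reset via done_mask.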
Ai = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
Bi = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
lam = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
for i in reversed(range(self.steps_num)):
lam = lam * self.lam * (1. - self.done_mask[i]) + self.done_mask[i]
Ai = (1.0 - self.done_mask[i]) * (self.lam * self.gamma * Ai + self.gamma * self.next_values[i] + (1. - lam) / (1. - self.lam) * self.rew_buf[i])
Bi = self.gamma * (self.next_values[i] * self.done_mask[i] + Bi * (1.0 - self.done_mask[i])) + self.rew_buf[i]
self.target_values[i] = (1.0 - self.lam) * Ai + lam * Bi
else:
raise NotImplementedError
def compute_critic_loss(self, batch_sample):
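        # mean-squared error between critic predictions and the precomputed value targets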
predicted_values = self.critic(batch_sample['obs']).squeeze(-1)
target_values = batch_sample['target_values']
critic_loss = ((predicted_values - target_values) ** 2).mean()
return critic_loss
def initialize_env(self):
self.env.clear_grad()
self.env.reset()
@torch.no_grad()
def run(self, num_games):
mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))
def train(self):
self.start_time = time.time()
# add timers
self.time_report.add_timer("algorithm")
self.time_report.add_timer("compute actor loss")
self.time_report.add_timer("forward simulation")
self.time_report.add_timer("backward simulation")
self.time_report.add_timer("prepare critic dataset")
self.time_report.add_timer("actor training")
self.time_report.add_timer("critic training")
self.time_report.start_timer("algorithm")
# initializations
self.initialize_env()
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
def actor_closure():
self.actor_optimizer.zero_grad()
self.time_report.start_timer("compute actor loss")
self.time_report.start_timer("forward simulation")
actor_loss = self.compute_actor_loss()
self.time_report.end_timer("forward simulation")
self.time_report.start_timer("backward simulation")
actor_loss.backward()
self.time_report.end_timer("backward simulation")
with torch.no_grad():
self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
if self.truncate_grad:
clip_grad_norm_(self.actor.parameters(), self.grad_norm)
self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())
# sanity check
if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
print('NaN gradient')
raise ValueError
self.time_report.end_timer("compute actor loss")
return actor_loss
# main training process
for epoch in range(self.max_epochs):
time_start_epoch = time.time()
# learning rate schedule
if self.lr_schedule == 'linear':
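                # linearly anneal both learning rates from their initial values to 1e-5 over max_epochs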
actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
for param_group in self.actor_optimizer.param_groups:
param_group['lr'] = actor_lr
lr = actor_lr
critic_lr = (1e-5 - self.critic_lr) * float(epoch / self.max_epochs) + self.critic_lr
for param_group in self.critic_optimizer.param_groups:
param_group['lr'] = critic_lr
else:
lr = self.actor_lr
# train actor
self.time_report.start_timer("actor training")
            self.actor_optimizer.step(actor_closure)
self.time_report.end_timer("actor training")
# train critic
# prepare dataset
self.time_report.start_timer("prepare critic dataset")
with torch.no_grad():
self.compute_target_values()
dataset = CriticDataset(self.batch_size, self.obs_buf, self.target_values, drop_last = False)
self.time_report.end_timer("prepare critic dataset")
self.time_report.start_timer("critic training")
self.value_loss = 0.
for j in range(self.critic_iterations):
total_critic_loss = 0.
batch_cnt = 0
for i in range(len(dataset)):
batch_sample = dataset[i]
self.critic_optimizer.zero_grad()
training_critic_loss = self.compute_critic_loss(batch_sample)
training_critic_loss.backward()
# ugly fix for simulation nan problem
for params in self.critic.parameters():
params.grad.nan_to_num_(0.0, 0.0, 0.0)
if self.truncate_grad:
clip_grad_norm_(self.critic.parameters(), self.grad_norm)
self.critic_optimizer.step()
total_critic_loss += training_critic_loss
batch_cnt += 1
self.value_loss = (total_critic_loss / batch_cnt).detach().cpu().item()
print('value iter {}/{}, loss = {:7.6f}'.format(j + 1, self.critic_iterations, self.value_loss), end='\r')
self.time_report.end_timer("critic training")
self.iter_count += 1
time_end_epoch = time.time()
# logging
time_elapse = time.time() - self.start_time
self.writer.add_scalar('lr/iter', lr, self.iter_count)
self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
self.writer.add_scalar('value_loss/step', self.value_loss, self.step_count)
self.writer.add_scalar('value_loss/iter', self.value_loss, self.iter_count)
if len(self.episode_loss_his) > 0:
mean_episode_length = self.episode_length_meter.get_mean()
mean_policy_loss = self.episode_loss_meter.get_mean()
mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()
if mean_policy_loss < self.best_policy_loss:
print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
self.save()
self.best_policy_loss = mean_policy_loss
self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
else:
mean_policy_loss = np.inf
mean_policy_discounted_loss = np.inf
mean_episode_length = 0
print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, value loss {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.value_loss, self.grad_norm_before_clip, self.grad_norm_after_clip))
self.writer.flush()
if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))
# update target critic
with torch.no_grad():
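                # Polyak averaging: param_targ <- alpha * param_targ + (1 - alpha) * param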
alpha = self.target_critic_alpha
for param, param_targ in zip(self.critic.parameters(), self.target_critic.parameters()):
param_targ.data.mul_(alpha)
param_targ.data.add_((1. - alpha) * param.data)
self.time_report.end_timer("algorithm")
self.time_report.report()
self.save('final_policy')
# save reward/length history
self.episode_loss_his = np.array(self.episode_loss_his)
self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
self.episode_length_his = np.array(self.episode_length_his)
np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)
# evaluate the final policy's performance
self.run(self.num_envs)
self.close()
def play(self, cfg):
self.load(cfg['params']['general']['checkpoint'])
self.run(cfg['params']['config']['player']['games_num'])
def save(self, filename = None):
if filename is None:
filename = 'best_policy'
torch.save([self.actor, self.critic, self.target_critic, self.obs_rms, self.ret_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))
def load(self, path):
checkpoint = torch.load(path)
self.actor = checkpoint[0].to(self.device)
self.critic = checkpoint[1].to(self.device)
self.target_critic = checkpoint[2].to(self.device)
self.obs_rms = checkpoint[3].to(self.device)
self.ret_rms = checkpoint[4].to(self.device) if checkpoint[4] is not None else checkpoint[4]
def close(self):
self.writer.close()
| 28,575 | Python | 48.439446 | 247 | 0.576378 |
NVlabs/DiffRL/algorithms/bptt.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
from optim.gd import GD
from utils.common import *
import utils.torch_utils as tu
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
from utils.running_mean_std import RunningMeanStd
class BPTT:
def __init__(self, cfg):
env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])
seeding(cfg["params"]["general"]["seed"])
self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
device = cfg["params"]["general"]["device"], \
render = cfg["params"]["general"]["render"], \
seed = cfg["params"]["general"]["seed"], \
episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", False), \
MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
no_grad = False)
print('num_envs = ', self.env.num_envs)
print('num_actions = ', self.env.num_actions)
print('num_obs = ', self.env.num_obs)
self.num_envs = self.env.num_envs
self.num_obs = self.env.num_obs
self.num_actions = self.env.num_actions
self.max_episode_length = self.env.episode_length
self.device = cfg["params"]["general"]["device"]
self.gamma = cfg['params']['config'].get('gamma', 0.99)
self.steps_num = cfg["params"]["config"]["steps_num"]
self.max_epochs = cfg["params"]["config"]["max_epochs"]
self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')
self.obs_rms = None
if cfg['params']['config'].get('obs_rms', False):
            self.obs_rms = RunningMeanStd(shape = (self.num_obs, ), device = self.device)
self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)
self.name = cfg['params']['config'].get('name', "Ant")
self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
self.grad_norm = cfg["params"]["config"]["grad_norm"]
if cfg['params']['general']['train']:
self.log_dir = cfg["params"]["general"]["logdir"]
os.makedirs(self.log_dir, exist_ok = True)
# save config
save_cfg = copy.deepcopy(cfg)
if 'general' in save_cfg['params']:
deleted_keys = []
for key in save_cfg['params']['general'].keys():
if key in save_cfg['params']['config']:
deleted_keys.append(key)
for key in deleted_keys:
del save_cfg['params']['general'][key]
yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
# save interval
self.save_interval = cfg["params"]["config"].get("save_interval", 500)
# stochastic inference
self.stochastic_evaluation = True
else:
self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
self.steps_num = self.env.episode_length
# create actor critic network
self.algo = cfg["params"]["algo"]['name'] # choices: ['gd', 'adam', 'SGD']
self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
actor_fn = getattr(models.actor, self.actor_name)
self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)
if cfg['params']['general']['train']:
self.save('init_policy')
# initialize optimizer
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)
# counting variables
self.iter_count = 0
self.step_count = 0
# loss variables
self.episode_length_his = []
self.episode_loss_his = []
self.episode_discounted_loss_his = []
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int)
self.best_policy_loss = np.inf
self.actor_loss = np.inf
# average meter
self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_length_meter = AverageMeter(1, 100).to(self.device)
# timer
self.time_report = TimeReport()
def compute_actor_loss(self, deterministic = False):
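        # BPTT actor loss: pure discounted reward sum backpropagated through the
        # differentiable simulator over the full steps_num window; unlike SHAC
        # there is no critic bootstrap at the horizon.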
rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)
with torch.no_grad():
if self.obs_rms is not None:
obs_rms = copy.deepcopy(self.obs_rms)
obs = self.env.initialize_trajectory()
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
for i in range(self.steps_num):
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, extra_info = self.env.step(torch.tanh(actions))
with torch.no_grad():
raw_rew = rew.clone()
# scale the reward
rew = rew * self.rew_scale
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
self.episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew
if i < self.steps_num - 1:
actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids]).sum()
else:
# terminate all envs at the end of optimization iteration
actor_loss = actor_loss + (- rew_acc[i + 1, :]).sum()
# compute gamma for next step
gamma = gamma * self.gamma
# clear up gamma and rew_acc for done envs
gamma[done_env_ids] = 1.
rew_acc[i + 1, done_env_ids] = 0.
# collect episode loss
with torch.no_grad():
self.episode_loss -= raw_rew
self.episode_discounted_loss -= self.episode_gamma * raw_rew
self.episode_gamma *= self.gamma
if len(done_env_ids) > 0:
self.episode_loss_meter.update(self.episode_loss[done_env_ids])
self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
self.episode_length_meter.update(self.episode_length[done_env_ids])
for done_env_id in done_env_ids:
if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
                            print('ep loss error')
                            raise ValueError
self.episode_loss_his.append(self.episode_loss[done_env_id].item())
self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
self.episode_length_his.append(self.episode_length[done_env_id].item())
self.episode_loss[done_env_id] = 0.
self.episode_discounted_loss[done_env_id] = 0.
self.episode_length[done_env_id] = 0
self.episode_gamma[done_env_id] = 1.
actor_loss /= self.steps_num * self.num_envs
self.actor_loss = actor_loss.detach().cpu().item()
self.step_count += self.steps_num * self.num_envs
return actor_loss
@torch.no_grad()
def evaluate_policy(self, num_games, deterministic = False):
episode_length_his = []
episode_loss_his = []
episode_discounted_loss_his = []
episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
episode_length = torch.zeros(self.num_envs, dtype = int)
episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
obs = self.env.reset()
games_cnt = 0
while games_cnt < num_games:
if self.obs_rms is not None:
obs = self.obs_rms.normalize(obs)
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, _ = self.env.step(torch.tanh(actions))
episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
episode_loss -= rew
episode_discounted_loss -= episode_gamma * rew
episode_gamma *= self.gamma
if len(done_env_ids) > 0:
for done_env_id in done_env_ids:
print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
episode_loss_his.append(episode_loss[done_env_id].item())
episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
episode_length_his.append(episode_length[done_env_id].item())
episode_loss[done_env_id] = 0.
episode_discounted_loss[done_env_id] = 0.
episode_length[done_env_id] = 0
episode_gamma[done_env_id] = 1.
games_cnt += 1
mean_episode_length = np.mean(np.array(episode_length_his))
mean_policy_loss = np.mean(np.array(episode_loss_his))
mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))
return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
def initialize_env(self):
self.env.clear_grad()
self.env.reset()
@torch.no_grad()
def run(self, num_games):
mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))
def train(self):
self.start_time = time.time()
# timers
self.time_report.add_timer("algorithm")
self.time_report.add_timer("compute actor loss")
self.time_report.add_timer("forward simulation")
self.time_report.add_timer("backward simulation")
self.time_report.add_timer("actor training")
self.time_report.start_timer("algorithm")
self.initialize_env()
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
def actor_closure():
self.actor_optimizer.zero_grad()
self.time_report.start_timer("compute actor loss")
self.time_report.start_timer("forward simulation")
actor_loss = self.compute_actor_loss()
self.time_report.end_timer("forward simulation")
self.time_report.start_timer("backward simulation")
actor_loss.backward()
self.time_report.end_timer("backward simulation")
with torch.no_grad():
self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
if self.truncate_grad:
clip_grad_norm_(self.actor.parameters(), self.grad_norm)
self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())
if torch.isnan(self.grad_norm_before_clip):
                    print('NaN gradient detected, zeroing actor gradients')
for params in self.actor.parameters():
params.grad.zero_()
if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
self.save("nan_policy")
self.time_report.end_timer("compute actor loss")
return actor_loss
for epoch in range(self.max_epochs):
time_start_epoch = time.time()
if self.lr_schedule == 'linear':
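                # linearly anneal the learning rate from its initial value to 1e-5 over max_epochs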
actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
for param_group in self.actor_optimizer.param_groups:
param_group['lr'] = actor_lr
lr = actor_lr
else:
lr = self.actor_lr
# train actor
self.time_report.start_timer("actor training")
            self.actor_optimizer.step(actor_closure)
self.time_report.end_timer("actor training")
self.iter_count += 1
time_end_epoch = time.time()
# logging
time_elapse = time.time() - self.start_time
self.writer.add_scalar('lr/iter', lr, self.iter_count)
self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
if len(self.episode_loss_his) > 0:
mean_episode_length = self.episode_length_meter.get_mean()
mean_policy_loss = self.episode_loss_meter.get_mean()
mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()
if mean_policy_loss < self.best_policy_loss:
print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
self.save()
self.best_policy_loss = mean_policy_loss
# self.save("latest_policy")
self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
else:
mean_policy_loss = np.inf
mean_policy_discounted_loss = np.inf
mean_episode_length = 0
print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.grad_norm_before_clip, self.grad_norm_after_clip))
self.writer.flush()
if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))
self.time_report.end_timer("algorithm")
self.time_report.report()
self.save('final_policy')
# save reward/length history
self.episode_loss_his = np.array(self.episode_loss_his)
self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
self.episode_length_his = np.array(self.episode_length_his)
np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)
# evaluate the final policy's performance
self.run(self.num_envs)
self.close()
def play(self, cfg):
self.load(cfg['params']['general']['checkpoint'])
self.run(cfg['params']['config']['player']['games_num'])
def save(self, filename = None):
if filename is None:
filename = 'best_policy'
torch.save([self.actor, self.obs_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))
def load(self, path):
checkpoint = torch.load(path)
self.actor = checkpoint[0].to(self.device)
self.obs_rms = checkpoint[1].to(self.device)
def close(self):
self.writer.close()
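# Usage sketch (the 'trainer' name is hypothetical): save()/load() round-trip a
# [actor, obs_rms] checkpoint via torch.save/torch.load, so a trained policy can
# be re-evaluated later:
#
#   trainer.save('my_policy')                                    # -> <log_dir>/my_policy.pt
#   trainer.load(os.path.join(trainer.log_dir, 'my_policy.pt'))
#   trainer.run(num_games=trainer.num_envs)                      # evaluate the restored policy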
| 19,592 | Python | 45.209906 | 230 | 0.576868 |
NVlabs/DiffRL/externals/rl_games/setup.py | """Setup script for rl_games"""
import sys
import os
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
print(find_packages())
setup(name='rl-games',
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Denys88/rl_games",
packages = ['.','rl_games','docs'],
package_data={'rl_games':['*'],'docs':['*'],},
version='1.1.0',
author='Denys Makoviichuk, Viktor Makoviichuk',
author_email='[email protected], [email protected]',
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
#packages=["rlg"],
include_package_data=True,
install_requires=[
# this setup is only for pytorch
#
'gym>=0.17.2',
'numpy>=1.16.0',
'tensorboard>=1.14.0',
'tensorboardX>=1.6',
'setproctitle',
'psutil',
'pyyaml'
],
)
| 1,300 | Python | 27.91111 | 70 | 0.559231 |
NVlabs/DiffRL/externals/rl_games/runner.py | import numpy as np
import argparse, copy, os, yaml
import ray, signal
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
#import warnings
#warnings.filterwarnings("error")
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-tf", "--tf", required=False, help="run tensorflow runner", action='store_true')
ap.add_argument("-t", "--train", required=False, help="train network", action='store_true')
ap.add_argument("-p", "--play", required=False, help="play(test) network", action='store_true')
ap.add_argument("-c", "--checkpoint", required=False, help="path to checkpoint")
ap.add_argument("-f", "--file", required=True, help="path to config")
ap.add_argument("-na", "--num_actors", type=int, default=0, required=False,
help="number of envs running in parallel, if larger than 0 will overwrite the value in yaml config")
os.makedirs("nn", exist_ok=True)
os.makedirs("runs", exist_ok=True)
args = vars(ap.parse_args())
config_name = args['file']
print('Loading config: ', config_name)
with open(config_name, 'r') as stream:
config = yaml.safe_load(stream)
if args['num_actors'] > 0:
config['params']['config']['num_actors'] = args['num_actors']
if args['tf']:
from rl_games.tf14_runner import Runner
else:
from rl_games.torch_runner import Runner
ray.init(object_store_memory=1024*1024*1000)
#signal.signal(signal.SIGINT, exit_gracefully)
runner = Runner()
try:
runner.load(config)
except yaml.YAMLError as exc:
print(exc)
runner.reset()
runner.run(args)
ray.shutdown()
| 1,739 | Python | 33.799999 | 120 | 0.615296 |
NVlabs/DiffRL/externals/rl_games/README.md | # RL Games: High performance RL library
## Papers and related links
* Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning: https://arxiv.org/abs/2108.10470
* Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger: https://s2r2-ig.github.io/ https://arxiv.org/abs/2108.09779
* Is Independent Learning All You Need in the StarCraft Multi-Agent Challenge? <https://arxiv.org/abs/2011.09533>
## Some results on interesting environments
* [NVIDIA Isaac Gym](docs/ISAAC_GYM.md)




* [Starcraft 2 Multi Agents](docs/SMAC.md)
* [BRAX](docs/BRAX.md)
* [Old TF1.x results](docs/BRAX.md)
## Config file
* [Configuration](docs/CONFIG_PARAMS.md)
Implemented in Pytorch:
* PPO with the support of asymmetric actor-critic variant
* Support of end-to-end GPU accelerated training pipeline with Isaac Gym and Brax
* Masked actions support
* Multi-agent training, decentralized and centralized critic variants
* Self-play
Implemented in Tensorflow 1.x (no longer updated):
* Rainbow DQN
* A2C
* PPO
# Installation
For maximum training performance, a preliminary installation of PyTorch 1.9+ with CUDA 11.1 is highly recommended:
```conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c nvidia``` or:
```pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html```
Then:
```pip install rl-games```
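The same entry point is available from Python. A minimal programmatic sketch (the config path is illustrative, and some environments additionally require `ray.init()`, as in `runner.py`):

```python
import yaml
from rl_games.torch_runner import Runner

with open('rl_games/configs/atari/ppo_pong.yaml', 'r') as stream:
    config = yaml.safe_load(stream)

runner = Runner()
runner.load(config)
runner.reset()
runner.run({'train': True, 'play': False, 'checkpoint': None})
```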
# Training
**NVIDIA Isaac Gym**
Download and follow the installation instructions from https://developer.nvidia.com/isaac-gym
Run from ```python/rlgpu``` directory:
Ant
```python rlg_train.py --task Ant --headless```
```python rlg_train.py --task Ant --play --checkpoint nn/Ant.pth --num_envs 100```
Humanoid
```python rlg_train.py --task Humanoid --headless```
```python rlg_train.py --task Humanoid --play --checkpoint nn/Humanoid.pth --num_envs 100```
Shadow Hand block orientation task
```python rlg_train.py --task ShadowHand --headless```
```python rlg_train.py --task ShadowHand --play --checkpoint nn/ShadowHand.pth --num_envs 100```
**Atari Pong**
```python runner.py --train --file rl_games/configs/atari/ppo_pong.yaml```
```python runner.py --play --file rl_games/configs/atari/ppo_pong.yaml --checkpoint nn/PongNoFrameskip.pth```
**Brax Ant**
```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
```python runner.py --play --file rl_games/configs/brax/ppo_ant.yaml --checkpoint nn/Ant_brax.pth```
# Release Notes
1.1.0
* Added to pypi: ```pip install rl-games```
* Added reporting env (sim) step fps, without policy inference. Improved naming.
* Renamed yaml config parameters for better readability: steps_num to horizon_length and lr_threshold to kl_threshold
# Troubleshooting
* Some of the supported envs are not installed with setup.py; you need to install them manually
* Starting from rl-games 1.1.0, old yaml configs won't be compatible with the new version:
* ```steps_num``` should be changed to ```horizon_length``` and ```lr_threshold``` to ```kl_threshold```
| 3,558 | Markdown | 35.690721 | 151 | 0.737493 |
NVlabs/DiffRL/externals/rl_games/tests/simple_test.py | import pytest
def test_true():
assert True | 48 | Python | 8.799998 | 16 | 0.6875 |
NVlabs/DiffRL/externals/rl_games/docs/SMAC.md | ## Starcraft 2 Multiple Agents Results
* Starcraft 2 Multiple Agents Results with PPO (https://github.com/oxwhirl/smac)
* Every agent was controlled independently and has restricted information
* All the environments were trained with a default difficulty level 7
* No curriculum, just baseline PPO
* Full state information wasn't used for the critic; the actor and critic received the same agent observations
* Most results are significantly better by win rate and were trained on a single PC much faster than QMIX (https://arxiv.org/pdf/1902.04043.pdf), MAVEN (https://arxiv.org/pdf/1910.07483.pdf) or QTRAN
* No hyperparameter search
* 4 frames + conv1d actor-critic network
* Miniepoch num was set to 1, higher numbers didn't work
* Simple MLP networks did not work well on hard envs
[](https://www.youtube.com/watch?v=F_IfFz-s-iQ)
# How to run configs:
# Pytorch
* ```python runner.py --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
# Tensorflow
* ```python runner.py --tf --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --tf --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
* ```tensorboard --logdir runs```
# Results on some environments:
* 2m_vs_1z took about 2 minutes to achieve a 100% WR
* corridor took about 2 hours for a 95+% WR
* MMM2 4 hours for 90+% WR
* 6h_vs_8z got 82% WR after 8 hours of training
* 5m_vs_6m got 72% WR after 8 hours of training
# Plots:
FPS in these plots is calculated on a per-env basis, except for MMM2 (which was scaled by the number of agents, 10). To get win rate per number of environment steps, as used in the plots of the QMIX, MAVEN, QTRAN and Deep Coordination Graphs (https://arxiv.org/pdf/1910.00091.pdf) papers, the FPS numbers under the horizontal axis should be divided by the number of agents in the player's team.
* 2m_vs_1z:

* 3s5z_vs_3s6z:

* 3s_vs_5z:

* corridor:

* 5m_vs_6m:

* MMM2:

| 2,266 | Markdown | 48.282608 | 384 | 0.735658 |
NVlabs/DiffRL/externals/rl_games/docs/OTHER.md | ## Old Tensorflow results
* Double dueling DQN vs DQN with the same parameters

It took about 90 minutes to learn with this setup.
* Different DQN Configurations tests
Light grey is noisy 1-step dddqn.
Noisy 3-step dddqn was even faster.
Best network (configuration 5) needs about 20 minutes to learn on an NVIDIA 1080.
Currently the best setup for Pong is the noisy 3-step double dueling network.
Different experiments can be found in pong_runs.py.
Fewer than 200k frames to reach a score > 18.

DQN has more optimistic Q value estimations.
# Other Games Results
These results are not stable; they are just the best games. For good average results you need to train the network for more than 10 million steps.
Some games need 50m steps.
* 5 million frames, two-step noisy double dueling DQN:
[](https://youtu.be/Lu9Cm9K_6ms)
* A random lucky game in Space Invaders after less than one hour of learning:
[](https://www.youtube.com/watch?v=LO0RL437rh4)
# A2C and PPO Results
* More than 2 hours for Pong to achieve a score of 20 with one actor playing.
* 8 hours for Super Mario lvl 1
[](https://www.youtube.com/watch?v=T9ujS3HIvMY)
* PPO with LSTM layers
[](https://www.youtube.com/watch?v=fjY4AWbmhHg)
 | 1,627 | Markdown | 36.860464 | 124 | 0.75968 |
NVlabs/DiffRL/externals/rl_games/docs/BRAX.md | # Brax (https://github.com/google/brax)
## How to run:
* **Ant** ```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
* **Humanoid** ```python runner.py --train --file rl_games/configs/brax/ppo_humanoid.yaml```
## Visualization:
* run **brax_visualization.ipynb**
## Results:
* **Ant** fps step: 1692066.6 fps total: 885603.1

* **Humanoid** fps step: 1244450.3 fps total: 661064.5

* **ur5e** fps step: 1116872.3 fps total: 627117.0


 | 672 | Markdown | 34.421051 | 92 | 0.671131 |
NVlabs/DiffRL/externals/rl_games/docs/ISAAC_GYM.md | ## Isaac Gym Results
https://developer.nvidia.com/isaac-gym
Coming.
| 69 | Markdown | 12.999997 | 38 | 0.753623 |
NVlabs/DiffRL/externals/rl_games/docs/CONFIG_PARAMS.md | # Yaml Config Description
Coming.
| 37 | Markdown | 8.499998 | 27 | 0.72973 |
NVlabs/DiffRL/externals/rl_games/rl_games/torch_runner.py | import numpy as np
import copy
import torch
import yaml
from rl_games import envs
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import model_builder
from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import a2c_discrete
from rl_games.algos_torch import players
from rl_games.common.algo_observer import DefaultAlgoObserver
from rl_games.algos_torch import sac_agent
class Runner:
def __init__(self, algo_observer=None):
self.algo_factory = object_factory.ObjectFactory()
self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.DiscreteA2CAgent(**kwargs))
self.algo_factory.register_builder('sac', lambda **kwargs: sac_agent.SACAgent(**kwargs))
#self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
self.player_factory = object_factory.ObjectFactory()
self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
self.player_factory.register_builder('sac', lambda **kwargs : players.SACPlayer(**kwargs))
#self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
self.model_builder = model_builder.ModelBuilder()
self.network_builder = network_builder.NetworkBuilder()
self.algo_observer = algo_observer
torch.backends.cudnn.benchmark = True
def reset(self):
pass
def load_config(self, params):
self.seed = params.get('seed', None)
self.algo_params = params['algo']
self.algo_name = self.algo_params['name']
self.load_check_point = params['load_checkpoint']
self.exp_config = None
if self.seed:
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
np.random.seed(self.seed)
if self.load_check_point:
print('Found checkpoint')
print(params['load_path'])
self.load_path = params['load_path']
self.model = self.model_builder.load(params)
self.config = copy.deepcopy(params['config'])
self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'])
self.config['network'] = self.model
self.config['logdir'] = params['general'].get('logdir', './')
        has_rnd_net = self.config.get('rnd_config', None) is not None
if has_rnd_net:
print('Adding RND Network')
network = self.model_builder.network_factory.create(params['config']['rnd_config']['network']['name'])
network.load(params['config']['rnd_config']['network'])
self.config['rnd_config']['network'] = network
        has_central_value_net = self.config.get('central_value_config', None) is not None
if has_central_value_net:
print('Adding Central Value Network')
network = self.model_builder.network_factory.create(params['config']['central_value_config']['network']['name'])
network.load(params['config']['central_value_config']['network'])
self.config['central_value_config']['network'] = network
def load(self, yaml_conf):
self.default_config = yaml_conf['params']
self.load_config(copy.deepcopy(self.default_config))
if 'experiment_config' in yaml_conf:
self.exp_config = yaml_conf['experiment_config']
def get_prebuilt_config(self):
return self.config
def run_train(self):
print('Started to train')
if self.algo_observer is None:
self.algo_observer = DefaultAlgoObserver()
if self.exp_config:
self.experiment = experiment.Experiment(self.default_config, self.exp_config)
exp_num = 0
exp = self.experiment.get_next_config()
while exp is not None:
exp_num += 1
print('Starting experiment number: ' + str(exp_num))
self.reset()
self.load_config(exp)
if 'features' not in self.config:
self.config['features'] = {}
self.config['features']['observer'] = self.algo_observer
#if 'soft_augmentation' in self.config['features']:
# self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
self.experiment.set_results(*agent.train())
exp = self.experiment.get_next_config()
else:
self.reset()
self.load_config(self.default_config)
if 'features' not in self.config:
self.config['features'] = {}
self.config['features']['observer'] = self.algo_observer
#if 'soft_augmentation' in self.config['features']:
# self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
if self.load_check_point and (self.load_path is not None):
agent.restore(self.load_path)
agent.train()
def create_player(self):
return self.player_factory.create(self.algo_name, config=self.config)
def create_agent(self, obs_space, action_space):
return self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
def run(self, args):
if 'checkpoint' in args and args['checkpoint'] is not None:
if len(args['checkpoint']) > 0:
self.load_path = args['checkpoint']
if args['train']:
self.run_train()
elif args['play']:
print('Started to play')
player = self.create_player()
player.restore(self.load_path)
player.run()
else:
self.run_train() | 6,556 | Python | 43.304054 | 148 | 0.624619 |
NVlabs/DiffRL/externals/rl_games/rl_games/tf14_runner.py | import tensorflow as tf
import numpy as np
import yaml
import ray
import copy
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import model_builder
from rl_games.algos_tf14 import a2c_continuous
from rl_games.algos_tf14 import a2c_discrete
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14 import players
class Runner:
def __init__(self):
self.algo_factory = object_factory.ObjectFactory()
self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.A2CAgent(**kwargs))
self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
self.player_factory = object_factory.ObjectFactory()
self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
self.model_builder = model_builder.ModelBuilder()
self.network_builder = network_builder.NetworkBuilder()
self.sess = None
def reset(self):
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.8)
config = tf.ConfigProto(gpu_options=gpu_options)
tf.reset_default_graph()
if self.sess:
self.sess.close()
self.sess = tf.InteractiveSession(config=config)
def load_config(self, params):
self.seed = params.get('seed', None)
self.algo_params = params['algo']
self.algo_name = self.algo_params['name']
self.load_check_point = params['load_checkpoint']
self.exp_config = None
if self.seed:
tf.set_random_seed(self.seed)
np.random.seed(self.seed)
if self.load_check_point:
self.load_path = params['load_path']
self.model = self.model_builder.load(params)
self.config = copy.deepcopy(params['config'])
self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'], is_torch=False)
self.config['network'] = self.model
def load(self, yaml_conf):
self.default_config = yaml_conf['params']
self.load_config(copy.deepcopy(self.default_config))
if 'experiment_config' in yaml_conf:
self.exp_config = yaml_conf['experiment_config']
def get_prebuilt_config(self):
return self.config
def run_train(self):
print('Started to train')
ray.init(object_store_memory=1024*1024*1000)
shapes = env_configurations.get_obs_and_action_spaces_from_config(self.config)
obs_space = shapes['observation_space']
action_space = shapes['action_space']
print('obs_space:', obs_space)
print('action_space:', action_space)
if self.exp_config:
self.experiment = experiment.Experiment(self.default_config, self.exp_config)
exp_num = 0
exp = self.experiment.get_next_config()
while exp is not None:
exp_num += 1
print('Starting experiment number: ' + str(exp_num))
self.reset()
self.load_config(exp)
agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
self.experiment.set_results(*agent.train())
exp = self.experiment.get_next_config()
else:
self.reset()
self.load_config(self.default_config)
agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
if self.load_check_point or (self.load_path is not None):
agent.restore(self.load_path)
agent.train()
def create_player(self):
return self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
def create_agent(self, obs_space, action_space):
return self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
def run(self, args):
if 'checkpoint' in args:
self.load_path = args['checkpoint']
if args['train']:
self.run_train()
elif args['play']:
print('Started to play')
player = self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
player.restore(self.load_path)
player.run()
ray.shutdown()
| 5,099 | Python | 39.8 | 175 | 0.643656 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/test_network.py | import torch
from torch import nn
import torch.nn.functional as F
class TestNet(nn.Module):
def __init__(self, params, **kwargs):
nn.Module.__init__(self)
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
num_inputs = 0
        assert isinstance(input_shape, dict)
        for k, v in input_shape.items():
            num_inputs += v[0]
self.central_value = params.get('central_value', False)
self.value_size = kwargs.pop('value_size', 1)
self.linear1 = nn.Linear(num_inputs, 256)
self.linear2 = nn.Linear(256, 128)
self.linear3 = nn.Linear(128, 64)
self.mean_linear = nn.Linear(64, actions_num)
self.value_linear = nn.Linear(64, 1)
def is_rnn(self):
return False
def forward(self, obs):
obs = obs['obs']
obs = torch.cat([obs['pos'], obs['info']], axis=-1)
x = F.relu(self.linear1(obs))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
action = self.mean_linear(x)
value = self.value_linear(x)
if self.central_value:
return value, None
return action, value, None
from rl_games.algos_torch.network_builder import NetworkBuilder
class TestNetBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
def build(self, name, **kwargs):
return TestNet(self.params, **kwargs)
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
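# Usage sketch: register the builder so YAML configs can refer to the network by
# name (this mirrors rl_games/envs/__init__.py, which registers it as 'testnet'):
#
#   from rl_games.algos_torch import model_builder
#   model_builder.register_network('testnet', TestNetBuilder)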
| 1,596 | Python | 28.036363 | 63 | 0.589599 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/smac_env.py | import gym
import numpy as np
from smac.env import StarCraft2Env
class SMACEnv(gym.Env):
def __init__(self, name="3m", **kwargs):
gym.Env.__init__(self)
self.seed = kwargs.pop('seed', None)
self.reward_sparse = kwargs.get('reward_sparse', False)
self.use_central_value = kwargs.pop('central_value', False)
self.random_invalid_step = kwargs.pop('random_invalid_step', False)
self.replay_save_freq = kwargs.pop('replay_save_freq', 10000)
self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
self.env = StarCraft2Env(map_name=name, seed=self.seed, **kwargs)
self.env_info = self.env.get_env_info()
self._game_num = 0
self.n_actions = self.env_info["n_actions"]
self.n_agents = self.env_info["n_agents"]
self.action_space = gym.spaces.Discrete(self.n_actions)
one_hot_agents = 0
if self.apply_agent_ids:
one_hot_agents = self.n_agents
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['obs_shape']+one_hot_agents, ), dtype=np.float32)
self.state_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['state_shape'], ), dtype=np.float32)
self.obs_dict = {}
def _preproc_state_obs(self, state, obs):
# todo: remove from self
if self.apply_agent_ids:
num_agents = self.n_agents
obs = np.array(obs)
all_ids = np.eye(num_agents, dtype=np.float32)
obs = np.concatenate([obs, all_ids], axis=-1)
self.obs_dict["obs"] = np.array(obs)
self.obs_dict["state"] = np.array(state)
if self.use_central_value:
return self.obs_dict
else:
return self.obs_dict["obs"]
def get_number_of_agents(self):
return self.n_agents
def reset(self):
if self._game_num % self.replay_save_freq == 1:
print('saving replay')
self.env.save_replay()
self._game_num += 1
        obs, state = self.env.reset()  # TODO: consider renaming or removing
obs_dict = self._preproc_state_obs(state, obs)
return obs_dict
def _preproc_actions(self, actions):
actions = actions.copy()
rewards = np.zeros_like(actions)
mask = self.get_action_mask()
for ind, action in enumerate(actions, start=0):
avail_actions = np.nonzero(mask[ind])[0]
if action not in avail_actions:
actions[ind] = np.random.choice(avail_actions)
#rewards[ind] = -0.05
return actions, rewards
def step(self, actions):
fixed_rewards = None
if self.random_invalid_step:
actions, fixed_rewards = self._preproc_actions(actions)
reward, done, info = self.env.step(actions)
if done:
battle_won = info.get('battle_won', False)
if not battle_won and self.reward_sparse:
reward = -1.0
obs = self.env.get_obs()
state = self.env.get_state()
obses = self._preproc_state_obs(state, obs)
        rewards = np.repeat(reward, self.n_agents)
        dones = np.repeat(done, self.n_agents)
if fixed_rewards is not None:
rewards += fixed_rewards
return obses, rewards, dones, info
def get_action_mask(self):
        return np.array(self.env.get_avail_actions(), dtype=bool)
def has_action_mask(self):
return not self.random_invalid_step
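# Usage sketch (requires StarCraft II and the SMAC maps to be installed; the map
# name and the random masked policy below are illustrative):
#
#   env = SMACEnv(name='3m')
#   obs = env.reset()
#   mask = env.get_action_mask()                      # (n_agents, n_actions) bools
#   actions = [np.random.choice(np.nonzero(m)[0]) for m in mask]
#   obs, rewards, dones, info = env.step(actions)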
| 3,500 | Python | 34.01 | 133 | 0.587714 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/connect4_selfplay.py | import gym
import numpy as np
from pettingzoo.classic import connect_four_v0
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
class ConnectFourSelfPlay(gym.Env):
def __init__(self, name="connect_four_v0", **kwargs):
gym.Env.__init__(self)
self.name = name
self.is_determenistic = kwargs.pop('is_determenistic', False)
self.is_human = kwargs.pop('is_human', False)
self.random_agent = kwargs.pop('random_agent', False)
self.config_path = kwargs.pop('config_path')
self.agent = None
self.env = connect_four_v0.env()#gym.make(name, **kwargs)
self.action_space = self.env.action_spaces['player_0']
observation_space = self.env.observation_spaces['player_0']
shp = observation_space.shape
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(shp[:-1] + (shp[-1] * 2,)), dtype=np.uint8)
self.obs_deque = deque([], maxlen=2)
self.agent_id = 0
def _get_legal_moves(self, agent_id):
name = 'player_0' if agent_id == 0 else 'player_1'
action_ids = self.env.infos[name]['legal_moves']
        mask = np.zeros(self.action_space.n, dtype=bool)
mask[action_ids] = True
return mask, action_ids
def env_step(self, action):
obs = self.env.step(action)
info = {}
name = 'player_0' if self.agent_id == 0 else 'player_1'
reward = self.env.rewards[name]
done = self.env.dones[name]
return obs, reward, done, info
def get_obs(self):
return np.concatenate(self.obs_deque,-1).astype(np.uint8) * 255
def reset(self):
        if self.agent is None:
self.create_agent(self.config_path)
self.agent_id = np.random.randint(2)
obs = self.env.reset()
self.obs_deque.append(obs)
self.obs_deque.append(obs)
if self.agent_id == 1:
op_obs = self.get_obs()
op_obs = self.agent.obs_to_torch(op_obs)
mask, ids = self._get_legal_moves(0)
if self.is_human:
self.render()
opponent_action = int(input())
else:
if self.random_agent:
opponent_action = np.random.choice(ids, 1)[0]
else:
opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
obs, _, _, _ = self.env_step(opponent_action)
self.obs_deque.append(obs)
return self.get_obs()
def create_agent(self, config):
with open(config, 'r') as stream:
config = yaml.safe_load(stream)
runner = Runner()
runner.load(config)
config = runner.get_prebuilt_config()
        # Workaround for a Ray bug: CUDA_VISIBLE_DEVICES can become unset
if 'CUDA_VISIBLE_DEVICES' in os.environ:
os.environ.pop('CUDA_VISIBLE_DEVICES')
self.agent = runner.create_player()
self.agent.model.eval()
def step(self, action):
obs, reward, done, info = self.env_step(action)
self.obs_deque.append(obs)
if done:
if reward == 1:
info['battle_won'] = 1
else:
info['battle_won'] = 0
return self.get_obs(), reward, done, info
op_obs = self.get_obs()
op_obs = self.agent.obs_to_torch(op_obs)
mask, ids = self._get_legal_moves(1-self.agent_id)
if self.is_human:
self.render()
opponent_action = int(input())
else:
if self.random_agent:
opponent_action = np.random.choice(ids, 1)[0]
else:
opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
obs, reward, done,_ = self.env_step(opponent_action)
if done:
if reward == -1:
info['battle_won'] = 0
else:
info['battle_won'] = 1
self.obs_deque.append(obs)
return self.get_obs(), reward, done, info
def render(self, mode='ansi'):
self.env.render(mode)
    def update_weights(self, weights):
        self.agent.set_weights(weights)
def get_action_mask(self):
mask, _ = self._get_legal_moves(self.agent_id)
return mask
def has_action_mask(self):
return True | 4,505 | Python | 33.396946 | 113 | 0.552719 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/__init__.py |
from rl_games.envs.connect4_network import ConnectBuilder
from rl_games.envs.test_network import TestNetBuilder
from rl_games.algos_torch import model_builder
model_builder.register_network('connect4net', ConnectBuilder)
model_builder.register_network('testnet', TestNetBuilder) | 282 | Python | 30.444441 | 61 | 0.833333 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/connect4_network.py | import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
def __init__(self):
super(ConvBlock, self).__init__()
self.action_size = 7
self.conv1 = nn.Conv2d(4, 128, 3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(128)
def forward(self, s):
        s = s.contiguous()
#s = s.view(-1, 3, 6, 7) # batch_size x channels x board_x x board_y
s = F.relu(self.bn1(self.conv1(s)))
return s
class ResBlock(nn.Module):
def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
def forward(self, x):
residual = x
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += residual
out = F.relu(out)
return out
class OutBlock(nn.Module):
def __init__(self):
super(OutBlock, self).__init__()
self.conv = nn.Conv2d(128, 3, kernel_size=1) # value head
self.bn = nn.BatchNorm2d(3)
self.fc1 = nn.Linear(3*6*7, 32)
self.fc2 = nn.Linear(32, 1)
self.conv1 = nn.Conv2d(128, 32, kernel_size=1) # policy head
self.bn1 = nn.BatchNorm2d(32)
self.fc = nn.Linear(6*7*32, 7)
def forward(self,s):
v = F.relu(self.bn(self.conv(s))) # value head
v = v.view(-1, 3*6*7) # batch_size X channel X height X width
v = F.relu(self.fc1(v))
v = F.relu(self.fc2(v))
v = torch.tanh(v)
p = F.relu(self.bn1(self.conv1(s))) # policy head
p = p.view(-1, 6*7*32)
p = self.fc(p)
return p, v, None
class ConnectNet(nn.Module):
def __init__(self, blocks):
super(ConnectNet, self).__init__()
self.blocks = blocks
self.conv = ConvBlock()
for block in range(self.blocks):
setattr(self, "res_%i" % block,ResBlock())
self.outblock = OutBlock()
def is_rnn(self):
return False
    def forward(self, s):
        s = s['obs'].permute((0, 3, 1, 2))  # NHWC board -> NCHW for the conv stack
s = self.conv(s)
for block in range(self.blocks):
s = getattr(self, "res_%i" % block)(s)
s = self.outblock(s)
return s
from rl_games.algos_torch.network_builder import NetworkBuilder
class ConnectBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
self.blocks = params['blocks']
def build(self, name, **kwargs):
return ConnectNet(self.blocks)
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
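# Forward-pass sketch (the board shape is an assumption based on the 6x7 Connect
# Four grid and the 4 input channels expected by ConvBlock):
#
#   net = ConnectNet(blocks=5)
#   boards = {'obs': torch.zeros(2, 6, 7, 4)}         # NHWC; permuted to NCHW inside
#   policy_logits, value, _ = net(boards)             # (2, 7) move logits, (2, 1) value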
| 2,992 | Python | 28.93 | 78 | 0.558489 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/brax.py |
from rl_games.common.ivecenv import IVecEnv
import gym
import numpy as np
import torch
import torch.utils.dlpack as tpack
def jax_to_torch(tensor):
from jax._src.dlpack import (to_dlpack,)
tensor = to_dlpack(tensor)
tensor = tpack.from_dlpack(tensor)
return tensor
def torch_to_jax(tensor):
from jax._src.dlpack import (from_dlpack,)
tensor = tpack.to_dlpack(tensor)
tensor = from_dlpack(tensor)
return tensor
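# Round-trip sketch: the dlpack conversion above is zero-copy, so JAX and torch
# share the same device buffer (assumes a GPU-enabled JAX install):
#
#   import jax.numpy as jnp
#   x = jnp.ones((4,))
#   t = jax_to_torch(x)     # torch.Tensor viewing the same memory
#   y = torch_to_jax(t)     # back to a JAX array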
class BraxEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
import brax
from brax import envs
import jax
import jax.numpy as jnp
self.batch_size = num_actors
env_fn = envs.create_fn(env_name=kwargs.pop('env_name', 'ant'))
self.env = env_fn(
action_repeat=1,
batch_size=num_actors,
episode_length=kwargs.pop('episode_length', 1000))
obs_high = np.inf * np.ones(self.env.observation_size)
self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
action_high = np.ones(self.env.action_size)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
def step(first_state, state, action):
def test_done(a, b):
if a is first_state.done or a is first_state.metrics or a is first_state.reward:
return b
test_shape = [a.shape[0],] + [1 for _ in range(len(a.shape) - 1)]
return jnp.where(jnp.reshape(state.done, test_shape), a, b)
state = self.env.step(state, action)
state = jax.tree_multimap(test_done, first_state, state)
return state, state.obs, state.reward, state.done, {}
def reset(key):
state = self.env.reset(key)
return state, state.obs
self._reset = jax.jit(reset, backend='gpu')
self._step = jax.jit(step, backend='gpu')
def step(self, action):
action = torch_to_jax(action)
self.state, next_obs, reward, is_done, info = self._step(self.first_state, self.state, action)
#next_obs = np.asarray(next_obs).astype(np.float32)
#reward = np.asarray(reward).astype(np.float32)
#is_done = np.asarray(is_done).astype(np.long)
next_obs = jax_to_torch(next_obs)
reward = jax_to_torch(reward)
is_done = jax_to_torch(is_done)
return next_obs, reward, is_done, info
def reset(self):
import jax
import jax.numpy as jnp
rng = jax.random.PRNGKey(seed=0)
rng = jax.random.split(rng, self.batch_size)
self.first_state, _ = self._reset(rng)
self.state, obs = self._reset(rng)
#obs = np.asarray(obs).astype(np.float32)
return jax_to_torch(obs)
def get_number_of_agents(self):
return 1
def get_env_info(self):
info = {}
info['action_space'] = self.action_space
info['observation_space'] = self.observation_space
return info
def create_brax_env(**kwargs):
return BraxEnv("", kwargs.pop('num_actors', 256), **kwargs)
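# Usage sketch (assumes brax and a CUDA-enabled JAX are installed; sizes are
# illustrative):
#
#   env = create_brax_env(num_actors=8, env_name='ant')
#   obs = env.reset()                                         # torch tensor on GPU
#   act = torch.zeros(8, env.action_space.shape[0], device=obs.device)
#   obs, reward, done, info = env.step(act)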
| 3,131 | Python | 32.677419 | 102 | 0.600767 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/multiwalker.py | import gym
import numpy as np
from pettingzoo.sisl import multiwalker_v6
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
import rl_games.envs.connect4_network
class MultiWalker(gym.Env):
def __init__(self, name="multiwalker", **kwargs):
gym.Env.__init__(self)
self.name = name
self.env = multiwalker_v6.parallel_env()
self.use_central_value = kwargs.pop('central_value', False)
self.use_prev_actions = kwargs.pop('use_prev_actions', False)
self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
self.add_timeouts = kwargs.pop('add_timeouts', False)
self.action_space = self.env.action_spaces['walker_0']
self.steps_count = 0
obs_len = self.env.observation_spaces['walker_0'].shape[0]
add_obs = 0
if self.apply_agent_ids:
add_obs = 3
if self.use_prev_actions:
obs_len += self.action_space.shape[0]
        self.observation_space = gym.spaces.Box(-1, 1, shape=(obs_len + add_obs,))
        if self.use_central_value:
            self.state_space = gym.spaces.Box(-1, 1, shape=(obs_len*3,))
def step(self, action):
self.steps_count += 1
actions = {'walker_0' : action[0], 'walker_1' : action[1], 'walker_2' : action[2],}
obs, reward, done, info = self.env.step(actions)
if self.use_prev_actions:
obs = {
k: np.concatenate([v, actions[k]]) for k,v in obs.items()
}
obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
rewards = np.stack([reward['walker_0'], reward['walker_1'], reward['walker_2']])
dones = np.stack([done['walker_0'], done['walker_1'], done['walker_2']])
if self.apply_agent_ids:
num_agents = 3
all_ids = np.eye(num_agents, dtype=np.float32)
obses = np.concatenate([obses, all_ids], axis=-1)
if self.use_central_value:
states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
obses = {
'obs' : obses,
'state': states
}
return obses, rewards, dones, info
def reset(self):
obs = self.env.reset()
self.steps_count = 0
if self.use_prev_actions:
zero_actions = np.zeros(self.action_space.shape[0])
obs = {
k: np.concatenate([v, zero_actions]) for k,v in obs.items()
}
obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
if self.apply_agent_ids:
num_agents = 3
all_ids = np.eye(num_agents, dtype=np.float32)
obses = np.concatenate([obses, all_ids], axis=-1)
if self.use_central_value:
states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
obses = {
'obs' : obses,
'state': states
}
return obses
def render(self, mode='ansi'):
self.env.render(mode)
def get_number_of_agents(self):
return 3
def has_action_mask(self):
return False | 3,195 | Python | 37.047619 | 91 | 0.554617 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/slimevolley_selfplay.py | import gym
import numpy as np
import slimevolleygym
import yaml
from rl_games.torch_runner import Runner
import os
class SlimeVolleySelfplay(gym.Env):
def __init__(self, name="SlimeVolleyDiscrete-v0", **kwargs):
gym.Env.__init__(self)
self.name = name
self.is_determenistic = kwargs.pop('is_determenistic', False)
self.config_path = kwargs.pop('config_path')
self.agent = None
self.pos_scale = 1
self.neg_scale = kwargs.pop('neg_scale', 1)
self.sum_rewards = 0
self.env = gym.make(name, **kwargs)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
def reset(self):
        if self.agent is None:
self.create_agent(self.config_path)
obs = self.env.reset()
self.opponent_obs = obs
self.sum_rewards = 0
return obs
def create_agent(self, config='rl_games/configs/ma/ppo_slime_self_play.yaml'):
with open(config, 'r') as stream:
config = yaml.safe_load(stream)
runner = Runner()
from rl_games.common.env_configurations import get_env_info
config['params']['config']['env_info'] = get_env_info(self)
runner.load(config)
config = runner.get_prebuilt_config()
        # Workaround for a Ray bug: CUDA_VISIBLE_DEVICES can become unset
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
self.agent = runner.create_player()
def step(self, action):
op_obs = self.agent.obs_to_torch(self.opponent_obs)
opponent_action = self.agent.get_action(op_obs, self.is_determenistic).item()
obs, reward, done, info = self.env.step(action, opponent_action)
self.sum_rewards += reward
if reward < 0:
reward = reward * self.neg_scale
self.opponent_obs = info['otherObs']
if done:
info['battle_won'] = np.sign(self.sum_rewards)
return obs, reward, done, info
def render(self,mode):
self.env.render(mode)
    def update_weights(self, weights):
        self.agent.set_weights(weights)
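# Usage sketch (the config path is illustrative; the frozen opponent agent is
# built lazily from the config on the first reset()):
#
#   env = SlimeVolleySelfplay(config_path='rl_games/configs/ma/ppo_slime_self_play.yaml')
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())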
| 2,148 | Python | 32.578124 | 85 | 0.607542 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/test/__init__.py | import gym
gym.envs.register(
id='TestRnnEnv-v0',
entry_point='rl_games.envs.test.rnn_env:TestRNNEnv',
max_episode_steps=100500,
)
gym.envs.register(
id='TestAsymmetricEnv-v0',
entry_point='rl_games.envs.test.test_asymmetric_env:TestAsymmetricCritic'
) | 279 | Python | 22.333331 | 78 | 0.709677 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/test/rnn_env.py | import gym
import numpy as np
class TestRNNEnv(gym.Env):
def __init__(self, **kwargs):
gym.Env.__init__(self)
self.obs_dict = {}
self.max_steps = kwargs.pop('max_steps', 21)
self.show_time = kwargs.pop('show_time', 1)
self.min_dist = kwargs.pop('min_dist', 2)
self.max_dist = kwargs.pop('max_dist', 8)
self.hide_object = kwargs.pop('hide_object', False)
self.use_central_value = kwargs.pop('use_central_value', False)
self.apply_dist_reward = kwargs.pop('apply_dist_reward', False)
self.apply_exploration_reward = kwargs.pop('apply_exploration_reward', False)
self.multi_head_value = kwargs.pop('multi_head_value', False)
if self.multi_head_value:
self.value_size = 2
else:
self.value_size = 1
self.multi_discrete_space = kwargs.pop('multi_discrete_space', False)
if self.multi_discrete_space:
self.action_space = gym.spaces.Tuple([gym.spaces.Discrete(2),gym.spaces.Discrete(3)])
else:
self.action_space = gym.spaces.Discrete(4)
self.multi_obs_space = kwargs.pop('multi_obs_space', False)
if self.multi_obs_space:
spaces = {
'pos': gym.spaces.Box(low=0, high=1, shape=(2, ), dtype=np.float32),
'info': gym.spaces.Box(low=0, high=1, shape=(4, ), dtype=np.float32),
}
self.observation_space = gym.spaces.Dict(spaces)
else:
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(6, ), dtype=np.float32)
self.state_space = self.observation_space
if self.apply_exploration_reward:
pass
self.reset()
def get_number_of_agents(self):
return 1
def reset(self):
self._curr_steps = 0
self._current_pos = [0,0]
bound = self.max_dist - self.min_dist
rand_dir = - 2 * np.random.randint(0, 2, (2,)) + 1
self._goal_pos = rand_dir * np.random.randint(self.min_dist, self.max_dist+1, (2,))
obs = np.concatenate([self._current_pos, self._goal_pos, [1, 0]], axis=None)
obs = obs.astype(np.float32)
if self.multi_obs_space:
obs = {
'pos': obs[:2],
'info': obs[2:]
}
if self.use_central_value:
obses = {}
obses["obs"] = obs
obses["state"] = obs
else:
obses = obs
return obses
def step_categorical(self, action):
if self._curr_steps > 1:
if action == 0:
self._current_pos[0] += 1
if action == 1:
self._current_pos[0] -= 1
if action == 2:
self._current_pos[1] += 1
if action == 3:
self._current_pos[1] -= 1
def step_multi_categorical(self, action):
if self._curr_steps > 1:
if action[0] == 0:
self._current_pos[0] += 1
if action[0] == 1:
self._current_pos[0] -= 1
if action[1] == 0:
self._current_pos[1] += 1
if action[1] == 1:
self._current_pos[1] -= 1
if action[1] == 2:
pass
def step(self, action):
info = {}
self._curr_steps += 1
if self.multi_discrete_space:
self.step_multi_categorical(action)
else:
self.step_categorical(action)
reward = [0.0, 0.0]
done = False
dist = self._current_pos - self._goal_pos
if (dist**2).sum() < 0.0001:
reward[0] = 1.0
info = {'scores' : 1}
done = True
elif self._curr_steps == self.max_steps:
info = {'scores' : 0}
done = True
dist_coef = -0.1
if self.apply_dist_reward:
reward[1] = dist_coef * np.abs(dist).sum() / self.max_dist
show_object = 0
if self.hide_object:
obs = np.concatenate([self._current_pos, [0,0], [show_object, self._curr_steps]], axis=None)
else:
show_object = 1
obs = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
obs = obs.astype(np.float32)
#state = state.astype(np.float32)
if self.multi_obs_space:
obs = {
'pos': obs[:2],
'info': obs[2:]
}
if self.use_central_value:
state = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
obses = {}
obses["obs"] = obs
if self.multi_obs_space:
obses["state"] = {
'pos': state[:2],
'info': state[2:]
}
else:
obses["state"] = state.astype(np.float32)
else:
obses = obs
if self.multi_head_value:
pass
else:
reward = reward[0] + reward[1]
return obses, np.array(reward).astype(np.float32), done, info
def has_action_mask(self):
return False | 5,217 | Python | 34.020134 | 115 | 0.500096 |